input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
#!/usr/bin/env python3
# encoding: UTF-8
from collections import namedtuple
import datetime
import operator
import re
import sqlite3
import tempfile
import textwrap
import unittest
import unittest.mock
import uuid
import bcrypt
from pyramid import testing
from pyramid.exceptions import Forbidden
from pyramid.exceptions import NotFound
from pyramid.httpexceptions import HTTPBadRequest
from pyramid.httpexceptions import HTTPFound
from pyramid.httpexceptions import HTTPInternalServerError
from pyramid.httpexceptions import HTTPNotFound
import cloudhands.common
from cloudhands.common.connectors import initialise
from cloudhands.common.connectors import Registry
from cloudhands.common.schema import Appliance
from cloudhands.common.schema import BcryptedPassword
from cloudhands.common.schema import CatalogueItem
from cloudhands.common.schema import EmailAddress
from cloudhands.common.schema import Label
from cloudhands.common.schema import Organisation
from cloudhands.common.schema import PosixUId
from cloudhands.common.schema import PosixUIdNumber
from cloudhands.common.schema import Provider
from cloudhands.common.schema import PublicKey
from cloudhands.common.schema import Membership
from cloudhands.common.schema import Registration
from cloudhands.common.schema import Resource
from cloudhands.common.schema import State
from cloudhands.common.schema import Touch
from cloudhands.common.schema import User
from cloudhands.common.states import MembershipState
from cloudhands.common.states import RegistrationState
import cloudhands.web
from cloudhands.web.indexer import create as create_index
from cloudhands.web.indexer import indexer
from cloudhands.web.indexer import ldap_types
from cloudhands.web.main import appliance_modify
from cloudhands.web.main import appliance_read
from cloudhands.web.main import authenticate_user
from cloudhands.web.main import login_read
from cloudhands.web.main import login_update
from cloudhands.web.main import membership_read
from cloudhands.web.main import membership_update
from cloudhands.web.main import organisation_appliances_create
from cloudhands.web.main import organisation_catalogue_read
from cloudhands.web.main import organisation_memberships_create
from cloudhands.web.main import organisation_read
from cloudhands.web.main import parser
from cloudhands.web.main import people_read
from cloudhands.web.main import RegistrationForbidden
from cloudhands.web.main import registration_passwords
from cloudhands.web.main import registration_keys
from cloudhands.web.main import top_read
@unittest.skip("Not doing it yet")
class ACLTests(unittest.TestCase):
    """Placeholder for access-control coverage; skipped until implemented."""

    def test_access_control(self):
        raise NotImplementedError
class ServerTests(unittest.TestCase):
    """Base fixture for web-view tests.

    Provides an in-memory SQLite registry, a Pyramid test configuration
    with the static asset views, and monkey-patches
    ``cloudhands.web.main.authenticated_userid`` so views always see a
    fixed test identity.
    """

    @classmethod
    def setUpClass(class_):
        def testuser_email(req=None):
            return "<EMAIL>"
        class_.auth_unpatch = cloudhands.web.main.authenticated_userid
        cloudhands.web.main.authenticated_userid = testuser_email
        if class_.auth_unpatch is cloudhands.web.main.authenticated_userid:
            # BUG FIX: `class_.skipTest(...)` called an instance method on
            # the class object (TypeError at runtime).  Raising SkipTest is
            # the supported way to skip from setUpClass.
            raise unittest.SkipTest("Authentication badly patched")

    @classmethod
    def tearDownClass(class_):
        # BUG FIX: was misspelt `teardownClass` (lower-case d), which
        # unittest never calls, so the patched authenticated_userid leaked
        # into every later test class.
        cloudhands.web.main.authenticated_userid = class_.auth_unpatch

    def setUp(self):
        # Fresh in-memory database and Pyramid config for every test.
        self.session = Registry().connect(sqlite3, ":memory:").session
        initialise(self.session)
        self.assets = {
            "paths.assets": dict(
                css = "cloudhands.web:static/css",
                html = "cloudhands.web:static/html",
                img = "cloudhands.web:static/img",
                js = "cloudhands.web:static/js")
        }
        self.request = testing.DummyRequest()
        self.request.registry.settings = {"cfg": self.assets}
        self.config = testing.setUp(request=self.request)
        self.config.add_static_view(
            name="css", path="cloudhands.web:static/css")
        self.config.add_static_view(
            name="html", path="cloudhands.web:static/html")
        self.config.add_static_view(
            name="js", path="cloudhands.web:static/js")
        self.config.add_static_view(
            name="img", path="cloudhands.web:static/img")

    def tearDown(self):
        Registry().disconnect(sqlite3, ":memory:")

    @staticmethod
    def make_test_user(session):
        """Create the patched test user with a Registration in the
        'pre_registration_inetorgperson_cn' state; return the Touch."""
        just_registered = session.query(RegistrationState).filter(
            RegistrationState.name == "pre_registration_inetorgperson_cn").one()
        reg = Registration(
            uuid=uuid.uuid4().hex,
            model=cloudhands.common.__version__)
        user = User(handle="TestUser", uuid=uuid.uuid4().hex)
        # BUG FIX: bcrypt.hashpw requires bytes under Python 3; a str
        # argument raises TypeError before any hashing happens.
        hash = bcrypt.hashpw("TestPa$$w0rd".encode("utf-8"), bcrypt.gensalt(12))
        now = datetime.datetime.utcnow()
        act = Touch(artifact=reg, actor=user, state=just_registered, at=now)
        pwd = BcryptedPassword(touch=act, value=hash)
        ea = EmailAddress(
            touch=act,
            value=cloudhands.web.main.authenticated_userid())
        session.add_all((pwd, ea))
        session.commit()
        return act

    @staticmethod
    def make_test_user_role_user(session):
        """Create the test user with an active 'user' Membership of
        TestOrg (plus a Provider); return the membership Touch."""
        session.add(
            Provider(
                name="testcloud.io", uuid=uuid.uuid4().hex)
        )
        user = ServerTests.make_test_user(session).actor
        org = Organisation(
            uuid=uuid.uuid4().hex,
            name="TestOrg")
        userMp = Membership(
            uuid=uuid.uuid4().hex,
            model=cloudhands.common.__version__,
            organisation=org,
            role="user")
        active = session.query(
            MembershipState).filter(MembershipState.name == "active").one()
        now = datetime.datetime.utcnow()
        act = Touch(artifact=userMp, actor=user, state=active, at=now)
        session.add(act)
        session.commit()
        return act

    @staticmethod
    def make_test_user_role_admin(session):
        """Create the test user with an active 'admin' Membership of
        TestOrg; return the membership Touch."""
        admin = ServerTests.make_test_user(session).actor
        org = Organisation(
            uuid=uuid.uuid4().hex,
            name="TestOrg")
        provider = Provider(
            name="testcloud.io", uuid=uuid.uuid4().hex)
        adminMp = Membership(
            uuid=uuid.uuid4().hex,
            model=cloudhands.common.__version__,
            organisation=org,
            role="admin")
        active = session.query(
            MembershipState).filter(MembershipState.name == "active").one()
        now = datetime.datetime.utcnow()
        act = Touch(artifact=adminMp, actor=admin, state=active, at=now)
        session.add(act)
        session.commit()
        return act
class VersionInfoTests(ServerTests):
    """Version information exposure: CLI flag and the top-page JSON."""

    def test_version_option(self):
        args = parser().parse_args(["--version"])
        self.assertTrue(args.version)

    def test_version_json(self):
        versions = top_read(self.request)["info"]["versions"]
        self.assertEqual(cloudhands.web.__version__, versions["cloudhands.web"])
        versions = top_read(self.request)["info"]["versions"]
        self.assertEqual(
            cloudhands.common.__version__, versions["cloudhands.common"])
class AppliancePageTests(ServerTests):
    """Appliance creation, reading and labelling via the web views.

    setUp seeds TestOrg with two CatalogueItems.  Several tests chain-call
    test_organisation_appliances_create to put an Appliance in place first,
    so they depend on that test's side effects.
    """

    def setUp(self):
        super().setUp()
        self.config.add_route(
            "appliance", "/appliance/{app_uuid}")
        self.config.add_route("organisation", "/organisation/{org_name}")
        self.config.add_route(
            "organisation_appliances", "/organisation/{org_name}/appliances")
        act = ServerTests.make_test_user_role_user(self.session)
        org = act.artifact.organisation
        # Two catalogue items: one NAT-routed, one not
        self.session.add_all((
            CatalogueItem(
                uuid=uuid.uuid4().hex,
                name="nfs-client",
                description="Headless VM for file transfer operations",
                note=textwrap.dedent("""
                    <p>This VM runs CentOS 6.5 with a minimal amount of RAM and
                    no X server. It is used for file transfer operations from the
                    command line.</p>
                """),
                logo="headless",
                natrouted=False,
                organisation=org
            ),
            CatalogueItem(
                uuid=uuid.uuid4().hex,
                name="Web-Server",
                description="Headless VM with Web server",
                note=textwrap.dedent("""
                    <p>This VM runs Apache on CentOS 6.5.
                    It has 8GB RAM and 4 CPU cores.
                    It is used for hosting websites and applications with a
                    Web API.</p>
                """),
                logo="headless",
                natrouted=True,
                organisation=org
            )
        ))
        self.session.commit()

    def test_organisation_appliances_create(self):
        # POSTing a catalogue item uuid creates an Appliance and redirects
        self.assertEqual(0, self.session.query(Appliance).count())
        org = self.session.query(Organisation).one()
        ci = self.session.query(CatalogueItem).first()
        self.assertIn(ci, org.catalogue)
        request = testing.DummyRequest(post={"uuid": ci.uuid})
        request.matchdict.update({"org_name": org.name})
        self.assertRaises(
            HTTPFound, organisation_appliances_create, request)
        self.assertEqual(1, self.session.query(Appliance).count())

    def test_organisation_appliances_create_then_view_appliance(self):
        self.test_organisation_appliances_create()
        self.assertEqual(1, self.session.query(Appliance).count())
        app = self.session.query(Appliance).one()
        request = testing.DummyRequest()
        request.matchdict.update({"app_uuid": app.uuid})
        page = appliance_read(request)
        items = list(page["items"].values())
        # First item links back to the catalogue; second is a blank label
        catalogueChoice = items[0]["_links"][0]
        self.assertEqual("collection", catalogueChoice.rel)
        blankLabel = items[1]
        self.assertIn("uuid", blankLabel)

    def test_appliance_read_with_missing_uuid(self):
        request = testing.DummyRequest()
        request.matchdict.update({"app_uuid": uuid.uuid4().hex})
        self.assertRaises(
            HTTPNotFound, appliance_read, request)

    def test_appliance_modify_validates_label(self):
        # A label name containing spaces is rejected with 400
        self.test_organisation_appliances_create()
        self.assertEqual(1, self.session.query(Appliance).count())
        self.assertEqual(0, self.session.query(Label).count())
        app = self.session.query(Appliance).one()
        request = testing.DummyRequest(post={"name": "No blanks"})
        request.matchdict.update({"app_uuid": app.uuid})
        self.assertRaises(
            HTTPBadRequest, appliance_modify, request)
        self.assertEqual("configuring", app.changes[-1].state.name)

    def test_appliance_modify_adds_label(self):
        self.test_organisation_appliances_create()
        self.assertEqual(1, self.session.query(Appliance).count())
        self.assertEqual(0, self.session.query(Label).count())
        app = self.session.query(Appliance).one()
        request = testing.DummyRequest(
            post={"name": "Test_name", "description": "Test description"})
        request.matchdict.update({"app_uuid": app.uuid})
        self.assertRaises(
            HTTPFound, appliance_modify, request)
        app = self.session.query(Appliance).one()
        self.assertEqual(1, self.session.query(Label).count())

    def test_appliance_label_permits_hyphens_in_name(self):
        self.test_organisation_appliances_create()
        self.assertEqual(1, self.session.query(Appliance).count())
        self.assertEqual(0, self.session.query(Label).count())
        app = self.session.query(Appliance).one()
        request = testing.DummyRequest(
            post={"name": "Test-name", "description": "Test description"})
        request.matchdict.update({"app_uuid": app.uuid})
        self.assertRaises(
            HTTPFound, appliance_modify, request)
        app = self.session.query(Appliance).one()
        self.assertEqual(1, self.session.query(Label).count())

    def test_appliance_appears_in_organisation(self):
        # Once labelled, the appliance shows up on the organisation page
        self.test_organisation_appliances_create()
        self.assertEqual(1, self.session.query(Appliance).count())
        self.assertEqual(0, self.session.query(Label).count())
        app = self.session.query(Appliance).one()
        request = testing.DummyRequest(
            post={"name": "Test_name", "description": "Test description"})
        request.matchdict.update({"app_uuid": app.uuid})
        self.assertRaises(
            HTTPFound, appliance_modify, request)
        app = self.session.query(Appliance).one()
        self.assertEqual(1, self.session.query(Label).count())
        request = testing.DummyRequest()
        request.matchdict.update({"org_name": app.organisation.name})
        page = organisation_read(request)
        self.assertEqual(1, len(page["items"]))
class CataloguePageTests(ServerTests):
    """Catalogue listing for an organisation."""

    def setUp(self):
        super().setUp()
        self.config.add_route(
            "catalogue", "/organisation/{org_name}/catalogue")

    def test_no_options_seen_in_catalogue_view(self):
        # Seed TestOrg with two catalogue items, then read the catalogue
        # as a plain (non-admin) user.
        act = ServerTests.make_test_user_role_user(self.session)
        org = act.artifact.organisation
        self.session.add_all((
            CatalogueItem(
                uuid=uuid.uuid4().hex,
                name="nfs-client",
                description="Headless VM for file transfer operations",
                note=textwrap.dedent("""
                    <p>This VM runs CentOS 6.5 with a minimal amount of RAM and
                    no X server. It is used for file transfer operations from the
                    command line.</p>
                """),
                logo="headless",
                natrouted=False,
                organisation=org
            ),
            CatalogueItem(
                uuid=uuid.uuid4().hex,
                name="Web-Server",
                description="Headless VM with Web server",
                note=textwrap.dedent("""
                    <p>This VM runs Apache on CentOS 6.5.
                    It has 8GB RAM and 4 CPU cores.
                    It is used for hosting websites and applications with a
                    Web API.</p>
                """),
                logo="headless",
                natrouted=True,
                organisation=org
            )
        ))
        self.session.commit()
        request = testing.DummyRequest()
        request.matchdict.update({"org_name": org.name})
        page = organisation_catalogue_read(request)
        # A plain user gets no actionable options, just the two items
        self.assertFalse(list(page["options"].values()))
        items = list(page["items"].values())
        self.assertEqual(2, len(items))
        self.assertTrue(any(i["name"] == "nfs-client" for i in items))
        self.assertTrue(any(i["name"] == "Web-Server" for i in items))
class LoginAndOutTests(ServerTests):
    """Login views and the registration state lifecycle."""

    def setUp(self):
        super().setUp()
        self.config.add_route("top", "/")

    def test_we_can_read_login_from_test(self):
        request = testing.DummyRequest()
        self.assertTrue(login_read(request))

    def test_we_can_log_in_from_test(self):
        act = ServerTests.make_test_user_role_user(self.session)
        request = testing.DummyRequest(
            post={"username": "TestUser", "password": "<PASSWORD>"})
        self.assertRaises(HTTPFound, login_update, request)

    def test_registration_lifecycle_pre_registration_inet_orgperson_cn(self):
        """A fresh login leaves the registration in its initial state."""
        act = ServerTests.make_test_user_role_user(self.session)
        request = testing.DummyRequest(
            post={"username": "Test User", "password": "<PASSWORD>"})
        self.assertRaises(HTTPFound, login_update, request)
        self.assertEqual(1, self.session.query(User).count())
        self.assertEqual(1, self.session.query(Registration).count())
        self.assertEqual(1, self.session.query(BcryptedPassword).count())
        self.assertEqual(1, self.session.query(EmailAddress).count())
        self.assertEqual(0, self.session.query(PosixUId).count())
        self.assertEqual(0, self.session.query(PosixUIdNumber).count())
        reg = self.session.query(Registration).one()
        self.assertEqual(
            "pre_registration_inetorgperson_cn",
            reg.changes[-1].state.name)

    # BUG FIX: this method previously had the same name as the test above,
    # so it silently shadowed it and the first lifecycle test never ran.
    # Renamed to reflect the state it actually exercises.
    def test_registration_lifecycle_user_posixaccount(self):
        """Login from 'pre_user_posixaccount' allocates uid number and
        advances the registration to 'user_posixaccount'."""
        act = ServerTests.make_test_user_role_user(self.session)
        user = act.actor
        prvdr = self.session.query(Provider).one()
        reg = self.session.query(Registration).one()
        state = self.session.query(State).filter(
            State.name == "pre_user_posixaccount").one()
        now = datetime.datetime.utcnow()
        act = Touch(artifact=reg, actor=user, state=state, at=now)
        self.session.add(
            PosixUId(value="testuser", touch=act, provider=prvdr))
        self.session.commit()
        request = testing.DummyRequest(
            post={"username": user.handle, "password": "<PASSWORD>"})
        # Stub out LDAP-backed uid allocation and password push
        noUidNumber = unittest.mock.patch(
            "cloudhands.web.main.next_uidnumber",
            autospec=True, return_value = 7654321)
        noPasswordChange = unittest.mock.patch(
            "cloudhands.web.main.change_password",
            autospec=True, return_value = 0)
        with noUidNumber, noPasswordChange:
            self.assertRaises(HTTPFound, login_update, request)
        self.assertEqual(1, self.session.query(User).count())
        self.assertEqual(1, self.session.query(Registration).count())
        self.assertEqual(1, self.session.query(BcryptedPassword).count())
        self.assertEqual(1, self.session.query(EmailAddress).count())
        self.assertEqual(1, self.session.query(PosixUId).count())
        self.assertEqual(1, self.session.query(PosixUIdNumber).count())
        self.assertEqual(
            "user_posixaccount",
            reg.changes[-1].state.name)
class MembershipPageTests(ServerTests):
    """Membership invitation, activation and resource attachment."""

    def setUp(self):
        super().setUp()
        self.config.add_route("membership", "/membership/{mship_uuid}")
        self.config.add_route("registration", "/registration/{reg_uuid}")
        #self.config.add_route("organisation", "/organisation")
        #self.config.add_route("people", "/people")

    def test_authenticate_nonuser_raises_not_found(self):
        request = testing.DummyRequest()
        self.assertRaises(NotFound, authenticate_user, request, NotFound)

    def test_authenticate_nonuser_attaches_userid(self):
        # The raised NotFound should carry the rejected user id
        request = testing.DummyRequest()
        try:
            authenticate_user(request)
        except NotFound as e:
            self.assertEqual(
                cloudhands.web.main.authenticated_userid(), e.userId)

    def test_guest_membership_read_activates_membership(self):
        """Admin invites a new user; when that user first reads the
        membership page the membership becomes 'accepted'."""
        def newuser_email(request=None):
            return "<EMAIL>"
        # Create an admin
        self.assertEqual(0, self.session.query(User).count())
        self.assertEqual(0, self.session.query(Membership).count())
        act = ServerTests.make_test_user_role_admin(self.session)
        admin = act.actor
        org = act.artifact.organisation
        self.assertEqual(1, self.session.query(User).count())
        self.assertEqual(1, self.session.query(Registration).count())
        self.assertEqual(1, self.session.query(Membership).count())
        # Create a new invite
        request = testing.DummyRequest(post={
            "username": "someonew",
            "surname": "New",
            "email": newuser_email()})
        request.registry.settings = {"cfg": self.assets}
        request.matchdict.update({"org_name": org.name})
        self.assertRaises(HTTPFound, organisation_memberships_create, request)
        self.assertEqual(2, self.session.query(User).count())
        self.assertEqual(2, self.session.query(Registration).count())
        self.assertEqual(2, self.session.query(Membership).count())
        mship = self.session.query(
            Membership).join(Touch).join(State).join(Organisation).filter(
            Organisation.id == org.id).filter(State.name == "created").one()
        # Temporarily impersonate the invited user by swapping the
        # patched authenticated_userid; restored in the finally clause.
        testuser_email, cloudhands.web.main.authenticated_userid = (
            cloudhands.web.main.authenticated_userid, newuser_email)
        try:
            # Email is sent
            invited = self.session.query(MembershipState).filter(
                MembershipState.name == "invited").one()
            act = Touch(
                artifact=mship, actor=admin, state=invited,
                at=datetime.datetime.utcnow())
            self.session.add(act)
            self.session.commit()
            # New person visits membership
            request = testing.DummyRequest()
            request.matchdict.update({"mship_uuid": mship.uuid})
            self.assertRaises(HTTPFound, membership_read, request)
            # Check new user added
            self.assertEqual("accepted", mship.changes[-1].state.name)
            self.assertTrue(self.session.query(EmailAddress).filter(
                EmailAddress.value == newuser_email()).first())
        finally:
            cloudhands.web.main.authenticated_userid = testuser_email

    def test_user_membership_update_post_returns_forbidden(self):
        act = ServerTests.make_test_user_role_user(self.session)
        mship = act.artifact
        request = testing.DummyRequest()
        request.matchdict.update({"mship_uuid": mship.uuid})
        self.assertRaises(Forbidden, membership_update, request)

    def test_admin_membership_update_post_adds_resources(self):
        """Updating a membership pulls uid/gid/key resources out of the
        LDAP search index and attaches them to the membership."""
        dn = "cn=testadmin,ou=ceda,ou=People,o=hpc,dc=rl,dc=ac,dc=uk"
        uid = "2345"
        gid = "6200"
        key = "tu3+" * 100
        with tempfile.TemporaryDirectory() as td:
            Args = namedtuple("Arguments", ["index"])
            self.config.add_settings({"args": Args(td)})
            # Build a one-document search index standing in for LDAP
            ix = create_index(td, **ldap_types)
            wrtr = ix.writer()
            wrtr.add_document(id=dn, uidNumber=uid, gidNumber=gid,
                sshPublicKey=key)
            wrtr.commit()
            act = ServerTests.make_test_user_role_admin(self.session)
            self.assertEqual(2, self.session.query(Resource).count())
            mship = act.artifact
            request = testing.DummyRequest(post={"designator": dn})
            request.matchdict.update({"mship_uuid": mship.uuid})
            # NB: admin is updating his own membership here
            self.assertRaises(
                HTTPFound, membership_update, request)
            n = self.session.query(
                Resource).join(Touch).join(Membership).filter(
                Membership.id == mship.id).count()
            self.assertEqual(3, n)
class OrganisationPageTests(ServerTests):
def setUp(self):
super().setUp()
self.config.add_route("organisation", "/organisation")
def test_nonadmin_user_cannot_add_membership(self):
act = ServerTests.make_test_user_role_user(self.session)
org = act.artifact.organisation
request = testing.DummyRequest()
request.matchdict.update({"org_name": org.name})
page = organisation_read(request)
options = page["options"].values()
mships = [i for i in options if "role" in i]
self.assertTrue(mships)
self.assertEqual(org.name, mships[0]["organisation"])
invite = list(i for o in options if "_links" in o
for i in o["_links"] if i.name.startswith("Invit"))
self.assertFalse(invite)
def test_admin_user_can_add_membership(self):
act = ServerTests.make_test_user_role_admin(self.session)
admin = act.actor
org = act.artifact.organisation
| |
#
# Copyright (c) 2019-2020 Mike's Pub, see https://github.com/mikespub-org
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
import logging
import os.path
import pickle
import time
import flask.json
from flask import Flask, jsonify, request
from flask.views import MethodView
from . import db
from .config import KNOWN_MODELS, LIST_CONFIG, PAGE_SIZE, get_list_config
def create_app(debug=True, base_url="/api/v1/data"):
    """Build a standalone Flask app for `python3 -m data.api` usage."""
    application = Flask(__name__)
    application.debug = debug
    configure_app(application, base_url=base_url)
    return application
# app = create_app()
def configure_app(app, base_url="/api/v1/data", authorize_wrap=None):
    """Register the datastore API views on an existing Flask app.

    When *authorize_wrap* is supplied (e.g. an auth decorator), every data
    view function is passed through it; otherwise views register as-is.
    """
    wrap = authorize_wrap if authorize_wrap else (lambda view: view)
    app.add_url_rule(
        base_url + "/", view_func=wrap(HomeAPI.as_view("home_api"))
    )
    app.add_url_rule(
        base_url + "/<string:name>/",
        view_func=wrap(ListAPI.as_view("list_api")),
    )
    app.add_url_rule(
        base_url + "/<string:parent>/<path:item>",
        view_func=wrap(ItemAPI.as_view("item_api")),
    )
    # TODO: check for conflict in existing ruleset
    app.add_url_rule(os.path.dirname(base_url) + "/", view_func=data_api)
    app.json_encoder = MyJSONEncoder
class MyJSONEncoder(flask.json.JSONEncoder):
    """JSON encoder that teaches Flask about datastore keys and raw bytes."""

    def default(self, obj):
        # if isinstance(obj, db.Entity):
        #     return item_to_dict(obj)
        if isinstance(obj, db.Key):
            # serialise keys as their REST path, e.g. "Kind/id"
            return item_to_path(obj)
        # if isinstance(obj, datetime.datetime):
        #     return obj.isoformat(" ")
        if isinstance(obj, bytes):
            # TODO: we should use base64 encoding here
            return repr(obj)
        return super().default(obj)
def data_api():
    """Serve the static OpenAPI description shipped alongside this module."""
    spec_path = os.path.join(os.path.dirname(__file__), "openapi.json")
    with open(spec_path) as handle:
        return flask.json.load(handle)
def item_to_path(key):
    """Build the REST path "Kind/id" for a datastore key.

    Filesystem kinds drop the leading slash of their path id; kinds
    configured with a parent encode it as "Kind/id:ParentKind:parent_id".
    """
    kind, ident = key.kind, key.id_or_name
    if kind in ("Path", "Dir", "File"):
        # drop the first / of the id_or_name = path
        return f"{kind}/{ident[1:]}"
    has_parent = (
        kind in LIST_CONFIG
        and LIST_CONFIG[kind].get("parent", None)
        and key.parent
    )
    if has_parent:
        # add the :parent:path qualifier
        return f"{kind}/{ident}:{key.parent.kind}:{key.parent.id_or_name}"
    return f"{kind}/{ident}"
def item_to_dict(entity, truncate=False):
    """Convert a raw datastore entity into a plain dict for JSON output.

    Always adds "_key"; adds "_parent" when the kind is configured with a
    parent or the key actually has one.  With truncate=True, long bytes
    values are shortened to a 20-byte preview plus their original size.
    """
    info = dict(entity)
    info["_key"] = entity.key
    if entity.kind in LIST_CONFIG and LIST_CONFIG[entity.kind].get("parent", None):
        info["_parent"] = entity.key.parent
    elif entity.key and entity.key.parent:
        info["_parent"] = entity.key.parent
    if truncate:
        if entity.kind in LIST_CONFIG:
            truncate_list = LIST_CONFIG[entity.kind].get("truncate_list", [])
        else:
            # unconfigured kinds: consider every property for truncation
            truncate_list = list(info.keys())
        for attr in truncate_list:
            if attr in info and isinstance(info[attr], bytes) and len(info[attr]) > 20:
                info[attr] = f"{info[attr][:20]}... ({len(info[attr])} bytes)"
    return info
def instance_to_dict(instance, truncate=False):
    """Convert a known-model instance into a plain dict for JSON output.

    Mirrors item_to_dict() but works on model instances (via to_dict),
    adding "_parent" for parent-configured kinds and optionally truncating
    long bytes values to a 20-byte preview plus their original size.
    """
    info = instance.to_dict(True)
    # if instance._kind == "Chunk":
    if instance._kind in LIST_CONFIG and LIST_CONFIG[instance._kind].get(
        "parent", None
    ):
        info["_parent"] = instance.key().parent
    # if truncate and instance._kind == "Chunk" and len(info["data"]) > 20:
    if truncate:
        if instance._kind in LIST_CONFIG:
            truncate_list = LIST_CONFIG[instance._kind].get("truncate_list", [])
        else:
            # unconfigured kinds: consider every property for truncation
            truncate_list = list(info.keys())
        for attr in truncate_list:
            if attr in info and isinstance(info[attr], bytes) and len(info[attr]) > 20:
                info[attr] = f"{info[attr][:20]}... ({len(info[attr])} bytes)"
    return info
def get_models():
    """Known model names, sorted, with a trailing "Others" pseudo-entry."""
    return sorted(KNOWN_MODELS.keys()) + ["Others"]
# Module-level caches, populated lazily and shared across requests.
list_names = []    # kind names known to the API; see get_lists()
list_stats = {}    # per-kind statistics; see get_stats()
list_filters = {}  # per-kind filter configuration; see get_filters()
def get_lists(reset=False):
    """Return cached kind names; rebuild from models + datastore on reset."""
    global list_names
    if list_names and not reset:
        return list_names
    list_names = get_models()
    for kind in sorted(db.list_kinds()):
        if kind not in list_names:
            list_names.append(kind)
    return list_names
def get_stats(reset=False):
    """Return (and cache) datastore statistics per kind.

    Combines the __Stat_Total__, __Stat_Kind__ and
    __Stat_PropertyType_PropertyName_Kind__ pseudo-entities with
    get_list_stats() for every known model.
    """
    global list_stats
    if len(list_stats) > 0 and not reset:
        return list_stats
    list_stats = {}
    # list_stats['Stats'] = stats.GlobalStat.list_all(1)
    kind = "__Stat_Total__"
    id_or_name = "total_entity_usage"
    key = db.get_key(kind, id_or_name)
    entity = db.get_entity(key)
    if entity:
        info = item_to_dict(entity)
    else:
        # no stats entity available; record when we looked instead
        info = {"timestamp": time.time()}
    list_stats["Stats"] = {}
    list_stats["Stats"][kind] = info
    # per-kind entity counts
    kind = "__Stat_Kind__"
    list_stats["Stats"][kind] = {}
    for entity in db.ilist_entities(kind):
        info = item_to_dict(entity)
        list_stats["Stats"][kind][info["kind_name"]] = info
    # for stat in stats.KindPropertyNamePropertyTypeStat.list_all():
    #     list_stats['Stats'].append(stat)
    # per-property stats, grouped by kind then property name
    kind = "__Stat_PropertyType_PropertyName_Kind__"
    for entity in db.ilist_entities(kind):
        info = item_to_dict(entity)
        if info["kind_name"] not in list_stats["Stats"]:
            list_stats["Stats"][info["kind_name"]] = {}
        list_stats["Stats"][info["kind_name"]][info["property_name"]] = info
    for model in KNOWN_MODELS:
        list_stats[model] = get_list_stats(KNOWN_MODELS[model])
    return list_stats
def get_list_stats(model, limit=1000):
    """Return a stats dict for *model*: kind name, property map, count.

    :param model: model class exposing ``kind()`` and ``properties()``
    :param limit: kept for interface compatibility; it capped the count
        back when counting was done eagerly here (see get_list_count).
    """
    # Counting is deferred to get_list_count(), so "count" is None here.
    # The old `if stats["count"] == limit` clamp was unreachable with the
    # hard-coded None count, so the dead branch has been removed.
    return {
        "kind": model.kind(),
        "properties": model.properties(),
        "count": None,
    }
def get_list_count(model, reset=False):
    """Return the entity count for kind *model*, caching it in list_stats.

    Known models are counted via their class; other kinds fall back to the
    __Stat_Kind__ statistics (which may be stale).  Implicitly returns
    None when no count information is available.
    """
    global list_stats
    if model in list_stats and list_stats[model]["count"] is not None and not reset:
        return list_stats[model]["count"]
    if model not in KNOWN_MODELS:
        # initialize stats if needed
        stats = get_stats()
        if (
            "Stats" in stats
            and "__Stat_Kind__" in stats["Stats"]
            and model in stats["Stats"]["__Stat_Kind__"]
        ):
            stats_count = stats["Stats"]["__Stat_Kind__"][model]["count"]
            if stats_count > 0:
                return stats_count
        # unknown kind with no stats: nothing to report
        return
    if model not in list_stats:
        list_stats[model] = get_list_stats(KNOWN_MODELS[model])
    list_stats[model]["count"] = KNOWN_MODELS[model].get_count()
    return list_stats[model]["count"]
def get_filters(reset=False):
    """Return cached per-kind filters, initialising entries for all kinds."""
    global list_filters
    if list_filters and not reset:
        return list_filters
    list_filters = {}
    # TODO: load filters from Datastore + check timestamp
    # coll_ref = db.get_coll_ref("_Filter_Kind_")
    # for doc in coll_ref.stream():
    #     info = doc.to_dict()
    #     list_filters[info["name"]] = info["filters"]
    for kind_name in get_lists():
        if kind_name not in list_filters:
            get_list_filters(kind_name)
    return list_filters
def get_list_filters(name, reset=False):
    """Return (and lazily initialise) the filter dict for kind *name*."""
    global list_filters
    if reset or name not in list_filters:
        configured = get_list_config(name, "filters")
        # one empty dict per configured filter property
        list_filters[name] = {prop: {} for prop in configured}
    return list_filters[name]
def set_list_filters(name, filter_dict):
    """Replace the cached filter dict for kind *name*."""
    global list_filters
    list_filters[name] = filter_dict
def parse_filter_args(args, kind):
    """Translate "filters.<prop>" query arguments into filter tuples.

    Returns a list of (property, operator, value) tuples, or None when no
    filter arguments are present.  Values containing "/" are resolved to a
    Path parent key; values whose property matches a configured reference
    are resolved to the referenced kind's key.
    NOTE(review): item_get_key is not defined in this module's visible
    scope — presumably defined/imported elsewhere; verify.
    """
    filters = None
    for key, value in list(args.items()):
        if not key.startswith("filters."):
            continue
        if filters is None:
            filters = []
        # strip the "filters." prefix to get the property name
        prop_name = key[8:]
        # TODO: look at first char for <, >, etc.
        operator = "="
        # CHECKME: assuming this is a Path entity here!?
        if "/" in value:
            parent = "Path"
            parent_key = item_get_key(parent, value)
            filters.append((prop_name, operator, parent_key))
            continue
        # /data/Picture/?filters[album]=3846051 -> parent = "Album"
        found = False
        for parent in LIST_CONFIG:
            ref_dict = get_list_config(parent, "references")
            if not ref_dict:
                continue
            for ref in ref_dict.keys():
                if ref == kind and ref_dict[ref] == prop_name:
                    # print("Found", parent, ref, prop_name)
                    parent_key = item_get_key(parent, value)
                    filters.append((prop_name, operator, parent_key))
                    found = True
                    break
        if not found:
            # no reference matched: filter on the raw value
            filters.append((prop_name, operator, value))
    return filters
class HomeAPI(MethodView):
    """Flask view for the API root: list, create or delete kinds."""

    def get(self):
        """Get all models/kinds"""
        result = home_get()
        return jsonify(result)

    def post(self):
        """Create new kind"""
        info = request.get_json()
        result = home_post(info)
        return result

    def delete(self):
        """Delete all kinds"""
        result = home_delete()
        return result
def home_get(name=None):
    """List kind names: every kind for "Others", otherwise just models."""
    # when dealing with Others coming from list_get
    if name == "Others":
        return get_lists()
    return get_models()
def home_post(info):
    """Create a new kind — not supported by this API."""
    raise NotImplementedError("Create new kind")
def home_delete():
    """Delete every kind — deliberately unsupported."""
    raise NotImplementedError("Delete all kinds!?")
class ListAPI(MethodView):
    """Flask view for /<name>/: list, create into, or delete a kind."""

    def get(self, name):
        """Get all entities of kind"""
        # by default we only show known models here
        if name not in KNOWN_MODELS:
            if name == "Others":
                return jsonify(home_get(name))
            kinds_list = get_lists()
            if name not in kinds_list:
                raise ValueError("Invalid Kind %r" % name)
        # paging, sorting and projection come from the query string
        page = int(request.args.get("page", 1))
        sort = request.args.get("sort", None)
        fields = request.args.get("fields", None)
        filters = parse_filter_args(request.args, name)
        if filters:
            result = list_get(name, page, sort, fields, filters=filters)
        else:
            result = list_get(name, page, sort, fields)
        return jsonify(result)

    def post(self, name):
        """Create new entity of kind"""
        info = request.get_json()
        result = list_post(name, info)
        return result

    def delete(self, name):
        """Delete kind (and all its entities)"""
        result = list_delete(name)
        return result
def list_get(name, page=1, sort=None, fields=None, truncate=True, filters=None):
    """Materialise one page of entities of kind *name* (see ilist_get)."""
    generator = ilist_get(
        name,
        page=page,
        sort=sort,
        fields=fields,
        truncate=truncate,
        filters=filters,
    )
    return list(generator)
def ilist_get(name, page=1, sort=None, fields=None, truncate=True, filters=None):
    """Yield one page of entities of kind *name* as dicts.

    page/sort/fields/filters map onto offset/order/projection/filters of
    the underlying query.  Known models are iterated via their class; any
    other kind goes through the generic db layer.
    """
    if page < 1:
        page = 1
    limit = PAGE_SIZE
    offset = (page - 1) * limit
    kwargs = {}
    if sort:
        # accept "a,b" strings as well as lists
        if not isinstance(sort, list):
            sort = sort.split(",")
        if len(sort) > 0:
            kwargs["order"] = sort
    if fields:
        if not isinstance(fields, list):
            fields = fields.split(",")
        if len(fields) > 0:
            kwargs["projection"] = fields
    if filters:
        if len(filters) > 0:
            kwargs["filters"] = filters
    if name not in KNOWN_MODELS:
        for entity in db.ilist_entities(name, limit, offset, **kwargs):
            info = item_to_dict(entity, truncate=truncate)
            yield info
    else:
        for instance in KNOWN_MODELS[name].ilist_all(limit, offset, **kwargs):
            info = instance_to_dict(instance, truncate=truncate)
            yield info
def list_post(name, info):
    """Create a new entity of kind *name* — not implemented yet.

    A future implementation would derive a key from *info* (or let the
    datastore assign one), build the entity and put it via the db layer.
    """
    # is item id_or_name in info or not?
    # key = item_get_key(name, item)
    # key = db.get_key(name, int(id_or_name), *path_args)
    # entity = db.make_entity(key, **info)
    # db.put_entity(entity)
    raise NotImplementedError("Create new entity of kind")
def list_delete(name):
    """Delete kind *name* and all its entities — deliberately unsupported."""
    raise NotImplementedError("Delete kind (and all its entities)")
class ItemAPI(MethodView):
def get(self, parent, item):
"""Get entity"""
if parent not in KNOWN_MODELS:
if parent == "Others":
return jsonify(home_get(parent))
kinds_list = get_lists()
if parent not in kinds_list:
raise ValueError("Invalid Kind %r" % parent)
fields = request.args.get("fields", None)
children = request.args.get("children", True)
unpickle = request.args.get("unpickle", True)
result = item_get(
parent, item, fields=fields, children=children, unpickle=unpickle
)
# return individual property of this item!?
if fields and isinstance(fields, str) and "," not in fields:
result = result[fields]
# TODO: specify content-type if available/known
if isinstance(result, str):
return result, 200, {"Content-Type": "text/plain"}
# https://stackoverflow.com/questions/20508788/do-i-need-content-type-application-octet-stream-for-file-download
if isinstance(result, bytes):
if fields in get_list_config(parent, "image"):
return result, 200, {"Content-Type": "image/png"}
# return result, 200, {"Content-Type": "application/octet-stream"}
if fields in get_list_config(parent, "pickled"):
return jsonify(result)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
io.py
/io/ module in the /egp/ package.
Created by <NAME>.
Copyright (c) 2012. All rights reserved.
"""
# imports
import struct, os, stat, datetime, numpy as np
# try:
# import pyublas
# import crunch
# except:
# print("pyublas and egp.crunch not imported!")
import pickle
import egp.toolbox, egp.icgen, egp.basic_types, egp.cosmology
import tarfile
import egp.MMF_io as MMF
# constants
__version__ = "0.x, April 2014; version number no longer updated, just check the Mercurial revision number"
# exception classes
# interface functions
# classes
#
# TODO: Remove GadgetData class altogether and replace with factory:
# OrderedParticles_from_gadget_snapshot.
# Same for other particle data classes, though we might want to
# make a special class for haloes/clusters.
#
class GadgetData(egp.basic_types.OrderedParticles):
    """
    An instance of existing Gadget data, loaded from a Gadget type binary file.

    Instantiation argument is firstfile: the name of the first file of the data
    set (e.g. snapshot_000, snapshot_000.0, lcdm_gas, lcdm_gas.0, etc.),
    including path.

    Particle positions, velocities and the ID-sort order are loaded lazily via
    property getters that memory-map the snapshot files.
    """
    def __init__(self, firstfile):
        super(GadgetData, self).__init__()
        self.firstfile = os.path.abspath(firstfile)
        self.detectGType()   # sets self.gtype (1 or 2)
        self.loadHeaders()   # fills self.header (one dict per snapshot file)
        self.Ntotal = sum(self.header[0]['Nall'])
        # Book-keeping flags for lazily computed derived quantities.
        self.posSphCalculated = False
        self.velSphCalculated = False
        self.redshiftCalculated = False
        self.posZCalculated = False
        self.posSliced = False
        self.posZSliced = False
        self.densityGridsize = None
        self.densityZGridsize = None
        self.originCentered = True
        # NOTE: stores the parent *class* object (not an instance); its methods
        # are later called as plain functions with self passed explicitly.
        self.parent = egp.basic_types.OrderedParticles

    # http://stackoverflow.com/questions/7019643/overriding-properties-in-python
    @egp.basic_types.OrderedParticles.order.getter
    def order(self):
        """Index array that sorts particles by ID, built lazily from the ID block."""
        try:
            return self._order
        except AttributeError:
            # Load particle IDs and use them to build an ordering array that
            # will be used to order the other data by ID.
            idarray = np.empty(self.Ntotal, dtype='uint32')
            Ncount = 0
            for header in self.header:
                Np = sum(header['Npart'])
                # Byte offset of the ID block: past the header block
                # (8-byte markers + 256 bytes) and the position and velocity
                # blocks (8-byte markers + Np*3 float32s each).  Type-2 files
                # carry four extra 16-byte block labels before this point.
                if self.gtype == 2:
                    offset = (4*16 + (8 + 256) + (8 + Np*3*4)*2)
                else:
                    offset = ((8 + 256) + (8 + Np*3*4)*2)
                memmap = np.memmap(header['filename'], dtype='uint32',
                                   mode='r', offset=offset)
                # Element 0 is the leading block-size marker; IDs follow.
                idarray[Ncount:Ncount+Np] = memmap[1:1+Np]
                Ncount += Np
                del memmap
            # Assigning through the (inherited) setter stores into self._order.
            self.order = np.argsort(idarray).astype('uint32')
            del idarray
            return self._order

    @egp.basic_types.OrderedParticles.pos.getter
    def pos(self):
        """Particle positions, shape (Ntotal, 3), float32, ordered by ID."""
        try:
            return self._pos
        except AttributeError:
            # Load the particle positions into a NumPy array called self._pos,
            # ordered by ID number.
            self.pos = np.empty((3, self.Ntotal), dtype='float32').T
            Ncount = 0
            for header in self.header:
                Np = sum(header['Npart'])
                # Positions are the first data block after the 256-byte header
                # (type-2 files have two 16-byte block labels before it).
                if self.gtype == 2:
                    offset = (2*16 + (8 + 256))
                else:
                    offset = (8 + 256)
                memmap = np.memmap(header['filename'], dtype='float32',
                                   mode='r', offset=offset)
                # Skip the leading block-size marker, then read Np xyz triplets.
                self.pos[Ncount:Ncount+Np] = memmap[1:1+3*Np].reshape((Np, 3))
                Ncount += Np
                del memmap
            self.pos = self.pos[self.order]
            return self._pos

    @egp.basic_types.OrderedParticles.vel.getter
    def vel(self):
        """Particle velocities, shape (Ntotal, 3), float32, ordered by ID."""
        try:
            return self._vel
        except AttributeError:
            # Load the particle velocities into a NumPy array called self._vel,
            # ordered by ID number.
            self.vel = np.empty((3, self.Ntotal), dtype='float32').T
            Ncount = 0
            for header in self.header:
                Np = sum(header['Npart'])
                # Velocities follow the position block (marker + Np*3 float32s).
                if self.gtype == 2:
                    offset = 3*16 + (8 + 256) + (8 + 3*4*Np)
                else:
                    offset = (8 + 256) + (8 + 3*4*Np)
                memmap = np.memmap(header['filename'], dtype='float32',
                                   mode='r', offset=offset)
                self.vel[Ncount:Ncount+Np] = memmap[1:1+3*Np].reshape((Np, 3))
                Ncount += Np
                del memmap
            self.vel = self.vel[self.order]
            return self._vel

    def detectGType(self):
        """Detects Gadget file type (type 1 or 2; resp. without or with the 16
        byte block headers)."""
        filename = self.firstfile
        f = open(filename, 'rb')
        firstbytes = struct.unpack('I', f.read(4))
        # A type-2 file begins with a 16-byte block label whose size marker is 8.
        if firstbytes[0] == 8:
            self.gtype = 2
        else:
            self.gtype = 1
        f.close()

    def loadHeaders(self):
        """Loads file headers of all files in the dataset into memory."""
        self.header = []
        filename = self.firstfile
        self.header.append(getheader(filename, self.gtype))
        if self.header[0]['NumFiles'] > 1:
            # Multi-file snapshots are named basename.0, basename.1, ...;
            # stripping the trailing digit gives "basename." to append to.
            basename = filename[:-1]
            for filenr in range(self.header[0]['NumFiles'])[1:]:
                self.header.append(getheader(basename+str(filenr), self.gtype))

    # Origin used for spherical-coordinate transforms; defaults to box centre.
    sphOrigin = property()

    @sphOrigin.getter
    def sphOrigin(self):
        try:
            return self._sphOrigin
        except AttributeError:
            # NOTE(review): this first assignment of 1 is immediately
            # overwritten two lines below; it appears to be dead code.
            self.sphOrigin = 1
            center = self.header[0]['BoxSize']/2
            self.sphOrigin = np.array((center, center, center))
            return self._sphOrigin

    @sphOrigin.setter
    def sphOrigin(self, sphOrigin):
        self._sphOrigin = sphOrigin

    def calcPosSphGadget(self, centerOrigin=True):
        # Delegates to the parent class's implementation, passing Gadget-specific
        # origin and box size taken from the snapshot header.
        self.parent.calcPosSph(self, self.sphOrigin,
                               self.header[0]['BoxSize'],
                               centerOrigin=centerOrigin)
        self.originCentered = centerOrigin

    def calcRedshiftGadget(self, centerOrigin=True):
        # Hubble constant in km/s/Mpc, from the header's dimensionless h.
        H = self.header[0]['HubbleParam']*100
        self.parent.calcRedshift(self, self.sphOrigin,
                                 self.header[0]['BoxSize'], H,
                                 centerOrigin=centerOrigin)

    def calcRedshiftSpaceGadget(self, centerOrigin=True):
        H = self.header[0]['HubbleParam']*100
        self.parent.calcRedshiftSpace(self, self.sphOrigin,
                                      self.header[0]['BoxSize'], H,
                                      centerOrigin=centerOrigin)

    def calcVelSph(self, origin=None):
        """Calculate the velocities of particles in spherical coordinates. The
        origin is by default at the center of the box, but can be specified by
        supplying an origin=(x,y,z) argument."""
        # Need both spherical positions and velocities
        # NOTE(review): self.velloaded, calcPosSph and loadVel are not defined
        # in this class; presumably inherited — confirm against the base class.
        if not (self.posSphCalculated and self.velloaded):
            self.calcPosSph(origin=origin)
            self.loadVel()
        x = self.pos[:,0]
        y = self.pos[:,1]
        z = self.pos[:,2]
        r = self.posSph[:,0]
        phi = self.posSph[:,1]
        theta = self.posSph[:,2]
        # (translated) REMOVE self BELOW AGAIN; DOES NOT NEED TO BE STORED
        self.unitvector_r = np.array([x/r, y/r, z/r]).T
        self.unitvector_phi = np.array([-np.sin(phi), np.cos(phi), np.zeros(len(phi))]).T
        self.unitvector_theta = np.array([np.cos(theta)*np.cos(phi), np.cos(theta)*np.sin(phi), -np.sin(theta)]).T

    def slicePos(self, origin=None, thickness=0.1, plane=None):
        """Make three slices of the particle positions around the origin
        (defaults to the center of the box). Thickness is given in fraction of
        boxsize. Origin can be changed by giving an origin=(x,y,z) argument.
        If spherical coordinates are calculated the origin of those coordinates
        will be taken.
        TO BE IMPLEMENTED:
        The three slices are taken in three perpendicular planes. If the
        plane=((phi1,theta1),(phi2,theta2)) argument is given these
        vectors will be taken to span one of the planes on, through the origin.
        The others will then again be perpendicular to this."""
        # NOTE(review): self.posloaded / loadPos are not defined in this class;
        # presumably inherited — confirm against the base class.
        if not self.posloaded:
            self.loadPos()
        if not (origin or self.posSphCalculated):
            center = self.header[0]['BoxSize']/2
            origin = np.array((center,center,center))
        elif self.posSphCalculated:
            origin = self.sphOrigin
        else:
            origin = np.asarray(origin)
        self.posSlice1, self.posSlice2, self.posSlice3 = takeOrthSlices(self.pos, self.header[0]['BoxSize']*thickness, origin)
        self.posSliced = True

    def slicePosZ(self, origin=None, thickness=0.1, plane=None):
        """Make three slices of the particle positions around the origin
        (defaults to the center of the box). Thickness is given in fraction of
        boxsize. Origin can be changed by giving an origin=(x,y,z) argument.
        If spherical coordinates are calculated the origin of those coordinates
        will be taken.
        TO BE IMPLEMENTED:
        The three slices are taken in three perpendicular planes. If the
        plane=((phi1,theta1),(phi2,theta2)) argument is given these
        vectors will be taken to span one of the planes on, through the origin.
        The others will then again be perpendicular to this."""
        if not self.posZCalculated:
            self.calcRedshiftSpace()
        if not (origin or self.posSphCalculated):
            center = self.header[0]['BoxSize']/2
            origin = np.array((center,center,center))
        elif self.posSphCalculated:
            origin = self.sphOrigin
        else:
            origin = np.asarray(origin)
        self.posZSlice1, self.posZSlice2, self.posZSlice3 = takeOrthSlices(self.posZ, self.header[0]['BoxSize']*thickness, origin)
        self.posZSliced = True

    def saveNormalPosAsErwin(self, filename):
        """Save the position data as an Erwin-type binary file (Npart|x|y|z|
        [m]; normalized coordinates)."""
        self.loadPos()
        savePosAsErwin(self.pos, self.header[0]['BoxSize'], filename)

    def saveNormalPosAsIfrit(self, filename, sample=None):
        """Saves as an IFrIT binary file.

        When *sample* is given, that many particles are drawn (with
        replacement, via randint) instead of writing the full set.
        """
        self.loadPos()
        boxsize = self.header[0]['BoxSize']
        if sample:
            savePosAsIfrit(self.pos[np.random.randint(0, len(self.pos),
                                                      sample)], boxsize, filename)
        else:
            savePosAsIfrit(self.pos, boxsize, filename)
class CubeP3MData(egp.basic_types.OrderedParticles):
    """
    Load a CubeP3M checkpoint file and gather related meta-data from the
    parameter files present in the run directory. The run directory is
    assumed to be one directory up from the checkpoint's location. If not,
    you need to specify the run_path in the initialization.
    Default instantiation argument is filename, including full path.
    """
    def __init__(self, filename, run_path=None):
        """Open the checkpoint at *filename*.

        Parameters
        ----------
        filename : str
            Full path to the CubeP3M xv checkpoint file.
        run_path : str, optional
            Run directory containing ``parameters.pickle``.  Defaults to one
            directory up from the checkpoint's location.
        """
        # BUG FIX: was super(GadgetData, self).__init__() — naming the wrong
        # class makes super() fail because self is not a GadgetData instance.
        super(CubeP3MData, self).__init__()
        self.filename = os.path.abspath(filename)
        # BUG FIX: an explicitly passed run_path was previously ignored
        # (self.run_path was only ever assigned in the derived-path branch),
        # which broke load_metadata() for callers that supplied one.
        if run_path:
            self.run_path = run_path
        else:
            self.run_path = os.path.dirname(self.filename)[:-6]  # cut off "output"
        self.load_metadata()
        self.Ntotal = self.metadata['N']
        # File offset (in 4-byte words) of the particle data past the header.
        self.offset = 11 + self.metadata['pp_run']
        # The first int32 of the file records the actual particle count, which
        # may differ from the parameter file if particles were removed.
        xvint = np.memmap(self.filename, dtype='int32', mode='r')
        N = xvint[0]
        if N != self.Ntotal:
            self.Ntotal = N
            print("N.B.: particles have been deleted from the ICs!\nAdjusted particle number from %i to %i." % (self.metadata['N'], N))
        # Raw interleaved (x, y, z, vx, vy, vz) float32 records, memory-mapped.
        self.xv = np.memmap(self.filename, dtype='float32', mode='r', offset=self.offset*4)

    @egp.basic_types.OrderedParticles.order.getter
    def order(self):
        """Index array sorting particles by ID; identity when no PID file exists."""
        try:
            return self._order
        except AttributeError:
            # Load particle IDs and use them to build an ordering array that
            # will be used to order the other data by ID.
            if self.metadata['pid_flag']:
                pidarray = self.get_pid_array()
                self.order = np.argsort(pidarray).astype('uint32')
            else:
                # Without particle IDs, keep file order.
                self.order = np.arange(self.Ntotal)
            return self._order

    @egp.basic_types.OrderedParticles.pos.getter
    def pos(self):
        """Particle positions in Mpc h^-1, shape (Ntotal, 3), ordered by ID."""
        try:
            return self._pos
        except AttributeError:
            # Load the particle positions into a NumPy array called self._pos,
            # ordered by ID number.
            # N.B.: +0.5 (and %boxlen later on) because of the way the ICs are set up.
            self.pos = ((self.xv.reshape(self.Ntotal, 6)[:,:3]+0.5) * self.metadata['boxlen']/self.metadata['nc'])%self.metadata['boxlen'] # Mpc h^-1
            self.pos = self.pos[self.order]
            return self._pos

    @egp.basic_types.OrderedParticles.vel.getter
    def vel(self):
        """Particle velocities in km/s, shape (Ntotal, 3), ordered by ID."""
        try:
            return self._vel
        except AttributeError:
            # Load the particle velocities into a NumPy array called self._vel,
            # ordered by ID number.  The factor converts CubeP3M internal
            # velocity units to km/s at the checkpoint redshift.
            self.vel = self.xv.reshape(self.Ntotal, 6)[:,3:] * (150*(1+self.metadata['redshift']) * self.metadata['boxlen'] / self.metadata['nc'] * np.sqrt(self.metadata['omega_m'])) # km/s
            self.vel = self.vel[self.order]
            return self._vel

    def load_metadata(self):
        """Loads the pickled parameters. Assumes that simulation was setup with
        this code, which saves parameters as a Python pickle file."""
        self.metadata = pickle.load(open(self.run_path+'parameters.pickle', 'rb'))

    def get_pid_array(self):
        """Memory-map the particle-ID file matching this checkpoint (int64)."""
        pid_filename = self.filename[:self.filename.find('xv')]+'PID0.dat'
        idarray = np.memmap(pid_filename, dtype='int64', offset=self.offset*4)
        return idarray
class SubFindHaloes(object):
def | |
# pylint: disable=missing-docstring,invalid-name
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, Group
from django.contrib.contenttypes.models import ContentType
from django.db.models.query import QuerySet
from guardian.compat import get_user_permission_full_codename
from guardian.exceptions import MixedContentTypeError, WrongAppError
from guardian.shortcuts import assign_perm, get_objects_for_group, remove_perm
from rest_framework import status
from rest_framework.test import APIRequestFactory, force_authenticate
from resolwe.flow.models import Collection, Data, Process
from resolwe.flow.views import StorageViewSet
from resolwe.permissions.shortcuts import (
get_object_perms,
get_objects_for_user,
get_user_group_perms,
)
from resolwe.test import TestCase
factory = APIRequestFactory()
class UserGroupTestCase(TestCase):
    """Tests for get_user_group_perms: direct vs. group-mediated permissions."""

    def setUp(self):
        super().setUp()
        self.group1 = Group.objects.create(name="Test group 1")
        self.group2 = Group.objects.create(name="Test group 2")
        self.collection = Collection.objects.create(
            contributor=self.contributor, name="Test collection",
        )
        # This collection is here to make sure that other permissions
        # don't affect tested queries.
        collection2 = Collection.objects.create(
            contributor=self.contributor, name="Test collection 2",
        )
        assign_perm("view_collection", self.contributor, collection2)
        assign_perm("view_collection", self.group1, collection2)

    def test_user(self):
        # Permissions assigned directly to a user show up only in user_perms.
        assign_perm("view_collection", self.contributor, self.collection)
        assign_perm("edit_collection", self.contributor, self.collection)
        user_perms, group_perms = get_user_group_perms(
            self.contributor, self.collection
        )
        self.assertEqual(len(group_perms), 0)
        self.assertCountEqual(user_perms, ["view_collection", "edit_collection"])

    def test_user_in_group(self):
        # Group-assigned permissions are reported per group; user_perms stays
        # limited to permissions granted to the user directly.
        self.group1.user_set.add(self.contributor)
        assign_perm("view_collection", self.group1, self.collection)
        assign_perm("edit_collection", self.group1, self.collection)
        user_perms, group_perms = get_user_group_perms(
            self.contributor, self.collection
        )
        self.assertEqual(len(group_perms), 1)
        # group_perms entries are (pk, ..., permission-list) tuples.
        self.assertCountEqual(group_perms[0][2], ["view_collection", "edit_collection"])
        self.assertEqual(len(user_perms), 0)
        assign_perm("view_collection", self.contributor, self.collection)
        user_perms, group_perms = get_user_group_perms(
            self.contributor, self.collection
        )
        self.assertEqual(len(group_perms), 1)
        self.assertCountEqual(group_perms[0][2], ["view_collection", "edit_collection"])
        self.assertEqual(len(user_perms), 1)
        self.assertCountEqual(user_perms, ["view_collection"])

    def test_user_in_multiple_groups(self):
        # Each group the user belongs to contributes its own entry, ordered by
        # group; entries carry only that group's permissions.
        self.group1.user_set.add(self.contributor)
        self.group2.user_set.add(self.contributor)
        assign_perm("view_collection", self.group1, self.collection)
        assign_perm("edit_collection", self.group1, self.collection)
        assign_perm("view_collection", self.group2, self.collection)
        user_perms, group_perms = get_user_group_perms(
            self.contributor, self.collection
        )
        self.assertEqual(len(group_perms), 2)
        self.assertEqual(group_perms[0][0], self.group1.pk)
        self.assertCountEqual(group_perms[0][2], ["view_collection", "edit_collection"])
        self.assertEqual(group_perms[1][0], self.group2.pk)
        self.assertCountEqual(group_perms[1][2], ["view_collection"])
        self.assertEqual(len(user_perms), 0)

    def test_group(self):
        # Passing a Group (instead of a user) reports that group's permissions.
        assign_perm("view_collection", self.group1, self.collection)
        assign_perm("edit_collection", self.group1, self.collection)
        user_perms, group_perms = get_user_group_perms(self.group1, self.collection)
        self.assertEqual(len(group_perms), 1)
        self.assertCountEqual(group_perms[0][2], ["view_collection", "edit_collection"])
        self.assertEqual(len(user_perms), 0)
class ObjectPermsTestCase(TestCase):
    """Tests for get_object_perms: flattened user/group/public permission listings."""

    def setUp(self):
        super().setUp()
        # Remove the superuser from the base TestCase so it cannot appear in
        # (or short-circuit) permission listings.
        self.admin.delete()
        self.user1 = get_user_model().objects.create(username="test_user1")
        self.user2 = get_user_model().objects.create(username="test_user2")
        self.group1 = Group.objects.create(name="Test group 1")
        self.group2 = Group.objects.create(name="Test group 2")
        self.anonymous = AnonymousUser()
        self.collection = Collection.objects.create(
            contributor=self.user1, name="Test collection",
        )

    def _sort_perms(self, perms):
        # Sort each entry's permission list in place so that dicts compare
        # equal regardless of permission ordering.
        for elm in perms:
            elm["permissions"] = sorted(elm["permissions"])
        return perms

    def test_all_permissions(self):
        # Without a user argument, all users, groups, and the public entry
        # are listed as permissions are progressively assigned.
        self.group1.user_set.add(self.user1)
        perms = get_object_perms(self.collection)
        self.assertEqual(len(perms), 0)
        assign_perm("view_collection", self.user1, self.collection)
        assign_perm("edit_collection", self.user1, self.collection)
        assign_perm("view_collection", self.user2, self.collection)
        expected_perms = [
            {
                "permissions": ["edit", "view"],
                "type": "user",
                "id": self.user1.pk,
                "name": "test_user1",
                "username": "test_user1",
            },
            {
                "permissions": ["view"],
                "type": "user",
                "id": self.user2.pk,
                "name": "test_user2",
                "username": "test_user2",
            },
        ]
        perms = get_object_perms(self.collection)
        self.assertCountEqual(self._sort_perms(expected_perms), self._sort_perms(perms))
        assign_perm("view_collection", self.group1, self.collection)
        assign_perm("edit_collection", self.group1, self.collection)
        assign_perm("view_collection", self.group2, self.collection)
        expected_perms.extend(
            [
                {
                    "permissions": ["edit", "view"],
                    "type": "group",
                    "id": self.group1.pk,
                    "name": "Test group 1",
                },
                {
                    "permissions": ["view"],
                    "type": "group",
                    "id": self.group2.pk,
                    "name": "Test group 2",
                },
            ]
        )
        perms = get_object_perms(self.collection)
        self.assertCountEqual(self._sort_perms(expected_perms), self._sort_perms(perms))
        # Anonymous-user permissions surface as a single "public" entry.
        assign_perm("view_collection", self.anonymous, self.collection)
        expected_perms.append({"permissions": ["view"], "type": "public"},)
        perms = get_object_perms(self.collection)
        self.assertCountEqual(self._sort_perms(expected_perms), self._sort_perms(perms))

    def test_user_permissions(self):
        # With a user argument, only that user's direct permissions, the
        # permissions of groups the user belongs to, and public permissions
        # are listed.
        self.group1.user_set.add(self.user1)
        assign_perm("view_collection", self.user1, self.collection)
        assign_perm("edit_collection", self.user1, self.collection)
        assign_perm("view_collection", self.user2, self.collection)
        assign_perm("view_collection", self.group1, self.collection)
        assign_perm("edit_collection", self.group1, self.collection)
        assign_perm("view_collection", self.group2, self.collection)
        expected_perms = [
            {
                "permissions": ["edit", "view"],
                "type": "user",
                "id": self.user1.pk,
                "name": "test_user1",
                "username": "test_user1",
            },
            {
                "permissions": ["edit", "view"],
                "type": "group",
                "id": self.group1.pk,
                "name": "Test group 1",
            },
        ]
        perms = get_object_perms(self.collection, self.user1)
        self.assertCountEqual(self._sort_perms(expected_perms), self._sort_perms(perms))
        # Joining a second group adds that group's entry to the listing.
        self.group2.user_set.add(self.user1)
        expected_perms.append(
            {
                "permissions": ["view"],
                "type": "group",
                "id": self.group2.pk,
                "name": "Test group 2",
            },
        )
        perms = get_object_perms(self.collection, self.user1)
        self.assertCountEqual(self._sort_perms(expected_perms), self._sort_perms(perms))
        assign_perm("view_collection", self.anonymous, self.collection)
        expected_perms.append({"permissions": ["view"], "type": "public"},)
        perms = get_object_perms(self.collection, self.user1)
        self.assertCountEqual(self._sort_perms(expected_perms), self._sort_perms(perms))
class StoragePermsTestCase(TestCase):
    """Storage endpoints inherit visibility from the Data object owning the storage."""

    def setUp(self):
        super().setUp()
        proc = Process.objects.create(name="Test process", contributor=self.contributor)
        self.data = Data.objects.create(
            name="Test data", contributor=self.contributor, process=proc
        )
        # Extra Data object and storage that must never leak into results.
        dummy_data = Data.objects.create(
            name="Dummy data", contributor=self.contributor, process=proc
        )
        self.storage1 = self.data.storages.create(
            name="Test storage", json={}, contributor=self.contributor,
        )
        self.storage2 = self.data.storages.create(
            name="Test storage 2", json={}, contributor=self.contributor,
        )
        dummy_data.storages.create(
            name="Dummy storage", json={}, contributor=self.contributor,
        )
        self.user = get_user_model().objects.create(username="test_user")
        self.group = Group.objects.create(name="test_group")
        self.storage_list_viewset = StorageViewSet.as_view(actions={"get": "list",})
        self.storage_detail_viewset = StorageViewSet.as_view(
            actions={"get": "retrieve",}
        )

    # NOTE(review): method name has a typo ("permissons"); kept as-is so the
    # discovered test name does not change.
    def test_detail_permissons(self):
        # Detail view 404s without view_data on the owning Data object, works
        # while the permission is held, and 404s again after it is removed.
        request = factory.get("/", content_type="application/json")
        force_authenticate(request, self.user)
        resp = self.storage_detail_viewset(request, pk=self.storage1.pk)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        assign_perm("view_data", self.user, self.data)
        resp = self.storage_detail_viewset(request, pk=self.storage1.pk)
        self.assertEqual(resp.data["name"], "Test storage")
        remove_perm("view_data", self.user, self.data)
        resp = self.storage_detail_viewset(request, pk=self.storage1.pk)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)

    def test_not_allowed_methods(self):
        # The viewset is read-only: no write actions must be exposed.
        self.assertFalse(hasattr(StorageViewSet, "update"))
        self.assertFalse(hasattr(StorageViewSet, "partial_update"))
        self.assertFalse(hasattr(StorageViewSet, "destroy"))
        self.assertFalse(hasattr(StorageViewSet, "create"))
# tests copied from guardian.testapp.tests.test_shortcuts
class GetObjectsForUser(TestCase):
def setUp(self):
super().setUp()
self.group = Group.objects.create(name="group")
self.ctype = ContentType.objects.create(
model="bar", app_label="fake-for-guardian-tests"
)
def test_superuser(self):
ctypes = ContentType.objects.all()
objects = get_objects_for_user(
self.admin, ["contenttypes.change_contenttype"], ctypes
)
self.assertEqual(set(ctypes), set(objects))
def test_with_superuser_true(self):
ctypes = ContentType.objects.all()
objects = get_objects_for_user(
self.admin, ["contenttypes.change_contenttype"], ctypes, with_superuser=True
)
self.assertEqual(set(ctypes), set(objects))
def test_with_superuser_false(self):
ctypes = ContentType.objects.all()
obj1 = ContentType.objects.create(model="foo", app_label="guardian-tests")
assign_perm("change_contenttype", self.admin, obj1)
objects = get_objects_for_user(
self.admin,
["contenttypes.change_contenttype"],
ctypes,
with_superuser=False,
)
self.assertEqual(set([obj1]), set(objects))
def test_anonymous(self):
ctypes = ContentType.objects.all()
objects = get_objects_for_user(
self.contributor, ["contenttypes.change_contenttype"], ctypes
)
obj1 = ContentType.objects.create(model="foo", app_label="guardian-tests")
assign_perm("change_contenttype", self.contributor, obj1)
objects = get_objects_for_user(
self.contributor, ["contenttypes.change_contenttype"], ctypes
)
self.assertEqual(set([obj1]), set(objects))
def test_mixed_perms(self):
codenames = [
get_user_permission_full_codename("change"),
"auth.change_permission",
]
self.assertRaises(
MixedContentTypeError, get_objects_for_user, self.contributor, codenames
)
def test_perms_with_mixed_apps(self):
codenames = [
get_user_permission_full_codename("change"),
"contenttypes.change_contenttype",
]
self.assertRaises(
MixedContentTypeError, get_objects_for_user, self.contributor, codenames
)
def test_mixed_perms_and_klass(self):
self.assertRaises(
MixedContentTypeError,
get_objects_for_user,
self.contributor,
["auth.change_group"],
get_user_model(),
)
def test_no_app_label_nor_klass(self):
self.assertRaises(
WrongAppError, get_objects_for_user, self.contributor, ["change_group"]
)
def test_empty_perms_sequence(self):
objects = get_objects_for_user(self.contributor, [], Group.objects.all())
self.assertEqual(set(objects), set())
def test_perms_single(self):
perm = "auth.change_group"
assign_perm(perm, self.contributor, self.group)
self.assertEqual(
set(get_objects_for_user(self.contributor, perm)),
set(get_objects_for_user(self.contributor, [perm])),
)
def test_klass_as_model(self):
assign_perm("contenttypes.change_contenttype", self.contributor, self.ctype)
objects = get_objects_for_user(
self.contributor, ["contenttypes.change_contenttype"], ContentType
)
self.assertEqual([obj.name for obj in objects], [self.ctype.name])
def test_klass_as_manager(self):
assign_perm("auth.change_group", self.contributor, self.group)
objects = get_objects_for_user(
self.contributor, ["auth.change_group"], Group.objects
)
self.assertEqual([obj.name for obj in objects], [self.group.name])
def test_klass_as_queryset(self):
assign_perm("auth.change_group", self.contributor, self.group)
objects = get_objects_for_user(
self.contributor, ["auth.change_group"], Group.objects.all()
)
self.assertEqual([obj.name for obj in objects], [self.group.name])
def test_ensure_returns_queryset(self):
objects = get_objects_for_user(self.contributor, ["auth.change_group"])
self.assertTrue(isinstance(objects, QuerySet))
def test_simple(self):
group_names = ["group1", "group2", "group3"]
groups = [Group.objects.create(name=name) for name in group_names]
for group in groups:
assign_perm("change_group", self.contributor, group)
objects = get_objects_for_user(self.contributor, ["auth.change_group"])
self.assertEqual(len(objects), len(groups))
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(set(objects), set(groups))
def test_multi_perms(self):
group_names = ["group1", "group2", "group3"]
groups = [Group.objects.create(name=name) for name in group_names]
for group in groups:
assign_perm("auth.change_group", self.contributor, group)
assign_perm("auth.delete_group", self.contributor, groups[1])
objects = get_objects_for_user(
self.contributor, ["auth.change_group", "auth.delete_group"]
)
self.assertEqual(len(objects), 1)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects.values_list("name", flat=True)), set([groups[1].name])
)
def test_multi_perms_no_groups(self):
group_names = ["group1", "group2", "group3"]
groups = [Group.objects.create(name=name) for name in group_names]
for group in groups:
assign_perm("auth.change_group", self.contributor, group)
assign_perm("auth.delete_group", self.contributor, groups[1])
objects = get_objects_for_user(
self.contributor,
["auth.change_group", "auth.delete_group"],
use_groups=False,
)
self.assertEqual(len(objects), 1)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects.values_list("name", flat=True)), set([groups[1].name])
)
def test_any_of_multi_perms(self):
group_names = ["group1", "group2", "group3"]
groups = [Group.objects.create(name=name) for name in group_names]
assign_perm("auth.change_group", self.contributor, groups[0])
assign_perm("auth.delete_group", self.contributor, groups[2])
objects = get_objects_for_user(
self.contributor, ["auth.change_group", "auth.delete_group"], any_perm=True
)
self.assertEqual(len(objects), 2)
self.assertTrue(isinstance(objects, QuerySet))
self.assertEqual(
set(objects.values_list("name", flat=True)),
set([groups[0].name, groups[2].name]),
)
def test_groups_perms(self):
group1 = Group.objects.create(name="group1")
group2 = Group.objects.create(name="group2")
group3 = Group.objects.create(name="group3")
groups = [group1, group2, group3]
for group in groups:
self.contributor.groups.add(group)
# Objects to operate on
ctypes = list(ContentType.objects.all().order_by("id"))
assign_perm("auth.change_group", self.contributor)
assign_perm("change_contenttype", self.contributor, ctypes[0])
assign_perm("change_contenttype", self.contributor, ctypes[1])
assign_perm("delete_contenttype", self.contributor, ctypes[1])
assign_perm("delete_contenttype", self.contributor, ctypes[2])
assign_perm("change_contenttype", groups[0], ctypes[3])
assign_perm("change_contenttype", groups[1], ctypes[3])
assign_perm("change_contenttype", groups[2], ctypes[4])
assign_perm("delete_contenttype", groups[0], ctypes[0])
objects = get_objects_for_user(
self.contributor, ["contenttypes.change_contenttype"]
)
self.assertEqual(
set(objects.values_list("id", flat=True)),
set(ctypes[i].id for i in [0, 1, 3, 4]),
)
objects = get_objects_for_user(
self.contributor,
["contenttypes.change_contenttype", "contenttypes.delete_contenttype"],
)
self.assertEqual(
set(objects.values_list("id", flat=True)), set(ctypes[i].id for i in [0, 1])
)
objects = get_objects_for_user(
self.contributor, ["contenttypes.change_contenttype"]
)
self.assertEqual(
set(objects.values_list("id", flat=True)),
set(ctypes[i].id for i in [0, 1, 3, 4]),
)
def test_has_global_permission_only(self):
group_names = ["group1", "group2", "group3"]
for name in group_names:
Group.objects.create(name=name)
# global permission to change any group
perm = "auth.change_group"
assign_perm(perm, self.contributor)
objects = get_objects_for_user(self.contributor, perm)
remove_perm(perm, self.contributor)
self.assertEqual(set(objects), set(Group.objects.all()))
def test_has_global_permission_and_object_based_permission(self):
group_names = ["group1", "group2", "group3"]
groups = [Group.objects.create(name=name) for name in group_names]
# global permission to change any group
perm_global = "auth.change_group"
perm_obj = "delete_group"
assign_perm(perm_global, self.contributor)
assign_perm(perm_obj, self.contributor, groups[0])
objects = get_objects_for_user(self.contributor, [perm_global, perm_obj])
remove_perm(perm_global, self.contributor)
self.assertEqual(
set(objects.values_list("name", flat=True)), set([groups[0].name])
)
def test_has_global_permission_and_object_based_permission_any_perm(self):
group_names = ["group1", "group2", "group3"]
groups = [Group.objects.create(name=name) for name in group_names]
# global permission to change any group
perm_global = "auth.change_group"
# object based permission to change only a specific group
perm_obj = "auth.delete_group"
assign_perm(perm_global, self.contributor)
assign_perm(perm_obj, self.contributor, groups[0])
objects = get_objects_for_user(
self.contributor,
[perm_global, perm_obj],
any_perm=True,
accept_global_perms=True,
)
remove_perm(perm_global, self.contributor)
self.assertEqual(set(objects), set(Group.objects.all()))
def test_object_based_permission_without_global_permission(self):
group_names = ["group1", "group2", "group3"]
groups = [Group.objects.create(name=name) for name in group_names]
# global permission to delete any group
perm_global = "auth.delete_group"
perm_obj = "auth.delete_group"
assign_perm(perm_global, self.contributor)
assign_perm(perm_obj, self.contributor, groups[0])
objects = get_objects_for_user(
self.contributor, [perm_obj], accept_global_perms=False
)
remove_perm(perm_global, self.contributor)
self.assertEqual(
set(objects.values_list("name", flat=True)), set([groups[0].name])
)
def test_object_based_permission_with_groups_2perms(self):
group_names = ["group1", "group2", "group3"]
groups = [Group.objects.create(name=name) for name in group_names]
for group in groups:
self.contributor.groups.add(group)
# Objects to operate on
ctypes = list(ContentType.objects.all().order_by("id"))
assign_perm("contenttypes.change_contenttype", self.contributor)
assign_perm("change_contenttype", self.contributor, ctypes[0])
assign_perm("change_contenttype", self.contributor, ctypes[1])
assign_perm("delete_contenttype", self.contributor, ctypes[1])
assign_perm("delete_contenttype", self.contributor, ctypes[2])
assign_perm("change_contenttype", groups[0], ctypes[3])
assign_perm("change_contenttype", groups[1], ctypes[3])
assign_perm("change_contenttype", groups[2], ctypes[4])
assign_perm("delete_contenttype", groups[0], | |
specified
with defaults.
.. note:: The actual C structure is gotten by calling an instance. This is auto-generated when
called, based on the parameters in the class.
.. warning:: This class will *not* deal well with parameters of the struct which are pointers.
All parameters should be primitive types, except for strings, which are dealt with
specially.
Parameters
----------
ffi : cffi object
The ffi object from any cffi-wrapped library.
"""
_defaults_ = {}
def __init__(self, *args, **kwargs):
super().__init__()
if args:
if len(args) > 1:
raise TypeError(
"%s takes up to one position argument, %s were given"
% (self.__class__.__name__, len(args))
)
elif args[0] is None:
pass
elif isinstance(args[0], self.__class__):
kwargs.update(args[0].self)
elif isinstance(args[0], dict):
kwargs.update(args[0])
else:
raise TypeError(
f"optional positional argument for {self.__class__.__name__} must be"
f" None, dict, or an instance of itself"
)
for k, v in self._defaults_.items():
# Prefer arguments given to the constructor.
_v = kwargs.pop(k, None)
if _v is not None:
v = _v
try:
setattr(self, k, v)
except AttributeError:
# The attribute has been defined as a property, save it as a hidden variable
setattr(self, "_" + k, v)
if kwargs:
logger.warning(
"The following parameters to {thisclass} are not supported: {lst}".format(
thisclass=self.__class__.__name__, lst=list(kwargs.keys())
)
)
def convert(self, key, val):
"""Make any conversions of values before saving to the instance."""
return val
    def update(self, **kwargs):
        """
        Update the parameters of an existing class structure.

        This should always be used instead of attempting to *assign* values to instance attributes.
        It consistently re-generates the underlying C memory space and sets some book-keeping
        variables.

        Parameters
        ----------
        kwargs:
            Any argument that may be passed to the class constructor.
        """
        # Start a fresh cstruct.
        if kwargs:
            self.refresh_cstruct()
        for k in self._defaults_:
            # Prefer arguments given to the constructor.
            if k in kwargs:
                v = kwargs.pop(k)
                try:
                    setattr(self, k, v)
                except AttributeError:
                    # The attribute has been defined as a property, save it as a hidden variable
                    setattr(self, "_" + k, v)
        # Also ensure that parameters that are part of the class, but not the defaults, are set
        # this will fail if these parameters cannot be set for some reason, hence doing it
        # last.
        # (list() because kwargs is mutated by pop during iteration.)
        for k in list(kwargs.keys()):
            if hasattr(self, k):
                setattr(self, k, kwargs.pop(k))
        if kwargs:
            # Whatever is still left is not a parameter of this class at all.
            warnings.warn(
                "The following arguments to be updated are not compatible with this class: %s"
                % kwargs
            )
def clone(self, **kwargs):
    """Return a fresh copy of this instance with ``kwargs`` applied on top.

    The copy is built from :attr:`self` (the constructor-ready dict) and then
    updated, so the original instance is left untouched.
    """
    duplicate = self.__class__(self.self)
    duplicate.update(**kwargs)
    return duplicate
def __call__(self):
    """Return a filled C Structure corresponding to this instance.

    Copies every field of :attr:`pystruct` onto the cached cstruct, converting
    Python strings into C ``char[]`` buffers on the way.
    """
    for key, val in self.pystruct.items():
        # Find the value of this key in the current class
        if isinstance(val, str):
            # If it is a string, need to convert it to C string ourselves.
            val = self.ffi.new("char[]", getattr(self, key).encode())
        try:
            setattr(self._cstruct, key, val)
        except TypeError:
            # Record which field/value failed before re-raising, to ease debugging.
            logger.info(f"For key {key}, value {val}:")
            raise
    return self._cstruct
@property
def pystruct(self):
    """A pure-python dictionary representation of the corresponding C structure."""
    out = {}
    for field in self.fieldnames:
        out[field] = self.convert(field, getattr(self, field))
    return out
@property
def defining_dict(self):
    """
    Pure python dictionary representation of this class, as it would appear in C.

    .. note:: Unlike :attr:`pystruct`, this omits fields that are not
              constructor inputs; unlike :attr:`self`, it holds the
              *converted* values actually handed to the C code.
    """
    out = {}
    for name in self._defaults_:
        out[name] = self.convert(name, getattr(self, name))
    return out
@property
def self(self):
    """
    Dictionary which if passed to its own constructor will yield an identical copy.

    .. note:: Prefers the hidden ("_"-prefixed) attribute over the exposed one,
              so derived/exposed values (e.g. ``10**val``) are not fed back
              through the constructor recursively.
    """
    return {
        k: getattr(self, "_" + k) if hasattr(self, "_" + k) else getattr(self, k)
        for k in self._defaults_
    }
def __repr__(self):
    """Full unique representation of the instance."""
    fields = sorted(f"{k}:{v}" for k, v in self.defining_dict.items())
    return f"{self.__class__.__name__}({', '.join(fields)})"
def __eq__(self, other):
    """Equality is defined as having identical ``__repr__`` strings."""
    return repr(self) == repr(other)
def __hash__(self):
    """Generate a unique hash for the instance (from its ``__repr__``)."""
    return hash(repr(self))
def __str__(self):
    """Human-readable string representation of the object."""
    # Pad each key to the longest key length so values align in a column.
    biggest_k = max(len(k) for k in self.defining_dict)
    params = "\n ".join(
        sorted(f"{k:<{biggest_k}}: {v}" for k, v in self.defining_dict.items())
    )
    return f"""{self.__class__.__name__}:
    {params}
    """
def snake_to_camel(word: str, publicize: bool = True):
    """Convert a snake_case identifier to CamelCase.

    Parameters
    ----------
    word
        The snake_case word to convert.
    publicize
        If True (default), strip leading underscores before converting.
    """
    if publicize:
        word = word.lstrip("_")
    pieces = word.split("_")
    # Empty segments (from consecutive/leading underscores) stay underscores.
    return "".join(piece.capitalize() if piece else "_" for piece in pieces)
def camel_to_snake(word: str, depublicize: bool = False):
    """Convert a CamelCase identifier to snake_case.

    Parameters
    ----------
    word
        The CamelCase word to convert.
    depublicize
        If True, keep the leading underscore produced by a leading capital;
        if False (default), strip leading underscores.
    """
    chars = []
    for ch in word:
        chars.append("_" + ch.lower() if ch.isupper() else ch)
    converted = "".join(chars)
    return converted if depublicize else converted.lstrip("_")
def get_all_subclasses(cls):
    """Recursively collect every (direct and indirect) subclass of *cls*.

    Returns the subclasses in depth-first, pre-order (each class before its
    own subclasses).
    """
    found = []
    for sub in cls.__subclasses__():
        found += [sub, *get_all_subclasses(sub)]
    return found
class OutputStruct(StructWrapper, metaclass=ABCMeta):
    """Base class for any class that wraps a C struct meant to be output from a C function."""

    _meta = True  # "meta" classes are excluded from _implementations()
    _fields_ = []
    _global_params = None
    # Names of constructor inputs every output struct must receive (see __init__).
    _inputs = ("user_params", "cosmo_params", "_random_seed")
    _filter_params = ["external_table_path", "wisdoms_path"]
    # Fields whose backing arrays are allocated by C rather than by Python.
    _c_based_pointers = ()
    _c_compute_function = None
    # Two-way mapping between numpy dtype names and C pointer declarations.
    _TYPEMAP = bidict({"float32": "float *", "float64": "double *", "int32": "int *"})
def __init__(self, *, random_seed=None, dummy=False, initial=False, **kwargs):
    """
    Base type for output structures from C functions.

    Parameters
    ----------
    random_seed
        Seed associated with the output.
    dummy
        Specify this as a dummy struct, in which no arrays are to be
        initialized or computed.
    initial
        Specify this as an initial struct, where arrays are to be
        initialized, but do not need to be computed to pass into another
        struct's compute().
    """
    super().__init__()
    # Split the package version into "major.minor" and the remaining patch part.
    self.version = ".".join(__version__.split(".")[:2])
    self.patch_version = ".".join(__version__.split(".")[2:])
    self._paths = []
    self._random_seed = random_seed
    # Every required input (see _inputs) must arrive via kwargs unless a
    # subclass has already set it on the instance.
    for k in self._inputs:
        if k not in self.__dict__:
            try:
                setattr(self, k, kwargs.pop(k))
            except KeyError:
                raise KeyError(
                    f"{self.__class__.__name__} requires the keyword argument {k}"
                )
    # Leftover kwargs are not fatal, but the caller probably made a mistake.
    if kwargs:
        warnings.warn(
            f"{self.__class__.__name__} received the following unexpected "
            f"arguments: {list(kwargs.keys())}"
        )
    self.dummy = dummy
    self.initial = initial
    # Shapes of Python-allocated arrays, plus per-array bookkeeping state
    # (C-allocated pointers get state entries too).
    self._array_structure = self._get_box_structures()
    self._array_state = {k: ArrayState() for k in self._array_structure}
    self._array_state.update({k: ArrayState() for k in self._c_based_pointers})
    # Every declared array must correspond to a pointer field in the C struct.
    for k in self._array_structure:
        if k not in self.pointer_fields:
            raise TypeError(f"Key {k} in {self} not a defined pointer field in C.")
@property
def path(self) -> Union[None, Path]:
    """The path to an on-disk version of this object, if one still exists.

    Returns
    -------
    The first recorded path that still exists on disk, or ``None`` if this
    object was never written or every written copy has been removed.

    Notes
    -----
    Fixed: the log message was missing its ``f`` prefix, so ``{self}`` was
    logged literally instead of being interpolated. The return annotation
    was also corrected from ``Tuple[None, Path]`` (a 2-tuple type) to
    ``Union[None, Path]``.
    """
    if not self._paths:
        return None
    for pth in self._paths:
        if pth.exists():
            return pth
    logger.info(f"All paths that defined {self} have been deleted on disk.")
    return None
@abstractmethod
def _get_box_structures(self) -> Dict[str, Union[Dict, Tuple[int]]]:
    """Return a dictionary of names mapping to shapes for each array in the struct.

    The reason this is a function, not a simple attribute, is that we may need to
    decide on what arrays need to be initialized based on the inputs (eg. if USE_2LPT
    is True or False).

    Each actual OutputStruct subclass needs to implement this. Note that the arrays
    are not actually initialized here -- that's done automatically by :func:`_init_arrays`
    using this information. This function means that the names of the actually required
    arrays can be accessed without doing any actual initialization.

    Note also that this only contains arrays allocated *by Python* not C. Arrays
    allocated by C are specified in :func:`_c_shape`.
    """
    # Abstract: subclasses must override.
    pass
def _c_shape(self, cstruct) -> Dict[str, Tuple[int]]:
    """Return a dictionary of field: shape for arrays allocated within C.

    Base implementation: no C-allocated arrays. Subclasses that declare
    ``_c_based_pointers`` presumably override this — confirm per subclass.
    """
    return {}
@classmethod
def _implementations(cls):
    """Return every concrete (non-meta) subclass of this struct type."""
    return [klass for klass in get_all_subclasses(cls) if not klass._meta]
def _init_arrays(self):
for k, state in self._array_state.items():
if k == "lowres_density":
logger.debug("THINKING ABOUT INITING LOWRES_DENSITY")
logger.debug(state.initialized, state.computed_in_mem, state.on_disk)
# Don't initialize C-based pointers or | |
if truth is not None and xi == 0: cplt.legend()
if xi == 0: cplt.set_ylabel(col_to_name[yc], fontsize=font)
if yi == 0: cplt.set_title(col_to_name[xc], fontsize=font)
if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
def plot_all_features_hist_by_component(dfs, save_path=None, font=DEFAULT_FONT):
    # Grid of histograms: one row per entry of `dfs` (name -> DataFrame), one
    # column per feature in col_to_name. Mvir and tSZ are shown in log10.
    nrows = len(dfs)
    col_to_name = {"rvir": "virial radius", "Mvir": "log_10(virial mass)", "redshift": "redshift", "tSZ": "log_10(tSZ)"}
    nc = len(col_to_name)
    #f = plt.figure(figsize=(4 * ro, 4 * nc))
    fig, axes = plt.subplots(nrows=nrows, ncols=nc, figsize=(3 * nc, 3 * nrows))
    for row_i, k in enumerate(dfs.keys()):
        df = dfs[k]
        df = df.reindex().set_index('cutout_id')
        # Convert to log10 via natural log ratio (np.log(x)/np.log(10)).
        df['Mvir'] = df['Mvir'].map(np.log) / np.log(10)
        df['tSZ'] = df['tSZ'].map(np.log) / np.log(10)
        # All plotted feature columns must have the same non-null count.
        assert all([df[list(col_to_name.keys())[i]].count() == df[list(col_to_name.keys())[i-1]].count() for i in range(1, len(col_to_name))])
        print("Size is %d"%df[list(col_to_name.keys())[0]].count())
        #outliers_idx = df['tSZ'].sort_values(inplace=False).dropna().tail(2).index
        #print("There are two outliers for tSZ: {}. For readability, removing these two clusters.".format(df['tSZ'].reindex(outliers_idx)))
        #df = df.reindex(df.index.difference(outliers_idx))
        assert all([c in df.columns for c in col_to_name.keys()])
        for xi, xc in enumerate(col_to_name.keys()):
            #each column share the same x
            axes[row_i, xi].hist(df[xc])
            #axes[row_i, xi].xlabel("hist of %s" % (col_to_name[xc]), fontsize=int(font / 1))
            if xi == 0: axes[row_i, xi].set_ylabel(k, fontsize=font)
            if row_i == 0: axes[row_i, xi].set_title(col_to_name[xc], fontsize=font)
    # NOTE(review): original indentation was ambiguous; saving once after all
    # rows are drawn is the intended behaviour.
    if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
def _make_bins(x, log=False, nbins=50):
if isinstance(x, list):
return _make_bins(np.concatenate(x), log=log, nbins=nbins)
#ipdb.set_trace()
x = x[~np.isnan(x)]
v = np.log10(x) if log else x
bins = np.linspace(np.min(v), np.max(v), nbins)
bins = np.power(10, bins) if log else bins
return bins
def _hist2d(ax, x, y, nbins=50, logx=False, logy=False):
    """Draw a log-count 2D histogram of (x, y) on `ax`; return the bin edges.

    NaNs are dropped inside `_make_bins`; empty cells become log(0) = -inf,
    which pcolormesh leaves blank (matching the original behaviour).
    """
    xedges = _make_bins(x, logx, nbins)
    yedges = _make_bins(y, logy, nbins)
    counts, _, _ = np.histogram2d(x, y, bins=(xedges, yedges))
    ax.pcolormesh(xedges, yedges, np.log(counts).T, cmap='Greys')
    if logx:
        ax.set_xscale("log")
    if logy:
        ax.set_yscale("log")
    return xedges, yedges
def _need_log(f):
return f in {"Mvir", "tSZ"}
def _mid(bins):
if bins[1] - bins[0] == bins[2] - bins[1]:
return 0.5 * bins[1:] + 0.5 * bins[:-1]
else:
bins = np.log10(bins)
return np.power(10, 0.5 * bins[1:] + 0.5 * bins[:-1])
def plot_all_features_corr_by_component_fullset_hist2d_proportion(df, idxs, save_path=None, font=DEFAULT_FONT, min_cutouts_per_pixel=1, nbins=50, pairs=None):
    # For each subset (column of panels) and each feature pair (row of panels),
    # plot the percentage of the full sample that belongs to the subset in each
    # 2D bin, masking bins with fewer than `min_cutouts_per_pixel` objects.
    # idxs: dict mapping subset name -> boolean index aligned with df.
    assert isinstance(idxs, dict)
    plt.rcParams.update({'font.size': font})
    df = df.reindex().dropna(subset=['redshift'])
    ncol = len(idxs)
    col_to_name = {"rvir": "Virial Radius (Mpc)",
                   "angle":"Angular Size (arcmin)",
                   "Mvir": "Virial Mass ($M_\\odot$)",
                   "redshift": "Redshift",
                   "tSZ": "tSZ (arcmin^2)"}
    # All plotted feature columns must have the same non-null count.
    assert all([df[list(col_to_name.keys())[i]].count() == df[list(col_to_name.keys())[i - 1]].count() for i in
                range(1, len(col_to_name))])
    assert all([c in df.columns for c in col_to_name.keys()])
    #assert all([c in df.columns for c in show])
    # Default: every unordered pair of features.
    if pairs is None:
        pairs = [(list(col_to_name.keys())[i], list(col_to_name.keys())[j]) for i in range(len(col_to_name)) for j in range(i+1, len(col_to_name))]
    nrow = len(pairs)
    #f = plt.figure(figsize=(4 * ro, 4 * nc))
    fig, axes = plt.subplots(nrows=nrow, ncols=ncol, figsize=(6 * ncol, 5 * nrow))
    """
    grid = ImageGrid(fig, 111,
                     nrows_ncols=(3, nc),
                     axes_pad=0.1,
                     share_all=True,
                     cbar_location="right",
                     cbar_size="4%",
                     cbar_pad=0.1,
                     cbar_mode='edge')
    """
    #for col_i, k in enumerate(thresholds.keys()):
    for col_i, subset in enumerate(sorted(idxs)):
        tdf = df[idxs[subset]]
        for xi, pair in enumerate(pairs):
            # axes is 1-D when there is a single subset column.
            cplt = axes[xi, col_i] if ncol > 1 else axes[xi]
            xf, yf = pair
            logx = xf in {"Mvir", "tSZ"}
            logy = yf in {"Mvir", "tSZ"}
            # Bins are computed from the FULL sample so all columns share them.
            xbins = _make_bins(df[xf], logx, nbins=nbins)
            ybins = _make_bins(df[yf], logy, nbins=nbins)
            ccounts, _, _ = np.histogram2d(tdf[xf], tdf[yf], bins=(xbins, ybins))
            totalcounts, _, _ = np.histogram2d(df[xf], df[yf], bins=(xbins, ybins))
            # Zeroing under-populated cells makes the ratio below inf/NaN there,
            # which pcolormesh leaves blank.
            totalcounts[totalcounts < min_cutouts_per_pixel] = 0.
            sc = cplt.pcolormesh(_mid(xbins), _mid(ybins), ((ccounts * 100.0) / totalcounts).T, cmap='Greens', vmin=0., vmax=100.)
            cntr = cplt.contour(_mid(xbins), _mid(ybins), totalcounts.T, extend=[xbins.min(), xbins.max(), ybins.min(), ybins.max()], levels=[min_cutouts_per_pixel-1], colors='black')
            if logx: cplt.set_xscale("log")
            if logy: cplt.set_yscale("log")
            if xi == 0: cplt.set_title(subset)#, fontsize=font)
            if _need_log(xf): cplt.set_xscale("log")
            if _need_log(yf): cplt.set_yscale("log")
            if yf == 'tSZ': cplt.set_ylim((1e-10, 1e-1))
            # Mark the 2e14 mass threshold on whichever axis carries Mvir.
            if yf == 'Mvir' or xf == 'Mvir':
                getattr(cplt, 'axhline' if yf == 'Mvir' else 'axvline')(2e14, color='black', linestyle='--')
            cplt.set_xlabel(col_to_name[xf])#, fontsize=font)
            cplt.set_ylabel(col_to_name[yf])#, fontsize=font)
    # Shared colorbar for the last-drawn mesh (all share vmin/vmax 0..100).
    fig.tight_layout()
    fig.subplots_adjust(right=0.9)
    cbar_ax = fig.add_axes([0.95, 0.1, 0.02, 0.8])
    cbar = fig.colorbar(sc, cax=cbar_ax)
    cbar.ax.set_title('% Positive', fontsize=font - 4)
    if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
    plt.show(block=True)
def plot_completeness(df, methods=['CNN', 'MF', 'EnsemblePROD'],
                      save_path=None, font=DEFAULT_FONT,
                      col='Mvir', q0=0.2, q1=1.):
    """Plot completeness (recall) of each detection method versus `col`.

    Parameters
    ----------
    df : DataFrame with `col`, 'redshift', 'Mvir' and boolean
        '<method>_pred' columns for every entry of `methods`.
    methods : detection methods to plot.
    save_path : if given, the figure is also saved to this path.
    font : base font size.
    col : 'Mvir' or 'redshift' -- quantity on the x-axis.
    q0, q1 : quantiles of `col` delimiting the plotted range.

    Returns
    -------
    dict mapping curve label -> per-bin completeness Series.

    Notes
    -----
    Fixes relative to the original: the `curves` dict is now built from the
    `methods` parameter instead of a hard-coded method list, the unused
    `matplotlib.ticker` import and dead `y_labels` assignments were removed,
    and the "Re call" typo in the printed summary was corrected.
    """
    colors = CB_color_cycle.copy()
    col_name = {"Mvir": "Mass", "redshift": "Redshift"}[col]
    plt.rcParams.update({'font.size': font})
    all_vals = df[col].dropna()
    # Restrict to the [q0, q1] quantile range of the plotted quantity.
    mmin, mmax = all_vals.quantile(q0), all_vals.quantile(q1)
    df = df[(df[col] > mmin) & (df[col] < mmax)]
    # Mass bins are log-spaced; redshift bins are linear.
    if col == 'Mvir':
        all_thres = np.logspace(np.log10(mmin), np.log10(mmax), 20)
    else:
        all_thres = np.linspace(mmin, mmax, 20)
    print(all_thres)
    # Count of values falling in each bin [thres[i], thres[i+1]).
    get_cnt = lambda vals, thres=all_thres: pd.Series({m: vals[(vals >= m)&(vals <thres[i+1])].count() for i,m in enumerate(thres[:-1])})
    all_vals = df[col].dropna()
    # Secondary sample restricted to the "interesting" regime.
    df2 = df[(df['redshift'] > 0.25) if col == 'Mvir' else (df['Mvir'] > 2e14)]
    from matplotlib import gridspec
    fig = plt.figure(figsize=(12, 6 * (1.3 if col == 'Mvir' else 1.)))
    if col == 'Mvir':
        # Mass plots get a second panel with method/ensemble ratios.
        gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
        ax0 = plt.subplot(gs[0])
    suffix = " (%s)"%("redshift>0.25" if col == 'Mvir' else "mass>2e14")
    lns, legends = [], []
    # Completeness per bin = detected positives / all positives.
    curves = {method: get_cnt(df[df['%s_pred'%method]][col].dropna()) / get_cnt(df[col].dropna()) for method in methods}
    curves.update({'%s%s'%(method, suffix): get_cnt(df2[df2['%s_pred'%method]][col].dropna()) / get_cnt(df2[col].dropna()) for method in methods})
    for method in methods:
        color = colors.pop()
        if col == 'Mvir':
            lns.extend(plt.plot(_mid(all_thres), curves[method], label=method, color=color))
            legends.append(method)
        lns.extend(plt.plot(_mid(all_thres), curves['%s%s'%(method, suffix)], label='%s%s'%(method, suffix), linestyle='dashed', color=color))
        legends.append('%s%s'%(method, suffix))
    plt.vlines(2e14 if col == 'Mvir' else 0.25, 0, 1, label='Threshold', color='red')
    plt.ylabel("Completeness (Recall)")
    plt.hlines(1, 0, 2e15 if col == 'Mvir' else 2, alpha=0.2)
    plt.xlabel(col_name)
    # Twin axis: histogram of the underlying object counts per bin.
    ax2 = plt.twinx()
    plt.hist(all_vals, bins=all_thres, log=True, alpha=0.4)
    plt.ylabel("Counts of Objects")
    plt.legend(lns, legends, loc='right', prop={'size': 12})
    #plt.title('Completeness (Recall) vs %s' % col_name)
    print('Completeness (Recall) vs %s' % col_name)
    if col == 'Mvir':
        plt.xscale('log')
        # Lower panel: ratio of each method to the ensemble, high-mass end only.
        cplt = plt.subplot(gs[1], sharex=ax0)
        colors = CB_color_cycle.copy()
        assert methods == ['CNN', 'MF', 'EnsemblePROD'], "This is only for the default methods"
        lns2, legends2 = [], []
        for method in methods[:-1]:
            color = colors.pop()
            lns2.extend(cplt.plot(_mid(all_thres)[-12:], (curves[method] / curves['EnsemblePROD']).iloc[-12:], label='%s/EnsemblePROD'%method, color=color))
            lns2.extend(cplt.plot(_mid(all_thres)[-12:], (curves['%s%s'%(method, suffix)] / curves['EnsemblePROD']).iloc[-12:], label='%s/EnsemblePROD%s'%(method, suffix), color=color, linestyle='dashed'))
            legends2.extend(["%s/EnsemblePROD"%method, '%s/EnsemblePROD%s'%(method, suffix)])
        plt.setp(ax0.get_xticklabels(), visible=True)
        plt.legend(lns2, legends2, loc='upper left', fontsize=10)
        plt.subplots_adjust(hspace=.0)
        cplt.set_xscale('log')
        cplt.xaxis.set_label_position('bottom')
        cplt.set_xlabel(col_name + ' ($M_\\odot$)')
    if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
    plt.show(block=True)
    return curves
def plot_all_hists_by_features(df, thresholds, save_path=None, font=DEFAULT_FONT, fullset='truth', nbins=10):
    # Grid of overlaid histograms: one row per method in `thresholds`
    # (name -> decision threshold), one column per feature. Each panel shows
    # the full set (all truth positives, or all predictions) with the true
    # positives drawn on top.
    print("the height of the black bar is the all positives (TP+FN), so the unmasked portion is TP")
    df = df.reindex()
    assert fullset in{"pred", "truth"}
    nrows = len(thresholds)
    col_to_name = {"rvir": "virial radius", "Mvir": "log_10(virial mass)", "redshift": "redshift", "tSZ": "log_10(tSZ)"}
    # All plotted feature columns must have the same non-null count.
    assert all([df[list(col_to_name.keys())[i]].count() == df[list(col_to_name.keys())[i - 1]].count() for i in
                range(1, len(col_to_name))])
    assert all([c in df.columns for c in col_to_name.keys()])
    ncols = len(col_to_name)
    #f = plt.figure(figsize=(4 * ro, 4 * nc))
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(5 * ncols, 5 * nrows))
    for row_i, k in enumerate(thresholds.keys()):
        #print("Size of dataframe is %d"%df[list(col_to_name.keys())[0]].count())
        pred_y = df['pred_%s'%k] > thresholds[k]
        TP_df = df[(pred_y) & df['y']]
        for xi, feature in enumerate(col_to_name.keys()):
            cplt = axes[row_i, xi]
            colors = CB_color_cycle.copy()
            #each column share the same x
            bins = _make_bins(df[feature], _need_log(feature), nbins=nbins)
            # NOTE(review): labels 'FP'/'FN' name the portion of the bar that
            # is NOT covered by the 'TP' histogram drawn over it.
            if fullset == 'pred':
                cplt.hist(df[pred_y][feature], bins=bins, color=colors.pop(), label='FP')
            else:
                cplt.hist(df[df['y']][feature], bins=bins, color=colors.pop(), label='FN')
            cplt.hist(TP_df[feature], bins=bins, color=colors.pop(), label='TP')
            #_, bins, _ = cplt.hist(df[feature], color= colors.pop(), label='TP')
            #cplt.hist(FN_df[feature], bins=bins, color=colors.pop(), label='FN')
            if xi == 0 and row_i == 0: cplt.legend()
            #axes[ xi, col_i].scatter(TP_df[xf], TP_df[yf], color=colors.pop(), label='TP', alpha=0.5)
            #axes[xi, col_i].scatter(FN_df[xf], FN_df[yf], color=colors.pop(), label='FN', alpha=0.5)
            #if xi == 0 and col_i == 0: axes[xi, col_i].legend()
            if row_i == 0: cplt.set_title(col_to_name[feature], fontsize=font)
            if xi == 0: cplt.set_ylabel(k, fontsize=font)
            if _need_log(feature): cplt.set_xscale("log")
    if save_path is not None: plt.savefig(save_path, dpi=500, bbox_inches="tight")
def plot_all_hists_by_features_stack_components(df, thresholds, save_path=None, font=DEFAULT_FONT, nbins=6):
    # One panel per feature; grouped histograms of the samples each method
    # detects (methods side by side per bin). Requires df to contain only
    # positive samples (asserted below).
    # NOTE(review): save_path is accepted but never used here -- no savefig call.
    print("Where the bars sit is the middle point of the interval")
    assert df['y'].sum() == len(df)
    print("These are all positive samples")
    df = df.reindex()
    #df['Mvir'] = df['Mvir'].map(np.log) / np.log(10)
    #df['tSZ'] = df['tSZ'].map(np.log) / np.log(10)
    #bins = {"Mvir": np.linspace}
    #nrows = len(thresholds)
    nrows = 1
    col_to_name = {"rvir": "virial radius", "Mvir": "virial mass", "redshift": "redshift", "tSZ": "tSZ"}
    # All plotted feature columns must have the same non-null count.
    assert all([df[list(col_to_name.keys())[i]].count() == df[list(col_to_name.keys())[i - 1]].count() for i in
                range(1, len(col_to_name))])
    assert all([c in df.columns for c in col_to_name.keys()])
    ncols = len(col_to_name)
    #f = plt.figure(figsize=(4 * ro, 4 * nc))
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(5 * ncols, 5 * nrows))
    for xi, feature in enumerate(col_to_name.keys()):
        cplt = axes[xi]
        # One series per method: the positives that method detects.
        all_xs = [df[df['pred_%s'%k] > thresholds[k]][feature] for k in thresholds.keys()]
        bins = _make_bins(all_xs, _need_log(feature), nbins=nbins)
        cplt.hist(all_xs, bins=bins, label=list(thresholds.keys()), color=CB_color_cycle.copy())
        #print("Bins:{}".format(bins))
        cplt.legend()
        cplt.set_title(col_to_name[feature], fontsize=font)
        if _need_log(feature): cplt.set_xscale("log")
    return
def _translate_one(x):
if x == | |
await r.set('c', 3)
assert await r.mget('a', 'other', 'b', 'c') == [b('1'), None, b('2'), b('3')]
@pytest.mark.asyncio
async def test_mset(self, r):
    # MSET with a dict argument sets several keys at once; verify via MGET.
    await r.flushdb()
    d = {'a': b('1'), 'b': b('2'), 'c': b('3')}
    assert await r.mset(d)
    for k, v in iteritems(d):
        assert await r.mget(k) == [v]
@pytest.mark.asyncio
async def test_mset_kwargs(self, r):
    # MSET also accepts the mapping as keyword arguments.
    await r.flushdb()
    d = {'a': b('1'), 'b': b('2'), 'c': b('3')}
    assert await r.mset(**d)
    for k, v in iteritems(d):
        assert await r.get(k) == v
@pytest.mark.asyncio
async def test_msetnx(self, r):
    # MSETNX is all-or-nothing: a second call touching an existing key sets nothing.
    await r.flushdb()
    d = {'a': b('1'), 'b': b('2'), 'c': b('3')}
    assert await r.msetnx(d)
    d2 = {'a': b('x'), 'd': b('4')}
    assert not await r.msetnx(d2)
    for k, v in iteritems(d):
        assert await r.get(k) == v
    assert await r.get('d') is None
@pytest.mark.asyncio
async def test_msetnx_kwargs(self, r):
    # Same as test_msetnx, but passing mappings as keyword arguments.
    await r.flushdb()
    d = {'a': b('1'), 'b': b('2'), 'c': b('3')}
    assert await r.msetnx(**d)
    d2 = {'a': b('x'), 'd': b('4')}
    assert not await r.msetnx(**d2)
    for k, v in iteritems(d):
        assert await r.get(k) == v
    assert await r.get('d') is None
@pytest.mark.asyncio
async def test_pexpire(self, r):
    # PEXPIRE fails on a missing key, sets a millisecond TTL on an existing
    # one, and PERSIST removes the TTL again.
    await r.flushdb()
    assert not await r.pexpire('a', 60000)
    await r.set('a', 'foo')
    assert await r.pexpire('a', 60000)
    assert 0 < await r.pttl('a') <= 60000
    assert await r.persist('a')
    # redis-py tests seemed to be for older version of redis?
    # redis-2.8+ returns -1 if key exists but is non-expiring: http://redis.io/commands/pttl
    assert await r.pttl('a') == -1
@pytest.mark.asyncio
async def test_pexpireat_datetime(self, r):
    # PEXPIREAT accepts an absolute datetime (server time + 1 minute here).
    await r.flushdb()
    expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1)
    await r.set('a', 'foo')
    assert await r.pexpireat('a', expire_at)
    assert 0 < await r.pttl('a') <= 61000
@pytest.mark.asyncio
async def test_pexpireat_no_key(self, r):
    # PEXPIREAT on a non-existent key returns falsy.
    await r.flushdb()
    expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1)
    assert not await r.pexpireat('a', expire_at)
@pytest.mark.asyncio
async def test_pexpireat_unixtime(self, r):
    # PEXPIREAT also accepts a unix timestamp in milliseconds.
    await r.flushdb()
    expire_at = await redis_server_time(r) + datetime.timedelta(minutes=1)
    await r.set('a', 'foo')
    expire_at_seconds = int(time.mktime(expire_at.timetuple())) * 1000
    assert await r.pexpireat('a', expire_at_seconds)
    assert 0 < await r.pttl('a') <= 61000
@pytest.mark.asyncio
async def test_psetex(self, r):
    # PSETEX sets value and millisecond TTL in one call.
    await r.flushdb()
    assert await r.psetex('a', 1000, 'value')
    assert await r.get('a') == b('value')
    assert 0 < await r.pttl('a') <= 1000
@pytest.mark.asyncio
async def test_psetex_timedelta(self, r):
    # PSETEX accepts a timedelta for the TTL.
    await r.flushdb()
    expire_at = datetime.timedelta(milliseconds=1000)
    assert await r.psetex('a', expire_at, 'value')
    assert await r.get('a') == b('value')
    assert 0 < await r.pttl('a') <= 1000
@pytest.mark.asyncio
async def test_randomkey(self, r):
    # RANDOMKEY returns None on an empty db, else one of the existing keys.
    await r.flushdb()
    assert await r.randomkey() is None
    for key in ('a', 'b', 'c'):
        await r.set(key, 1)
    assert await r.randomkey() in (b('a'), b('b'), b('c'))
@pytest.mark.asyncio
async def test_rename(self, r):
    # RENAME moves the value; renaming a key onto itself or a missing key
    # raises ResponseError with a specific message.
    await r.flushdb()
    await r.set('a', '1')
    assert await r.rename('a', 'b')
    assert await r.get('a') is None
    assert await r.get('b') == b('1')
    with pytest.raises(ResponseError) as ex:
        await r.rename("foo", "foo")
    assert str(ex.value).startswith("source and destination objects are the same")
    assert await r.get("foo") is None
    with pytest.raises(ResponseError) as ex:
        await r.rename("foo", "bar")
    assert str(ex.value).startswith("no such key")
@pytest.mark.asyncio
async def test_renamenx(self, r):
    # RENAMENX refuses to overwrite an existing destination key.
    await r.flushdb()
    await r.set('a', '1')
    await r.set('b', '2')
    assert not await r.renamenx('a', 'b')
    assert await r.get('a') == b('1')
    assert await r.get('b') == b('2')
    assert await r.renamenx('a', 'c')
    assert await r.get('c') == b('1')
@pytest.mark.asyncio
async def test_set_nx(self, r):
    # SET ... NX only sets when the key does not yet exist.
    await r.flushdb()
    assert await r.set('a', '1', nx=True)
    assert not await r.set('a', '2', nx=True)
    assert await r.get('a') == b('1')
@pytest.mark.asyncio
async def test_set_xx(self, r):
    # SET ... XX only sets when the key already exists.
    await r.flushdb()
    assert not await r.set('a', '1', xx=True)
    assert await r.get('a') is None
    await r.set('a', 'bar')
    assert await r.set('a', '2', xx=True)
    assert await r.get('a') == b('2')
@pytest.mark.asyncio
async def test_set_px(self, r):
    # SET ... PX applies a millisecond TTL.
    await r.flushdb()
    assert await r.set('a', '1', px=10000)
    assert await r.get('a') == b('1')
    assert 0 < await r.pttl('a') <= 10000
    assert 0 < await r.ttl('a') <= 10
@pytest.mark.asyncio
async def test_set_px_timedelta(self, r):
    # SET ... PX accepts a timedelta.
    await r.flushdb()
    expire_at = datetime.timedelta(milliseconds=1000)
    assert await r.set('a', '1', px=expire_at)
    assert 0 < await r.pttl('a') <= 1000
    assert 0 < await r.ttl('a') <= 1
@pytest.mark.asyncio
async def test_set_ex(self, r):
    # SET ... EX applies a second-resolution TTL.
    await r.flushdb()
    assert await r.set('a', '1', ex=10)
    assert 0 < await r.ttl('a') <= 10
@pytest.mark.asyncio
async def test_set_ex_timedelta(self, r):
    # SET ... EX accepts a timedelta.
    await r.flushdb()
    expire_at = datetime.timedelta(seconds=60)
    assert await r.set('a', '1', ex=expire_at)
    assert 0 < await r.ttl('a') <= 60
@pytest.mark.asyncio
async def test_set_multipleoptions(self, r):
    # XX and PX can be combined in a single SET.
    await r.flushdb()
    await r.set('a', 'val')
    assert await r.set('a', '1', xx=True, px=10000)
    assert 0 < await r.ttl('a') <= 10
@pytest.mark.asyncio
async def test_setex(self, r):
    # SETEX sets value and second-resolution TTL in one call.
    await r.flushdb()
    assert await r.setex('a', 60, '1')
    assert await r.get('a') == b('1')
    assert 0 < await r.ttl('a') <= 60
@pytest.mark.asyncio
async def test_setnx(self, r):
    # SETNX only sets when the key is absent.
    await r.flushdb()
    assert await r.setnx('a', '1')
    assert await r.get('a') == b('1')
    assert not await r.setnx('a', '2')
    assert await r.get('a') == b('1')
@pytest.mark.asyncio
async def test_setrange(self, r):
    # SETRANGE zero-pads when writing past the end, and overwrites in place
    # otherwise, returning the resulting string length.
    # NOTE(review): 'abcasync defghijh' looks like a botched "async def"
    # find/replace of an original 'abcdefghijh' literal, but the assertions
    # below are self-consistent with it (length 17, overwrite at offset 6).
    await r.flushdb()
    assert await r.setrange('a', 5, 'foo') == 8
    assert await r.get('a') == b('\0\0\0\0\0foo')
    await r.set('a', 'abcasync defghijh')
    assert await r.setrange('a', 6, '12345') == 17
    assert await r.get('a') == b('abcasy12345fghijh')
@pytest.mark.asyncio
async def test_strlen(self, r):
    # STRLEN returns the byte length of the stored string.
    await r.flushdb()
    await r.set('a', 'foo')
    assert await r.strlen('a') == 3
@pytest.mark.asyncio
async def test_substr(self, r):
    # SUBSTR supports open-ended, bounded, and negative end indices (inclusive).
    await r.flushdb()
    await r.set('a', '0123456789')
    assert await r.substr('a', 0) == b('0123456789')
    assert await r.substr('a', 2) == b('23456789')
    assert await r.substr('a', 3, 5) == b('345')
    assert await r.substr('a', 3, -2) == b('345678')
@pytest.mark.asyncio
async def test_type(self, r):
    # TYPE reports none/string/list/set/zset for the corresponding value kinds.
    await r.flushdb()
    assert await r.type('a') == b('none')
    await r.set('a', '1')
    assert await r.type('a') == b('string')
    await r.delete('a')
    await r.lpush('a', '1')
    assert await r.type('a') == b('list')
    await r.delete('a')
    await r.sadd('a', '1')
    assert await r.type('a') == b('set')
    await r.delete('a')
    await r.zadd('a', **{'1': 1})
    assert await r.type('a') == b('zset')
# LIST COMMANDS
@pytest.mark.asyncio
async def test_blpop(self, r):
    # BLPOP pops from the left of the first non-empty listed key ({foo} hash
    # tags keep keys in one cluster slot); returns None on timeout.
    await r.flushdb()
    await r.rpush('a{foo}', '1', '2')
    await r.rpush('b{foo}', '3', '4')
    assert await r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b('b{foo}'), b('3'))
    assert await r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b('b{foo}'), b('4'))
    assert await r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b('a{foo}'), b('1'))
    assert await r.blpop(['b{foo}', 'a{foo}'], timeout=1) == (b('a{foo}'), b('2'))
    assert await r.blpop(['b{foo}', 'a{foo}'], timeout=1) is None
    await r.rpush('c{foo}', '1')
    assert await r.blpop('c{foo}', timeout=1) == (b('c{foo}'), b('1'))
@pytest.mark.asyncio
async def test_brpop(self, r):
    # BRPOP: like BLPOP but pops from the right end.
    await r.flushdb()
    await r.rpush('a{foo}', '1', '2')
    await r.rpush('b{foo}', '3', '4')
    assert await r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b('b{foo}'), b('4'))
    assert await r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b('b{foo}'), b('3'))
    assert await r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b('a{foo}'), b('2'))
    assert await r.brpop(['b{foo}', 'a{foo}'], timeout=1) == (b('a{foo}'), b('1'))
    assert await r.brpop(['b{foo}', 'a{foo}'], timeout=1) is None
    await r.rpush('c{foo}', '1')
    assert await r.brpop('c{foo}', timeout=1) == (b('c{foo}'), b('1'))
@pytest.mark.asyncio
async def test_brpoplpush(self, r):
    # BRPOPLPUSH moves the right-most element of the source to the left of
    # the destination; returns None once the source is empty (with timeout).
    await r.flushdb()
    await r.rpush('a{foo}', '1', '2')
    await r.rpush('b{foo}', '3', '4')
    assert await r.brpoplpush('a{foo}', 'b{foo}') == b('2')
    assert await r.brpoplpush('a{foo}', 'b{foo}') == b('1')
    assert await r.brpoplpush('a{foo}', 'b{foo}', timeout=1) is None
    assert await r.lrange('a{foo}', 0, -1) == []
    assert await r.lrange('b{foo}', 0, -1) == [b('1'), b('2'), b('3'), b('4')]
@pytest.mark.asyncio
async def test_brpoplpush_empty_string(self, r):
    # An empty-string element is a valid value, not a "no data" signal.
    await r.flushdb()
    await r.rpush('a', '')
    assert await r.brpoplpush('a', 'b') == b('')
@pytest.mark.asyncio
async def test_lindex(self, r):
    # LINDEX retrieves elements by 0-based position.
    await r.flushdb()
    await r.rpush('a', '1', '2', '3')
    assert await r.lindex('a', '0') == b('1')
    assert await r.lindex('a', '1') == b('2')
    assert await r.lindex('a', '2') == b('3')
@pytest.mark.asyncio
async def test_linsert(self, r):
    # LINSERT places a value before/after a pivot and returns the new length.
    await r.flushdb()
    await r.rpush('a', '1', '2', '3')
    assert await r.linsert('a', 'after', '2', '2.5') == 4
    assert await r.lrange('a', 0, -1) == [b('1'), b('2'), b('2.5'), b('3')]
    assert await r.linsert('a', 'before', '2', '1.5') == 5
    assert await r.lrange('a', 0, -1) == \
        [b('1'), b('1.5'), b('2'), b('2.5'), b('3')]
@pytest.mark.asyncio
async def test_llen(self, r):
    # LLEN returns the list length.
    await r.flushdb()
    await r.rpush('a', '1', '2', '3')
    assert await r.llen('a') == 3
@pytest.mark.asyncio
async def test_lpop(self, r):
    # LPOP pops from the head and returns None once the list is empty.
    await r.flushdb()
    await r.rpush('a', '1', '2', '3')
    assert await r.lpop('a') == b('1')
    assert await r.lpop('a') == b('2')
    assert await r.lpop('a') == b('3')
    assert await r.lpop('a') is None
@pytest.mark.asyncio
async def test_lpush(self, r):
    # LPUSH prepends (multiple values allowed) and returns the new length.
    await r.flushdb()
    assert await r.lpush('a', '1') == 1
    assert await r.lpush('a', '2') == 2
    assert await r.lpush('a', '3', '4') == 4
    assert await r.lrange('a', 0, -1) == [b('4'), b('3'), b('2'), b('1')]
@pytest.mark.asyncio
async def test_lpushx(self, r):
    # LPUSHX only pushes onto an already-existing list.
    await r.flushdb()
    assert await r.lpushx('a', '1') == 0
    assert await r.lrange('a', 0, -1) == []
    await r.rpush('a', '1', '2', '3')
    assert await r.lpushx('a', '4') == 4
    assert await r.lrange('a', 0, -1) == [b('4'), b('1'), b('2'), b('3')]
@pytest.mark.asyncio
async def test_lrange(self, r):
    # LRANGE uses inclusive bounds; -1 means "through the last element".
    await r.flushdb()
    await r.rpush('a', '1', '2', '3', '4', '5')
    assert await r.lrange('a', 0, 2) == [b('1'), b('2'), b('3')]
    assert await r.lrange('a', 2, 10) == [b('3'), b('4'), b('5')]
    assert await r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4'), b('5')]
@pytest.mark.asyncio
async def test_lrem(self, r):
await r.flushdb()
await | |
normalization_vector = None
selected_struc = None
normalization_status = -1
no_peak = 0
no_sec_peak_false = 0
no_sec_peak_true = 1
ctx = dash.callback_context
element_id = ctx.triggered[0]['prop_id'].split('.')[0]
# if custom rates given, check input
if element_id == "opt_btn_apply" and norm_option == 'custom_vals':
normalization_status = 1
custom_rates = [ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs]
# if input contains non-digits, prevent update
labels = ["EE", "SS", "II", "MM", "BB", "SI", "IS", "SM", "MS", "ES", "SE", "HH", "HS", "SH", "SB", "BS"]
if None in custom_rates:
return dash.no_update
check_sum_passed = check_sum(ee, ss, ii, mm, bb, si, i_s, sm, ms, es, se, hh, hs, sh, sb, bs)
# if sum of custom rates is one, do normalization
if check_sum_passed:
normalization_vector = dict(zip(labels, custom_rates))
# otherwise prevent update
else:
return dash.no_update
elif element_id == "opt_btn_apply" and norm_option == 'at_db':
normalization_status = 0
elif not element_id == "opt_btn_apply" and data is not None:
sec_peak = data['last_sec_peak']
normalization_status = data['last_norm_stat']
# translate dropdown value into real value
top_opt_val = {'0': 10, '1': 20, '2': 50, '3': 100}
top = top_opt_val[top]
if peak == no_peak:
peak = None
if sec_peak == ['peaking']:
no_sec_peak = no_sec_peak_false # =False
else:
no_sec_peak = no_sec_peak_true # =True
# initialize (structural) data for calculations
if data is None:
selected = [file_list[0], file_list[1]]
if struct_data is not None:
if len(struct_data) > 1:
selected_struc = [struct_data[0], struct_data[1]]
else:
selected_struc = [struct_data[0]]
else:
selected = [file_list[int(f1)], file_list[int(f2)]]
if struct_data is not None:
if len(struct_data) > 1:
selected_struc = [struct_data[int(f3)], struct_data[int(f4)]]
else:
selected_struc = [struct_data[int(f3)]]
new_process = initializeData.initData(file_list, selected, k, peak, top, pca_feature, selected_struc, no_sec_peak)
# calculate top-table
top_k = Processing.getTopKmer(new_process).copy()
kmer = top_k.index
top_k["K-Mer"] = kmer
top_k[""] = ["" for i in range(0, len(top_k))]
top_k = top_k[["", "K-Mer", "Frequency", "File"]]
top_k = top_k.sort_values(by="Frequency", ascending=False)
top_k_table = [
dash_table.DataTable(columns=[{"name": i, "id": i} for i in top_k.columns], data=top_k.to_dict('records'),
style_table={'overflow-x': 'hidden'},
style_cell={'textAlign': 'center'},
export_format="csv",
sort_action='native')]
# calculate MSA
algn1, algn2, f1_name, f2_name = initializeData.getAlignmentData(new_process)
# if columns differ in their length, need to do some adaptions
if (len(algn1) > 1 and len(algn2) > 1) or (len(algn1) <= 1 and len(algn2) <= 1):
if len(algn1) <= 1 and len(algn2) <= 1:
algn1_df = pd.DataFrame(columns=[f1_name], data=['No data to align'])
algn2_df = pd.DataFrame(columns=[f2_name], data=['No data to align'])
else:
algn1_df = pd.DataFrame(columns=[f1_name], data=algn1)
algn2_df = pd.DataFrame(columns=[f2_name], data=algn2)
algn1_df = pd.concat([algn1_df, algn2_df], ignore_index=False, axis=1)
msas = [
dash_table.DataTable(columns=[{"name": i, "id": i} for i in algn1_df.columns],
data=algn1_df.to_dict('records'),
style_table={'overflow-x': 'hidden'},
style_cell={'textAlign': 'center'},
export_format="csv")]
else:
if len(algn1) <= 1:
algn1 = ['No data to align']
algn1_df = pd.DataFrame(columns=[f1_name], data=algn1)
algn2_df = pd.DataFrame(columns=[f2_name], data=algn2)
algn1_df = pd.concat([algn1_df, algn2_df], ignore_index=False, axis=1)
else:
algn2 = ['No data to align']
algn1_df = pd.DataFrame(columns=[f1_name], data=algn1)
algn2_df = pd.DataFrame(columns=[f2_name], data=algn2)
algn1_df = pd.concat([algn1_df, algn2_df], ignore_index=False, axis=1)
msas = [dash_table.DataTable(columns=[{"name": i, "id": i} for i in algn1_df.columns],
data=algn1_df.to_dict('records'),
style_table={'overflow-x': 'hidden'},
style_cell={'textAlign': 'center'},
export_format="csv")]
# calculate scatterplot
scatter = initializeData.getScatterPlot(new_process)
# calculate PCAs
pca_12, file1, file2 = initializeData.getPCA(new_process)
pcas = [pca_12, file1, file2]
seq_len = new_process.getSeqLen()
# calculate RNA-Template(s), dotbracket-string(s), color-vector, color-scale
# and color-domain(s) (highest value in color-vector)
if struct_data is not None:
structure_info = initializeData.getTemplateSecondaryStructure(new_process, normalization_vector,
normalization_status, no_sec_peak)
struct1, struct2, color1, color2, color_domain_max1, color_domain_max2, color_scale = structure_info
if struct1 is not None and struct2 is not None:
templates = [struct1[0], struct2[0]]
dbs = [struct1[1], struct2[1]]
elif struct1 is not None:
templates = [struct1[0]]
dbs = [struct1[1]]
else:
templates = []
dbs = []
else:
templates = None
dbs = None
color1 = None
color2 = None
color_domain_max1 = None
color_domain_max2 = None
color_scale = None
data = {'topK': top_k_table, 'msas': msas, 'scatter': scatter, 'pcas': pcas, 'seqLen': seq_len,
'templates': templates, 'dbs': dbs, 'colors': [color1, color2],
'color_max': [color_domain_max1, color_domain_max2], 'color_scale': color_scale,
'last_sec_peak': sec_peak, 'last_norm_stat': normalization_status}
return [data]
# --------------------------------------- File Dropdown Updater --------------------------------------------------------
@app.callback([
dash.dependencies.Output("file1", "options"),
dash.dependencies.Input("file2", "value"),
])
# returns new option for dropdown based on selection
# f2: second selected file
def updateFile1Dropdown(f2):
return updateFileList(f2, False)
@app.callback([
dash.dependencies.Output("file2", "options"),
dash.dependencies.Input("file1", "value"),
])
# returns new option for dropdown based on selection
# f1: first selected file
def updateFile2Dropdown(f1):
return updateFileList(f1, False)
# disables already selected file in other dropdown
# val: (structural) file
# struct: bool (True= structural file is given)
def updateFileList(val, struct):
if struct and struct_data is not None:
files = struct_data
elif struct and struct_data is None:
return [{"label": "-", "value": "0"}]
else:
files = file_list
option = [
{'label': os.path.basename(files[i]), 'value': str(i)} if not (str(i) == val)
else {'label': os.path.basename(files[i]), 'value': str(i), 'disabled': True}
for i in range(0, len(files))]
return [option]
# --------------------------------------- Structure File Dropdown Updater ----------------------------------------------
@app.callback([
dash.dependencies.Output("file3", "options"),
dash.dependencies.Input("file4", "value"),
])
# returns new option for dropdown based on selection
# f4: second selected structural file
def updateFile4Dropdown(f4):
return updateFileList(f4, True)
@app.callback([
dash.dependencies.Output("file4", "options"),
dash.dependencies.Input("file3", "value"),
])
# returns new option for dropdown based on selection
# f1: first selected structural file
def updateFile3Dropdown(f3):
if struct_data is not None and len(struct_data) > 1:
return updateFileList(f3, True)
else:
raise PreventUpdate
# --------------------------------------- Slider Values Updater --------------------------------------------------------
@app.callback(
[
dash.dependencies.Output("k", "min"),
dash.dependencies.Output("k", "max"),
dash.dependencies.Output("k", "marks"),
dash.dependencies.Output("peak", "min"),
dash.dependencies.Output("peak", "max"),
dash.dependencies.Output("peak", "marks"),
],
[
dash.dependencies.Input('memory', 'modified_timestamp'),
dash.dependencies.State('memory', 'data'),
],
)
# calculates slider ranges (marks)
# fil1/file2: input file
# ts: timestamp when data was modified
# data: storage to share data between callbacks
def updateSliderRange(ts, data):
if ts is None:
raise PreventUpdate
k_p_slider_max = data['seqLen']
k_p_slider_min = 2
k_slider_max = k_p_slider_max - 1
peak_min = 0
# calculation of new slider ranges if files were changed
k_range = markSliderRange(k_p_slider_min, k_slider_max, False)
peak_range = markSliderRange(peak_min, k_p_slider_max, True)
return k_p_slider_min, k_slider_max, k_range, peak_min, k_p_slider_max, peak_range
# ----------------------------------------- Forna-Container Update -----------------------------------------------------
@app.callback(
dash.dependencies.Output('forna', 'sequences'),
dash.dependencies.Output('forna', 'customColors'),
dash.dependencies.Output('s-tab2', 'label'),
dash.dependencies.Output('s-tab2', 'disabled'),
dash.dependencies.Output('forna2', 'sequences'),
dash.dependencies.Output('forna2', 'customColors'),
dash.dependencies.Output('s-tab3', 'label'),
dash.dependencies.Output('s-tab3', 'disabled'),
[dash.dependencies.Input('memory', 'data'),
dash.dependencies.Input('file3', 'value'),
dash.dependencies.Input('file4', 'value'),
]
)
# create RNA Structure Heatmap visualizations
# data: store
# f3: first selected structural file
# f4: second selected structural file
def show_selected_sequences(data, f3, f4):
if data is None:
raise PreventUpdate
template_list = data['templates']
dotbracket_list = data['dbs']
color_domain_max1 = data['color_max'][0]
color_domain_max2 = data['color_max'][1]
# if only one structural file is given, color_domain_max and color_domain_min are not changed
domain_nbr = 2
if color_domain_max1 is None:
color_domain_max1 = 0
domain_nbr = 1
if color_domain_max2 is None:
color_domain_max2 = 0
domain_nbr = 1
color_domain_max = ((color_domain_max1 + color_domain_max2) / domain_nbr)
if data['colors'][0] is not None:
color_vals1 = list(set(data['colors'][0].values()))
if 0 in color_vals1:
color_vals1.remove(0)
color_domain_min1 = min(color_vals1)
else:
color_domain_min1 = 0
if data['colors'][1] is not None:
color_vals2 = list(set(data['colors'][1].values()))
if 0 in color_vals2:
color_vals2.remove(0)
color_domain_min2 = min(color_vals2)
else:
color_domain_min2 = 0
color_domain_min = (color_domain_min1 + color_domain_min2) / domain_nbr
color_range = data['color_scale']
if color_range is None:
# prevents divideByZero error
# has no effect because if scale is None then there is not structural data
color_range_length = 2
else:
color_range_length = len(color_range)
steps = ((color_domain_max - color_domain_min) / (color_range_length - 1))
if steps == 0:
steps = 1
color_domain = [i for i in float_range(color_domain_min, steps, (color_range_length - 1))]
color_domain.append(color_domain_max)
# disable tab for files if no or only one structural file is given
disable_t1 = False
disable_t2 = False
# color-vector
custom_colors = None
custom_colors2 = None
tab1_label = "RNA-Structure Heatmap 1"
tab2_label = "RNA-Structure Heatmap 2"
if struct_data is not None:
color1 = data['colors'][0]
tab1_label = os.path.basename(struct_data[int(f3)]) + " Structure Heatmap"
# create color-vector-object for FornaContainer
custom_colors = {
'domain': color_domain,
'range': color_range,
'colorValues': {
'template1': color1,
}
}
# create sequence-object for FornaContainer
template1 = [{
'sequence': template_list[0],
'structure': dotbracket_list[0],
'options': {'name': 'template1'}
}]
if len(template_list) > 1: # more than one structure file committed
color2 = data['colors'][1]
tab2_label = os.path.basename(struct_data[int(f4)]) + " Structure Heatmap"
custom_colors2 = {
'domain': color_domain,
'range': color_range,
'colorValues': {
'template2': color2,
}
}
template2 = [{
'sequence': template_list[1],
'structure': dotbracket_list[1],
'options': {'name': 'template2'}
}]
else: # if no second structural file is available
template2 = [{
'sequence': "",
'structure': ""
}]
disable_t2 = True
else: # if not structural data is available
template1 = [{
'sequence': "",
'structure': ""
}]
template2 = [{
'sequence': "",
'structure': ""
}]
disable_t1 = True
disable_t2 = True
return template1, custom_colors, tab1_label, | |
# Repository: Merglyn/pycolor
#!/usr/bin/python3 -B
# a few import things
import random
import sys
from sys import stdout
# Version of the pycolor module.
VERSION = '1.2.0'
# CSI (Control Sequence Introducer) prefix shared by every ANSI escape
# sequence built below.
escape = '\033['
## General things about Ansii escape sequences
# 256 color sequence
# foreground \033[38;5;NNNm
# background \033[48;5;NNNm
# where NNN is an integer between 0 and 255, inclusive
#
# 3 sets of colors in the 256 color sequence
# first set is basic color set, from 0-15.
# second set is a cube of distributed colors from 16-231.
# third set is a grayscale set, from 232-255.
#   darkest grey: 232
#   lightest grey: 255
# attribute sequences \033[Nm
# where N is:
#reset = 0
#bold = 1
#dim = 2
#foreground = 3x (0 <= x <= 9, not 8)
#background = 4x (0 <= x <= 9, not 8)
#underline = 4
#blink = 5
#negative = 7
#hidden = 8
# Full colors maps xterm-256 color names to their palette index (kept as a
# string so it can be spliced directly into an escape sequence).
# NOTE(review): several names occur more than once below (e.g. 'blue3',
# 'deepskyblue4', 'green3', 'hotpink'); duplicate dict keys silently keep
# only the LAST value, so some of the indices listed here are unreachable
# by name — confirm whether the duplicates should get distinct names.
full_colors = {'black': '0', 'red': '1', 'green': '2', 'yellow': '3',
               'blue': '4', 'magenta': '5', 'cyan': '6', 'white': '7',
               'lightred': '9', 'lightgreen': '10',
               'lightyellow': '11', 'lightblue': '12',
               'lightmagenta': '13', 'lightcyan': '14',
               'grey0': '016', 'navyblue': '017', 'darkblue': '018',
               'blue3': '019', 'blue3': '020', 'blue1': '021',
               'darkgreen': '022', 'deepskyblue4': '023',
               'deepskyblue4': '024', 'deepskyblue4': '025',
               'dodgerblue3': '026', 'dodgerblue2': '027',
               'green4': '028', 'springgreen4': '029',
               'turquoise4': '030', 'deepskyblue3': '031',
               'deepskyblue3': '032', 'dodgerblue1': '033',
               'green3': '034', 'springgreen3': '035',
               'darkcyan': '036', 'lightseagreen': '037',
               'deepskyblue2': '038', 'deepskyblue1': '039',
               'green3': '040', 'springgreen3': '041',
               'springgreen2': '042', 'cyan3': '043',
               'darkturquoise': '044', 'turquoise2': '045',
               'green1': '046', 'springgreen2': '047',
               'springgreen1': '048', 'mediumspringgreen': '049',
               'cyan2': '050', 'cyan1': '051', 'darkred': '052',
               'deeppink4': '053', 'purple4': '054', 'purple4': '055',
               'purple3': '056', 'blueviolet': '057',
               'orange4': '058', 'grey37': '059',
               'mediumpurple4': '060', 'slateblue3': '061',
               'slateblue3': '062', 'royalblue1': '063',
               'chartreuse4': '064', 'darkseagreen4': '065',
               'paleturquoise4': '066', 'steelblue': '067',
               'steelblue3': '068', 'cornflowerblue': '069',
               'chartreuse3': '070', 'darkseagreen4': '071',
               'cadetblue': '072', 'cadetblue': '073',
               'skyblue3': '074', 'steelblue1': '075',
               'chartreuse3': '076', 'palegreen3': '077',
               'seagreen3': '078', 'aquamarine3': '079',
               'mediumturquoise': '080', 'steelblue1': '081',
               'chartreuse2': '082', 'seagreen2': '083',
               'seagreen1': '084', 'seagreen1': '085',
               'aquamarine1': '086', 'darkslategray2': '087',
               'darkred': '088', 'deeppink4': '089',
               'darkmagenta': '090', 'darkmagenta': '091',
               'darkviolet': '092', 'purple': '093', 'orange4': '094',
               'lightpink4': '095', 'plum4': '096',
               'mediumpurple3': '097', 'mediumpurple3': '098',
               'slateblue1': '099', 'yellow4': '100', 'wheat4': '101',
               'grey53': '102', 'lightslategrey': '103',
               'mediumpurple': '104', 'lightslateblue': '105',
               'yellow4': '106', 'darkolivegreen3': '107',
               'darkseagreen': '108', 'lightskyblue3': '109',
               'lightskyblue3': '110', 'skyblue2': '111',
               'chartreuse2': '112', 'darkolivegreen3': '113',
               'palegreen3': '114', 'darkseagreen3': '115',
               'darkslategray3': '116', 'skyblue1': '117',
               'chartreuse1': '118', 'lightgreen': '119',
               'lightgreen': '120', 'palegreen1': '121',
               'aquamarine1': '122', 'darkslategray1': '123',
               'red3': '124', 'deeppink4': '125',
               'mediumvioletred': '126', 'magenta3': '127',
               'darkviolet': '128', 'purple': '129',
               'darkorange3': '130', 'indianred': '131',
               'hotpink3': '132', 'mediumorchid3': '133',
               'mediumorchid': '134', 'mediumpurple2': '135',
               'darkgoldenrod': '136', 'lightsalmon3': '137',
               'rosybrown': '138', 'grey63': '139',
               'mediumpurple2': '140', 'mediumpurple1': '141',
               'gold3': '142', 'darkkhaki': '143',
               'navajowhite3': '144', 'grey69': '145',
               'lightsteelblue3': '146', 'lightsteelblue': '147',
               'yellow3': '148', 'darkolivegreen3': '149',
               'darkseagreen3': '150', 'darkseagreen2': '151',
               'lightcyan3': '152', 'lightskyblue1': '153',
               'greenyellow': '154', 'darkolivegreen2': '155',
               'palegreen1': '156', 'darkseagreen2': '157',
               'darkseagreen1': '158', 'paleturquoise1': '159',
               'red3': '160', 'deeppink3': '161', 'deeppink3': '162',
               'magenta3': '163', 'magenta3': '164',
               'magenta2': '165', 'darkorange3': '166',
               'indianred': '167', 'hotpink3': '168',
               'hotpink2': '169', 'orchid': '170',
               'mediumorchid1': '171', 'orange3': '172',
               'lightsalmon3': '173', 'lightpink3': '174',
               'pink3': '175', 'plum3': '176', 'violet': '177',
               'gold3': '178', 'lightgoldenrod3': '179', 'tan': '180',
               'mistyrose3': '181', 'thistle3': '182', 'plum2': '183',
               'yellow3': '184', 'khaki3': '185',
               'lightgoldenrod2': '186', 'lightyellow3': '187',
               'grey84': '188', 'lightsteelblue1': '189',
               'yellow2': '190', 'darkolivegreen1': '191',
               'darkolivegreen1': '192', 'darkseagreen1': '193',
               'honeydew2': '194', 'lightcyan1': '195', 'red1': '196',
               'deeppink2': '197', 'deeppink1': '198',
               'deeppink1': '199', 'magenta2': '200',
               'magenta1': '201', 'orangered1': '202',
               'indianred1': '203', 'indianred1': '204',
               'hotpink': '205', 'hotpink': '206',
               'mediumorchid1': '207', 'darkorange': '208',
               'salmon1': '209', 'lightcoral': '210',
               'palevioletred1': '211', 'orchid2': '212',
               'orchid1': '213', 'orange1': '214',
               'sandybrown': '215', 'lightsalmon1': '216',
               'lightpink1': '217', 'pink1': '218', 'plum1': '219',
               'gold1': '220', 'lightgoldenrod2': '221',
               'lightgoldenrod2': '222', 'navajowhite1': '223',
               'mistyrose1': '224', 'thistle1': '225',
               'yellow1': '226', 'lightgoldenrod1': '227',
               'khaki1': '228', 'wheat1': '229', 'cornsilk1': '230',
               'grey100': '231', 'grey3': '232', 'grey7': '233',
               'grey11': '234', 'grey15': '235', 'grey19': '236',
               'grey23': '237', 'grey27': '238', 'grey30': '239',
               'grey35': '240', 'grey39': '241', 'grey42': '242',
               'grey46': '243', 'grey50': '244', 'grey54': '245',
               'grey58': '246', 'grey62': '247', 'grey66': '248',
               'grey70': '249', 'grey74': '250', 'grey78': '251',
               'grey82': '252', 'grey85': '253', 'grey89': '254',
               'grey93': '255'}
# these are the 256 colors that go in a sorta rainbow order
# (palette indices stepping red -> yellow -> green -> cyan -> blue ->
# magenta and partially back; intended for cycling through when
# colorizing text)
_rainbow_256 = [196, 202, 208, 214, 220, 226, 190, 154, 118, 82, 46,
                47, 48, 49, 50, 51, 45, 39, 33, 27, 21, 57, 93, 129,
                165, 201, 129, 21, 33, 45, 50, 47, 82, 154, 226, 208]
class fg(object):
    '''Foreground (text) color escape sequences for the basic
    entries (codes 0-14) of the xterm 256-color palette.'''
    # prefix for an arbitrary 256-color foreground; append 'NNNm'
    fg_escape = escape + '38;5;'
    # restore the terminal's default foreground color
    reset = escape + '39m'
    re = reset
    # long color names
    black = '\033[38;5;0m'
    red = '\033[38;5;1m'
    green = '\033[38;5;2m'
    yellow = '\033[38;5;3m'
    blue = '\033[38;5;4m'
    magenta = '\033[38;5;5m'
    cyan = '\033[38;5;6m'
    white = '\033[38;5;7m'
    lightred = '\033[38;5;9m'
    lightgreen = '\033[38;5;10m'
    lightyellow = '\033[38;5;11m'
    lightblue = '\033[38;5;12m'
    lightmagenta = '\033[38;5;13m'
    lightcyan = '\033[38;5;14m'
    # one/two-letter shorthand aliases of the long names above
    b = black
    r = red
    g = green
    y = yellow
    bl = blue
    m = magenta
    c = cyan
    w = white
    lr = lightred
    lg = lightgreen
    ly = lightyellow
    lb = lightblue
    lm = lightmagenta
    lc = lightcyan
class bg(object):
    '''Background color escape sequences for the basic
    entries (codes 0-14) of the xterm 256-color palette.'''
    # prefix for an arbitrary 256-color background; append 'NNNm'
    bg_escape = escape + '48;5;'
    # restore the terminal's default background color
    reset = escape + '49m'
    re = reset
    # long color names
    black = '\033[48;5;0m'
    red = '\033[48;5;1m'
    green = '\033[48;5;2m'
    yellow = '\033[48;5;3m'
    blue = '\033[48;5;4m'
    magenta = '\033[48;5;5m'
    cyan = '\033[48;5;6m'
    white = '\033[48;5;7m'
    lightred = '\033[48;5;9m'
    lightgreen = '\033[48;5;10m'
    lightyellow = '\033[48;5;11m'
    lightblue = '\033[48;5;12m'
    lightmagenta = '\033[48;5;13m'
    lightcyan = '\033[48;5;14m'
    # one/two-letter shorthand aliases of the long names above
    b = black
    r = red
    g = green
    y = yellow
    bl = blue
    m = magenta
    c = cyan
    w = white
    lr = lightred
    lg = lightgreen
    ly = lightyellow
    lb = lightblue
    lm = lightmagenta
    lc = lightcyan
class atr(object):
    '''Text attribute (SGR) escape sequences.

    Codes follow the ECMA-48 / ANSI SGR table (and this module's own
    comment block above): 0 reset, 1 bold, 2 dim, 4 underline, 5 blink,
    7 negative (reverse video).
    '''
    # long names
    reset_all = '\033[0m'
    bold = '\033[1m'
    dim = '\033[2m'
    underline = '\033[4m'
    # fixed: blink is SGR 5 (6 is "rapid blink"); previously emitted 6
    blink = '\033[5m'
    # fixed: negative (reverse video) is SGR 7; previously emitted 2,
    # which is the "dim" attribute
    negative = '\033[7m'
    # short names
    ra = reset_all
    bo = bold
    d = dim
    ul = underline
    bl = blink
    n = negative
def reset_all():
    '''Return the SGR sequence clearing all attributes, formatting and
    colors.'''
    return escape + '0m'
def get_bold():
    '''Return the SGR sequence enabling bold (aka bright) text.'''
    return escape + '1m'
def get_dim():
    '''Return the SGR sequence enabling dim (faint) text.'''
    return escape + '2m'
def get_underline():
    '''Return the SGR sequence enabling underlined text.'''
    return escape + '4m'
def get_blink(speed):
    '''Return a blinking-text SGR sequence.

    speed == 1 selects SGR 6 (rapid blink); any other value selects
    SGR 5 (standard slow blink).
    '''
    code = '6' if speed == 1 else '5'
    return escape + code + 'm'
def get_negative():
    '''Return the SGR sequence for negative (reverse-video) text.

    Fixed: previously returned SGR 2, which is the "dim" attribute;
    reverse video is SGR 7 per ECMA-48 and this module's own comment
    table above.
    '''
    # '\033[' is the module-level `escape` prefix, inlined so the
    # function is self-contained
    return '\033[7m'
def get_color_esc(attribute, color):
    '''Return a terminal escape sequence (string) for the specified
    attribute and color.

    attribute: 'fg' for foreground or 'bg' for background; any other
        value raises ValueError (previously this fell through and
        crashed with a NameError on `coloresc`)
    color: an xterm-256 color name from `full_colors`, or an integer
        palette index; integers are clamped into the valid 0-255 range
        (previously values were clamped to 256, which is not a valid
        palette index). Unknown color names yield the reset sequence;
        any other color type returns None (original behaviour kept).
    '''
    if attribute == 'fg':
        coloresc = escape + '38;5;'
    elif attribute == 'bg':
        coloresc = escape + '48;5;'
    else:
        raise ValueError("attribute must be 'fg' or 'bg', got %r"
                         % (attribute,))
    if isinstance(color, int):
        # clamp out-of-range indices to the nearest valid palette entry
        color = min(max(color, 0), 255)
        return coloresc + str(color) + 'm'
    elif isinstance(color, str):
        try:
            return coloresc + full_colors[color.lower()] + 'm'
        except KeyError:
            # unknown name: fall back to a full reset sequence
            return '\033[0m'
    return None
def make_rainbow(string, style='word', width=1, start_color='rand'):
'''returns the original string mixed with terminal escape
sequences
style can be any character or the string 'char(acter)' 'word'
'line', and indicates when the function will change to the next
color
width specifies the number of "styles" before a color change
and can be an interger of any size or the string (r)andom
start_color is the | |
""" Descartes Labs utilities for our Universal Transverse Mercator (UTM)-based
projection system. """
# The Descartes Labs projection system is slightly different from the
# canonical UTM standard. Only North UTM zones are used, including for the
# southern hemisphere; so there are no false northings. Also, the latitude
# range is extended to the full +/-90 (instead of -80 to +84).
from collections.abc import Sequence
import json
import numpy as np
import shapely.geometry as geo
from .conversions import points_from_polygon
from .exceptions import InvalidLatLonError
# WGS84 constants:
# usually written 'a' (equatorial radius of the ellipsoid)
EARTH_MAJOR_AXIS = 6378137 # in meters
# usually written 'f'
FLATTENING = 1.0 / 298.257223563
# UTM constants
# usually written 'k0' (scale factor at the central meridian)
POINT_SCALE_FACTOR = 0.9996
# usually written 'E0'
FALSE_EASTING = 500000 # in meters
# Note that we do not use a false northing.
# usually written 'n'
THIRD_FLATTENING = FLATTENING / (2 - FLATTENING)
# Usually written 'A'
# (meridian-arc series in the third flattening, truncated at n**4)
RECTIFYING_RADIUS = (
    EARTH_MAJOR_AXIS
    / (1 + THIRD_FLATTENING)
    * (
        1.0
        + 1.0 / 4.0 * THIRD_FLATTENING ** 2
        + 1.0 / 64.0 * THIRD_FLATTENING ** 4
    )
)
# Numbers outside these ranges are surely outside their UTM zone
# (but not strictly invalid)
UTM_MIN_EAST = FALSE_EASTING - 334000
UTM_MAX_EAST = FALSE_EASTING + 334000
# Distances from equator to south/north poles, according to our transformation
# of points of latitude -90.0 and 90.0 respectively
UTM_MIN_NORTH = -9997964.943
UTM_MAX_NORTH = 9997964.943
# Numbers outside these ranges are not supported by our UTM system
UTM_MIN_LON = -180.0
UTM_MAX_LON = 180.0
UTM_MIN_LAT = -90.0
UTM_MAX_LAT = 90.0
# The width of a zone, in degrees longitude
ZONE_WIDTH_LON = 6


def zone_to_lon(zone: int):
    """Return the central meridian (longitude, in degrees) of a UTM zone.

    Raises
    ------
    ValueError
        When ``zone`` is outside 1 to 60 inclusive.
    """
    if zone < 1 or zone > 60:
        raise ValueError("Zones must be between 1 and 60 (inclusive)")
    # Zone 1 spans [-180, -174), so its central meridian is at -177.
    return zone * ZONE_WIDTH_LON - 183.0


def lon_to_zone(lon: float):
    """Return the UTM zone (1 to 60) containing the given longitude.

    Raises
    ------
    InvalidLatLonError
        When ``lon`` is outside -180.0 to 180.0 inclusive.
    """
    if lon < UTM_MIN_LON or lon > UTM_MAX_LON:
        raise InvalidLatLonError(
            "Longitude must be between -180.0 and 180.0 " "(inclusive)"
        )
    zone = 1 + np.floor((lon + 180.0) / 6.0).astype(int)
    # Clamp both boundaries: lon == -180.0 belongs to zone 1 and
    # lon == 180.0 to zone 60 (previously 180.0 overflowed into a
    # non-existent zone 61).
    return int(min(60, max(1, zone)))
def coordinate_transform(function):
    """Decorate a function which accepts numpy arrays of shape (?, 2), and
    optionally other arguments, and returns numpy arrays of the same shape;
    then the function will work for shapes and non-numpy sequences as well as
    numpy arrays, and will attempt to return arguments of the same type as its
    points parameter.
    """
    def _transform(points, *args, axis=-1, **kwargs):
        # Dispatch on input type; every non-ndarray branch converts,
        # recurses, and converts the result back to the input's type.
        if isinstance(points, np.ndarray):
            pass
        elif isinstance(points, str):
            # GeoJSON string -> shapely -> transform -> GeoJSON string
            points = json.loads(points)
            points = geo.shape(points)
            transformed_points = _transform(points, *args, **kwargs)
            return json.dumps(geo.mapping(transformed_points))
        elif isinstance(points, dict):
            # GeoJSON mapping -> shapely -> transform -> GeoJSON mapping
            points = geo.shape(points)
            transformed_points = _transform(points, *args, **kwargs)
            return geo.mapping(transformed_points)
        elif isinstance(points, geo.MultiPolygon):
            # NOTE(review): iterating a MultiPolygon directly was removed
            # in Shapely 2.0 (use .geoms) — confirm the pinned shapely
            # version supports this.
            return geo.MultiPolygon(
                [_transform(polygon, *args, **kwargs) for polygon in points]
            )
        elif isinstance(points, Sequence):
            try:
                if np.isfinite(points).all():
                    points = np.array(points, dtype=np.double)
                else:
                    raise TypeError  # Catch
            except TypeError:
                # The elements of this sequence could not become a numpy array,
                # try instead to see if this is a list of polygons.
                return [
                    _transform(polygon, *args, **kwargs) for polygon in points
                ]
        elif isinstance(points, geo.Polygon):
            # Transform the exterior ring and each hole independently.
            exterior_points, *interiors_points = points_from_polygon(points)
            return geo.Polygon(
                _transform(exterior_points, *args, **kwargs),
                holes=[
                    _transform(interior_points, *args, **kwargs)
                    for interior_points in interiors_points
                ]
                or None,
            )
        else:
            raise TypeError(
                "Could not interpret points of type %s, "
                "try passing an ndarray or shape" % type(points)
            )
        # Flatten to an (N, 2) array with coordinate pairs on the last axis.
        points = points.swapaxes(axis, -1).reshape((-1, 2)).astype(np.double)
        # NOTE(review): `shape` is captured AFTER the reshape, so inputs with
        # more than two dimensions come back flattened to (N, 2) instead of
        # their original shape — confirm whether >2-D inputs are expected.
        shape = list(points.shape)
        shape[axis] = points.shape[-1]
        shape[-1] = points.shape[axis]
        transformed_points = function(points, *args, **kwargs)
        return transformed_points.reshape(shape).swapaxes(axis, -1)
    return _transform
@coordinate_transform
def lonlat_to_utm(points, zone=None, ref_lon=None):
    """ Convert lon,lat points in a numpy array or shapely shape to UTM
    coordinates in the given zone.
    Parameters
    ----------
    points: numpy array, shapely polygon/multipolygon, geojson, or array-like
        Points of WGS84 lon,lat coordinates
    zone: int, optional
        UTM zone from 1 to 60 inclusive, must be specified if ref_lon is not
    ref_lon: float, optional
        Reference longitude to determine zone from
    axis: int, default=-1
        The given axis should have size 2, with lon,lat pairs.
    Returns
    -------
    utm_points: tries to be the same type as points, or numpy array
    Raises
    ------
    ValueError
        When UTM zone is outside of 1 to 60 inclusive, or the numpy array
        axis does not have size==2.
    """
    if zone is None:
        if ref_lon is None:
            raise TypeError("Either `zone` or `ref_lon` must be specified")
        zone = lon_to_zone(ref_lon)
    # These series expansion coefficients are sufficient to approximate the UTM
    # projection system to a precision of millimeters.
    n = THIRD_FLATTENING
    # first eccentricity-like quantity, 2*sqrt(n)/(1+n)
    N = 2 * np.sqrt(n) / (1.0 + n)
    a1 = 1.0 / 2.0 * n - 2.0 / 3.0 * n ** 2 + 5.0 / 16.0 * n ** 3
    a2 = 13.0 / 48.0 * n ** 2 - 3.0 / 5.0 * n ** 3
    a3 = 61.0 / 240.0 * n ** 3
    lon = points[:, 0]
    lat = points[:, 1]
    # longitude relative to the zone's central meridian (6*zone - 183),
    # in radians
    radlon = np.deg2rad(lon - 6.0 * zone + 183.0)
    radlat = np.deg2rad(lat)
    sinlat = np.sin(radlat)
    # t is tan of the conformal latitude; etap/xip are the
    # Gauss-Krueger-style intermediate coordinates
    t = np.sinh(np.arctanh(sinlat) - N * np.arctanh(N * sinlat))
    etap = np.arctanh(np.sin(radlon) / np.sqrt(1 + t ** 2))
    xip = np.arctan(t / np.cos(radlon))
    easting = FALSE_EASTING + POINT_SCALE_FACTOR * RECTIFYING_RADIUS * (
        etap
        + a1 * np.cos(2 * xip) * np.sinh(2 * etap)
        + a2 * np.cos(4 * xip) * np.sinh(4 * etap)
        + a3 * np.cos(6 * xip) * np.sinh(6 * etap)
    )
    # No false northing: southern-hemisphere northings are negative
    # (see the module-level note above).
    northing = (
        POINT_SCALE_FACTOR
        * RECTIFYING_RADIUS
        * (
            xip
            + a1 * np.sin(2 * xip) * np.cosh(2 * etap)
            + a2 * np.sin(4 * xip) * np.cosh(4 * etap)
            + a3 * np.sin(6 * xip) * np.cosh(6 * etap)
        )
    )
    return np.stack((easting, northing), axis=-1)
@coordinate_transform
def utm_to_lonlat(points, zone):
    """ Convert UTM points in a numpy array or shapely shape to lon,lat
    coordinates in the given zone.
    Parameters
    ----------
    points: numpy array, shapely polygon/multipolygon, geojson, or array-like
        Points of x,y coordinates in the given UTM north zone
    zone: int
        UTM north zone from 1 to 60 inclusive
    axis: int, default=-1
        The given axis should have size 2, with UTM x,y pairs.
    Returns
    -------
    lonlat_points: tries to be the same type as points, or numpy array
    Raises
    ------
    ValueError
        When UTM zone is outside of 1 to 60 inclusive, or the numpy array
        axis does not have size==2.
    """
    # These series expansion coefficients are sufficient to approximate the UTM
    # projection system to a precision of millimeters.
    # (b* terms invert the forward series; d* terms recover geodetic
    # latitude from the conformal latitude chi.)
    n = THIRD_FLATTENING
    b1 = 1.0 / 2.0 * n - 2.0 / 3.0 * n ** 2 + 37.0 / 96.0 * n ** 3
    b2 = 1.0 / 48.0 * n ** 2 + 1.0 / 15.0 * n ** 3
    b3 = 17.0 / 480.0 * n ** 3
    d1 = 2.0 * n - 2.0 / 3.0 * n ** 2 - 2.0 * n ** 3
    d2 = 7.0 / 3.0 * n ** 2 - 8.0 / 5.0 * n ** 3
    d3 = 56.0 / 15.0 * n ** 3
    easting = points[:, 0]
    northing = points[:, 1]
    # normalized coordinates (no false northing is used in this system)
    xi = northing / (POINT_SCALE_FACTOR * RECTIFYING_RADIUS)
    eta = (easting - FALSE_EASTING) / (POINT_SCALE_FACTOR * RECTIFYING_RADIUS)
    xip = xi - (
        b1 * np.sin(2 * xi) * np.cosh(2 * eta)
        + b2 * np.sin(4 * xi) * np.cosh(4 * eta)
        + b3 * np.sin(6 * xi) * np.cosh(6 * eta)
    )
    etap = eta - (
        b1 * np.cos(2 * xi) * np.sinh(2 * eta)
        + b2 * np.cos(4 * xi) * np.sinh(4 * eta)
        + b3 * np.cos(6 * xi) * np.sinh(6 * eta)
    )
    # chi is the conformal latitude
    chi = np.arcsin(np.sin(xip) / np.cosh(etap))
    lat = np.rad2deg(
        chi
        + d1 * np.sin(2 * chi)
        + d2 * np.sin(4 * chi)
        + d3 * np.sin(6 * chi)
    )
    # longitude offset from the zone's central meridian (6*zone - 183)
    lon = (
        6.0 * zone - 183.0 + np.rad2deg(np.arctan(np.sinh(etap) / np.cos(xip)))
    )
    # Return all longitude outputs within the range -180.0, +180.0
    lon = (lon + 180.0) % 360.0 - 180.0
    return np.stack((lon, lat), axis=-1)
@coordinate_transform
def utm_to_rowcol(utm_points, tile):
""" Convert UTM points in an array of shape (?, 2) to row,col array indices
given a tile. """
if not utm_points.shape[1] == 2:
raise ValueError(
"Expected array of utm points of shape (?, 2), got %s"
% str(utm_points.shape)
)
min_col = tile.tilesize * tile.path - tile.pad
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 <NAME> <<EMAIL>>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
import six
import yaml
import time
from .. import plugins
from ..AppriseAsset import AppriseAsset
from ..URLBase import URLBase
from ..common import ConfigFormat
from ..common import CONFIG_FORMATS
from ..common import ContentIncludeMode
from ..utils import GET_SCHEMA_RE
from ..utils import parse_list
from ..utils import parse_bool
from ..utils import parse_urls
from . import SCHEMA_MAP
# Test whether token is valid or not
# (one alphanumeric followed by at least one more alphanumeric or
# underscore, case-insensitive — so a valid token is at least 2 chars)
VALID_TOKEN = re.compile(
    r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I)
class ConfigBase(URLBase):
    """
    This is the base class for all supported configuration sources
    """
    # The Default Encoding to use if not otherwise detected
    encoding = 'utf-8'
    # The default expected configuration format unless otherwise
    # detected by the sub-modules
    default_config_format = ConfigFormat.TEXT
    # This is only set if the user overrides the config format on the URL
    # this should always initialize itself as None
    config_format = None
    # Don't read any more of this amount of data into memory as there is no
    # reason we should be reading in more. This is more of a safe guard than
    # anything else. 128KB (131072B)
    max_buffer_size = 131072
    # By default all configuration is not includable using the 'include'
    # line found in configuration files.
    allow_cross_includes = ContentIncludeMode.NEVER
    # the config path manages the handling of relative include
    # (note: captured once via os.getcwd() at class-definition/import time)
    config_path = os.getcwd()
def __init__(self, cache=True, recursion=0, insecure_includes=False,
**kwargs):
"""
Initialize some general logging and common server arguments that will
keep things consistent when working with the configurations that
inherit this class.
By default we cache our responses so that subsiquent calls does not
cause the content to be retrieved again. For local file references
this makes no difference at all. But for remote content, this does
mean more then one call can be made to retrieve the (same) data. This
method can be somewhat inefficient if disabled. Only disable caching
if you understand the consequences.
You can alternatively set the cache value to an int identifying the
number of seconds the previously retrieved can exist for before it
should be considered expired.
recursion defines how deep we recursively handle entries that use the
`include` keyword. This keyword requires us to fetch more configuration
from another source and add it to our existing compilation. If the
file we remotely retrieve also has an `include` reference, we will only
advance through it if recursion is set to 2 deep. If set to zero
it is off. There is no limit to how high you set this value. It would
be recommended to keep it low if you do intend to use it.
insecure_include by default are disabled. When set to True, all
Apprise Config files marked to be in STRICT mode are treated as being
in ALWAYS mode.
Take a file:// based configuration for example, only a file:// based
configuration can include another file:// based one. because it is set
to STRICT mode. If an http:// based configuration file attempted to
include a file:// one it woul fail. However this include would be
possible if insecure_includes is set to True.
There are cases where a self hosting apprise developer may wish to load
configuration from memory (in a string format) that contains 'include'
entries (even file:// based ones). In these circumstances if you want
these 'include' entries to be honored, this value must be set to True.
"""
super(ConfigBase, self).__init__(**kwargs)
# Tracks the time the content was last retrieved on. This place a role
# for cases where we are not caching our response and are required to
# re-retrieve our settings.
self._cached_time = None
# Tracks previously loaded content for speed
self._cached_servers = None
# Initialize our recursion value
self.recursion = recursion
# Initialize our insecure_includes flag
self.insecure_includes = insecure_includes
if 'encoding' in kwargs:
# Store the encoding
self.encoding = kwargs.get('encoding')
if 'format' in kwargs \
and isinstance(kwargs['format'], six.string_types):
# Store the enforced config format
self.config_format = kwargs.get('format').lower()
if self.config_format not in CONFIG_FORMATS:
# Simple error checking
err = 'An invalid config format ({}) was specified.'.format(
self.config_format)
self.logger.warning(err)
raise TypeError(err)
# Set our cache flag; it can be True or a (positive) integer
try:
self.cache = cache if isinstance(cache, bool) else int(cache)
if self.cache < 0:
err = 'A negative cache value ({}) was specified.'.format(
cache)
self.logger.warning(err)
raise TypeError(err)
except (ValueError, TypeError):
err = 'An invalid cache value ({}) was specified.'.format(cache)
self.logger.warning(err)
raise TypeError(err)
return
def servers(self, asset=None, **kwargs):
"""
Performs reads loaded configuration and returns all of the services
that could be parsed and loaded.
"""
if not self.expired():
# We already have cached results to return; use them
return self._cached_servers
# Our cached response object
self._cached_servers = list()
# read() causes the child class to do whatever it takes for the
# config plugin to load the data source and return unparsed content
# None is returned if there was an error or simply no data
content = self.read(**kwargs)
if not isinstance(content, six.string_types):
# Set the time our content was cached at
self._cached_time = time.time()
# Nothing more to do; return our empty cache list
return self._cached_servers
# Our Configuration format uses a default if one wasn't one detected
# or enfored.
config_format = \
self.default_config_format \
if self.config_format is None else self.config_format
# Dynamically load our parse_ function based on our config format
fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format))
# Initialize our asset object
asset = asset if isinstance(asset, AppriseAsset) else self.asset
# Execute our config parse function which always returns a tuple
# of our servers and our configuration
servers, configs = fn(content=content, asset=asset)
self._cached_servers.extend(servers)
# Configuration files were detected; recursively populate them
# If we have been configured to do so
for url in configs:
if self.recursion > 0:
# Attempt to acquire the schema at the very least to allow
# our configuration based urls.
schema = GET_SCHEMA_RE.match(url)
if schema is None:
# Plan B is to assume we're dealing with a file
schema = 'file'
if not os.path.isabs(url):
# We're dealing with a relative path; prepend
# our current config path
url = os.path.join(self.config_path, url)
url = '{}://{}'.format(schema, URLBase.quote(url))
else:
# Ensure our schema is always in lower case
schema = schema.group('schema').lower()
# Some basic validation
if schema not in SCHEMA_MAP:
ConfigBase.logger.warning(
'Unsupported include schema {}.'.format(schema))
continue
# Parse our url details of the server object as dictionary
# containing all of the information parsed from our URL
results = SCHEMA_MAP[schema].parse_url(url)
if not results:
# Failed to parse the server URL
self.logger.warning(
'Unparseable include URL {}'.format(url))
continue
# Handle cross inclusion based on allow_cross_includes rules
if (SCHEMA_MAP[schema].allow_cross_includes ==
ContentIncludeMode.STRICT
and schema not in self.schemas()
and not self.insecure_includes) or \
SCHEMA_MAP[schema].allow_cross_includes == \
ContentIncludeMode.NEVER:
# Prevent the loading if insecure base protocols
ConfigBase.logger.warning(
'Including {}:// based configuration is prohibited. '
'Ignoring URL {}'.format(schema, url))
continue
# Prepare our Asset Object
results['asset'] = asset
# No | |
the spatial_scale w.r.t input image.
trans_std: multiplier used in 2nd phase.
"""
op = builtin.DeformablePSROIPooling(
no_trans=no_trans,
part_size=part_size,
pooled_h=pooled_h,
pooled_w=pooled_w,
sample_per_part=sample_per_part,
spatial_scale=spatial_scale,
trans_std=trans_std,
)
output, _ = apply(op, inp, rois, trans)
return output
def hswish(x):
    r"""Element-wise hard-swish: `x * relu6(x + 3) / 6`.
    Example:
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        x = tensor(np.arange(5).astype(np.float32))
        out = F.hswish(x)
        print(out.numpy().round(decimals=4))
    .. testoutput::
        [0. 0.6667 1.6667 3. 4. ]
    """
    # Delegate to the fused element-wise kernel for this mode.
    hswish_mode = Elemwise.Mode.H_SWISH
    return _elwise(x, mode=hswish_mode)
def sigmoid(x):
    r"""Element-wise logistic function `1 / ( 1 + exp( -x ) )`."""
    # Single fused element-wise kernel.
    sigmoid_mode = Elemwise.Mode.SIGMOID
    return _elwise(x, mode=sigmoid_mode)
def hsigmoid(x):
    r"""Element-wise hard sigmoid: `relu6(x + 3) / 6`."""
    shifted = x + 3
    return relu6(shifted) / 6
def relu(x):
    r"""Element-wise rectified linear unit: `max(x, 0)`."""
    # Delegate to the fused element-wise RELU kernel.
    relu_mode = Elemwise.Mode.RELU
    return _elwise(x, mode=relu_mode)
def relu6(x):
    r"""Element-wise ReLU capped at six: `min(max(x, 0), 6)`."""
    rectified = maximum(x, 0)
    return minimum(rectified, 6)
def prelu(inp: Tensor, weight: Tensor) -> Tensor:
    r"""Element-wise PReLU: identity for positives, ``weight``-scaled negatives.
    Refer to :class:`~.PReLU` for more information.
    """
    positive_part = maximum(inp, 0)
    negative_part = minimum(inp, 0)
    return positive_part + weight * negative_part
def leaky_relu(inp: Tensor, negative_slope: float = 0.01) -> Tensor:
    r"""Element-wise LeakyReLU: identity for positives, ``negative_slope``-scaled negatives.
    Refer to :class:`~.LeakyReLU` for more information.
    """
    positive_part = maximum(inp, 0)
    negative_part = minimum(inp, 0)
    return positive_part + negative_slope * negative_part
def silu(x):
    r"""Applies the element-wise Sigmoid Linear Unit function, i.e. `x * sigmoid(x)`."""
    # Single fused element-wise kernel.
    silu_mode = Elemwise.Mode.SILU
    return _elwise(x, mode=silu_mode)
def gelu(x):
    r"""Applies the element-wise function:
    .. math::
        \text{gelu}(x) = x\Phi(x)
    where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.
    """
    # Single fused element-wise kernel.
    gelu_mode = Elemwise.Mode.GELU
    return _elwise(x, mode=gelu_mode)
def softplus(inp: Tensor) -> Tensor:
    r"""Applies the element-wise function:
    .. math::
        \text{softplus}(x) = \log(1 + \exp(x))
    softplus is a smooth approximation to the ReLU function and can be used
    to constrain the output to be always positive.
    For numerical stability the implementation follows this transformation:
    .. math::
        \text{softplus}(x) = \log(1 + \exp(x))
        = \log(1 + \exp(-\text{abs}(x))) + \max(x, 0)
        = \log1p(\exp(-\text{abs}(x))) + \text{relu}(x)
    Examples:
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        x = tensor(np.arange(-3, 3, dtype=np.float32))
        y = F.softplus(x)
        print(y.numpy().round(decimals=4))
    Outputs:
    .. testoutput::
        [0.0486 0.1269 0.3133 0.6931 1.3133 2.1269]
    """
    # exp(-|x|) <= 1, so log1p never sees an overflowing argument;
    # relu(inp) restores the linear part for positive inputs.
    smooth_term = log1p(exp(-abs(inp)))
    return smooth_term + relu(inp)
def logsoftmax(inp: Tensor, axis: Union[int, Sequence[int]]) -> Tensor:
    r"""Applies the :math:`\log(\text{softmax}(x))` function to an n-dimensional
    input tensor. The :math:`\text{logsoftmax}(x)` formulation can be simplified as:
    .. math::
        \text{logsoftmax}(x_{i}) = \log(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} )
    For numerical stability the implementation follows this transformation:
    .. math::
        \text{logsoftmax}(x)
        = x - \text{logsumexp}(x)
    Examples:
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
        y = F.logsoftmax(x, axis=1)
        print(y.numpy().round(decimals=4))
    Outputs:
    .. testoutput::
        [[-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]
         [-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]]
    """
    # keepdims=True keeps the normalizer broadcastable against ``inp``.
    normalizer = logsumexp(inp, axis, keepdims=True)
    return inp - normalizer
def logsigmoid(inp: Tensor) -> Tensor:
    r"""Applies the element-wise function:
    .. math::
        \text{logsigmoid}(x) = \log(\frac{ 1 }{ 1 + \exp(-x)})
        = - \log(1 + \exp(-x))
        = - \text{softplus}(-x)
    Examples:
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        x = tensor(np.arange(-5, 5, dtype=np.float32))
        y = F.logsigmoid(x)
        print(y.numpy().round(decimals=4))
    Outputs:
    .. testoutput::
        [-5.0067 -4.0182 -3.0486 -2.1269 -1.3133 -0.6931 -0.3133 -0.1269 -0.0486
         -0.0181]
    """
    # Reuse the numerically stable softplus implementation.
    stable = softplus(-inp)
    return -stable
def logsumexp(
    inp: Tensor, axis: Union[int, Sequence[int]], keepdims: bool = False
) -> Tensor:
    r"""Calculates the logarithm of the inputs' exponential sum along the given :attr:`axis`.
    .. math::
        \text{logsumexp}(x)= \log \sum_{j=1}^{n} \exp \left(x_{j}\right)
    For numerical stability, the implementation follows this transformation:
    .. math::
        \text{logsumexp}(x) = b + \log \sum_{j=1}^{n} \exp \left(x_{j}-b\right),
        \quad b = \max(x_j)
    Examples:
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
        y = F.logsumexp(x, axis=1, keepdims=False)
        print(y.numpy().round(decimals=4))
    Outputs:
    .. testoutput::
        [-0.5481  4.4519]
    """
    # detach(): the shift is a constant w.r.t. autodiff; gradients flow
    # through the exp/sum term only.
    peak = max(inp.detach(), axis, keepdims=True)
    reduced = log(sum(exp(inp - peak), axis, keepdims))
    if keepdims:
        return peak + reduced
    # Drop the kept reduction dims from ``peak`` to match ``reduced``'s shape.
    return squeeze(peak, axis=None) + reduced
def _get_softmax_axis(ndim: int) -> int:
if ndim in (0, 1, 3):
return 0
return 1
def softmax(inp: Tensor, axis: Optional[int] = None) -> Tensor:
    r"""Applies a :math:`\text{softmax}(x)` function. :math:`\text{softmax}(x)` is defined as:
    .. math::
        \text{softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
    It is applied to all elements along axis, and rescales elements so that
    they stay in the range `[0, 1]` and sum to 1.
    See :class:`~.module.Softmax` for more details.
    Examples:
    .. testcode::
        import numpy as np
        from megengine import tensor
        import megengine.functional as F
        x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
        out = F.softmax(x)
        print(out.numpy().round(decimals=4))
    Outputs:
    .. testoutput::
        [[0.0117 0.0317 0.0861 0.2341 0.6364]
         [0.0117 0.0317 0.0861 0.2341 0.6364]]
    """
    if axis is None:
        axis = _get_softmax_axis(len(inp.shape))
    # Shift by the (detached) row maximum so exp() cannot overflow.
    shift = inp.max(axis=axis, keepdims=True).detach()
    numerator = exp(inp - shift)
    denominator = sum(numerator, axis=axis, keepdims=True)
    return numerator / denominator
@lru_cache(maxsize=None)
def _get_layerNorm(device, dtype, dim, gopt_level=2):
    """Build (and cache per device/dtype/dim) two layer-norm subgraphs.

    Returns a ``(layerNorm, layerNormAffine)`` pair: the plain normalization
    graph (3 inputs) and the affine variant (5 inputs: + weight, bias).
    Both flatten the normalized dims into one trailing axis, normalize over
    it, then restore the original shape.
    """
    @subgraph("LayerNormAffine", dtype, device, 5, gopt_level=gopt_level)
    def layerNormAffine(inputs, f, c):
        inp, eps, _flatten_shape, weight, bias = inputs
        inp_shape = f(GetVarShape(), inp)
        # Collapse everything past ``dim`` into a single axis to reduce over.
        inp = f(Reshape(axis=dim), inp, _flatten_shape)
        mean = f(Reduce(mode="mean", axis=-1), inp)
        # x2s = sum of squares over the normalized axis.
        x2s = f(Reduce(mode="sum_sqr", axis=-1), inp)
        reduce_shape = f(GetVarShape(), x2s)
        # Number of elements reduced per row = numel(inp) // numel(x2s).
        reduce_size = f(
            "//",
            f(Reduce(mode="product", axis=0), inp_shape),
            f(Reduce(mode="product", axis=0), reduce_shape),
        )
        reduce_size_f = f(TypeCvt(dtype=dtype), reduce_size)
        # var = E[x^2] - mean^2 (biased variance).
        var = f("-", f("/", x2s, reduce_size_f), f("**", mean, c(2)))
        # Additive-eps form: (var + eps) ** -0.5.
        inv_sqrt_var = f("**", f("+", var, eps), c(-0.5))
        # fma3: inp * inv_sqrt_var + (-mean * inv_sqrt_var) == (inp - mean) / std.
        oup = f("fma3", inp, inv_sqrt_var, f("*", f("-", mean), inv_sqrt_var))
        affine_oup = f(Reshape(), oup, inp_shape)
        affine_oup = f("fma3", affine_oup, weight, bias)
        # NOTE: return oup make backward faster but take more memory
        return (affine_oup, oup, mean, x2s), (True, False, False, False)
    @subgraph("LayerNorm", dtype, device, 3, gopt_level=gopt_level)
    def layerNorm(inputs, f, c):
        # Same computation as above, minus the final weight/bias affine step.
        inp, eps, _flatten_shape = inputs
        inp_shape = f(GetVarShape(), inp)
        inp = f(Reshape(axis=dim), inp, _flatten_shape)
        mean = f(Reduce(mode="mean", axis=-1), inp)
        x2s = f(Reduce(mode="sum_sqr", axis=-1), inp)
        reduce_shape = f(GetVarShape(), x2s)
        reduce_size = f(
            "//",
            f(Reduce(mode="product", axis=0), inp_shape),
            f(Reduce(mode="product", axis=0), reduce_shape),
        )
        reduce_size_f = f(TypeCvt(dtype=dtype), reduce_size)
        var = f("-", f("/", x2s, reduce_size_f), f("**", mean, c(2)))
        inv_sqrt_var = f("**", f("+", var, eps), c(-0.5))
        oup = f("fma3", inp, inv_sqrt_var, f("*", f("-", mean), inv_sqrt_var))
        oup = f(Reshape(), oup, inp_shape)
        return (oup,), (True,)
    return (layerNorm, layerNormAffine)
def layer_norm(
    inp: Tensor,
    normalized_shape: tuple,
    affine: bool,
    weight: Optional[Tensor] = None,
    bias: Optional[Tensor] = None,
    eps: float = 1e-5,
    eps_mode="additive",
):
    """Apply layer normalization over the trailing ``normalized_shape`` dims.

    When ``affine`` is True, ``weight`` and ``bias`` are applied after
    normalization. ``eps`` stabilizes the variance denominator.

    NOTE(review): ``eps_mode`` accepts "max" here, but the cached subgraphs
    only implement the additive form (``var + eps``) — confirm whether the
    "max" path is handled elsewhere or is dead.
    """
    assert eps_mode.lower() in {"max", "additive"}, "unknown eps_mode: {}".format(
        eps_mode
    )
    if amp._enabled:
        # Under autocast, promote inputs to a common (wider) dtype.
        inp, weight, bias = cast_tensors(inp, weight, bias, promote=True)
    _device = inp.device
    _dtype = inp.dtype
    # Number of leading (batch-like) dims that are NOT normalized.
    _dim = len(inp.shape) - len(normalized_shape)
    # Target shape: keep the leading dims, flatten the normalized dims to -1.
    _flatten_shape = concat(
        (
            convert_single_value(inp.shape[:_dim], dtype="int32", device=inp.device),
            convert_single_value(-1, dtype="int32", device=inp.device),
        )
    )
    # Subgraph builders are cached per (device, dtype, dim).
    (layerNorm, layerNormAffine) = _get_layerNorm(_device, _dtype, _dim)
    eps = convert_single_value(eps, dtype=inp.dtype, device=inp.device)
    if affine:
        outvar, *_ = apply(layerNormAffine(), inp, eps, _flatten_shape, weight, bias)
    else:
        outvar, *_ = apply(layerNorm(), inp, eps, _flatten_shape)
    return outvar
def batch_norm(
inp: Tensor,
running_mean: Tensor = None,
running_var: Tensor = None,
weight: Optional[Tensor] = None,
bias: Optional[Tensor] = None,
*,
training: bool = False,
momentum: float = 0.9,
eps: float = 1e-5,
inplace: bool = True,
compute_mode="default",
param_dim="dim_1c11"
):
r"""Applies batch normalization to the input.
Refer to :class:`~.BatchNorm2d` and :class:`~.BatchNorm1d` for more information.
Args:
inp: input tensor.
running_mean: tensor to store running mean.
running_var: tensor to store running variance.
weight: scaling tensor in the learnable affine parameters.
See :math:`\gamma` in :class:`~.BatchNorm2d`.
bias: bias tensor in the learnable affine parameters.
See :math:`\beta` in :class:`~.BatchNorm2d`.
training: a boolean value to indicate whether batch norm is performed
in training mode. Default: False
momentum: value used for the ``running_mean`` and ``running_var``
computation. Default: 0.9
eps: a value added to the denominator for numerical stability. Default: 1e-5
inplace: whether to update ``running_mean`` and ``running_var``
inplace or return new tensors. Default: True
"""
if inp.ndim != 4:
raise NotImplementedError("batch_norm for ndim != 4")
if param_dim == "dim_1c11":
C = inp.shape[1]
pshape = (1, C, 1, 1)
elif param_dim == "dim_111c":
C = inp.shape[3]
pshape = (1, 1, 1, C)
else:
raise ValueError("Invalid param_dim {}".format(param_dim))
def make_full_if_none(x, value):
if x is None:
(x,) = Const(value, dtype=inp.dtype, device=inp.device)()
shape = astensor1d(pshape, inp, dtype="int32", device=inp.device)
(result,) = apply(builtin.Broadcast(), x, shape)
return result
elif x.ndim == 1:
shape = astensor1d(pshape, inp, dtype="int32", device=inp.device)
(result,) = apply(builtin.Reshape(), x, shape)
return result
return x
has_mean = running_mean is not None
has_var = running_var is not None
if not training:
assert has_mean, "running_mean must be provided in inference mode"
assert has_var, "running_var must | |
= _icon_path_ + 'autorig_large.png'
# Large icon variants for each pipeline stage.
_cam_rig_icon_large_ = _icon_path_ + 'cam_rig_large.png'
_cyclo_icon_large_ = _icon_path_ + 'cyclo_large.png'
_gizmo_icon_large_ = _icon_path_ + 'gizmo_large.png'
_light_rig_icon_large_ = _icon_path_ + 'light_rig_large.png'
_lut_icon_large_ = _icon_path_ + 'lut_large.png'
_render_graph_icon_large_ = _icon_path_ + 'render_graph_large.png'
_render_pass_icon_large_ = _icon_path_ + 'render_pass_large.png'
_fx_setup_icon_large_ = _icon_path_ + 'fx_setup_large.png'
_scripts_icon_large_ = _icon_path_ + 'scripts_large.png'
_sons_icon_large_ = _icon_path_ + 'sons_large.png'
_stockshot_icon_large_ = _icon_path_ + 'stockshot_large.png'
_video_icon_large_ = _icon_path_ + 'video_large.png'
_video_edit_icon_large_ = _icon_path_ + 'video_edit_large.png'
_sound_edit_icon_large_ = _icon_path_ + 'sound_edit_large.png'
# NOTE(review): reuses 'video_edit_large.png' — looks like a copy/paste slip;
# confirm whether a dedicated 'material_large.png' asset exists.
_material_icon_large_ = _icon_path_ + 'video_edit_large.png'
_painter_template_icon_large_ = _icon_path_ + 'painter_template_large.png'
# Maps every stage identifier to its (small) icon path.
_stage_icon_={}
_stage_icon_[_design_] = _icon_path_ + _design_icon_
_stage_icon_[_geo_] = _icon_path_ + _geo_icon_
_stage_icon_[_rig_] = _icon_path_ + _rig_icon_
_stage_icon_[_texturing_] = _icon_path_ + _texturing_icon_
_stage_icon_[_shading_] = _icon_path_ + _shading_icon_
_stage_icon_[_hair_] = _icon_path_ + _hair_icon_
_stage_icon_[_concept_] = _icon_path_ + _concept_icon_
_stage_icon_[_layout_] = _icon_path_ + _layout_icon_
_stage_icon_[_animation_] = _icon_path_ + _animation_icon_
_stage_icon_[_lighting_] = _icon_path_ + _lighting_icon_
_stage_icon_[_cfx_] = _icon_path_ + _cfx_icon_
_stage_icon_[_fx_] = _icon_path_ + _fx_icon_
_stage_icon_[_compositing_] = _icon_path_ + _compositing_icon_
_stage_icon_[_camera_] = _icon_path_ + _camera_icon_
_stage_icon_[_autorig_]= _icon_path_ + _autorig_icon_
_stage_icon_[_cam_rig_] = _icon_path_ + _cam_rig_icon_
_stage_icon_[_cyclo_] = _icon_path_ + _cyclo_icon_
_stage_icon_[_gizmo_] = _icon_path_ + _gizmo_icon_
_stage_icon_[_light_rig_] = _icon_path_ + _light_rig_icon_
_stage_icon_[_lut_] = _icon_path_ + _lut_icon_
_stage_icon_[_render_graph_] = _icon_path_ + _render_graph_icon_
_stage_icon_[_render_pass_] = _icon_path_ + _render_pass_icon_
_stage_icon_[_fx_setup_] = _icon_path_ + _fx_setup_icon_
_stage_icon_[_scripts_] = _icon_path_ + _scripts_icon_
_stage_icon_[_sons_] = _icon_path_ + _sons_icon_
_stage_icon_[_stockshot_] = _icon_path_ + _stockshot_icon_
_stage_icon_[_video_] = _icon_path_ + _video_icon_
_stage_icon_[_video_edit_] = _icon_path_ + _video_edit_icon_
_stage_icon_[_sound_edit_] = _icon_path_ + _sound_edit_icon_
_stage_icon_[_material_] = _icon_path_ + _material_icon_
_stage_icon_[_painter_template_] = _icon_path_ + _painter_template_icon_
# Publish files count
# Per-stage cap on files per publish; the large values (texturing: 10000,
# lighting/compositing: 1000000) presumably accommodate texture sets and
# rendered image sequences — TODO confirm against the publish code.
_pub_count_dic_ = {}
_pub_count_dic_[_design_] = 1
_pub_count_dic_[_geo_] = 1
_pub_count_dic_[_rig_] = 1
_pub_count_dic_[_texturing_] = 10000
_pub_count_dic_[_shading_] = 1
_pub_count_dic_[_hair_] = 1
_pub_count_dic_[_concept_] = 1
_pub_count_dic_[_layout_] = 1
_pub_count_dic_[_animation_] = 1
_pub_count_dic_[_lighting_] = 1000000
_pub_count_dic_[_cfx_] = 1
_pub_count_dic_[_fx_] = 1
_pub_count_dic_[_fx_setup_] = 1
_pub_count_dic_[_compositing_] = 1000000
# Softs library
# Display names for every supported DCC application.
_maya_ = 'Maya'
_mayapy_ = 'Mayapy'
_maya_yeti_ = 'Maya + Yeti'
_photoshop_ = 'Photoshop'
_krita_ = 'Krita'
_zbrush_ = 'Zbrush'
_blender_ = 'Blender'
_3dsmax_ = '3ds Max'
_marvelous_ = 'Marvelous Designer'
_painter_ = 'Substance Painter'
_designer_ = 'Substance Designer'
_mari_ = 'Mari'
_guerilla_ = 'Guerilla Render'
_houdini_ = 'Houdini'
_hython_ = 'Hython'
_nuke_ = 'Nuke'
_rumba_ = 'Rumba'
_resolve_ = 'Resolve'
_reaper_ = 'Reaper'
_folder_ = 'Explorer'
_softwares_list_ = [_maya_,
                        _maya_yeti_,
                        _mayapy_,
                        _photoshop_,
                        _krita_,
                        _zbrush_,
                        _blender_,
                        _3dsmax_,
                        _marvelous_,
                        _painter_,
                        _designer_,
                        _mari_,
                        _guerilla_,
                        _houdini_,
                        _hython_,
                        _nuke_,
                        _rumba_,
                        _resolve_,
                        _reaper_,
                        _folder_]
# Subset of softwares that can publish directly.
_publish_softwares_list_ = [_maya_, _blender_, _maya_yeti_, _guerilla_, _nuke_]
# Publish extension dictionary
# Default publish file extension per stage, per software: stage -> {software: ext}.
_pub_ext_dic_ = {}
_pub_ext_dic_[_design_] = {}
_pub_ext_dic_[_design_][_photoshop_] = 'png'
_pub_ext_dic_[_geo_] = {}
_pub_ext_dic_[_geo_][_maya_] = 'abc'
_pub_ext_dic_[_geo_][_blender_] = 'abc'
_pub_ext_dic_[_rig_] = {}
_pub_ext_dic_[_rig_][_maya_] = 'ma'
_pub_ext_dic_[_rig_][_blender_] = 'blend'
_pub_ext_dic_[_autorig_] = {}
_pub_ext_dic_[_autorig_][_maya_] = 'ma'
_pub_ext_dic_[_cam_rig_] = {}
_pub_ext_dic_[_cam_rig_][_maya_] = 'ma'
_pub_ext_dic_[_texturing_] = {}
_pub_ext_dic_[_texturing_][_painter_] = 'exr'
_pub_ext_dic_[_texturing_][_designer_] = 'sbsar'
_pub_ext_dic_[_shading_] = {}
_pub_ext_dic_[_shading_][_guerilla_] = 'gnode'
_pub_ext_dic_[_shading_][_maya_] = 'ma'
_pub_ext_dic_[_render_pass_] = {}
_pub_ext_dic_[_render_pass_][_guerilla_] = 'gnode'
_pub_ext_dic_[_render_graph_] = {}
_pub_ext_dic_[_render_graph_][_guerilla_] = 'gnode'
_pub_ext_dic_[_light_rig_] = {}
_pub_ext_dic_[_light_rig_][_maya_] = 'ma'
_pub_ext_dic_[_light_rig_][_guerilla_] = 'gnode'
_pub_ext_dic_[_hair_] = {}
_pub_ext_dic_[_hair_][_maya_] = 'ma'
_pub_ext_dic_[_hair_][_maya_yeti_] = 'ma'
_pub_ext_dic_[_concept_] = {}
_pub_ext_dic_[_concept_][_photoshop_] = 'png'
_pub_ext_dic_[_layout_] = {}
_pub_ext_dic_[_layout_][_maya_] = 'abc'
_pub_ext_dic_[_animation_] = {}
_pub_ext_dic_[_animation_][_maya_] = 'abc'
_pub_ext_dic_[_lighting_] = {}
_pub_ext_dic_[_lighting_][_maya_] = 'exr'
_pub_ext_dic_[_lighting_][_guerilla_] = 'exr'
_pub_ext_dic_[_cfx_] = {}
_pub_ext_dic_[_cfx_][_maya_] = 'fur'
_pub_ext_dic_[_cfx_][_maya_yeti_] = 'fur'
_pub_ext_dic_[_fx_] = {}
_pub_ext_dic_[_fx_][_maya_] = 'abc'
_pub_ext_dic_[_fx_][_houdini_] = 'hip'
_pub_ext_dic_[_fx_setup_] = {}
_pub_ext_dic_[_fx_setup_][_houdini_] = 'hip'
_pub_ext_dic_[_compositing_] = {}
_pub_ext_dic_[_compositing_][_nuke_] = 'exr'
_pub_ext_dic_[_camera_] = {}
_pub_ext_dic_[_camera_][_maya_] = 'abc'
_pub_ext_dic_[_cyclo_] = {}
_pub_ext_dic_[_cyclo_][_maya_] = 'abc'
_pub_ext_dic_[_cyclo_][_guerilla_] = 'gproject'
_pub_ext_dic_[_material_] = {}
_pub_ext_dic_[_material_][_designer_] = 'sbsar'
_pub_ext_dic_[_material_][_photoshop_] = 'png'
_pub_ext_dic_[_painter_template_] = {}
_pub_ext_dic_[_painter_template_][_painter_] = 'spt'
_extension_key_ = 'extension'
# Publish extensions lists dictionary
# All accepted publish extensions per stage/software (supersets of the
# single defaults in _pub_ext_dic_): stage -> {software: [exts]}.
_pub_ext_list_dic_ = {}
_pub_ext_list_dic_[_design_] = {}
_pub_ext_list_dic_[_design_][_photoshop_] = ['png']
_pub_ext_list_dic_[_geo_] = {}
_pub_ext_list_dic_[_geo_][_maya_] = ['abc', 'ma']
_pub_ext_list_dic_[_geo_][_blender_] = ['abc', 'blend']
_pub_ext_list_dic_[_rig_] = {}
_pub_ext_list_dic_[_rig_][_maya_] = ['ma']
_pub_ext_list_dic_[_rig_][_blender_] = ['blend']
_pub_ext_list_dic_[_hair_] = {}
_pub_ext_list_dic_[_hair_][_maya_] = ['ma']
_pub_ext_list_dic_[_hair_][_maya_yeti_] = ['ma']
_pub_ext_list_dic_[_texturing_] = {}
_pub_ext_list_dic_[_texturing_][_painter_] = ['exr', 'png', 'tiff']
_pub_ext_list_dic_[_texturing_][_designer_] = ['sbsar']
_pub_ext_list_dic_[_shading_] = {}
_pub_ext_list_dic_[_shading_][_guerilla_] = ['gnode']
_pub_ext_list_dic_[_shading_][_maya_] = ['ma']
_pub_ext_list_dic_[_autorig_] = {}
_pub_ext_list_dic_[_autorig_][_maya_] = ['ma']
_pub_ext_list_dic_[_cam_rig_] = {}
_pub_ext_list_dic_[_cam_rig_][_maya_] = ['ma']
_pub_ext_list_dic_[_render_pass_] = {}
_pub_ext_list_dic_[_render_pass_][_guerilla_] = ['gnode']
_pub_ext_list_dic_[_render_graph_] = {}
_pub_ext_list_dic_[_render_graph_][_guerilla_] = ['gnode']
_pub_ext_list_dic_[_light_rig_] = {}
_pub_ext_list_dic_[_light_rig_][_maya_] = ['ma']
_pub_ext_list_dic_[_light_rig_][_guerilla_] = ['gnode']
_pub_ext_list_dic_[_fx_setup_] = {}
_pub_ext_list_dic_[_fx_setup_][_houdini_] = ['hip', 'vdb', 'abc']
_pub_ext_list_dic_[_cyclo_] = {}
_pub_ext_list_dic_[_cyclo_][_maya_] = ['abc', 'ma']
_pub_ext_list_dic_[_cyclo_][_guerilla_] = ['gproject']
_pub_ext_list_dic_[_material_] = {}
_pub_ext_list_dic_[_material_][_designer_] = ['sbsar', 'png', 'exr', 'tiff']
_pub_ext_list_dic_[_material_][_photoshop_] = ['png']
_pub_ext_list_dic_[_painter_template_] = {}
_pub_ext_list_dic_[_painter_template_][_painter_] = ['spt']
_pub_ext_list_dic_[_concept_] = {}
_pub_ext_list_dic_[_concept_][_photoshop_] = ['png']
_pub_ext_list_dic_[_layout_] = {}
_pub_ext_list_dic_[_layout_][_maya_] = ['abc', 'ma']
_pub_ext_list_dic_[_animation_] = {}
_pub_ext_list_dic_[_animation_][_maya_] = ['abc', 'ma']
_pub_ext_list_dic_[_lighting_] = {}
_pub_ext_list_dic_[_lighting_][_maya_] = ['exr']
_pub_ext_list_dic_[_lighting_][_guerilla_] = ['exr']
_pub_ext_list_dic_[_cfx_] = {}
_pub_ext_list_dic_[_cfx_][_maya_] = ['fur', 'abc']
_pub_ext_list_dic_[_cfx_][_maya_yeti_] = ['fur', 'abc']
_pub_ext_list_dic_[_fx_] = {}
_pub_ext_list_dic_[_fx_][_maya_] = ['abc', 'ma']
_pub_ext_list_dic_[_fx_][_houdini_] = ['hip', 'vdb', 'abc']
_pub_ext_list_dic_[_compositing_] = {}
_pub_ext_list_dic_[_compositing_][_nuke_] = ['exr']
_pub_ext_list_dic_[_camera_] = {}
_pub_ext_list_dic_[_camera_][_maya_] = ['abc', 'ma']
_custom_ext_dic_key_ = "custom_ext_dic"
# Publish extension dictionary
_project_extension_dic_key_ = 'extensions_dic'
# Per-stage workflow extensions that vary by software (overrides).
_workflow_ext_dic_custom_ = dict()
_workflow_ext_dic_custom_[_rig_] = dict()
_workflow_ext_dic_custom_[_rig_][_maya_] = 'ma'
_workflow_ext_dic_custom_[_rig_][_blender_] = 'blend'
_workflow_ext_dic_custom_[_rig_][_houdini_] = 'hip'
_workflow_ext_dic_custom_[_rig_][_3dsmax_] = 'max'
_workflow_ext_dic_custom_[_hair_] = dict()
_workflow_ext_dic_custom_[_hair_][_maya_yeti_] = 'ma'
_workflow_ext_dic_custom_[_hair_][_maya_] = 'ma'
_workflow_ext_dic_custom_[_hair_][_blender_] = 'blend'
_workflow_ext_dic_custom_[_hair_][_houdini_] = 'hip'
_workflow_ext_dic_custom_[_hair_][_3dsmax_] = 'max'
_workflow_ext_dic_custom_[_shading_] = dict()
_workflow_ext_dic_custom_[_shading_][_guerilla_] = 'gnode'
_workflow_ext_dic_custom_[_shading_][_maya_] = 'ma'
_workflow_ext_dic_custom_[_shading_][_blender_] = 'blend'
_workflow_ext_dic_custom_[_shading_][_houdini_] = 'hip'
_workflow_ext_dic_custom_[_shading_][_3dsmax_] = 'max'
# Software-independent workflow extension per stage.
_workflow_ext_dic_ = dict()
_workflow_ext_dic_[_design_] = 'png'
_workflow_ext_dic_[_geo_] = 'abc'
_workflow_ext_dic_[_autorig_] = 'ma'
_workflow_ext_dic_[_cam_rig_] = 'ma'
_workflow_ext_dic_[_texturing_] = 'exr'
_workflow_ext_dic_[_render_pass_] = 'gnode'
_workflow_ext_dic_[_render_graph_] = 'gnode'
_workflow_ext_dic_[_light_rig_] = 'gnode'
_workflow_ext_dic_[_concept_] = 'png'
_workflow_ext_dic_[_layout_] = 'abc'
_workflow_ext_dic_[_animation_] = 'abc'
_workflow_ext_dic_[_lighting_] = 'exr'
_workflow_ext_dic_[_cfx_] = 'fur'
_workflow_ext_dic_[_fx_] = 'abc'
_workflow_ext_dic_[_compositing_] = 'exr'
_workflow_ext_dic_[_cyclo_] = 'gproject'
_textures_ext_list_ = ['exr', 'png', 'tiff']
# Softs icons library
_photoshop_icon_ = _icon_path_ + 'photoshop.png'
_krita_icon_ = _icon_path_ + 'krita.png'
_maya_icon_ = _icon_path_ + 'maya.png'
# NOTE(review): _maya_py_icon_ is defined but _soft_icons_dic_ below maps
# _mayapy_ to _maya_icon_ — this variable appears unused here.
_maya_py_icon_ = _icon_path_ + 'maya.png'
_maya_yeti_icon_ = _icon_path_ + 'maya_yeti.png'
_painter_icon_ = _icon_path_ + 'painter.png'
_blender_icon_ = _icon_path_ + 'blender.png'
_3dsmax_icon_ = _icon_path_ + '3dsmax.png'
_designer_icon_ = _icon_path_ + 'designer.png'
_zbrush_icon_ = _icon_path_ + 'zbrush.png'
_mari_icon_ = _icon_path_ + 'mari.png'
_marvelous_icon_ = _icon_path_ + 'marvelous.png'
_guerilla_icon_ = _icon_path_ + 'guerilla.png'
_houdini_icon_ = _icon_path_ + 'houdini.png'
_nuke_icon_ = _icon_path_ + 'nuke.png'
_rumba_icon_ = _icon_path_ + 'rumba.png'
_resolve_icon_ = _icon_path_ + 'resolve.png'
_reaper_icon_ = _icon_path_ + 'reaper.png'
_folder_icon_ = _icon_path_ + 'folder.png'
# Soft icons dic
# Maps each software name to its icon path.
_soft_icons_dic_ = {}
_soft_icons_dic_[_maya_]=_maya_icon_
_soft_icons_dic_[_mayapy_]=_maya_icon_
_soft_icons_dic_[_maya_yeti_]=_maya_yeti_icon_
_soft_icons_dic_[_photoshop_]=_photoshop_icon_
_soft_icons_dic_[_krita_]=_krita_icon_
_soft_icons_dic_[_painter_]=_painter_icon_
_soft_icons_dic_[_blender_]=_blender_icon_
_soft_icons_dic_[_3dsmax_]=_3dsmax_icon_
_soft_icons_dic_[_designer_]=_designer_icon_
_soft_icons_dic_[_zbrush_]=_zbrush_icon_
_soft_icons_dic_[_marvelous_]=_marvelous_icon_
_soft_icons_dic_[_guerilla_]=_guerilla_icon_
_soft_icons_dic_[_houdini_]=_houdini_icon_
_soft_icons_dic_[_hython_]=_houdini_icon_
_soft_icons_dic_[_mari_]=_mari_icon_
_soft_icons_dic_[_nuke_]=_nuke_icon_
_soft_icons_dic_[_rumba_]=_rumba_icon_
_soft_icons_dic_[_resolve_]=_resolve_icon_
_soft_icons_dic_[_reaper_]=_reaper_icon_
_soft_icons_dic_[_folder_]=_folder_icon_
# Environment-variable names / startup hooks used when launching DCCs.
_maya_icon_path_ = 'XBMLANGPATH'
_maya_scripts_path_ = 'MAYA_SCRIPT_PATH'
_mel_startup_ = 'maya_wizard/startup.mel'
_guerilla_conf_file_ = 'guerilla.conf'
_guerilla_custom_python_ = 'GUERILLA_PYTHON_LIBRARY'
_guerilla_node_type_ = 'SceneGraphNode'
_blender_startup_ = 'blender_wizard/startup.py'
_houdini_startup_ = 'houdini_wizard/startup.py'
# Environment variable each software reads to locate wizard scripts.
# NOTE(review): no entries for _rumba_, _marvelous_ or _mari_ although they
# are in _softwares_list_ — confirm whether that is intentional.
_script_software_env_dic_=dict()
_script_software_env_dic_[_maya_]='PYTHONPATH'
_script_software_env_dic_[_mayapy_]='PYTHONPATH'
_script_software_env_dic_[_maya_yeti_]='PYTHONPATH'
_script_software_env_dic_[_photoshop_]='PYTHONPATH'
_script_software_env_dic_[_krita_]='PYTHONPATH'
_script_software_env_dic_[_nuke_]='NUKE_PATH'
_script_software_env_dic_[_houdini_]='PYTHONPATH'
_script_software_env_dic_[_hython_]='PYTHONPATH'
_script_software_env_dic_[_zbrush_]='PYTHONPATH'
_script_software_env_dic_[_guerilla_]='GUERILLA_CONF'
_script_software_env_dic_[_painter_]='SUBSTANCE_PAINTER_PLUGINS_PATH'
_script_software_env_dic_[_designer_]='SBS_DESIGNER_PYTHON_PATH'
_script_software_env_dic_[_blender_]='PYTHONPATH'
_script_software_env_dic_[_3dsmax_]='PYTHONPATH'
_script_software_env_dic_[_resolve_]='PYTHONPATH'
_script_software_env_dic_[_reaper_]='PYTHONPATH'
_script_software_env_dic_[_folder_]='null'
# Extensions dictionary
# Native work-file extension per software.
# NOTE(review): _rumba_ has no entry here (nor in _init_file__dic_ below)
# although it appears in _softwares_list_ — confirm.
_extension_dic_ = {}
_extension_dic_[_maya_]='ma'
_extension_dic_[_mayapy_]='ma'
_extension_dic_[_maya_yeti_]='ma'
_extension_dic_[_photoshop_]='psd'
_extension_dic_[_krita_]='kra'
_extension_dic_[_painter_]='spp'
_extension_dic_[_designer_]='sbs'
_extension_dic_[_zbrush_]='zpr'
_extension_dic_[_marvelous_]='hw'
_extension_dic_[_guerilla_]='gproject'
_extension_dic_[_houdini_]='hip'
_extension_dic_[_hython_]='hip'
_extension_dic_[_mari_]='Mari'
_extension_dic_[_nuke_]='nk'
_extension_dic_[_blender_]='blend'
_extension_dic_[_3dsmax_]='max'
_extension_dic_[_resolve_]='drp'
_extension_dic_[_reaper_]='rpp'
_extension_dic_[_folder_]='null'
# Init file for each software
# Template scene copied when creating a fresh work file ('null' = none).
_init_file_='ressources/init_files/init_file'
_init_file__dic_=dict()
_init_file__dic_[_maya_]='{}.{}'.format(_init_file_,_extension_dic_[_maya_])
_init_file__dic_[_mayapy_]='{}.{}'.format(_init_file_,_extension_dic_[_mayapy_])
_init_file__dic_[_maya_yeti_]='{}.{}'.format(_init_file_,_extension_dic_[_maya_yeti_])
_init_file__dic_[_photoshop_]='{}.{}'.format(_init_file_,_extension_dic_[_photoshop_])
_init_file__dic_[_krita_]='{}.{}'.format(_init_file_,_extension_dic_[_krita_])
_init_file__dic_[_painter_]='{}.{}'.format(_init_file_,_extension_dic_[_painter_])
_init_file__dic_[_blender_]='{}.{}'.format(_init_file_,_extension_dic_[_blender_])
_init_file__dic_[_3dsmax_]='{}.{}'.format(_init_file_,_extension_dic_[_3dsmax_])
_init_file__dic_[_designer_]='{}.{}'.format(_init_file_,_extension_dic_[_designer_])
_init_file__dic_[_zbrush_]='{}.{}'.format(_init_file_,_extension_dic_[_zbrush_])
_init_file__dic_[_marvelous_]='{}.{}'.format(_init_file_,_extension_dic_[_marvelous_])
_init_file__dic_[_guerilla_]='{}.{}'.format(_init_file_,_extension_dic_[_guerilla_])
_init_file__dic_[_houdini_]='{}.{}'.format(_init_file_,_extension_dic_[_houdini_])
_init_file__dic_[_hython_]='{}.{}'.format(_init_file_,_extension_dic_[_houdini_])
_init_file__dic_[_mari_]='{}.{}'.format(_init_file_,_extension_dic_[_mari_])
_init_file__dic_[_nuke_]='{}.{}'.format(_init_file_,_extension_dic_[_nuke_])
_init_file__dic_[_resolve_]='null'
_init_file__dic_[_reaper_]='{}.{}'.format(_init_file_,_extension_dic_[_reaper_])
_init_file__dic_[_folder_]='null'
# Stage softs dic
# Softwares available for each pipeline stage (first entry is presumably
# the default shown in the UI — TODO confirm).
_stage_softs_dic_ = {}
_stage_softs_dic_[_design_]=[_photoshop_, _krita_]
_stage_softs_dic_[_concept_]=[_photoshop_, _krita_]
_stage_softs_dic_[_geo_]=[_maya_, _zbrush_, _marvelous_, _blender_, _houdini_, _3dsmax_]
_stage_softs_dic_[_rig_]=[_maya_, _blender_, _houdini_, _3dsmax_]
_stage_softs_dic_[_texturing_]=[_painter_, _designer_, _mari_, _blender_, _houdini_, _3dsmax_]
_stage_softs_dic_[_shading_]=[_guerilla_, _maya_, _blender_, _houdini_, _3dsmax_]
_stage_softs_dic_[_hair_]=[_maya_, _blender_, _houdini_, _3dsmax_, _maya_yeti_]
_stage_softs_dic_[_layout_]=[_maya_, _blender_, _houdini_, _3dsmax_]
_stage_softs_dic_[_animation_]=[_maya_, _rumba_, _blender_, _houdini_, _3dsmax_]
_stage_softs_dic_[_lighting_]=[_guerilla_, _maya_, _maya_yeti_, _blender_, _houdini_, _3dsmax_]
_stage_softs_dic_[_cfx_]=[_maya_, _blender_, _houdini_, _3dsmax_, _maya_yeti_]
_stage_softs_dic_[_fx_]=[_houdini_, _maya_, _blender_, _3dsmax_]
_stage_softs_dic_[_fx_setup_]=[_houdini_, _maya_, _blender_, _3dsmax_]
_stage_softs_dic_[_compositing_]=[_nuke_, _blender_]
_stage_softs_dic_[_camera_]=[_maya_, _rumba_, _blender_, _houdini_, _3dsmax_]
_stage_softs_dic_[_autorig_]=[_maya_, _blender_]
_stage_softs_dic_[_cam_rig_] =[_maya_, _blender_]
_stage_softs_dic_[_cyclo_] = [_maya_, _blender_, _guerilla_]
_stage_softs_dic_[_gizmo_] = [_nuke_]
_stage_softs_dic_[_light_rig_] = [_maya_, _blender_, _guerilla_]
_stage_softs_dic_[_lut_] = [_nuke_, _guerilla_]
_stage_softs_dic_[_render_graph_] = [_guerilla_]
_stage_softs_dic_[_render_pass_] = [_guerilla_]
_stage_softs_dic_[_scripts_] = [_folder_]
_stage_softs_dic_[_sons_] = [_folder_]
_stage_softs_dic_[_stockshot_] = [_folder_]
_stage_softs_dic_[_video_] = [_folder_]
_stage_softs_dic_[_video_edit_] = [_resolve_]
_stage_softs_dic_[_sound_edit_] = [_reaper_]
_stage_softs_dic_[_material_] = [_designer_, _photoshop_]
_stage_softs_dic_[_painter_template_] = [_painter_]
# Game icons library
_life_progress_0_ = _icon_path_ + 'life0.png'
_life_progress_1_ = _icon_path_ + 'life1.png'
_life_progress_2_ = _icon_path_ + 'life2.png'
_life_progress_3_ = _icon_path_ + 'life3.png'
_life_progress_4_ = _icon_path_ + 'life4.png'
_lvl_progress_0_ = _icon_path_ + 'lvl0.png'
_lvl_progress_1_ = _icon_path_ + 'lvl1.png'
_lvl_progress_2_ = _icon_path_ + 'lvl2.png'
_lvl_progress_3_ = _icon_path_ + 'lvl3.png'
_lvl_progress_4_ = _icon_path_ + 'lvl4.png'
_heart_icon_ = _icon_path_ + 'heart.png'
_xp_icon_ = _icon_path_ + 'xp.png'
_gold_icon_ = _icon_path_ + 'gold.png'
_lvl_icon_ = _icon_path_ + 'l.png'
_cup_icon_ = _icon_path_ + 'cup.png'
_stuff_icon_ = _icon_path_ + 'stuff.png'
_shop_icon_ = _icon_path_ + 'shop.png'
_buy_icon_ = _icon_path_ + 'buy.png'
# Jokes library
_previous_icon_ = _icon_path_ + 'previous.png'
_next_icon_ = _icon_path_ + 'next.png'
_star_empty_icon_ = _icon_path_ + 'star_empty.png'
_star_full_icon_ = _icon_path_ + 'star_full.png'
_star_hover_icon_ = _icon_path_ + 'star_hover.png'
_joke_data_ = 'joke'
_note_key_ = 'note'
_notes_list_key_ = 'note_list'
_users_jury_list_key_ = 'jury_list'
_high_quote_icon_ = _icon_path_ + 'high_quote.png'
_low_quote_icon_ = _icon_path_ + 'low_quote.png'
_police_icon_ = _icon_path_ + 'policeman.png'
_star1_ = 'star1'
_star2_ = 'star2'
_star3_ = 'star3'
_star4_ = 'star4'
_star5_ = 'star5'
_stars_states_dic_ = {}
_stars_states_dic_[0] = []
_stars_states_dic_[1] = [_star1_]
_stars_states_dic_[2] = [_star1_, _star2_]
_stars_states_dic_[3] = [_star1_, _star2_, _star3_]
_stars_states_dic_[4] = [_star1_, _star2_, _star3_, _star4_]
_stars_states_dic_[5] = [_star1_, _star2_, _star3_, _star4_, _star5_]
# Game keys library: per-user gamification fields.
_user_avatar_ = 'avatar'
_user_gold_ = 'gold'
_user_level_ = 'level'
_user_xp_ = 'xp'
_user_life_ = 'life'
# Tickets library: dictionary keys for the ticketing system.
# NOTE(review): 'adress' is a misspelling of 'address', but these strings are
# persisted data keys — renaming them would break previously saved files.
_all_tickets_ = 'all_tickets'
_user_tickets_ = 'user_tickets'
_adress_tickets_ = 'adress_tickets'
_assets_tickets_ = 'assets_tickets'
_ticket_state_ = 'state'
_ticket_open_ = 'opened'
_ticket_close_ = 'closed'
_close_date_ = 'close_date'
_ticket_adress_ = 'adress'
_ticket_comment_ = 'comment'
_ticket_close_user_ = 'close_user'
# Wall library: keys and event-type identifiers for the activity feed.
_message_key_ = 'message'
_file_key_ = 'file'
_wall_id_key_ = 'id'
_wall_creation_event_ = 'creation'
_wall_publish_event_ = 'publish'
_wall_xp_event_ = 'xp'
_wall_message_event_ = 'message'
_wall_remove_event_ = 'remove'
_wall_playblast_event_ = 'playblast'
_wall_ticket_event_ = 'ticket'
_wall_close_ticket_event_ = 'close_ticket'
_wall_time_key_ = 'date'
_wall_time_id_key_ = 'time_id'
_asset_key_ = 'asset'
_classic_message_ = 'classic_message_'
_file_message_ = 'file_message_'
_message_id_key_ = 'id'
# Lan keys library: key for the LAN server host address.
_server_ip_ = 'host_ip'
# Wall-event icon files (_icon_path_ is defined earlier in this module).
_create_event_icon_ = _icon_path_ + 'create_event.png'
_remove_event_icon_ = _icon_path_ + 'remove_event.png'
_publish_event_icon_ = _icon_path_ + 'publish_event.png'
_xp_event_icon_ = _icon_path_ + 'level_medal.png'
# Icon shown in the wall feed for each event type. Ticket/XP events share the
# medal icon, and playblasts share the publish icon, as in the original table.
_event_icon_dic_ = {
    _wall_creation_event_: _create_event_icon_,
    _wall_remove_event_: _remove_event_icon_,
    _wall_publish_event_: _publish_event_icon_,
    _wall_playblast_event_: _publish_event_icon_,
    _wall_xp_event_: _xp_event_icon_,
    _wall_ticket_event_: _xp_event_icon_,
    _wall_close_ticket_event_: _xp_event_icon_,
}
# Accent colour (hex RGB string) used for each event type in the feed.
_event_color_dic_ = {
    _wall_creation_event_: '#93d1ff',
    _wall_remove_event_: '#ff8383',
    _wall_publish_event_: '#a4ff83',
    _wall_xp_event_: '#faff89',
    _wall_playblast_event_: '#f6ff67',
    _wall_ticket_event_: '#8693FF',
    _wall_close_ticket_event_: '#8693FF',
}
# Prefs files: site-level data files, relative to the site root.
_site_ = 'Data/site.wd'
_site_path_ = 'Data/_/'
_site_db_ = 'Data/site.db'
_stats_ = 'Data/'
_jokes_ = 'Data/jokes.wd'
# Environment-variable names used to locate the files above.
_site_var_ = 'SITE_FILE'
_jokes_var_ = 'JOKES_FILE'
_stats_var_ = 'STATS_FILE'
_asset_var_ = 'ASSET_VAR'
_softwares_scripts_path_ = 'softwares_env/softwares/'
# Project-level file names (relative to the project root).
_project_ = 'project.wd'
_project_db_ = 'project.db'
_chat_archives_ = 'chat_archives.wd'
_tree_ = 'tree.wd'
_production_ = 'production.wd'
# Per-user preference files.
# NOTE(review): USERPROFILE makes this Windows-specific — confirm the tool is
# Windows-only before reusing these paths elsewhere.
_user_path_ = '{}/Documents/wizard/'.format(os.getenv("USERPROFILE"))
_screen_records_path_ = _user_path_ + 'screen_records/'
_screen_records_file_ = _screen_records_path_ + 'screen_record.mov'
_clipboard_file_ = _user_path_ + 'clipboard.yaml'
_lock_file_ = _user_path_ + '.lock'
_user_ = _user_path_ + 'user.wd'
_user_db_ = _user_path_ + 'user.db'
# NOTE(review): 'shorcuts.pickle' is misspelled, but it is the on-disk file
# name — fixing the spelling would orphan users' existing preferences.
_shortcuts_prefs_file_ = _user_path_ + 'shorcuts.pickle'
_user_scripts_file_ = _user_path_ + 'scripts.yaml'
_project_script_file_ = 'scripts.yaml'
_user_custom_scripts_path_ = _user_path_ + 'scripts/'
_project_custom_scripts_path_ = 'scripts/'
_session_file_ = _user_custom_scripts_path_ + 'session.py'
_user_custom_icons_ = _user_path_ + 'custom_icons'
_wall_ = 'wall.log'
_tickets_ = 'tickets.wd'
_chat_ = 'chat.log'
_stylesheet_template_ = ressources_path("ressources/stylesheet/stylesheet_template.st")
# Software-configuration dictionary keys.
_software_path_key_ = "software_path"
_software_additionnal_script_key_ = "software_additionnal_script_path"
_software_additionnal_env_key_ = "software_additionnal_env_paths"
# Scene keys library
_scene_current_asset_ = "scene_current_asset"
# Clipboard keys library
_clipboard_work_scene_path_ = "clipboard_work_scene_path"
_clipboard_reference_list_ = "clipboard_references_list"
# Shortcuts keys library
_screen_record_ = 'screen_record'
# Project settings keys library
_project_name_key_ = 'project_name'
_frame_rate_key_ = 'frame_rate'
_yeti_as_abc_key_ = 'yeti_as_abc'
_format_key_ = 'format'
_color_management_key_ = 'color_management'
_sampling_rate_key_ = 'sampling_rate'
# Main keys library: dictionary keys for asset/variant/version records.
_creation_date_key_ = 'creation_date'
_creation_user_key_ = 'creation_user'
_creation_id_key_ = 'creation_id'
_asset_events_key_ = 'events'
_events_ = 'events'
_locks_ = 'locks'
_comment_key_ = 'comment'
_softwares_list_key_ = 'softwares_list'
_variants_list_key_ = 'variants_list'
_default_software_key_ = 'default_software'
_variant_references_list_ = 'references_list'
_default_variant_key_ = 'default_variant'
_versions_list_key_ = 'versions_list'
_versions_list_ = 'versions_list_soft'
_export_assets_list_key_ = 'export_assets_list'
_export_asset_key_ = 'export_asset'
_default_export_asset_key_ = 'default_export_asset'
_frame_range_key_ = 'frame_range'
_preroll_key_ = 'preroll'
_postroll_key_ = 'postroll'
_lock_key_ = 'lock'
_run_key_ = 'run'
_software_key_ = 'software'
_from_asset_key_ = "from_asset"
# User | |
rel_addr(value)
if cpu.getStatus(cpu.statusFlags['v']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def CLC_Implied(cpu):
    """CLC — clear the carry flag. Implied addressing; 1 byte, 2 cycles."""
    cpu.setStatus(cpu.statusFlags['c'], 0)
    advancePC(cpu, 1)
    return 2
def CLD_Implied(cpu):
    """CLD — clear the decimal-mode flag. Implied addressing; 1 byte, 2 cycles."""
    cpu.setStatus(cpu.statusFlags['d'], 0)
    advancePC(cpu, 1)
    return 2
def CLI_Implied(cpu):
    """CLI — clear the interrupt-disable flag. Implied addressing; 1 byte, 2 cycles."""
    cpu.setStatus(cpu.statusFlags['i'], 0)
    advancePC(cpu, 1)
    return 2
def CLV_Implied(cpu):
    """CLV — clear the overflow flag. Implied addressing; 1 byte, 2 cycles."""
    cpu.setStatus(cpu.statusFlags['v'], 0)
    advancePC(cpu, 1)
    return 2
def _cmp_flags(cpu, diff):
    # Shared CMP flag update: carry set when no borrow occurred, N from the
    # raw (possibly negative) difference, Z from its low byte.
    setC(cpu, 1 if diff >= 0 else 0)
    setN(cpu, diff)
    setZ(cpu, diff & 0xFF)
def CMP_Immediate(cpu):
    """CMP #imm — compare A with an immediate operand. 2 bytes, 2 cycles."""
    diff = cpu.registers['A'] - cpu.readMemory(cpu.registers['PC'] + 1)
    advancePC(cpu, 2)
    _cmp_flags(cpu, diff)
    return 2
def CMP_Zero(cpu):
    """CMP zp — compare A with a zero-page operand. 2 bytes, 3 cycles."""
    diff = cpu.registers['A'] - cpu.readMemory(addrmodes.Zero(cpu))
    advancePC(cpu, 2)
    _cmp_flags(cpu, diff)
    return 3
def CMP_Zero_X(cpu):
    """CMP zp,X — compare A with a zero-page,X operand. 2 bytes, 4 cycles."""
    diff = cpu.registers['A'] - cpu.readMemory(addrmodes.Zero_X(cpu))
    advancePC(cpu, 2)
    _cmp_flags(cpu, diff)
    return 4
def CMP_Absolute(cpu):
    """CMP abs — compare A with an absolute operand. 3 bytes, 4 cycles."""
    diff = cpu.registers['A'] - cpu.readMemory(addrmodes.Absolute(cpu))
    advancePC(cpu, 3)
    _cmp_flags(cpu, diff)
    return 4
def CMP_Absolute_X(cpu):
    """CMP abs,X — compare A with an absolute,X operand. 3 bytes, 4 cycles."""
    diff = cpu.registers['A'] - cpu.readMemory(addrmodes.Absolute_X(cpu))
    advancePC(cpu, 3)
    _cmp_flags(cpu, diff)
    return 4
def CMP_Absolute_Y(cpu):
    """CMP abs,Y — compare A with an absolute,Y operand. 3 bytes, 4 cycles."""
    diff = cpu.registers['A'] - cpu.readMemory(addrmodes.Absolute_Y(cpu))
    advancePC(cpu, 3)
    _cmp_flags(cpu, diff)
    return 4
def CMP_Indirect_X(cpu):
    """CMP (zp,X) — compare A with an indexed-indirect operand. 2 bytes, 6 cycles."""
    diff = cpu.registers['A'] - cpu.readMemory(addrmodes.Indirect_X(cpu))
    advancePC(cpu, 2)
    _cmp_flags(cpu, diff)
    return 6
def CMP_Indirect_Y(cpu):
    """CMP (zp),Y — compare A with an indirect-indexed operand. 2 bytes, 5 cycles."""
    diff = cpu.registers['A'] - cpu.readMemory(addrmodes.Indirect_Y(cpu))
    advancePC(cpu, 2)
    _cmp_flags(cpu, diff)
    return 5
def CPX_Immediate(cpu):
    """CPX #imm — compare X with an immediate operand. 2 bytes, 2 cycles."""
    diff = cpu.registers['X'] - cpu.readMemory(cpu.registers['PC'] + 1)
    advancePC(cpu, 2)
    setC(cpu, 1 if diff >= 0 else 0)
    setN(cpu, diff)
    setZ(cpu, diff & 0xFF)
    return 2
def CPX_Zero(cpu):
    """CPX zp — compare X with a zero-page operand. 2 bytes, 3 cycles."""
    diff = cpu.registers['X'] - cpu.readMemory(addrmodes.Zero(cpu))
    advancePC(cpu, 2)
    setC(cpu, 1 if diff >= 0 else 0)
    setN(cpu, diff)
    setZ(cpu, diff & 0xFF)
    return 3
def CPX_Absolute(cpu):
    """CPX abs — compare X with an absolute operand. 3 bytes, 4 cycles."""
    diff = cpu.registers['X'] - cpu.readMemory(addrmodes.Absolute(cpu))
    advancePC(cpu, 3)
    setC(cpu, 1 if diff >= 0 else 0)
    setN(cpu, diff)
    setZ(cpu, diff & 0xFF)
    return 4
def CPY_Immediate(cpu):
    """CPY #imm — compare Y with an immediate operand. 2 bytes, 2 cycles."""
    diff = cpu.registers['Y'] - cpu.readMemory(cpu.registers['PC'] + 1)
    advancePC(cpu, 2)
    setC(cpu, 1 if diff >= 0 else 0)
    setN(cpu, diff)
    setZ(cpu, diff & 0xFF)
    return 2
def CPY_Zero(cpu):
    """CPY zp — compare Y with a zero-page operand. 2 bytes, 3 cycles."""
    diff = cpu.registers['Y'] - cpu.readMemory(addrmodes.Zero(cpu))
    advancePC(cpu, 2)
    setC(cpu, 1 if diff >= 0 else 0)
    setN(cpu, diff)
    setZ(cpu, diff & 0xFF)
    return 3
def CPY_Absolute(cpu):
    """CPY abs — compare Y with an absolute operand. 3 bytes, 4 cycles."""
    diff = cpu.registers['Y'] - cpu.readMemory(addrmodes.Absolute(cpu))
    advancePC(cpu, 3)
    setC(cpu, 1 if diff >= 0 else 0)
    setN(cpu, diff)
    setZ(cpu, diff & 0xFF)
    return 4
def DEC_Zero(cpu):
    """DEC zp — decrement a zero-page memory byte. 2 bytes, 5 cycles."""
    address = addrmodes.Zero(cpu)
    result = (cpu.readMemory(address) - 1) & 0xFF
    cpu.writeMemory(address, result)
    advancePC(cpu, 2)
    setN(cpu, result)
    setZ(cpu, result)
    return 5
def DEC_Zero_X(cpu):
    """DEC zp,X — decrement a zero-page,X memory byte. 2 bytes, 6 cycles."""
    address = addrmodes.Zero_X(cpu)
    result = (cpu.readMemory(address) - 1) & 0xFF
    cpu.writeMemory(address, result)
    advancePC(cpu, 2)
    setN(cpu, result)
    setZ(cpu, result)
    return 6
def DEC_Absolute(cpu):
    """DEC abs — decrement an absolute memory byte. 3 bytes, 6 cycles."""
    address = addrmodes.Absolute(cpu)
    result = (cpu.readMemory(address) - 1) & 0xFF
    cpu.writeMemory(address, result)
    advancePC(cpu, 3)
    setN(cpu, result)
    setZ(cpu, result)
    return 6
def DEC_Absolute_X(cpu):
    """DEC abs,X — decrement an absolute,X memory byte. 3 bytes, 7 cycles."""
    address = addrmodes.Absolute_X(cpu)
    result = (cpu.readMemory(address) - 1) & 0xFF
    cpu.writeMemory(address, result)
    advancePC(cpu, 3)
    setN(cpu, result)
    setZ(cpu, result)
    return 7
def DEX_Implied(cpu):
    """DEX — decrement the X register. 1 byte, 2 cycles."""
    result = (cpu.registers['X'] - 1) & 0xFF
    cpu.registers['X'] = result
    advancePC(cpu, 1)
    setN(cpu, result)
    setZ(cpu, result)
    return 2
def DEY_Implied(cpu):
    """DEY — decrement the Y register. 1 byte, 2 cycles."""
    result = (cpu.registers['Y'] - 1) & 0xFF
    cpu.registers['Y'] = result
    advancePC(cpu, 1)
    setN(cpu, result)
    setZ(cpu, result)
    return 2
def EOR_Immediate(cpu):
    """EOR #imm — exclusive-OR A with an immediate operand. 2 bytes, 2 cycles."""
    result = cpu.readMemory(cpu.registers['PC'] + 1) ^ cpu.registers['A']
    cpu.registers['A'] = result
    advancePC(cpu, 2)
    setN(cpu, result)
    setZ(cpu, result)
    return 2
def EOR_Zero(cpu):
    """EOR zp — exclusive-OR A with a zero-page operand. 2 bytes, 3 cycles."""
    result = cpu.readMemory(addrmodes.Zero(cpu)) ^ cpu.registers['A']
    cpu.registers['A'] = result
    advancePC(cpu, 2)
    setN(cpu, result)
    setZ(cpu, result)
    return 3
def EOR_Zero_X(cpu):
    """EOR zp,X — exclusive-OR A with a zero-page,X operand. 2 bytes, 4 cycles."""
    result = cpu.readMemory(addrmodes.Zero_X(cpu)) ^ cpu.registers['A']
    cpu.registers['A'] = result
    advancePC(cpu, 2)
    setN(cpu, result)
    setZ(cpu, result)
    return 4
def EOR_Absolute(cpu):
    """EOR abs — exclusive-OR A with an absolute operand. 3 bytes, 4 cycles."""
    result = cpu.readMemory(addrmodes.Absolute(cpu)) ^ cpu.registers['A']
    cpu.registers['A'] = result
    advancePC(cpu, 3)
    setN(cpu, result)
    setZ(cpu, result)
    return 4
def EOR_Absolute_X(cpu):
    """EOR abs,X — exclusive-OR A with an absolute,X operand. 3 bytes, 4 cycles."""
    result = cpu.readMemory(addrmodes.Absolute_X(cpu)) ^ cpu.registers['A']
    cpu.registers['A'] = result
    advancePC(cpu, 3)
    setN(cpu, result)
    setZ(cpu, result)
    return 4
def EOR_Absolute_Y(cpu):
    """EOR abs,Y — exclusive-OR A with an absolute,Y operand. 3 bytes, 4 cycles."""
    result = cpu.readMemory(addrmodes.Absolute_Y(cpu)) ^ cpu.registers['A']
    cpu.registers['A'] = result
    advancePC(cpu, 3)
    setN(cpu, result)
    setZ(cpu, result)
    return 4
def EOR_Indirect_X(cpu):
    """EOR (zp,X) — exclusive-OR A with an indexed-indirect operand. 2 bytes, 6 cycles."""
    result = cpu.readMemory(addrmodes.Indirect_X(cpu)) ^ cpu.registers['A']
    cpu.registers['A'] = result
    advancePC(cpu, 2)
    setN(cpu, result)
    setZ(cpu, result)
    return 6
def EOR_Indirect_Y(cpu):
    """EOR (zp),Y — exclusive-OR A with an indirect-indexed operand. 2 bytes, 5 cycles."""
    result = cpu.readMemory(addrmodes.Indirect_Y(cpu)) ^ cpu.registers['A']
    cpu.registers['A'] = result
    advancePC(cpu, 2)
    setN(cpu, result)
    setZ(cpu, result)
    return 5
def INC_Zero(cpu):
    """INC zp — increment a zero-page memory byte. 2 bytes, 5 cycles."""
    address = addrmodes.Zero(cpu)
    result = (cpu.readMemory(address) + 1) & 0xFF
    cpu.writeMemory(address, result)
    advancePC(cpu, 2)
    setN(cpu, result)
    setZ(cpu, result)
    return 5
def INC_Zero_X(cpu):
    """INC zp,X — increment a zero-page,X memory byte. 2 bytes, 6 cycles."""
    address = addrmodes.Zero_X(cpu)
    result = (cpu.readMemory(address) + 1) & 0xFF
    cpu.writeMemory(address, result)
    advancePC(cpu, 2)
    setN(cpu, result)
    setZ(cpu, result)
    return 6
def INC_Absolute(cpu):
    """INC abs — increment an absolute memory byte. 3 bytes, 6 cycles."""
    address = addrmodes.Absolute(cpu)
    result = (cpu.readMemory(address) + 1) & 0xFF
    cpu.writeMemory(address, result)
    advancePC(cpu, 3)
    setN(cpu, result)
    setZ(cpu, result)
    return 6
def INC_Absolute_X(cpu):
    """INC abs,X — increment an absolute,X memory byte. 3 bytes, 7 cycles."""
    address = addrmodes.Absolute_X(cpu)
    result = (cpu.readMemory(address) + 1) & 0xFF
    cpu.writeMemory(address, result)
    advancePC(cpu, 3)
    setN(cpu, result)
    setZ(cpu, result)
    return 7
def INX_Implied(cpu):
    """INX — increment the X register. 1 byte, 2 cycles."""
    result = (cpu.registers['X'] + 1) & 0xFF
    cpu.registers['X'] = result
    advancePC(cpu, 1)
    setN(cpu, result)
    setZ(cpu, result)
    return 2
def INY_Implied(cpu):
    """INY — increment the Y register. 1 byte, 2 cycles."""
    result = (cpu.registers['Y'] + 1) & 0xFF
    cpu.registers['Y'] = result
    advancePC(cpu, 1)
    setN(cpu, result)
    setZ(cpu, result)
    return 2
def JMP_Absolute(cpu):
    """JMP abs — load PC with an absolute target address. 3 bytes, 3 cycles."""
    target = addrmodes.Absolute(cpu)
    # PC is advanced and then immediately overwritten; kept to mirror the
    # common instruction flow used throughout this module.
    advancePC(cpu, 3)
    cpu.registers['PC'] = target
    return 3
def JMP_Indirect(cpu):
    """JMP (ind) — load PC with an indirectly-fetched target. 3 bytes, 5 cycles."""
    target = addrmodes.Indirect(cpu)
    advancePC(cpu, 3)
    cpu.registers['PC'] = target
    return 5
def JSR_Absolute(cpu):
    """JSR abs — push the return address, then jump. 3 bytes, 6 cycles."""
    target = addrmodes.Absolute(cpu)
    # Advance only 2 so the pushed return address points at the JSR's last
    # byte (RTS compensates), then push PC high byte first.
    advancePC(cpu, 2)
    return_address = cpu.registers['PC']
    cpu.pushStack((return_address >> 8) & 0xFF)
    cpu.pushStack(return_address & 0xFF)
    cpu.registers['PC'] = target
    return 6
def LDA_Immediate(cpu):
    """LDA #imm — load A with an immediate operand. 2 bytes, 2 cycles."""
    value = cpu.readMemory(cpu.registers['PC'] + 1)
    cpu.registers['A'] = value
    advancePC(cpu, 2)
    setN(cpu, value)
    setZ(cpu, value)
    return 2
def LDA_Zero(cpu):
    """LDA zp — load A from a zero-page address. 2 bytes, 3 cycles."""
    value = cpu.readMemory(addrmodes.Zero(cpu))
    cpu.registers['A'] = value
    advancePC(cpu, 2)
    setN(cpu, value)
    setZ(cpu, value)
    return 3
def LDA_Zero_X(cpu):
    """LDA zp,X — load A from a zero-page,X address. 2 bytes, 4 cycles."""
    value = cpu.readMemory(addrmodes.Zero_X(cpu))
    cpu.registers['A'] = value
    advancePC(cpu, 2)
    setN(cpu, value)
    setZ(cpu, value)
    return 4
def LDA_Absolute(cpu):
size = | |
batched
]
) + tuple( # Validate transformations of a complex matrix
_make_triangular_solve_harness("complex_transformations", dtype=np.complex64,
lower=lower, transpose_a=transpose_a,
conjugate_a=conjugate_a)
for lower in [False, True]
for transpose_a in [False, True]
for conjugate_a in [False, True]
) + tuple( # Validate transformations of a real matrix
_make_triangular_solve_harness("real_transformations", dtype=np.float32,
lower=lower, transpose_a=transpose_a)
for lower in [False, True]
for transpose_a in [False, True]
# conjugate_a is irrelevant for real dtypes, and is thus omitted
)
def _make_linear_solve_harnesses():
    """Build Harnesses for lax.custom_linear_solve.

    Covers all floating dtypes except float16/bfloat16, the `symmetric`
    flag, and the case where `transpose_solve` is omitted.
    """
    def linear_solve(a, b, solve, transpose_solve=None, symmetric=False):
        # Solve a @ x = b through custom_linear_solve; HIGHEST precision keeps
        # the matvec numerically comparable across backends.
        matvec = partial(lax.dot, a, precision=lax.Precision.HIGHEST)
        return lax.custom_linear_solve(matvec, b, solve, transpose_solve, symmetric)

    def explicit_jacobian_solve(matvec, b):
        # Reference solver: materialize matvec's Jacobian and solve densely.
        # stop_gradient keeps the solver itself out of differentiation.
        return lax.stop_gradient(jnp.linalg.solve(jax.api.jacobian(matvec)(b), b))

    def _make_harness(name, *, shape=(4, 4), dtype=np.float32, symmetric=False,
                      solvers=(explicit_jacobian_solve, explicit_jacobian_solve)):
        solve, transpose_solve = solvers
        transpose_solve_name = transpose_solve.__name__ if transpose_solve else None
        return Harness(f"_{name}_a={jtu.format_shape_dtype_string(shape, dtype)}_b={jtu.format_shape_dtype_string(shape[:-1], dtype)}_solve={solve.__name__}_transposesolve={transpose_solve_name}_symmetric={symmetric}",
                       linear_solve,
                       [RandArg(shape, dtype), RandArg(shape[:-1], dtype),
                        StaticArg(solve), StaticArg(transpose_solve),
                        StaticArg(symmetric)],
                       shape=shape,
                       dtype=dtype,
                       solve=solve,
                       transpose_solve=transpose_solve,
                       symmetric=symmetric)

    return tuple(  # Validate dtypes
        _make_harness("dtypes", dtype=dtype)
        for dtype in jtu.dtypes.all_floating
        # Idiom fix: was `if not dtype in [...]` (PEP 8 / E713); behavior unchanged.
        if dtype not in [np.float16, dtypes.bfloat16]
    ) + tuple(  # Validate symmetricity
        [_make_harness("symmetric", symmetric=True)]
    ) + tuple(  # Validate removing transpose_solve
        [_make_harness("transpose_solve", solvers=(explicit_jacobian_solve, None))]
    )
lax_linear_solve = _make_linear_solve_harnesses()
# Harnesses for lax.slice across shapes, index ranges and strides, including
# deliberately out-of-bounds start/limit indices (index clamping is part of
# what is being tested). Note: `strides` is intentionally not stored in the
# Harness params — lax_dynamic_slice below reuses these params and
# dynamic_slice takes no strides.
lax_slice = tuple(
    Harness(f"_shape={shape}_start_indices={start_indices}_limit_indices={limit_indices}_strides={strides}",  # type: ignore
            lax.slice,
            [RandArg(shape, dtype),  # type: ignore
             StaticArg(start_indices),  # type: ignore
             StaticArg(limit_indices),  # type: ignore
             StaticArg(strides)],  # type: ignore
            shape=shape,  # type: ignore
            start_indices=start_indices,  # type: ignore
            limit_indices=limit_indices)  # type: ignore
    for shape, start_indices, limit_indices, strides in [
        [(3,), (1,), (2,), None],
        [(7,), (4,), (7,), None],
        [(5,), (1,), (5,), (2,)],
        [(8,), (1,), (6,), (2,)],
        [(5, 3), (1, 1), (3, 2), None],
        [(5, 3), (1, 1), (3, 1), None],
        [(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
        [(5, 3), (1, 1), (2, 1), (1, 1)],
        [(5, 3), (1, 1), (5, 3), (2, 1)],
        # out-of-bounds cases
        [(5,), (-1,), (0,), None],
        [(5,), (-1,), (1,), None],
        [(5,), (-4,), (-2,), None],
        [(5,), (-5,), (-2,), None],
        [(5,), (-6,), (-5,), None],
        [(5,), (-10,), (-9,), None],
        [(5,), (-100,), (-99,), None],
        [(5,), (5,), (6,), None],
        [(5,), (10,), (11,), None],
        [(5,), (0,), (100,), None],
        [(5,), (3,), (6,), None]
    ]
    for dtype in [np.float32]
)
def _make_conj_harness(name, *, shape=(3, 4), dtype=np.float32, **kwargs):
    """Build a Harness that binds lax.conj_p on one random operand.

    Extra kwargs are forwarded both to conj_p.bind and into the Harness
    parameters.
    """
    operand_repr = jtu.format_shape_dtype_string(shape, dtype)
    harness_name = f"{name}_operand={operand_repr}_kwargs={kwargs}".replace(" ", "")
    return Harness(harness_name,
                   lambda x: lax.conj_p.bind(x, **kwargs),
                   [RandArg(shape, dtype)],
                   shape=shape,
                   dtype=dtype,
                   **kwargs)
# Harnesses for conj_p: all floating/complex dtypes, plus the ad-specific
# `_input_dtype` keyword that differentiation passes to bind.
lax_conj = tuple(  # Validate dtypes
    _make_conj_harness("dtypes", dtype=dtype)
    for dtype in jtu.dtypes.floating + jtu.dtypes.complex
) + tuple(  # Validate kwargs
    _make_conj_harness("kwargs", **kwargs)
    for kwargs in [
        { "_input_dtype": np.float32 },  # expected kwarg for ad
    ]
)
# Use lax_slice, but (a) make the start_indices dynamic arg, and (b) no strides.
lax_dynamic_slice = [
    Harness(harness.name,
            lax.dynamic_slice,
            [harness.arg_descriptors[0],
             np.array(list(start_indices)),
             # slice sizes are derived as limit - start, element-wise
             StaticArg(tuple(map(operator.sub, limit_indices, start_indices)))],
            **harness.params)
    for harness in lax_slice
    for start_indices in [harness.params["start_indices"]]
    for limit_indices in [harness.params["limit_indices"]]
]
# Harnesses for lax.dynamic_update_slice, including out-of-bounds start
# indices (clamping behaviour is under test).
lax_dynamic_update_slice = tuple(
    Harness((f"_operand={jtu.format_shape_dtype_string(shape, dtype)}"  # type: ignore
             f"_update={jtu.format_shape_dtype_string(update_shape, update_dtype)}"
             f"_start_indices={start_indices}"),
            lax.dynamic_update_slice,
            [RandArg(shape, dtype),  # type: ignore
             RandArg(update_shape, update_dtype),  # type: ignore
             np.array(start_indices)],  # type: ignore
            shape=shape,  # type: ignore
            start_indices=start_indices,  # type: ignore
            update_shape=update_shape)  # type: ignore
    for shape, start_indices, update_shape in [
        [(3,), (1,), (1,)],
        [(5, 3), (1, 1), (3, 1)],
        [(7, 5, 3), (4, 1, 0), (2, 0, 1)],
        [(3,), (-1,), (1,)],  # out-of-bounds
        [(3,), (10,), (1,)],  # out-of-bounds
        [(3,), (10,), (4,)],  # out-of-bounds shape too big
        [(3,), (10,), (2,)],  # out-of-bounds
    ]
    for dtype, update_dtype in [
        (np.float32, np.float32),
        (np.float64, np.float64)
    ])
# Harnesses for lax.squeeze: negative dimensions and multiple dimensions.
lax_squeeze = tuple(
    Harness(f"_inshape={jtu.format_shape_dtype_string(arg_shape, dtype)}_dimensions={dimensions}",  # type: ignore
            lax.squeeze,
            [RandArg(arg_shape, dtype), StaticArg(dimensions)],  # type: ignore[has-type]
            arg_shape=arg_shape, dtype=dtype, dimensions=dimensions)  # type: ignore[has-type]
    for arg_shape, dimensions in [
        [(1,), (0,)],
        [(1,), (-1,)],
        [(2, 1, 4), (1,)],
        [(2, 1, 4), (-2,)],
        [(2, 1, 3, 1), (1,)],
        [(2, 1, 3, 1), (1, 3)],
        [(2, 1, 3, 1), (3,)],
        [(2, 1, 3, 1), (1, -1)],
    ]
    for dtype in [np.float32]
)
# (arg, dtype, shift_amount) triples shared by the three shift harnesses.
# NOTE(review): np.array([-250, ...]) with an unsigned dtype relies on legacy
# numpy wraparound; newer numpy versions reject negative values here —
# confirm the pinned numpy version.
shift_inputs = [
    (arg, dtype, shift_amount)
    for dtype in jtu.dtypes.all_unsigned + jtu.dtypes.all_integer
    for arg in [
        np.array([-250, -1, 0, 1, 250], dtype=dtype),
    ]
    for shift_amount in [-8, -1, 0, 1, 3, 7, 8, 16, 32, 64]
]
lax_shift_left = tuple(
    Harness(f"_dtype={dtype.__name__}_shift_amount={shift_amount}",  # type: ignore
            lax.shift_left,
            [arg, StaticArg(np.array([shift_amount], dtype=dtype))])
    for arg, dtype, shift_amount in shift_inputs
)
# NOTE(review): unlike the other two shift harnesses, this one passes the
# shift amount as a dynamic scalar instead of a StaticArg — confirm whether
# the asymmetry is intentional.
lax_shift_right_logical = tuple(
    Harness(f"_dtype={dtype.__name__}_shift_amount={shift_amount}",  # type: ignore
            lax.shift_right_logical,
            [arg, dtype(shift_amount)],
            dtype=dtype)
    for arg, dtype, shift_amount in shift_inputs
)
lax_shift_right_arithmetic = tuple(
    Harness(f"_dtype={dtype.__name__}_shift_amount={shift_amount}",  # type: ignore
            lax.shift_right_arithmetic,
            [arg, StaticArg(np.array([shift_amount], dtype=dtype))],
            dtype=dtype)
    for arg, dtype, shift_amount in shift_inputs
)
def _make_select_and_scatter_add_harness(
        name, *, shape=(2, 4, 6), dtype=np.float32, select_prim=lax.ge_p,
        window_dimensions=(2, 2, 2), window_strides=(1, 1, 1),
        padding=((0, 0), (0, 0), (0, 0)), nb_inactive_dims=0):
    """Build a Harness for lax._select_and_scatter_add.

    The cotangent operand's shape is derived by eval_shape'ing the matching
    _select_and_gather_add call with the same window configuration.
    """
    ones = (1,) * len(shape)
    # The lambda hard-codes lax.ge_p rather than select_prim; presumably the
    # output shape does not depend on the comparison primitive — verify if
    # select_prim variants ever change shape semantics.
    cotangent_shape = jax.api.eval_shape(
        lambda x: lax._select_and_gather_add(x, x, lax.ge_p, window_dimensions,
                                             window_strides, padding, ones, ones),
        np.ones(shape, dtype)).shape
    return Harness(f"{name}_shape={jtu.format_shape_dtype_string(shape, dtype)}_selectprim={select_prim}_windowdimensions={window_dimensions}_windowstrides={window_strides}_padding={padding}",
                   lax._select_and_scatter_add,
                   [RandArg(cotangent_shape, dtype), RandArg(shape, dtype),
                    StaticArg(select_prim), StaticArg(window_dimensions),
                    StaticArg(window_strides), StaticArg(padding)],
                   shape=shape,
                   dtype=dtype,
                   select_prim=select_prim,
                   window_dimensions=window_dimensions,
                   window_strides=window_strides,
                   padding=padding,
                   # JAX can only run select_and_scatter_add on TPU when 2
                   # or more dimensions are inactive
                   run_on_tpu=(nb_inactive_dims >= 2))
# Harnesses for _select_and_scatter_add: dtypes (complex excluded), the
# alternative le_p selection primitive, padding, window dimensions/strides,
# and a TPU-specific dtype sweep with enough inactive dimensions.
lax_select_and_scatter_add = tuple(  # Validate dtypes
    _make_select_and_scatter_add_harness("dtypes", dtype=dtype)
    for dtype in set(jtu.dtypes.all) - set([np.complex64, np.complex128])
) + tuple(  # Validate different reduction primitives
    _make_select_and_scatter_add_harness("select_prim", select_prim=select_prim)
    for select_prim in [lax.le_p]
) + tuple(  # Validate padding
    _make_select_and_scatter_add_harness("padding", padding=padding)
    for padding in [
        # TODO(bchetioui): commented out the test based on
        # https://github.com/google/jax/issues/4690
        #((1, 2), (2, 3), (3, 4)) # non-zero padding
        ((1, 1), (1, 1), (1, 1))  # non-zero padding
    ]
) + tuple(  # Validate window_dimensions
    _make_select_and_scatter_add_harness("window_dimensions",
                                         window_dimensions=window_dimensions)
    for window_dimensions in [
        (1, 2, 3)  # uneven dimensions
    ]
) + tuple(  # Validate window_strides
    _make_select_and_scatter_add_harness("window_strides",
                                         window_strides=window_strides)
    for window_strides in [
        (1, 2, 3)  # smaller than/same as/bigger than corresponding window dimension
    ]
) + tuple(  # Validate dtypes on TPU
    _make_select_and_scatter_add_harness("tpu_dtypes", dtype=dtype,
                                         nb_inactive_dims=nb_inactive_dims,
                                         window_strides=window_strides,
                                         window_dimensions=window_dimensions)
    for dtype in set(jtu.dtypes.all) - set([np.bool_, np.complex64, np.complex128,
                                            np.int8, np.uint8])
    for window_strides, window_dimensions, nb_inactive_dims in [
        ((1, 2, 1), (1, 3, 1), 2)
    ]
)
def _make_select_and_gather_add_harness(
        name, *, shape=(4, 6), dtype=np.float32, select_prim=lax.le_p,
        padding='VALID', window_dimensions=(2, 2), window_strides=(1, 1),
        base_dilation=(1, 1), window_dilation=(1, 1)):
    """Build a Harness for lax._select_and_gather_add.

    `padding` may be given as a string ('VALID'/'SAME') and is expanded to
    explicit per-dimension pads via lax.padtype_to_pads.
    """
    if isinstance(padding, str):
        padding = tuple(lax.padtype_to_pads(shape, window_dimensions,
                                            window_strides, padding))
    # NOTE(review): select_prim appears in the harness name but is not stored
    # in the Harness params (unlike the scatter_add helper above) — confirm
    # whether consumers need it there.
    return Harness(f"{name}_shape={jtu.format_shape_dtype_string(shape, dtype)}_selectprim={select_prim}_windowdimensions={window_dimensions}_windowstrides={window_strides}_padding={padding}_basedilation={base_dilation}_windowdilation={window_dilation}",
                   lax._select_and_gather_add,
                   [RandArg(shape, dtype), RandArg(shape, dtype),
                    StaticArg(select_prim), StaticArg(window_dimensions),
                    StaticArg(window_strides), StaticArg(padding),
                    StaticArg(base_dilation), StaticArg(window_dilation)],
                   shape=shape,
                   dtype=dtype,
                   window_dimensions=window_dimensions,
                   window_strides=window_strides,
                   padding=padding,
                   base_dilation=base_dilation,
                   window_dilation=window_dilation)
# Harnesses for _select_and_gather_add: floating dtypes, the ge_p selection
# primitive, window dimensions/strides, SAME padding, and dilations.
lax_select_and_gather_add = tuple(  # Validate dtypes
    _make_select_and_gather_add_harness("dtypes", dtype=dtype)
    for dtype in jtu.dtypes.all_floating
) + tuple(  # Validate selection primitives
    [_make_select_and_gather_add_harness("select_prim", select_prim=lax.ge_p)]
) + tuple(  # Validate window dimensions
    _make_select_and_gather_add_harness("window_dimensions",
                                        window_dimensions=window_dimensions)
    for window_dimensions in [(2, 3)]
) + tuple(  # Validate window strides
    _make_select_and_gather_add_harness("window_strides",
                                        window_strides=window_strides)
    for window_strides in [(2, 3)]
) + tuple(  # Validate padding
    _make_select_and_gather_add_harness("padding", padding=padding)
    for padding in ['SAME']
) + tuple(  # Validate dilations
    _make_select_and_gather_add_harness("dilations", base_dilation=base_dilation,
                                        window_dilation=window_dilation)
    for base_dilation, window_dilation in [
        ((2, 3), (1, 1)),  # base dilation, no window dilation
        ((1, 1), (2, 3)),  # no base dilation, window dilation
        ((2, 3), (3, 2))   # base dilation, window dilation
    ]
)
def _make_reduce_window_harness(name, *, shape=(4, 6), base_dilation=(1, 1),
                                computation=lax.add, window_dimensions=(2, 2),
                                window_dilation=(1, 1), init_value=0,
                                window_strides=(1, 1), dtype=np.float32,
                                padding=((0, 0), (0, 0))):
    """Build a Harness for lax.reduce_window.

    `init_value` is converted to a 0-d array of `dtype` so the reducer
    dispatch sees a properly typed identity element.
    """
    return Harness(f"{name}_shape={jtu.format_shape_dtype_string(shape, dtype)}_initvalue={init_value}_computation={computation.__name__}_windowdimensions={window_dimensions}_windowstrides={window_strides}_padding={padding}_basedilation={base_dilation}_windowdilation={window_dilation}".replace(' ', ''),
                   lax.reduce_window,
                   [RandArg(shape, dtype),
                    StaticArg(np.array(init_value, dtype=dtype)),  # Must be static to trigger the picking of the reducers
                    StaticArg(computation), StaticArg(window_dimensions),
                    StaticArg(window_strides), StaticArg(padding),
                    StaticArg(base_dilation), StaticArg(window_dilation)],
                   shape=shape,
                   dtype=dtype,
                   init_value=np.array(init_value, dtype=dtype),
                   computation=computation,
                   window_dimensions=window_dimensions,
                   window_strides=window_strides,
                   padding=padding,
                   base_dilation=base_dilation,
                   window_dilation=window_dilation)
lax_reduce_window = tuple( # Validate dtypes across all execution paths
# This first harness runs the tests for all dtypes using default values for
# the other parameters (outside of computation and its init_value), through
# several execution paths. Variations of other parameters can thus safely
# skip testing their corresponding default value.
_make_reduce_window_harness("dtypes", dtype=dtype, computation=computation,
init_value=init_value)
for dtype in jtu.dtypes.all
for computation, init_value in [
(lax.min, _get_min_identity(dtype)), # path through reduce_window_min
(lax.max, _get_max_identity(dtype)), # path through TF reduce_window_max
(lax.max, 1), # path through reduce_window
] + ([
(lax.add, 0), # path_through reduce_window_sum
(lax.mul, 1), # path through reduce_window
] if dtype != jnp.bool_ else [])
) + tuple( # Validate window_dimensions
_make_reduce_window_harness("window_dimensions",
window_dimensions=window_dimensions)
for window_dimensions in [(1, 1)]
) + tuple( # Validate window_strides
_make_reduce_window_harness("window_strides", window_strides=window_strides)
for window_strides in [(1, 2)]
) + tuple( # Validate padding
[_make_reduce_window_harness("padding", padding=((1, 2), (0, 3)))]
) + tuple( # Validate base_dilation
_make_reduce_window_harness("base_dilation", base_dilation=base_dilation)
for base_dilation in [(1, 2)]
) + tuple( # Validate window_dilation
_make_reduce_window_harness("window_dilation",
window_dilation=window_dilation)
for window_dilation in [(1, 2)]
) + tuple( # Validate squeezing behavior and dimensions in tf.nn.max_pool
_make_reduce_window_harness("squeeze_dim", computation=lax.max, shape=shape,
dtype=np.float32, init_value=-np.inf,
base_dilation=tuple([1] * len(shape)),
window_dilation=tuple([1] * len(shape)),
padding=tuple([(0, 0)] * len(shape)),
window_strides=tuple([1] * len(shape)),
window_dimensions=window_dimensions)
for shape, window_dimensions in [
((2,), (2,)), # 1 spatial dimension, left and right squeeze
((2, 1), (2, | |
True
PERFORMANCE WARNING: if keepAppearance is false, then this does not properly
reuse OpenGL display lists. A better approach to changing the robot's
appearances is to set the link Appearance's directly.
"""
return _robotsim.RobotModel_drawGL(self, keepAppearance)
    # SWIG attribute plumbing: route the world/index/robot/dirty_dynamics
    # attributes through the generated C accessor functions. When new-style
    # classes are available (_newclass), expose them as real properties.
    __swig_setmethods__["world"] = _robotsim.RobotModel_world_set
    __swig_getmethods__["world"] = _robotsim.RobotModel_world_get
    if _newclass:
        world = _swig_property(_robotsim.RobotModel_world_get, _robotsim.RobotModel_world_set)
    __swig_setmethods__["index"] = _robotsim.RobotModel_index_set
    __swig_getmethods__["index"] = _robotsim.RobotModel_index_get
    if _newclass:
        index = _swig_property(_robotsim.RobotModel_index_get, _robotsim.RobotModel_index_set)
    __swig_setmethods__["robot"] = _robotsim.RobotModel_robot_set
    __swig_getmethods__["robot"] = _robotsim.RobotModel_robot_get
    if _newclass:
        robot = _swig_property(_robotsim.RobotModel_robot_get, _robotsim.RobotModel_robot_set)
    __swig_setmethods__["dirty_dynamics"] = _robotsim.RobotModel_dirty_dynamics_set
    __swig_getmethods__["dirty_dynamics"] = _robotsim.RobotModel_dirty_dynamics_get
    if _newclass:
        dirty_dynamics = _swig_property(_robotsim.RobotModel_dirty_dynamics_get, _robotsim.RobotModel_dirty_dynamics_set)
    __swig_destroy__ = _robotsim.delete_RobotModel
    __del__ = lambda self: None
# Register the Python proxy class with the SWIG runtime.
RobotModel_swigregister = _robotsim.RobotModel_swigregister
RobotModel_swigregister(RobotModel)
class RigidObjectModel(_object):
    """
    A rigid movable object.

    A rigid object has a name, geometry, appearance, mass, surface properties,
    and current transform / velocity.

    State is retrieved/set using get/setTransform, and get/setVelocity.

    C++ includes: robotmodel.h
    """
    # SWIG proxy plumbing: attribute access is delegated to the generated
    # C accessor tables.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, RigidObjectModel, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, RigidObjectModel, name)
    __repr__ = _swig_repr

    def __init__(self):
        """
        Returns:
            (:class:`~klampt.RigidObjectModel`):
        """
        this = _robotsim.new_RigidObjectModel()
        try:
            # SWIG proxies may keep a list of C++ pointers; fall back to
            # direct assignment when 'this' is not yet list-like.
            self.this.append(this)
        except Exception:
            self.this = this
    def loadFile(self, fn):
        """
        Loads the object from a file.

        Args:
            fn (str): path to the rigid object file.

        Returns:
            (bool):
        """
        return _robotsim.RigidObjectModel_loadFile(self, fn)

    def saveFile(self, fn, geometryName=None):
        """
        Saves the object. If geometryName is given, the geometry is saved to that file.

        saveFile (fn,geometryName=None): bool
        saveFile (fn): bool

        Args:
            fn (str): destination file for the object.
            geometryName (str, optional): default value None.

        Returns:
            (bool):
        """
        return _robotsim.RigidObjectModel_saveFile(self, fn, geometryName)
    def getID(self):
        """
        Returns the ID of the rigid object in its world (Note: not the same as the
        rigid object index).

        Returns:
            (int):
        """
        return _robotsim.RigidObjectModel_getID(self)

    def getName(self):
        """
        Returns the name of the rigid object.

        Returns:
            (str):
        """
        return _robotsim.RigidObjectModel_getName(self)

    def setName(self, name):
        """
        Sets the name of the rigid object.

        Args:
            name (str)
        """
        return _robotsim.RigidObjectModel_setName(self, name)
def geometry(self):
"""
Returns a reference to the geometry associated with this object.
Returns:
(:class:`~klampt.Geometry3D`):
"""
return _robotsim.RigidObjectModel_geometry(self)
def appearance(self):
"""
Returns a reference to the appearance associated with this object.
Returns:
(:class:`~klampt.Appearance`):
"""
return _robotsim.RigidObjectModel_appearance(self)
def getMass(self):
"""
Returns a copy of the Mass of this rigid object. Note: to change the mass
properties, you should call m=object.getMass(), change the desired properties in
m, and then object.setMass(m)
Returns:
(:class:`~klampt.Mass`):
"""
return _robotsim.RigidObjectModel_getMass(self)
def setMass(self, mass):
"""
Args:
mass (:class:`~klampt.Mass`)
"""
return _robotsim.RigidObjectModel_setMass(self, mass)
def getContactParameters(self):
"""
Returns a copy of the ContactParameters of this rigid object.
Returns:
(:class:`~klampt.ContactParameters`):
Note: to change the contact parameters, you should call
`p=object.getContactParameters()`, change the desired properties in p, and then
`object.setContactParameters(p)`
"""
return _robotsim.RigidObjectModel_getContactParameters(self)
def setContactParameters(self, params):
"""
Args:
params (:class:`~klampt.ContactParameters`)
"""
return _robotsim.RigidObjectModel_setContactParameters(self, params)
def getTransform(self):
"""
Retrieves the rotation / translation of the rigid object (R,t)
"""
return _robotsim.RigidObjectModel_getTransform(self)
def setTransform(self, R, t):
"""
Sets the rotation / translation (R,t) of the rigid object.
Args:
R (:obj:`list of 9 floats (so3 element)`)
t (:obj:`list of 3 floats`)
"""
return _robotsim.RigidObjectModel_setTransform(self, R, t)
def getVelocity(self):
"""
Retrieves the (angular velocity, velocity) of the rigid object.
"""
return _robotsim.RigidObjectModel_getVelocity(self)
def setVelocity(self, angularVelocity, velocity):
"""
Sets the (angular velocity, velocity) of the rigid object.
Args:
angularVelocity (:obj:`list of 3 floats`)
velocity (:obj:`list of 3 floats`)
"""
return _robotsim.RigidObjectModel_setVelocity(self, angularVelocity, velocity)
def drawGL(self, keepAppearance=True):
"""
Draws the object's geometry. If keepAppearance=true, the current appearance is
honored. Otherwise, only the raw geometry is drawn.
drawGL (keepAppearance=True)
drawGL ()
Args:
keepAppearance (bool, optional): default value True
PERFORMANCE WARNING: if keepAppearance is false, then this does not properly
reuse OpenGL display lists. A better approach is to change the object's
Appearance directly.
"""
return _robotsim.RigidObjectModel_drawGL(self, keepAppearance)
__swig_setmethods__["world"] = _robotsim.RigidObjectModel_world_set
__swig_getmethods__["world"] = _robotsim.RigidObjectModel_world_get
if _newclass:
world = _swig_property(_robotsim.RigidObjectModel_world_get, _robotsim.RigidObjectModel_world_set)
__swig_setmethods__["index"] = _robotsim.RigidObjectModel_index_set
__swig_getmethods__["index"] = _robotsim.RigidObjectModel_index_get
if _newclass:
index = _swig_property(_robotsim.RigidObjectModel_index_get, _robotsim.RigidObjectModel_index_set)
__swig_setmethods__["object"] = _robotsim.RigidObjectModel_object_set
__swig_getmethods__["object"] = _robotsim.RigidObjectModel_object_get
if _newclass:
object = _swig_property(_robotsim.RigidObjectModel_object_get, _robotsim.RigidObjectModel_object_set)
__swig_destroy__ = _robotsim.delete_RigidObjectModel
__del__ = lambda self: None
RigidObjectModel_swigregister = _robotsim.RigidObjectModel_swigregister
RigidObjectModel_swigregister(RigidObjectModel)
class TerrainModel(_object):
    """
    Static environment geometry.

    C++ includes: robotmodel.h
    """
    # NOTE: SWIG auto-generated binding. Per-class dispatch tables route
    # attribute access to the C-extension getter/setter functions.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, TerrainModel, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, TerrainModel, name)
    __repr__ = _swig_repr

    def __init__(self):
        """
        Returns:
            (:obj:`TerrainModel`):
        """
        this = _robotsim.new_TerrainModel()
        # SWIG ownership bookkeeping: append the new C++ pointer to an
        # existing proxy list if present, otherwise bind it directly.
        try:
            self.this.append(this)
        except Exception:
            self.this = this

    def loadFile(self, fn):
        """
        Loads the terrain from a file.

        Args:
            fn (str)
        Returns:
            (bool):
        """
        return _robotsim.TerrainModel_loadFile(self, fn)

    def saveFile(self, fn, geometryName=None):
        """
        Saves the terrain. If geometryName is given, the geometry is saved to that file.

        saveFile (fn,geometryName=None): bool
        saveFile (fn): bool

        Args:
            fn (str):
            geometryName (str, optional): default value None
        Returns:
            (bool):
        """
        return _robotsim.TerrainModel_saveFile(self, fn, geometryName)

    def getID(self):
        """
        Returns the ID of the terrain in its world (Note: not the same as the terrain
        index)

        Returns:
            (int):
        """
        return _robotsim.TerrainModel_getID(self)

    def getName(self):
        """
        Returns:
            (str):
        """
        return _robotsim.TerrainModel_getName(self)

    def setName(self, name):
        """
        Args:
            name (str)
        """
        return _robotsim.TerrainModel_setName(self, name)

    def geometry(self):
        """
        Returns a reference to the geometry associated with this object.

        Returns:
            (:class:`~klampt.Geometry3D`):
        """
        return _robotsim.TerrainModel_geometry(self)

    def appearance(self):
        """
        Returns a reference to the appearance associated with this object.

        Returns:
            (:class:`~klampt.Appearance`):
        """
        return _robotsim.TerrainModel_appearance(self)

    def setFriction(self, friction):
        """
        Changes the friction coefficient for this terrain.

        Args:
            friction (float)
        """
        return _robotsim.TerrainModel_setFriction(self, friction)

    def drawGL(self, keepAppearance=True):
        """
        Draws the object's geometry. If keepAppearance=true, the current appearance is
        honored. Otherwise, only the raw geometry is drawn.

        drawGL (keepAppearance=True)
        drawGL ()

        Args:
            keepAppearance (bool, optional): default value True

        PERFORMANCE WARNING: if keepAppearance is false, then this does not properly
        reuse OpenGL display lists. A better approach is to change the object's
        Appearance directly.
        """
        return _robotsim.TerrainModel_drawGL(self, keepAppearance)

    # Exposed C++ data members (world, index, terrain), wired through the SWIG
    # dispatch tables; additionally published as properties on new-style classes.
    __swig_setmethods__["world"] = _robotsim.TerrainModel_world_set
    __swig_getmethods__["world"] = _robotsim.TerrainModel_world_get
    if _newclass:
        world = _swig_property(_robotsim.TerrainModel_world_get, _robotsim.TerrainModel_world_set)
    __swig_setmethods__["index"] = _robotsim.TerrainModel_index_set
    __swig_getmethods__["index"] = _robotsim.TerrainModel_index_get
    if _newclass:
        index = _swig_property(_robotsim.TerrainModel_index_get, _robotsim.TerrainModel_index_set)
    __swig_setmethods__["terrain"] = _robotsim.TerrainModel_terrain_set
    __swig_getmethods__["terrain"] = _robotsim.TerrainModel_terrain_get
    if _newclass:
        terrain = _swig_property(_robotsim.TerrainModel_terrain_get, _robotsim.TerrainModel_terrain_set)
    # SWIG-managed destructor; __del__ is a no-op stub (disposal goes through
    # __swig_destroy__).
    __swig_destroy__ = _robotsim.delete_TerrainModel
    __del__ = lambda self: None
# Register the proxy class with the C extension so C++-side objects are
# wrapped as TerrainModel instances when returned to Python.
TerrainModel_swigregister = _robotsim.TerrainModel_swigregister
TerrainModel_swigregister(TerrainModel)
class WorldModel(_object):
"""
The main world class, containing robots, rigid objects, and static environment
geometry.
Note that this is just a model and can be changed at will -- in fact planners
and simulators will make use of a model to "display" computed
Every robot/robot link/terrain/rigid object is given a unique ID in the world.
This is potentially a source of confusion because some functions take IDs and
some take indices. Only the WorldModel and Simulator classes use IDs when the
argument has 'id' as a suffix, e.g., geometry(), appearance(),
Simulator.inContact(). All other functions use indices, e.g. robot(0),
terrain(0), etc.
To get an object's ID, you can see the value returned by loadElement and/or
object.getID(). states.
To save/restore the state of the model, you must manually maintain copies of the
states of whichever objects you wish to save/restore.
C++ includes: robotmodel.h
"""
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, WorldModel, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, WorldModel, name)
__repr__ = _swig_repr
def __init__(self, *args):
    """
    Creates a WorldModel.

    __init__ (): :class:`~klampt.WorldModel`
    __init__ (ptrRobotWorld): :class:`~klampt.WorldModel`
    __init__ (w): :class:`~klampt.WorldModel`
    __init__ (fn): :class:`~klampt.WorldModel`

    Args:
        ptrRobotWorld (:obj:`void`, optional):
        w (:class:`~klampt.WorldModel`, optional):
        fn (str, optional):

    * Given no arguments, creates a new world.
    * Given another WorldModel instance, creates a reference to an existing world.
      (To create a copy, use the copy() method.)
    * Given a string, loads from a file. A PyException is raised on failure.
    * Given a pointer to a C++ RobotWorld structure, a reference to that structure
      is returned. (This is advanced usage, seen only when interfacing C++ and
      Python code)
    """
    # Overload resolution happens on the C++ side; *args is forwarded as-is.
    this = _robotsim.new_WorldModel(*args)
    # SWIG ownership bookkeeping: append the new C++ pointer to an existing
    # proxy list if present, otherwise bind it directly.
    try:
        self.this.append(this)
    except Exception:
        self.this = this
# SWIG-managed destructor for the wrapped C++ RobotWorld; __del__ is a no-op
# stub because disposal is routed through __swig_destroy__.
__swig_destroy__ = _robotsim.delete_WorldModel
__del__ = lambda self: None
def copy(self):
    """Return a new WorldModel duplicating this world.

    Note that geometries and appearances in the duplicate are shared
    with this world rather than deep-copied.

    Returns:
        (:class:`~klampt.WorldModel`):
    """
    duplicate = _robotsim.WorldModel_copy(self)
    return duplicate
def readFile(self, | |
# Source: EUFAR/asmm-eufar — functions/asmm_xml.py
import datetime
import xml.dom.minidom
import logging
from PyQt5 import QtCore, QtWidgets
from functions.button_functions import add_read
NAMESPACE_URI = 'http://www.eufar.net/ASMM'
def _add_checked_user_elements(doc, tag, parent, layout):
    """Append one <tag> element to *parent* for every checked QCheckBox in *layout*.

    Used for the user-defined entries of each ASMM category (SA_User,
    GR_User, ...); the element text is the checkbox's label.
    """
    for i in range(layout.count()):
        widget = layout.itemAt(i).widget()
        if isinstance(widget, QtWidgets.QCheckBox) and widget.isChecked():
            add_element(doc, tag, parent, widget.text())


def create_asmm_xml(self, out_file_name):
    """Serialize the current state of the ASMM form to an XML file.

    Mirrors every section of the GUI (flight information, metadata contact,
    the checkbox categories, surface observations, free comments) into a
    ``MissionMetadata`` DOM tree and pretty-prints it to *out_file_name*
    (overwritten if it exists). On success sets ``self.saved`` / clears
    ``self.modified``.

    :param out_file_name: path of the XML file to create.
    """
    logging.debug('asmm_xml.py - create_asmm_xml - out_file_name ' + out_file_name)
    doc = xml.dom.minidom.Document()
    doc_root = add_element(doc, "MissionMetadata", doc)
    doc_root.setAttribute("xmlns:asmm", NAMESPACE_URI)

    # The first save stamps today's date as the creation date; later saves
    # keep it and only refresh the revision date.
    current_date = datetime.date.isoformat(datetime.date.today())
    if not self.create_date:
        self.create_date = current_date
    add_element(doc, "CreationDate", doc_root, self.create_date)
    add_element(doc, "RevisionDate", doc_root, current_date)

    ############################
    # Flight Information
    ############################
    flightInformation = add_element(doc, "FlightInformation", doc_root)
    add_element(doc, "FlightNumber", flightInformation, self.flightNumber_ln.text())
    add_element(doc, "Date", flightInformation, self.date_dt.date().toString(QtCore.Qt.ISODate))
    add_element(doc, "ProjectAcronym", flightInformation, self.projectAcronym_ln.text())
    add_element(doc, "MissionScientist", flightInformation, self.missionSci_ln.text())
    add_element(doc, "FlightManager", flightInformation, self.flightManager_ln.text())
    operator = self.operator_cb.currentText()
    aircraft = self.aircraft_cb.currentText()
    country = ''
    manufacturer = ''
    registration = ''
    if operator == 'Other...':
        # Operator/aircraft typed in by hand in the "Other..." fields.
        operator = self.newOperator_ln.text()
        aircraft = self.newAircraft_ln.text()
        registration = self.newRegistration_ln.text()
        manufacturer = self.newManufacturer_ln.text()
        if self.newCountry_cb.currentText() != 'Make a choice...':
            country = self.newCountry_cb.currentText()
    elif operator != 'Make a choice...':
        if aircraft != 'Make a choice...':
            # Combo entries look like "<model> - <registration>"; split them.
            index = aircraft.find(' - ')
            if index != -1:
                registration = aircraft[index + 3:]
                if len(registration) > 3:
                    aircraft = aircraft[0:index]
            # Resolve manufacturer/country (and registration when missing)
            # from the reference table of known operators and aircraft.
            for i in range(len(self.new_operators_aircraft)):
                if registration != '' and len(registration) > 3:
                    if registration == self.new_operators_aircraft[i][2]:
                        index = self.new_operators_aircraft[i][1].find(', ')
                        manufacturer = self.new_operators_aircraft[i][1][: index]
                        country = self.new_operators_aircraft[i][3]
                        break
                else:
                    index = self.new_operators_aircraft[i][1].find(', ')
                    aircraft_from_table = self.new_operators_aircraft[i][1][index + 2:]
                    if aircraft == aircraft_from_table:
                        manufacturer = self.new_operators_aircraft[i][1][: index]
                        country = self.new_operators_aircraft[i][3]
                        registration = self.new_operators_aircraft[i][2]
                        break
        else:
            aircraft = ''
    else:
        operator = ''
        aircraft = ''
    # Store the country code rather than the displayed country name.
    for key, value in self.new_country_code.items():
        if value == country:
            country = key
            break
    add_element(doc, "Platform", flightInformation, aircraft)
    add_element(doc, "Operator", flightInformation, operator)
    add_element(doc, "OperatorCountry", flightInformation, country)
    add_element(doc, "Manufacturer", flightInformation, manufacturer)
    add_element(doc, "RegistrationNumber", flightInformation, registration)
    if (self.location_cb.currentText() == "Make a choice..."
            or self.detailList.currentText() == "Make a choice..."):
        add_element(doc, "Localisation", flightInformation, "")
    else:
        add_element(doc, "Localisation", flightInformation, self.detailList.currentText())

    ###########################
    # Metadata Contact Info
    ###########################
    contactInfo = add_element(doc, "ContactInfo", doc_root)
    add_element(doc, "ContactName", contactInfo, self.contactName_ln.text())
    if self.contact_cb.currentText() == 'Make a choice...':
        add_element(doc, "ContactRole", contactInfo, '')
    else:
        add_element(doc, "ContactRole", contactInfo, self.contact_cb.currentText())
    add_element(doc, "ContactEmail", contactInfo, self.contactEmail_ln.text())

    ############################
    # Scientific Aims
    ############################
    scientificAims = add_element(doc, "ScientificAims", doc_root)
    add_check_elements(doc, self.scientific_aims_check_dict, "SA_Code", scientificAims)
    if self.sa_ck_list:
        _add_checked_user_elements(doc, "SA_User", scientificAims, self.gridLayout_5)
    add_element(doc, "SA_Other", scientificAims, self.SAOtherTextBox.toPlainText())

    ############################
    # Geographical Region
    ############################
    geographicalRegion = add_element(doc, "GeographicalRegion", doc_root)
    geographicBoundingBox = add_element(doc, "GeographicBoundingBox", geographicalRegion)
    add_element(doc, "westBoundLongitude", geographicBoundingBox, self.westBoundLongitudeLine.text())
    add_element(doc, "eastBoundLongitude", geographicBoundingBox, self.eastBoundLongitudeLine.text())
    add_element(doc, "northBoundLatitude", geographicBoundingBox, self.northBoundLatitudeLine.text())
    add_element(doc, "southBoundLatitude", geographicBoundingBox, self.southBoundLatitudeLine.text())
    add_element(doc, "minAltitude", geographicBoundingBox, self.minAltitudeLine.text())
    add_element(doc, "maxAltitude", geographicBoundingBox, self.maxAltitudeLine.text())
    add_check_elements(doc, self.geographical_region_check_dict, "GR_Code", geographicalRegion)
    if self.gr_ck_list:
        _add_checked_user_elements(doc, "GR_User", geographicalRegion, self.gridLayout_8)
    add_element(doc, "GR_Other", geographicalRegion, self.GROtherTextBox.toPlainText())

    ############################
    # Atmospheric Features
    ############################
    atmosphericFeatures = add_element(doc, "AtmosFeatures", doc_root)
    add_check_elements(doc, self.atmospheric_features_check_dict, "AF_Code", atmosphericFeatures)
    if self.af_ck_list:
        _add_checked_user_elements(doc, "AF_User", atmosphericFeatures, self.gridLayout_9)
    add_element(doc, "AF_Other", atmosphericFeatures, self.AFOtherTextBox.toPlainText())

    ############################
    # Cloud Types
    ############################
    cloudTypes = add_element(doc, "CloudTypes", doc_root)
    add_check_elements(doc, self.cloud_types_check_dict, "CT_Code", cloudTypes)
    if self.ct_ck_list:
        _add_checked_user_elements(doc, "CT_User", cloudTypes, self.gridLayout_10)
    add_element(doc, "CT_Other", cloudTypes, self.CTOtherTextBox.toPlainText())

    ############################
    # Particles Sampled
    ############################
    particlesSampled = add_element(doc, "ParticlesSampled", doc_root)
    add_check_elements(doc, self.particles_sampled_check_dict, "PS_Code", particlesSampled)
    if self.ps_ck_list:
        _add_checked_user_elements(doc, "PS_User", particlesSampled, self.gridLayout_11)
    add_element(doc, "PS_Other", particlesSampled, self.PSOtherTextBox.toPlainText())

    ############################
    # Surfaces Overflown
    ############################
    surfacesOverflown = add_element(doc, "SurfacesOverflown", doc_root)
    add_check_elements(doc, self.surfaces_overflown_check_dict, "SO_Code", surfacesOverflown)
    if self.so_ck_list:
        _add_checked_user_elements(doc, "SO_User", surfacesOverflown, self.gridLayout_13)
    add_element(doc, "SO_Other", surfacesOverflown, self.SOOtherTextBox.toPlainText())

    ############################
    # Altitude Ranges
    ############################
    altitudeRanges = add_element(doc, "AltitudeRanges", doc_root)
    add_check_elements(doc, self.altitude_ranges_check_dict, "AR_Code", altitudeRanges)
    if self.ar_ck_list:
        _add_checked_user_elements(doc, "AR_User", altitudeRanges, self.gridLayout_14)
    add_element(doc, "AR_Other", altitudeRanges, self.AROtherTextBox.toPlainText())

    ############################
    # Flight Types
    ############################
    flightTypes = add_element(doc, "FlightTypes", doc_root)
    add_check_elements(doc, self.flight_types_check_dict, "FT_Code", flightTypes)
    # NOTE: flight-type user entries are tracked under the "FM" flag name.
    if self.fm_ck_list:
        _add_checked_user_elements(doc, "FT_User", flightTypes, self.gridLayout_15)
    add_element(doc, "FT_Other", flightTypes, self.FTOtherTextBox.toPlainText())

    ############################
    # Satellite coordination
    ############################
    satelliteCoordination = add_element(doc, "SatelliteCoordination", doc_root)
    add_check_elements(doc, self.satellite_coordination_check_dict, "SC_Code", satelliteCoordination)
    if self.sc_ck_list:
        _add_checked_user_elements(doc, "SC_User", satelliteCoordination, self.gridLayout_25)
    add_element(doc, "SC_Other", satelliteCoordination, self.SCOtherTextBox.toPlainText())

    ############################
    # Surface Observations
    ############################
    surfaceObs = add_element(doc, "SurfaceObs", doc_root)
    for item in self.ground_site_list:
        add_element(doc, "GroundSite", surfaceObs, item)
    for item in self.research_vessel_list:
        add_element(doc, "ResearchVessel", surfaceObs, item)
    for item in self.arm_site_list:
        add_element(doc, "ArmSite", surfaceObs, item)
    for item in self.arm_mobile_list:
        add_element(doc, "ArmMobile", surfaceObs, item)

    ############################
    # Other Comments
    ############################
    if self.OtherCommentsTextBox.toPlainText():
        add_element(doc, "OtherComments", doc_root, self.OtherCommentsTextBox.toPlainText())

    ############################
    # File Creation
    ############################
    # Context manager guarantees the handle is closed even if writing fails;
    # UTF-8 keeps non-ASCII form content portable across platforms.
    with open(out_file_name, 'w', encoding='utf-8') as f:
        f.write(doc.toprettyxml())
    self.saved = True
    self.modified = False
    logging.debug('asmm_xml.py - create_asmm_xml - file created successfully')
def read_asmm_xml(self, in_file_name):
logging.debug('asmm_xml.py - read_asmm_xml - out_file_name ' + in_file_name)
self.reset_all_fields()
f = open(in_file_name, 'r')
doc = xml.dom.minidom.parse(f)
############################
# Flight Information
############################
self.create_date = get_element_value(doc, "CreationDate")
flightInformation = get_element(doc, "FlightInformation")
set_text_value(self.flightNumber_ln, flightInformation, "FlightNumber")
date = get_element_value(flightInformation, "Date")
self.date_dt.setDate(QtCore.QDate.fromString(date, QtCore.Qt.ISODate))
set_text_value(self.projectAcronym_ln, flightInformation, "ProjectAcronym")
set_text_value(self.missionSci_ln, flightInformation, "MissionScientist")
set_text_value(self.flightManager_ln, flightInformation, "FlightManager")
operator = get_element_value(flightInformation, "Operator")
aircraft = get_element_value(flightInformation, "Platform")
registration = get_element_value(flightInformation, "RegistrationNumber")
aircraft_found = False
if registration:
for i in range(len(self.new_operators_aircraft)):
if registration == self.new_operators_aircraft[i][2]:
aircraft_found = True
self.operator_cb.setCurrentIndex(self.operator_cb.findText(operator))
self.operator_changed()
index = self.aircraft_cb.findText(aircraft)
if index != -1:
self.aircraft_cb.setCurrentIndex(index)
else:
index = self.aircraft_cb.findText(aircraft + ' - ' + registration)
self.aircraft_cb.setCurrentIndex(index)
break
if not aircraft_found:
self.operator_cb.setCurrentIndex(1)
self.operator_changed()
self.newOperator_ln.setText(operator)
self.newAircraft_ln.setText(aircraft)
self.newRegistration_ln.setText(registration)
self.newManufacturer_ln.setText(get_element_value(flightInformation, "Manufacturer"))
if get_element_value(flightInformation, "OperatorCountry"):
self.newCountry_cb.setCurrentIndex(self.newCountry_cb.findText(get_element_value(flightInformation, "OperatorCountry")))
else:
self.operator_cb.setCurrentIndex(1)
self.operator_changed()
self.newOperator_ln.setText(operator)
self.newAircraft_ln.setText(aircraft)
self.newRegistration_ln.setText(registration)
self.newManufacturer_ln.setText(get_element_value(flightInformation, "Manufacturer"))
if get_element_value(flightInformation, "OperatorCountry"):
index = self.newCountry_cb.findText(get_element_value(flightInformation, "OperatorCountry"))
if index != -1:
self.newCountry_cb.setCurrentIndex(index)
combo_text = get_element_value(flightInformation, "Localisation")
if combo_text != None:
if combo_text in self.countries:
self.location_cb.setCurrentIndex(self.location_cb.findText("Countries"))
self.detailList.clear()
self.detailList.setEnabled(True)
self.detailList.addItems(self.countries)
self.detailList.setCurrentIndex(self.detailList.findText(combo_text))
elif combo_text in self.continents:
self.location_cb.setCurrentIndex(self.location_cb.findText("Continents"))
self.detailList.clear()
self.detailList.setEnabled(True)
self.detailList.addItems(self.continents)
self.detailList.setCurrentIndex(self.detailList.findText(combo_text))
elif combo_text in self.oceans:
self.location_cb.setCurrentIndex(self.location_cb.findText("Oceans"))
self.detailList.clear()
self.detailList.setEnabled(True)
self.detailList.addItems(self.oceans)
self.detailList.setCurrentIndex(self.detailList.findText(combo_text))
elif combo_text in self.regions:
self.location_cb.setCurrentIndex(self.location_cb.findText("Regions"))
self.detailList.clear()
self.detailList.setEnabled(True)
self.detailList.addItems(self.regions)
self.detailList.setCurrentIndex(self.detailList.findText(combo_text))
#############################
# Metadata Contact Info
#############################
contactInfo = get_element(doc, "ContactInfo")
set_text_value(self.contactName_ln, contactInfo, "ContactName")
set_text_value(self.contactEmail_ln, contactInfo, "ContactEmail")
combo_text = get_element_value(contactInfo, "ContactRole")
if combo_text != None:
self.contact_cb.setCurrentIndex(self.contact_cb.findText(combo_text))
#############################
# Scientific Aims
#############################
scientificAims = get_element(doc, "ScientificAims")
try:
set_check_values(self.scientific_aims_check_dict, scientificAims, "SA_Code")
except IndexError:
set_check_values(self.old_scientific_aims_check_dict, scientificAims, "SA_Code")
set_text_value(self.SAOtherTextBox, scientificAims, "SA_Other")
values = get_element_values(scientificAims, "SA_User")
for item in values:
add_read(self, "SA", item)
#############################
# Geographical Region
#############################
geographicalRegion = get_element(doc, "GeographicalRegion")
geographicBoundingBox = get_element(geographicalRegion, "GeographicBoundingBox")
set_text_value_coord(self, self.westBoundLongitudeLine, geographicBoundingBox, "westBoundLongitude")
set_text_value_coord(self, self.eastBoundLongitudeLine, geographicBoundingBox, "eastBoundLongitude")
set_text_value_coord(self, self.northBoundLatitudeLine, geographicBoundingBox, "northBoundLatitude")
set_text_value_coord(self, self.southBoundLatitudeLine, geographicBoundingBox, "southBoundLatitude")
set_text_value_coord(self, self.minAltitudeLine, geographicBoundingBox, "minAltitude")
set_text_value_coord(self, self.maxAltitudeLine, geographicBoundingBox, "maxAltitude")
try:
set_check_values(self.geographical_region_check_dict, geographicalRegion, "GR_Code")
except IndexError:
set_check_values(self.old_geographical_region_check_dict, geographicalRegion, "GR_Code")
set_text_value(self.GROtherTextBox, geographicalRegion, "GR_Other")
values = get_element_values(geographicalRegion, "GR_User")
for item in values:
add_read(self, "GR", item)
#############################
# Atmospheric Features
#############################
atmosphericFeatures = get_element(doc, "AtmosFeatures")
try:
set_check_values(self.atmospheric_features_check_dict, atmosphericFeatures, "AF_Code")
except IndexError:
set_check_values(self.old_atmospheric_features_check_dict, atmosphericFeatures, "AF_Code")
set_text_value(self.AFOtherTextBox, atmosphericFeatures, "AF_Other")
values = get_element_values(atmosphericFeatures, "AF_User")
for item in values:
add_read(self, "AF", item)
#############################
# Cloud Types
#############################
cloudTypes = get_element(doc, "CloudTypes")
try:
set_check_values(self.cloud_types_check_dict, cloudTypes, "CT_Code")
except IndexError:
set_check_values(self.old_cloud_types_check_dict, cloudTypes, "CT_Code")
set_text_value(self.CTOtherTextBox, cloudTypes, "CT_Other")
values = get_element_values(cloudTypes, "CT_User")
for item in values:
add_read(self, "CT", item)
#############################
# Particles Sampled
#############################
particlesSampled = get_element(doc, "ParticlesSampled")
try:
set_check_values(self.particles_sampled_check_dict, particlesSampled, "PS_Code")
except IndexError:
set_check_values(self.old_particles_sampled_check_dict, particlesSampled, "PS_Code")
set_text_value(self.PSOtherTextBox, particlesSampled, "PS_Other")
values = get_element_values(particlesSampled, "PS_User")
for item in values:
add_read(self, "PS", item)
#############################
# Surfaces Overflown
#############################
surfacesOverflown = get_element(doc, "SurfacesOverflown")
try:
set_check_values(self.surfaces_overflown_check_dict, surfacesOverflown, "SO_Code")
except IndexError:
set_check_values(self.old_surfaces_overflown_check_dict, surfacesOverflown, "SO_Code")
set_text_value(self.SOOtherTextBox, surfacesOverflown, "SO_Other")
values = get_element_values(surfacesOverflown, "SO_User")
for item in values:
add_read(self, "SO", item)
#############################
# Altitude Ranges
#############################
altitudeRanges = get_element(doc, "AltitudeRanges")
try:
set_check_values(self.altitude_ranges_check_dict, altitudeRanges, "AR_Code")
except IndexError:
set_check_values(self.old_altitude_ranges_check_dict, altitudeRanges, "AR_Code")
set_text_value(self.AROtherTextBox, altitudeRanges, "AR_Other")
values = get_element_values(altitudeRanges, "AR_User")
for item in values:
add_read(self, "AR", item)
#############################
# Flight Types
#############################
flightTypes = get_element(doc, "FlightTypes")
try:
set_check_values(self.flight_types_check_dict, flightTypes, "FT_Code")
except IndexError:
set_check_values(self.old_flight_types_check_dict, flightTypes, "FT_Code")
set_text_value(self.FTOtherTextBox, flightTypes, "FT_Other")
values = get_element_values(flightTypes, "FT_User")
for item in values:
add_read(self, "FM", item)
#############################
# Satellite Coordination
#############################
satelliteCoordination = get_element(doc, "SatelliteCoordination")
try:
set_check_values(self.satellite_coordination_check_dict, satelliteCoordination, "SC_Code")
except IndexError:
set_check_values(self.old_satellite_coordination_check_dict, satelliteCoordination, "SC_Code")
set_text_value(self.SCOtherTextBox, satelliteCoordination, "SC_Other")
values = get_element_values(satelliteCoordination, "SC_User")
for item in values:
add_read(self, "SC", item)
#############################
# Surface Observations
#############################
surfaceObservations = get_element(doc, "SurfaceObs")
self.ground_site_list = get_element_values(surfaceObservations, "GroundSite")
self.groundListWidget.addItems(self.ground_site_list)
self.research_vessel_list = get_element_values(surfaceObservations, "ResearchVessel")
self.vesselListWidget.addItems(self.research_vessel_list)
self.arm_site_list = get_element_values(surfaceObservations, "ArmSite")
self.armListWidget.addItems(self.arm_site_list)
self.arm_mobile_list = get_element_values(surfaceObservations, "ArmMobile")
self.armMobileListWidget.addItems(self.arm_mobile_list)
##############################
# | |
# Source: lmazuel/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualMachinesOperations(object):
"""VirtualMachinesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-04-30-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    """Wire the operations group to the service client's plumbing.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    # Constant Api Version for this generated operations group.
    self.api_version = "2016-04-30-preview"
    self.config = config
def _capture_initial(
        self, resource_group_name, vm_name, parameters, custom_headers=None, raw=False, **operation_config):
    """Send the initial Capture POST request and handle its immediate response.

    This is the first leg of the long-running Capture operation; polling to
    completion is handled by the public ``capture`` wrapper. Raises
    ``CloudError`` on any status other than 200/202; a body is only
    deserialized for 200 (202 means "accepted, still running").
    """
    # Construct URL
    url = self.capture.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'vmName': self._serialize.url("vm_name", vm_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # uuid1 gives a per-request correlation id for service-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(parameters, 'VirtualMachineCaptureParameters')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('VirtualMachineCaptureResult', response)

    if raw:
        # Caller asked for the raw transport response alongside the model.
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
    def capture(
            self, resource_group_name, vm_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Captures the VM by copying virtual hard disks of the VM and outputs a
        template that can be used to create similar VMs.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param parameters: Parameters supplied to the Capture Virtual Machine
         operation.
        :type parameters:
         ~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineCaptureParameters
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of LROPoller that returns
         VirtualMachineCaptureResult or
         ClientRawResponse<VirtualMachineCaptureResult> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineCaptureResult]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineCaptureResult]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the operation; raw=True so the poller receives the
        # HTTP response (with LRO headers), not just the deserialized body.
        raw_result = self._capture_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            # Deserialize the terminal response of the long-running operation
            deserialized = self._deserialize('VirtualMachineCaptureResult', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Select polling strategy: default ARM polling, none, or caller-supplied
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'}
    def _create_or_update_initial(
            self, resource_group_name, vm_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial Create Or Update PUT and return its first response.

        Long-running-operation bootstrap for :meth:`create_or_update`.
        Raises ``CloudError`` on any status other than 200 or 201.
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced server-side
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'VirtualMachine')

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        # 200 = updated, 201 = created; both return the VirtualMachine resource
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachine', response)
        if response.status_code == 201:
            deserialized = self._deserialize('VirtualMachine', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def create_or_update(
            self, resource_group_name, vm_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """The operation to create or update a virtual machine.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param parameters: Parameters supplied to the Create Virtual Machine
         operation.
        :type parameters:
         ~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachine
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of LROPoller that returns VirtualMachine or
         ClientRawResponse<VirtualMachine> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachine]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachine]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the operation; raw=True so the poller receives the
        # HTTP response (with LRO headers), not just the deserialized body.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            # Deserialize the terminal response of the long-running operation
            deserialized = self._deserialize('VirtualMachine', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Select polling strategy: default ARM polling, none, or caller-supplied
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}
    def _delete_initial(
            self, resource_group_name, vm_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial DELETE request and return its first response.

        Long-running-operation bootstrap for :meth:`delete`.
        Raises ``CloudError`` on any status other than 200, 202 or 204.
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced server-side
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request (DELETE has no body)
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        # Only a synchronous 200 carries an OperationStatusResponse body
        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatusResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def delete(
            self, resource_group_name, vm_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """The operation to delete a virtual machine.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An instance of LROPoller that returns OperationStatusResponse
         or ClientRawResponse<OperationStatusResponse> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2016_04_30_preview.models.OperationStatusResponse]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2016_04_30_preview.models.OperationStatusResponse]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the operation; raw=True so the poller receives the
        # HTTP response (with LRO headers), not just the deserialized body.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            # Deserialize the terminal response of the long-running operation
            deserialized = self._deserialize('OperationStatusResponse', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Select polling strategy: default ARM polling, none, or caller-supplied
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}
    def get(
            self, resource_group_name, vm_name, expand=None, custom_headers=None, raw=False, **operation_config):
        """Retrieves information about the model view or the instance view of a
        virtual machine.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param expand: The expand expression to apply on the operation.
         Possible values include: 'instanceView'
        :type expand: str or
         ~azure.mgmt.compute.v2016_04_30_preview.models.InstanceViewTypes
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: VirtualMachine or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachine
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'vmName': self._serialize.url("vm_name", vm_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        # $expand is optional; serialized as an InstanceViewTypes enum value
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'InstanceViewTypes')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced server-side
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachine', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}
def | |
# <reponame>ViolaBuddy/EscapeFromPlegia
import logging
import math
from app.data.database import DB
from app.engine import (action, combat_calcs, engine, equations, evaluate,
item_funcs, item_system, line_of_sight, pathfinding,
skill_system, target_system)
from app.engine.combat import interaction
from app.engine.game_state import game
from app.engine.movement import MovementManager
from app.utilities import utils
class AIController():
    """Drives one unit's AI for its turn.

    Three phases run in order: Move, Attack, Canto. ``think()`` is
    time-sliced (budgeted per frame) and delegates target/position
    selection to the PrimaryAI (Attack/Support/Steal) or SecondaryAI
    (movement/interaction) helpers, walking the unit's behaviour list
    until one succeeds.
    """

    def __init__(self):
        # Controls whether we should be skipping through the AI's turns
        self.do_skip: bool = False
        self.reset()

    def skip(self):
        """Fast-forward AI turns (combat animations etc. are skipped)."""
        self.do_skip = True

    def end_skip(self):
        """Resume normal-speed AI turns."""
        self.do_skip = False

    def reset(self):
        """Clear all per-unit state ahead of loading the next unit."""
        self.unit = None
        self.state = "Init"
        self.behaviour_idx = 0
        self.behaviour = None
        self.inner_ai = None
        self.did_something = False
        self.move_ai_complete = False
        self.attack_ai_complete = False
        self.canto_ai_complete = False

    def load_unit(self, unit):
        """Make *unit* the unit this controller acts for (resets state)."""
        self.reset()
        self.unit = unit

    def is_done(self):
        """True once the move, attack and canto phases have all run."""
        return self.move_ai_complete and \
            self.attack_ai_complete and self.canto_ai_complete

    def clean_up(self):
        """Forget the goals computed for the previous behaviour."""
        self.goal_position = None
        self.goal_item = None
        self.goal_target = None

    def set_next_behaviour(self):
        """Advance to the unit's next AI behaviour (None when exhausted)."""
        behaviours = DB.ai.get(self.unit.ai).behaviours
        if self.behaviour_idx < len(behaviours):
            self.behaviour = behaviours[self.behaviour_idx]
            self.behaviour_idx += 1
        else:
            self.behaviour = None
            self.behaviour_idx = 0

    def get_behaviour(self):
        return self.behaviour

    def act(self):
        """Run one step of the current phase.

        :return: ``(did_something, change)`` -- whether the AI decided to do
            anything this turn, and whether this call changed game state.
        """
        logging.info("AI Act!")

        change = False
        if not self.move_ai_complete:
            # Move phase only completes once think() has settled on a goal
            if self.think():
                change = self.move()
                self.move_ai_complete = True
        elif not self.attack_ai_complete:
            change = self.attack()
            self.attack_ai_complete = True
        elif not self.canto_ai_complete:
            if self.unit.has_attacked and skill_system.has_canto(self.unit, None):
                self.canto_retreat()
                change = self.move()
            self.canto_ai_complete = True

        return self.did_something, change

    def move(self):
        """Queue movement toward goal_position; True if a move was started."""
        if self.goal_position and self.goal_position != self.unit.position:
            path = target_system.get_path(self.unit, self.goal_position)
            # if self.unit.has_attacked:
            #     self.unit.wait()
            game.state.change('movement')
            action.do(action.Move(self.unit, self.goal_position, path))
            return True
        else:
            return False

    def attack(self):
        """Execute the attack/support/steal or region-interact goal.

        :return: True if combat or a region event was initiated.
        """
        # Attacking or supporting
        if self.goal_target:  # Target is a position tuple
            if self.goal_item and self.goal_item in item_funcs.get_all_items(self.unit):
                self.unit.equip(self.goal_item)
            # Highlights
            if item_system.is_weapon(self.unit, self.goal_item):
                game.highlight.remove_highlights()
                splash_positions = item_system.splash_positions(self.unit, self.goal_item, self.goal_target)
                game.highlight.display_possible_attacks({self.goal_target})
                game.highlight.display_possible_attacks(splash_positions, light=True)
            elif item_system.is_spell(self.unit, self.goal_item):
                game.highlight.remove_highlights()
                splash_positions = item_system.splash_positions(self.unit, self.goal_item, self.goal_target)
                game.highlight.display_possible_spell_attacks({self.goal_target})
                game.highlight.display_possible_spell_attacks(splash_positions, light=True)
            # Used for steal
            if item_system.targets_items(self.unit, self.goal_item):
                # Choose most expensive item that is legal
                target = game.board.get_unit(self.goal_target)
                legal_items = [item for item in target.items if item_system.item_restrict(self.unit, self.goal_item, target, item)]
                items = sorted(legal_items, key=lambda x: item_system.sell_price(self.unit, x) or 0)
                self.goal_item.data['target_item'] = items[-1]
            # Combat
            interaction.start_combat(self.unit, self.goal_target, self.goal_item, ai_combat=True, skip=self.do_skip)
            return True
        # Interacting with regions
        elif self.goal_position and self.behaviour and self.behaviour.action == 'Interact':
            # Get region
            region = None
            for r in game.level.regions:
                if r.contains(self.goal_position) and r.region_type == 'event' and r.sub_nid == self.behaviour.target_spec:
                    try:
                        if not r.condition or evaluate.evaluate(r.condition, self.unit, position=self.goal_position):
                            region = r
                            break
                    except Exception:  # narrowed from bare except: user conditions can raise anything
                        logging.warning("Could not evaluate region conditional %s" % r.condition)
            if region:
                did_trigger = game.events.trigger(region.sub_nid, self.unit, position=self.unit.position, region=region)
                if did_trigger and region.only_once:
                    action.do(action.RemoveRegion(region))
                if did_trigger:
                    # Interacting consumes the unit's action
                    action.do(action.HasAttacked(self.unit))
                return True
        return False

    def canto_retreat(self):
        """Pick the reachable position farthest from all enemies (for canto)."""
        valid_positions = self.get_true_valid_moves()
        enemy_positions = {u.position for u in game.units if u.position and skill_system.check_enemy(self.unit, u)}
        self.goal_position = utils.farthest_away_pos(self.unit.position, valid_positions, enemy_positions)

    def smart_retreat(self) -> bool:
        """Move away from behaviour targets within view range.

        :return: True if a retreat goal was chosen, False to try the next
            behaviour instead.
        """
        valid_positions = self.get_true_valid_moves()

        target_positions = get_targets(self.unit, self.behaviour)

        zero_move = max(target_system.find_potential_range(self.unit, True, True), default=0)
        single_move = zero_move + equations.parser.movement(self.unit)
        double_move = single_move + equations.parser.movement(self.unit)

        target_positions = {(pos, utils.calculate_distance(self.unit.position, pos)) for pos in target_positions}

        # Negative view ranges are special codes; positive is a literal range
        if self.behaviour.view_range == -4:  # Entire map
            pass
        elif self.behaviour.view_range == -3:
            target_positions = {(pos, mag) for pos, mag in target_positions if mag < double_move}
        elif self.behaviour.view_range == -2:
            target_positions = {(pos, mag) for pos, mag in target_positions if mag < single_move}
        elif self.behaviour.view_range == -1:
            target_positions = {(pos, mag) for pos, mag in target_positions if mag < zero_move}
        else:
            # Fixed: was `self.view_range` -- AIController never defines that
            # attribute, so any positive custom view range raised AttributeError.
            target_positions = {(pos, mag) for pos, mag in target_positions if mag < self.behaviour.view_range}

        if target_positions and len(valid_positions) > 1:
            self.goal_position = utils.smart_farthest_away_pos(self.unit.position, valid_positions, target_positions)
            return True
        else:
            return False

    def get_true_valid_moves(self) -> set:
        """Valid movement squares, excluding those occupied by other units."""
        valid_moves = target_system.get_valid_moves(self.unit)
        other_unit_positions = {unit.position for unit in game.units if unit.position and unit is not self.unit}
        valid_moves -= other_unit_positions
        return valid_moves

    def think(self):
        """Time-sliced planning state machine.

        Walks the behaviour list, running PrimaryAI/SecondaryAI searches,
        until a goal is found or behaviours are exhausted.
        :return: True when planning has finished for this unit.
        """
        time = engine.get_time()
        success = False
        self.did_something = False
        orig_pos = self.unit.position

        logging.info("*** AI Thinking... ***")

        while True:
            # Can spend up to half a frame thinking
            over_time = engine.get_true_time() - time >= 8
            logging.info("Current State: %s", self.state)

            if self.state == 'Init':
                self.start_time = engine.get_time()
                logging.info("Starting AI with nid: %s, position: %s, class: %s, AI: %s", self.unit.nid, self.unit.position, self.unit.klass, self.unit.ai)
                self.clean_up()
                # Get next behaviour
                self.set_next_behaviour()
                if self.behaviour:
                    logging.info(self.behaviour.action)
                    if self.behaviour.action == "None":
                        pass  # Try again
                    elif self.behaviour.action == "Attack":
                        self.inner_ai = self.build_primary()
                        self.state = "Primary"
                    elif self.behaviour.action == "Support":
                        self.inner_ai = self.build_primary()
                        self.state = "Primary"
                    elif self.behaviour.action == 'Steal':
                        self.inner_ai = self.build_primary()
                        self.state = "Primary"
                    elif self.behaviour.action == 'Interact':
                        self.inner_ai = self.build_secondary()
                        self.state = "Secondary"
                    elif self.behaviour.action == 'Move_to':
                        self.inner_ai = self.build_secondary()
                        self.state = "Secondary"
                    elif self.behaviour.action == "Move_away_from":
                        success = self.smart_retreat()
                        if success:
                            self.state = "Done"
                        else:
                            self.state = "Init"  # Try another behaviour
                else:
                    self.state = 'Done'

            elif self.state == 'Primary':
                done, self.goal_target, self.goal_position, self.goal_item = self.inner_ai.run()
                if done:
                    if self.goal_target:
                        self.ai_group_ping()
                        success = True
                        self.state = "Done"
                    else:
                        self.inner_ai = self.build_secondary()
                        self.state = "Secondary"  # Try secondary
                elif over_time:
                    # Make sure to quick move back so that the in-between frames aren't flickering around
                    self.inner_ai.quick_move(self.inner_ai.orig_pos)

            elif self.state == 'Secondary':
                done, self.goal_position = self.inner_ai.run()
                if done:
                    if self.goal_position:
                        if self.goal_position != self.unit.position:
                            self.ai_group_ping()
                            success = True
                        self.state = "Done"
                    else:
                        self.state = "Init"  # Try another behaviour

            # Not an elif: a phase above may have just transitioned to Done
            if self.state == 'Done':
                self.did_something = success
                self.state = 'Init'
                return True

            if over_time:
                break

        return False

    def ai_group_ping(self):
        """Wake every unmoved unit in this unit's AI group."""
        ai_group = self.unit.ai_group
        if not ai_group:
            return
        for unit in game.units:
            if unit.team == self.unit.team and unit.ai_group == ai_group:
                if not unit._has_moved and not unit._has_attacked:
                    unit.has_run_ai = False  # So it can be run through the AI state again
                if not unit.ai_group_active:
                    action.do(action.AIGroupPing(unit))

    def build_primary(self):
        """Construct a PrimaryAI search over this behaviour's moves."""
        if self.behaviour.view_range == -1:  # Guard AI
            valid_moves = {self.unit.position}
        else:
            valid_moves = self.get_true_valid_moves()

        return PrimaryAI(self.unit, valid_moves, self.behaviour)

    def build_secondary(self):
        """Construct a SecondaryAI search for this behaviour."""
        return SecondaryAI(self.unit, self.behaviour)
class PrimaryAI():
    def __init__(self, unit, valid_moves, behaviour):
        """Pre-compute the (item, target, move) search space for *unit*.

        run() (defined below) iterates these indices to find the best action
        for the behaviour.
        """
        self.max_tp = 0  # presumably best utility score found so far -- confirm in run()
        self.unit = unit
        self.orig_pos = self.unit.position
        # Remember the currently equipped (first) item so run() can restore it
        self.orig_item = self.unit.items[0] if self.unit.items else None
        self.behaviour = behaviour

        if self.behaviour.action == "Attack":
            # All usable inventory items plus skill-granted extra abilities
            self.items = [item for item in item_funcs.get_all_items(self.unit) if
                          item_funcs.available(self.unit, item)]
            self.extra_abilities = skill_system.get_extra_abilities(self.unit)
            for ability in self.extra_abilities.values():
                self.items.append(ability)
        elif self.behaviour.action == 'Support':
            # Same candidate pool as Attack; filtering happens later
            self.items = [item for item in item_funcs.get_all_items(self.unit) if
                          item_funcs.available(self.unit, item)]
            self.extra_abilities = skill_system.get_extra_abilities(self.unit)
            for ability in self.extra_abilities.values():
                self.items.append(ability)
        elif self.behaviour.action == 'Steal':
            # Only the 'Steal' extra ability is considered
            self.items = []
            self.extra_abilities = skill_system.get_extra_abilities(self.unit)
            for ability in self.extra_abilities.values():
                if ability.name == 'Steal':
                    self.items.append(ability)

        self.behaviour_targets = get_targets(self.unit, self.behaviour)

        logging.info("Testing Items: %s", self.items)

        # Search cursors over items / moves / targets
        self.item_index = 0
        self.move_index = 0
        self.target_index = 0

        self.valid_moves = list(valid_moves)
        self.best_target = None
        self.best_position = None
        self.best_item = None

        self.item_setup()
    def item_setup(self):
        """Equip the item under test and recompute its targets and moves.

        No-op once item_index has run past the candidate list.
        """
        if self.item_index < len(self.items):
            logging.info("Testing %s" % self.items[self.item_index])
            self.unit.equip(self.items[self.item_index])
            self.get_all_valid_targets()
            self.possible_moves = self.get_possible_moves()
            logging.info(self.possible_moves)
    def get_valid_targets(self, unit, item, valid_moves) -> list:
        """Positions hittable with *item* from at least one of *valid_moves*.

        Honors the 'ai_fog_of_war' constant: when set, a target must also be
        within the unit's team vision.
        """
        item_range = item_funcs.get_range(unit, item)
        ai_targets = item_system.ai_targets(unit, item)
        if len(ai_targets) < 20:
            # Only log when the target list is small enough to be readable
            logging.info("AI Targets: %s", ai_targets)

        filtered_targets = set()

        for pos in ai_targets:
            for valid_move in valid_moves:
                # Determine if we can hit this unit at one of our moves
                if (utils.calculate_distance(pos, valid_move) in item_range) and \
                        (not DB.constants.value('ai_fog_of_war') or game.board.in_vision(pos, self.unit.team)):
                    filtered_targets.add(pos)
                    break

        return list(filtered_targets)
    def get_all_valid_targets(self):
        """Cache valid targets for the current item in self.valid_targets."""
        item = self.items[self.item_index]
        logging.info("Determining targets for item: %s", item)
        self.valid_targets = self.get_valid_targets(self.unit, item, self.valid_moves)
        # Only if we already have some legal targets (ie, ourself)
        if self.valid_targets and 0 in item_funcs.get_range(self.unit, item):
            self.valid_targets += self.valid_moves  # Hack to target self in all valid positions
            self.valid_targets = list(set(self.valid_targets))  # Only uniques
        logging.info("Valid Targets: %s", self.valid_targets)
    def get_possible_moves(self) -> list:
        """Moves from which the current item can strike the current target.

        Intersects the item's range sphere around the target with the
        unit's valid moves; empty list when the cursors are exhausted.
        """
        if self.target_index < len(self.valid_targets) and self.item_index < len(self.items):
            # Given an item and a target, find all positions in valid_moves that I can strike the target at.
            item = self.items[self.item_index]
            target = self.valid_targets[self.target_index]

            a = target_system.find_manhattan_spheres(item_funcs.get_range(self.unit, item), *target)
            b = set(self.valid_moves)
            return list(a & b)
        else:
            return []
    def quick_move(self, move):
        """Teleport the unit to *move* for hypothetical evaluation.

        test=True presumably suppresses arrival/departure side effects --
        confirm in game.leave/game.arrive.
        """
        game.leave(self.unit, test=True)
        self.unit.position = move
        game.arrive(self.unit, test=True)
def run(self):
if self.item_index >= len(self.items):
self.quick_move(self.orig_pos)
if self.orig_item:
self.unit.equip(self.orig_item)
return (True, self.best_target, self.best_position, self.best_item)
elif self.target_index >= len(self.valid_targets):
self.target_index = 0
self.item_index += 1
self.item_setup()
elif self.move_index >= len(self.possible_moves):
self.move_index = 0
self.target_index += 1
self.possible_moves = self.get_possible_moves()
else:
target = self.valid_targets[self.target_index]
item = self.items[self.item_index]
# If too many legal targets, just try for the best move first
# Otherwise it spends way too long trying every possible position to strike from
if len(self.valid_targets) > 10:
enemy_positions = {u.position for u in game.units if u.position and skill_system.check_enemy(self.unit, u)}
move = | |
* m;
return 42.0 * dot( m*m, vec4( dot(p0,x0), dot(p1,x1),
dot(p2,x2), dot(p3,x3) ) );
}
// 2-D simplex noise, returning values in roughly [-1, 1].
// NOTE(review): constants and structure match the canonical webgl-noise
// implementation (Ashima Arts / McEwan) -- relies on mod289()/permute()
// defined earlier in this shader source.
float snoise(vec2 v)
{
    const vec4 C = vec4(0.211324865405187,  // (3.0-sqrt(3.0))/6.0
                        0.366025403784439,  // 0.5*(sqrt(3.0)-1.0)
                        -0.577350269189626, // -1.0 + 2.0 * C.x
                        0.024390243902439); // 1.0 / 41.0
    // First corner
    vec2 i  = floor(v + dot(v, C.yy) );
    vec2 x0 = v -   i + dot(i, C.xx);

    // Other corners: pick the second simplex corner by comparing x0.x / x0.y
    vec2 i1;
    //i1.x = step( x0.y, x0.x ); // x0.x > x0.y ? 1.0 : 0.0
    //i1.y = 1.0 - i1.x;
    i1 = (x0.x > x0.y) ? vec2(1.0, 0.0) : vec2(0.0, 1.0);
    // x0 = x0 - 0.0 + 0.0 * C.xx ;
    // x1 = x0 - i1 + 1.0 * C.xx ;
    // x2 = x0 - 1.0 + 2.0 * C.xx ;
    vec4 x12 = x0.xyxy + C.xxzz;
    x12.xy -= i1;

    // Permutations
    i = mod289(i); // Avoid truncation effects in permutation
    vec3 p = permute( permute( i.y + vec3(0.0, i1.y, 1.0 ))
        + i.x + vec3(0.0, i1.x, 1.0 ));

    // Radial falloff per corner
    vec3 m = max(0.5 - vec3(dot(x0,x0), dot(x12.xy,x12.xy), dot(x12.zw,x12.zw)), 0.0);
    m = m*m ;
    m = m*m ;

    // Gradients: 41 points uniformly over a line, mapped onto a diamond.
    // The ring size 17*17 = 289 is close to a multiple of 41 (41*7 = 287)
    vec3 x = 2.0 * fract(p * C.www) - 1.0;
    vec3 h = abs(x) - 0.5;
    vec3 ox = floor(x + 0.5);
    vec3 a0 = x - ox;

    // Normalise gradients implicitly by scaling m
    // Approximation of: m *= inversesqrt( a0*a0 + h*h );
    m *= 1.79284291400159 - 0.85373472095314 * ( a0*a0 + h*h );

    // Compute final noise value at P
    vec3 g;
    g.x  = a0.x  * x0.x  + h.x  * x0.y;
    g.yz = a0.yz * x12.xz + h.yz * x12.yw;
    return 130.0 * dot(m, g);
}
// Sum `nlevels` octaves of 2-D simplex noise: octave n is weighted
// pow(0.5, n) and sampled at n times the base frequency.
float fractal_noise(vec2 texCoord, int nlevels) {
    const float w = 0.5;
    if (nlevels < 1) {
        return 0.0;
    }
    float total = 0.0;
    for (int octave = 1; octave <= nlevels; ++octave) {
        total += pow(w, octave) * snoise(octave * texCoord);
    }
    return total;
}
// 3-D overload: sum `nlevels` octaves of simplex noise, octave n weighted
// pow(0.5, n) and sampled at n times the base frequency.
float fractal_noise(vec3 texCoord, int nlevels) {
    const float w = 0.5;
    if (nlevels < 1) {
        return 0.0;
    }
    float total = 0.0;
    for (int octave = 1; octave <= nlevels; ++octave) {
        total += pow(w, octave) * snoise(octave * texCoord);
    }
    return total;
}
// Level-of-detail filtered fractal noise: choose octave count from the
// screen-space derivative magnitude of texCoord, and supersample in the
// widest-footprint branch to reduce aliasing.
float filtered_noise(in vec2 texCoord, in float detail) {
    // Figure out how many spots we might need to sample
    vec2 dxv = vec2(dFdx(texCoord.x), dFdy(texCoord.x));
    // BUG FIX: was vec2(dFdx(texCoord.y) + dFdy(texCoord.y)) -- a
    // single-scalar vec2 constructor that broadcast the *sum* into both
    // components, overestimating dy. Use the two-argument form, as for dxv.
    vec2 dyv = vec2(dFdx(texCoord.y), dFdy(texCoord.y));
    float dx = length(dxv);
    float dy = length(dyv);
    // How many samples are needed in each direction?
    const int MaxSamples = 10;
    int sx = 1 + clamp( int( detail*dx ), 0, MaxSamples-1 );
    int sy = 1 + clamp( int( detail*dy ), 0, MaxSamples-1 );
    float dt = length(vec2(dx, dy));
    if (dt > 5)
        // return -1.0;
        return fractal_noise(texCoord, 0); // stuff really far away is just a blurry grey
    else if (dt <= 0.1) {
        // return 1.0;
        return fractal_noise(texCoord, 5); // close stuff gets one exact sample
    }
    else if (dt <= 0.3) {
        // return 1.0;
        return fractal_noise(texCoord, 3); // mid-range: fewer octaves
    }
    else if (dt <= 0.7) {
        // return 1.0;
        return fractal_noise(texCoord, 1); // farther: single octave
    }
    else {
        // Supersample over the pixel footprint and average
        float result = 0.0;
        vec2 dv = vec2(dx, dy);
        for (int x = 0; x < sx; ++x) {
            for (int y = 0; y < sy; ++y) {
                vec2 tc = texCoord + dv*vec2(x, y)/vec2(sx, sy) - 0.5*dv;
                result += fractal_noise(tc, 1);
            }
        }
        // return 1.0;
        return result / (sx*sy);
        // return fractal_noise(texCoord, 1); // needs filtering
    }
}
"""),
GL_FRAGMENT_SHADER)
class FloorActor(object):
    "Floor plane with procedural texture for context"

    def __init__(self):
        # GL handles; created lazily in init_gl() once a context exists
        self.shader = 0
        self.vao = 0

    def init_gl(self):
        """Compile/link the floor shaders and create an (empty) VAO.

        The quad geometry is generated in the vertex shader from
        gl_VertexID, so the VAO carries no vertex buffers. Must be called
        with a current GL context.
        """
        vertex_shader = compileShader(dedent(
            """
            #version 450 core
            #line 563
            layout(location = 0) uniform mat4 Projection = mat4(1);
            layout(location = 4) uniform mat4 ModelView = mat4(1);
            const vec3 FLOOR_QUAD[4] = vec3[4](
                vec3(-1, 0, -1),
                vec3(-1, 0, +1),
                vec3(+1, 0, +1),
                vec3(+1, 0, -1)
            );
            const int FLOOR_INDICES[6] = int[6](
                2, 1, 0,
                0, 3, 2
            );
            out vec2 texCoord;
            void main() {
                int vertexIndex = FLOOR_INDICES[gl_VertexID];
                vec3 v = FLOOR_QUAD[vertexIndex];
                const float scale = 50; // meters per side
                texCoord = scale * v.xz;
                gl_Position = Projection * ModelView * vec4(scale * v, 1);
            }
            """
        ), GL_VERTEX_SHADER)
        fragment_shader = compileShader(dedent(
            """\
            #version 450 core
            #line 594
            in vec2 texCoord; // Floor texture coordinate in meters
            out vec4 FragColor;
            float filtered_noise(in vec2 texCoord, in float detail);
            void main()
            {
                // shift texture coordinate so origin artifact is probably far away,
                // and shift intensity from range [-1,1] to range [0,1]
                float noise = 0.50 * (filtered_noise(texCoord * 2 + vec2(10, 10), 8) + 1.0);
                // interpolate final color between brown and green
                const vec3 color1 = vec3(0.25, 0.3, 0.15); // green
                const vec3 color2 = vec3(0.05, 0.05, 0.0); // dark brown
                vec3 color = mix(color2, color1, noise);
                FragColor = vec4(color, 1.0);
            }
            """),
            GL_FRAGMENT_SHADER)
        # Link with the shared noise shader object that supplies filtered_noise()
        self.shader = compileProgram(vertex_shader, fragment_shader, ProceduralNoiseShader().fragment_shader)
        #
        self.vao = glGenVertexArrays(1)
        glBindVertexArray(self.vao)
        glEnable(GL_DEPTH_TEST)

    def display_gl(self, modelview, projection):
        """Draw the floor quad (6 generated vertices) with the given matrices."""
        glUseProgram(self.shader)
        # Uniform locations 0 and 4 match the layout() qualifiers in the shader
        glUniformMatrix4fv(0, 1, False, projection)
        glUniformMatrix4fv(4, 1, False, modelview)
        glBindVertexArray(self.vao)
        glDrawArrays(GL_TRIANGLES, 0, 6)

    def dispose_gl(self):
        """Release the GL program and VAO; handles reset to 0."""
        glDeleteProgram(self.shader)
        self.shader = 0
        glDeleteVertexArrays(1, (self.vao,))
        self.vao = 0
class SkyActor(object):
    "Sky sphere with procedural texture for context"

    def __init__(self):
        # GL handles; created lazily in init_gl() once a context exists
        self.shader = 0
        self.vao = 0

    def init_gl(self):
        """Compile/link the sky shaders and create an (empty) VAO.

        A full-screen quad is generated in the vertex shader from
        gl_VertexID; the fragment shader reconstructs a per-pixel view
        direction from NDC to shade the sky.
        """
        vertex_shader = compileShader(dedent(
            """
            #version 450 core
            #line 644
            layout(location = 0) uniform mat4 Projection = mat4(1);
            layout(location = 4) uniform mat4 ViewMatrix = mat4(1);
            const vec4 SCREEN_QUAD[4] = vec4[4](
                vec4(-1, -1, 0, 1),
                vec4(-1, +1, 0, 1),
                vec4(+1, +1, 0, 1),
                vec4(+1, -1, 0, 1));
            const int SCREEN_INDICES[6] = int[6](
                0, 1, 2,
                0, 3, 2
            );
            out mat4 dirFromNdc;
            out vec4 ndc;
            void main() {
                int vertexIndex = SCREEN_INDICES[gl_VertexID];
                vec4 v = SCREEN_QUAD[vertexIndex];
                gl_Position = v;
                dirFromNdc = mat4(inverse(mat3(ViewMatrix))) * inverse(Projection);
                ndc = v;
            }
            """
        ), GL_VERTEX_SHADER)
        fragment_shader = compileShader(dedent(
            """\
            #version 450 core
            #line 674
            in mat4 dirFromNdc;
            in vec4 ndc;
            // in vec3 view_direction; // Floor texture coordinate in meters
            out vec4 FragColor;
            // float filtered_noise(in vec2 texCoord, in float detail);
            float fractal_noise(vec3 texCoord, int nlevels);
            void main()
            {
                vec4 d = dirFromNdc*ndc;
                vec3 view_dir = normalize(d.xyz/d.w);
                vec3 zenith_color = vec3(0.2, 0.2, 1.0); // deep blue
                vec3 horizon_color = vec3(0.80, 0.80, 1.0); // pale blue
                vec3 sky_color = mix(horizon_color, zenith_color, view_dir.y);
                vec3 cloud_color = vec3(1);
                float noise = 0.5 * fractal_noise(2 * view_dir, 4) + 0.5;
                noise = clamp( 0.7 * noise + 0.4 , 0, 1);
                vec3 color = mix(cloud_color, sky_color, noise);
                // color = 0.5*ndc.xyz/ndc.w + vec3(0.5);
                FragColor = vec4(color, 1.0);
                // FragColor = vec4 (1.0, 0.8, 0.8, 1.0); // pink
            }
            """),
            GL_FRAGMENT_SHADER)
        # Link with the shared noise shader object that supplies fractal_noise()
        self.shader = compileProgram(vertex_shader, fragment_shader, ProceduralNoiseShader().fragment_shader)
        #
        self.vao = glGenVertexArrays(1)
        glBindVertexArray(self.vao)
        glEnable(GL_DEPTH_TEST)

    def display_gl(self, modelview, projection):
        """Draw the full-screen sky quad.

        NOTE(review): depth testing is disabled here and not re-enabled;
        presumably the sky is drawn first and other actors restore the
        depth test -- confirm the frame's draw order.
        """
        glDisable(GL_DEPTH_TEST)
        glUseProgram(self.shader)
        # Uniform locations 0 and 4 match the layout() qualifiers in the shader
        glUniformMatrix4fv(0, 1, False, projection)
        glUniformMatrix4fv(4, 1, False, modelview)
        glBindVertexArray(self.vao)
        glDrawArrays(GL_TRIANGLES, 0, 6)

    def dispose_gl(self):
        """Release the GL program and VAO; handles reset to 0."""
        glDeleteProgram(self.shader)
        self.shader = 0
        glDeleteVertexArrays(1, (self.vao,))
        self.vao = 0
class SpatialInteractor(object):
"Composite interactor consisting of both controllers plus maybe other inputs"
    def __init__(self):
        # Recent per-frame translation increments (bounded by max_history_size),
        # kept so an inertial velocity can be estimated from recent motion.
        self.translation_history = collections.deque()  # array of translation increments
        self.max_history_size = 100
        self.left_controller = ControllerState("left controller")
        self.right_controller = ControllerState("right controller")
        # NOTE(review): this evaluates once at construction time, so it is a
        # snapshot, not a live view of the controllers' drag state; presumably
        # it is refreshed each frame elsewhere (e.g. update_controller_states)
        # - confirm.
        self.is_dragging = self.left_controller.is_dragging or self.right_controller.is_dragging
        self.velocity_damping = 1.5  # meters per second per second
        self.speed = 0.0  # meters per second inertial velocity
        self.min_velocity = 0.01  # meters per second
def update_controller_states(self):
new_event = openvr.VREvent_t()
while openvr.VRSystem().pollNextEvent(new_event):
self._check_controller_drag(new_event)
now_is_dragging = self.left_controller.is_dragging or self.right_controller.is_dragging
xform = self._compute_controllers_transform()
if xform is not None:
obj.model_matrix | |
# File: generate_wavelets.py
# Author: <NAME>, <EMAIL>
# Last modified: 2017 Nov. 28
#
# A utility script to generate DWT image files for the image files requested.
# Parse passed-in arguments:
import argparse
# Other imports
import os
import numpy as np # For math and analysis
import pandas as pd # For data structures
import matplotlib # To set backend
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt # For graphing and image generation
import warnings
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import sklearn as skl # (scikit-learn) for various machine learning tasks
import sklearn.utils, sklearn.preprocessing, sklearn.decomposition, sklearn.svm
import librosa
import librosa.display
import audioread
import fma.utils as fma_utils # Utilities provided for loading and manipulating the
# Free Music Archive dataset.
import pywt # For wavelets
import code_timing as timer # For tracking long runs
# Set control variables from args. Start by setting up the argument parser:
parser = argparse.ArgumentParser()
parser.add_argument("input_dir", help="Directory for audio and metadata input files.")
parser.add_argument("output_dir", help="Directory for image output files.")
parser.add_argument("-v", "--verbose", help="Show all messages", action="store_true")
parser.add_argument("-o", "--overwrite",
                    help="Overwrite existing files instead of skipping",
                    action="store_true")
parser.add_argument("-d", "--dwt",
                    help="Generate DWT files",
                    action="store_true")
parser.add_argument("-c", "--cwt",
                    help="Generate CWT files (slow)",
                    action="store_true")
parser.add_argument("-m", "--cmap",
                    help="Specify colormap for wavelet images (defaults to 'magma')")
parser.add_argument("-x", "--wvlt_cont",
                    help="Specify continuous wavelet (defaults to 'gaus4')")
parser.add_argument("-w", "--wvlt_disc",
                    help="Specify discrete wavelet (defaults to 'db5')")
parser.add_argument("-z", "--size",
                    help="Specify the dataset size to use",
                    choices=["small", "medium", "large"])
parser.add_argument("-s", "--split",
                    help="Specify the split to use",
                    choices=["training", "validation", "test"])
parser.add_argument("-l", "--limit", help="Limit how many files to generate",
                    type=int)
parser.add_argument("--octaves",
                    help="Specify the number of octaves to use (defaults to 11)",
                    type=int)
args = parser.parse_args()

# Required positional arguments, normalized to end with a path separator:
input_dir = os.path.join(args.input_dir, '')
output_dir = os.path.join(args.output_dir, '')

# Boolean flags (argparse's store_true defaults them to False):
verbose = args.verbose          # Controls how much debugging/progress information is printed
overwrite = args.overwrite      # True regenerates existing files - slow, but useful for
                                # code debugging; don't have to delete files between runs
generate_dwts = args.dwt        # Should we try to generate DWT files?
generate_cwts = args.cwt        # Should we try to generate CWT files (slow)?

# Optional values, falling back to the documented defaults when absent
# (an `or` keeps the falsy-input behavior of the original override ladder):
# "magma" is perceptually uniform and relatively friendly to various types of
# color-blindness, as well as greyscaling gracefully; see
# http://bids.github.io/colormap/
cmap = args.cmap or "magma"
limit_num = args.limit or None  # None runs the whole set
# Set up the CWT (not ultimately used in this project, because the slowness of
# the CWT makes this approach less useful, but the code is provided in case
# it's useful).  11 octaves goes from ~22 Hz to 22050 Hz, i.e. nearly the full
# range of human hearing.  Decreasing the number of octaves leads to loss on
# the *low* end in the CWT.
num_octaves = args.octaves or 11
wvlt_cont = args.wvlt_cont or 'gaus4'   # continuous wavelet
# Set up the DWT:
wvlt_disc = args.wvlt_disc or "db5"     # discrete wavelet
# By default, generate training data for the small dataset:
requested_subset = args.size or "small"
requested_split = args.split or "training"
def printt(*args, verbose=verbose, **kwargs):
    """print() wrapper: echo to stdout only in verbose mode, always log.

    The default for *verbose* is captured from the module-level setting at
    definition time (i.e. after the command-line overrides above have run);
    callers may still pass verbose=False to log without console output.
    """
    if verbose:
        print(*args, **kwargs)
    # Append everything to the run log regardless of verbosity.
    with open("generate_wavelets.log", "a+") as log:
        print(*args, **kwargs, file=log)
def flatten(arr):
    """Concatenate a sequence of numpy arrays into one flat 1-D array.

    Linear-time replacement for the original incremental np.append loop,
    which re-copied the whole accumulator on every iteration (O(n^2)
    overall).  Each item is raveled first because np.append implicitly
    flattens both of its arguments.  An empty input sequence now yields an
    empty array instead of raising IndexError on arr[0].
    """
    if len(arr) == 0:
        return np.ndarray(0)
    return np.concatenate([np.ravel(item) for item in arr])
def strip_filename(filename):
    """Reduce an audio path to its bucketed track stem.

    "/p/a/t/h/012345.mp3" -> "012/012345": the stem with its extension
    dropped, prefixed by a subdirectory made of the stem's first three
    characters.  No extension is included - info and an extension are
    appended later, at save time.
    """
    stem = os.path.splitext(os.path.basename(filename))[0]
    bucket = stem[:3]
    return os.path.join(bucket, stem)
def fileify(stripped, subdir, tail):
    """Build the output path <output_dir>/<subdir>/<stripped><tail> and make
    sure its directory chain exists.

    Relies on the module-level output_dir set from the command line.
    """
    fullpath = os.path.join(output_dir, os.path.join(subdir, stripped + tail))
    # exist_ok=True replaces the original exists()-then-makedirs() pair,
    # which raced (and crashed) when several generator processes created
    # the same directory concurrently.
    os.makedirs(os.path.dirname(fullpath), exist_ok=True)
    return fullpath
# Adapted from https://stackoverflow.com/questions/16482166/basic-plotting-of-wavelet-
# analysis-output-in-matplotlib
def dwtplots(track_id, stripped, tree):
    """Render one track's DWT coefficient tree to four PNG files.

    tree is a list of per-level coefficient arrays.  Produces small/large
    variants both without axes ("noframe", for training) and with axes and
    a colorbar ("frame", for humans), returning a dict mapping
    (track_id, kind) -> generated file path.  Relies on the module-level
    small_img_dim / large_img_dim / scales_max / colorbar_pad values -
    assumed defined in the part of the script not shown here; confirm.
    """
    files_generated = {}
    # Make a new figure, since matplotlib defaults to hold on
    fig = plt.figure()
    # Generate the training images first
    fig.set_figwidth(small_img_dim)
    fig.set_figheight(small_img_dim)
    # Set the axes to not have any bordering whitespace
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # DWT plots are by level: one horizontal imshow strip per level, all
    # sharing a common color scale so the levels are comparable.
    bottom = 0
    tree_arr = flatten(tree)
    vmin = np.amin(tree_arr)
    vmax = np.amax(tree_arr)
    ax.set_ybound([1, scales_max])
    ax.set_autoscale_on(False)
    ax.set_autoscalex_on(True)
    scale = scales_max / len(tree)  # Use log y scale
    for row in range(0, len(tree)):
        row_data = tree[row]
        row_data = row_data.reshape(1, len(row_data))
        cax = ax.imshow(row_data,
                        # BUGFIX: honor the -m/--cmap option like cwtplots()
                        # does; this was hard-coded to "magma".
                        cmap=cmap,
                        interpolation='none',
                        vmin=vmin,
                        vmax=vmax,
                        aspect="auto",
                        extent=[0, 30, bottom, bottom + scale],
                        )
        bottom += scale
    # Save with no axis labels and no bordering whitespace
    files_generated[(track_id, "small_dwt_noframe")] = fileify(stripped, "dwt/noframe/", "_small.png")
    files_generated[(track_id, "large_dwt_noframe")] = fileify(stripped, "dwt/noframe/", "_large.png")
    plt.savefig(files_generated[(track_id, "small_dwt_noframe")])
    fig.set_dpi(scales_max)  # make bigger
    fig.set_figwidth(large_img_dim)
    fig.set_figheight(scales_max / scales_max)  # NOTE(review): == 1; confirm intent
    plt.savefig(files_generated[(track_id, "large_dwt_noframe")])
    # Resize and update the axes and colorbar to show, and tweak the tick labels:
    files_generated[(track_id, "small_dwt_frame")] = fileify(stripped, "dwt/frame/", "_small.png")
    files_generated[(track_id, "large_dwt_frame")] = fileify(stripped, "dwt/frame/", "_large.png")
    fig.set_figwidth(small_img_dim + colorbar_pad)
    cbar = fig.colorbar(cax, ticks=[vmin, vmax])
    ax.set_ylabel("DWT Level")
    ax.set_xlabel("Time [sec]")
    ax.set_yticks(np.arange(0, scales_max + 1, scales_max / 4))  # np.append([1],(np.geomspace(1, scales_max, num_octaves))[-3:]))
    ax.set_xticks(np.arange(0, 31, 10))  # Clips are 30s long
    ax.set_axis_on()
    fig.add_axes(ax)
    for tick in ax.get_xticklabels():
        tick.set_rotation(30)
    # Save with axis labels and minimal bordering whitespace
    plt.savefig(files_generated[(track_id, "small_dwt_frame")], bbox_inches="tight")
    fig.set_figwidth(large_img_dim + colorbar_pad)
    plt.savefig(files_generated[(track_id, "large_dwt_frame")], bbox_inches="tight")
    # Free memory by closing this figure
    plt.close(fig)
    return files_generated
def cwtplots(track_id, stripped, data, t, frequencies):
    """Render one track's CWT scalogram to four PNG files.

    data: 2-D CWT magnitudes (dB, frequency x time); t: sample times in
    seconds; frequencies: frequency of each CWT row.  Produces small/large
    variants both without axes ("noframe") and with axes and a colorbar
    ("frame"), returning a dict mapping (track_id, kind) -> file path.
    Relies on module-level small_img_dim / large_img_dim / scales_max /
    colorbar_pad - assumed defined in the part of the script not shown
    here; confirm.
    """
    files_generated = {}
    # Make a new figure, since matplotlib defaults to hold on
    fig = plt.figure()
    # Generate the training images first
    fig.set_figwidth(small_img_dim)
    fig.set_figheight(small_img_dim)
    # Set the axes to not have any bordering whitespace
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    # Plots are frequency versus time, on a single shared color scale:
    vmax = np.max(data)
    vmin = np.min(data)
    t0 = np.min(t)
    tlast = np.max(t)
    f0 = np.min(frequencies)
    flast = np.max(frequencies)
    cax = ax.imshow(data,
                    cmap=cmap,
                    interpolation='none',
                    vmin=vmin,
                    vmax=vmax,
                    aspect="auto",
                    extent=[t0, tlast, f0, flast],
                    )
    # Save with no axis labels and no bordering whitespace
    files_generated[(track_id, "small_cwt_noframe")] = fileify(stripped, "cwt/noframe/", "_small.png")
    files_generated[(track_id, "large_cwt_noframe")] = fileify(stripped, "cwt/noframe/", "_large.png")
    plt.savefig(files_generated[(track_id, "small_cwt_noframe")])
    fig.set_dpi(scales_max)  # make bigger
    fig.set_figwidth(large_img_dim)
    fig.set_figheight(scales_max / scales_max)  # NOTE(review): == 1; confirm intent
    plt.savefig(files_generated[(track_id, "large_cwt_noframe")])
    # Resize and update the axes and colorbar to show, and tweak the tick labels:
    files_generated[(track_id, "small_cwt_frame")] = fileify(stripped, "cwt/frame/", "_small.png")
    files_generated[(track_id, "large_cwt_frame")] = fileify(stripped, "cwt/frame/", "_large.png")
    fig.set_figwidth(small_img_dim + colorbar_pad)
    cbar = fig.colorbar(cax, ticks=[vmin, vmax])
    ax.set_ylabel("Frequency [Hz]")
    ax.set_xlabel("Time [sec]")
    ax.set_yticks([20, 7500, 15000, 22050])  # Pretty nicely spaced along audible freq. range
    ax.set_xticks(np.arange(0, 31, 10))  # Clips are 30s long
    ax.set_axis_on()
    fig.add_axes(ax)
    for tick in ax.get_xticklabels():
        tick.set_rotation(30)
    # Save with axis labels and minimal bordering whitespace
    plt.savefig(files_generated[(track_id, "small_cwt_frame")], bbox_inches="tight")
    fig.set_figwidth(large_img_dim + colorbar_pad)
    plt.savefig(files_generated[(track_id, "large_cwt_frame")], bbox_inches="tight")
    # Free memory by closing this figure
    plt.close(fig)
    return files_generated
def make_wavelets(track_id,
min_f, max_f, filename,
total_times,
file_load_times,
cwt_times,
dwt_times,
pyplot_cwt_times,
pyplot_dwt_times):
timer.tic("single_run")
files_generated = {}
# Make sure the audio file exists
# print("File:", filename)
assert(os.path.isfile(filename))
# Get the part of the filename that gets replicated in image file names
stripped = strip_filename(filename)
# If overwrite is not enabled, we decide whether to generate files based on:
# a) whether or not a particular wavelet (DWT/CWT) is requested, and
# b) whether or not all files for that wavelet (or those wavelets) already exists
if not overwrite:
cwt_to_generate = False
if (not os.path.exists(fileify(stripped, "cwt/noframe/", "_small.png")) or
not os.path.exists(fileify(stripped, "cwt/noframe/", "_large.png")) or
not os.path.exists(fileify(stripped, "cwt/frame/", "_small.png")) or
not os.path.exists(fileify(stripped, "cwt/frame/", "_large.png"))): # one or more CWT files is missing
if generate_cwts:
cwt_to_generate = True
dwt_to_generate = False
if (not os.path.exists(fileify(stripped, "dwt/noframe/", "_small.png")) or
not os.path.exists(fileify(stripped, "dwt/noframe/", "_large.png")) or
not os.path.exists(fileify(stripped, "dwt/frame/", "_small.png")) or
not os.path.exists(fileify(stripped, "dwt/frame/", "_large.png"))): # one or more DWT files is missing
if generate_dwts:
dwt_to_generate = True
# If overwrite is enabled, always generate the requested type(s) of wavelet images:
else:
cwt_to_generate = generate_cwts # for this file <- for all files
dwt_to_generate = generate_dwts # for this file <- for all files
files_to_generate = cwt_to_generate or dwt_to_generate # for this file
#print("There {} files to generate.".format("are a nonzero number of" if files_to_generate else "are no"))
if (files_to_generate):
# Load and adjust data
timer.tic("file_load")
try:
data, sample_rate = librosa.load(filename, sr=None, mono=True) # Convert to mono for
# simpler processing
# Set up time/sample time variables
dt = 1/sample_rate
t = dt*np.arange(len(data)) # Time of data samples in seconds [x axis]
# Normalize the (signed) data relative to its max value:
data = 1/(np.max(abs(data))+np.finfo(float).eps)*data
except audioread.NoBackendError as e:
printt("Couldn't load {} because the backend is missing.".format(filename))
printt(e)
raise
except Exception as e: # all other errors, just log, then skip this file
printt("\n\n=> Couldn't load {}:\n{}\n".format(filename,e), verbose = False)
cwt_to_generate = False # force code to not do anything else for this file
dwt_to_generate = False
file_load_times = np.append(file_load_times, timer.toc("file_load"))
# Generate CWT graphics
if cwt_to_generate:
# Calculate and adjust CWT:
timer.tic("cwt")
[cwt_data,frequencies] = pywt.cwt(data, scales, wvlt_cont, dt)
# Convert CWT data from straight magnitude to power (dB)
cwt_data = 10*np.log10(np.abs(cwt_data)**2 + np.finfo(float).eps)
cwt_times = np.append(cwt_times, timer.toc("cwt"))
# Find the minimum and maximum frequencies, so we can make sure we're operating on
# the same frequency scale for all generated images (i.e. comparing apples to apples):
min_f = min(frequencies[0],frequencies[-1])
max_f = max(frequencies[0],frequencies[-1])
# Plot and save CWT images:
timer.tic("pyplot_cwt")
files_generated = {**files_generated,
**cwtplots(track_id, stripped, cwt_data, t, frequencies)}
pyplot_cwt_times = np.append(pyplot_cwt_times, | |
#!/usr/bin/python
# Copyright (c) 2014 SUSE Linux Products GmbH
# Copyright (c) 2016 SUSE LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pprint import pprint
import os
import sys
import re
import logging
from optparse import OptionParser
import cmdln
try:
from xml.etree import cElementTree as ET
except ImportError:
import cElementTree as ET
import osc.conf
import osc.core
import urllib2
import yaml
import ReviewBot
from check_maintenance_incidents import MaintenanceChecker
from check_source_in_factory import FactorySourceChecker
class Leaper(ReviewBot.ReviewBot):
    def __init__(self, *args, **kwargs):
        """Set up the sub-checkers and the per-request review state flags."""
        ReviewBot.ReviewBot.__init__(self, *args, **kwargs)
        self.do_comments = True
        # Maintenance-incident checks are delegated to this helper bot.
        self.maintbot = MaintenanceChecker(*args, **kwargs)
        # for FactorySourceChecker
        self.factory = FactorySourceChecker(*args, **kwargs)
        # Flags (re)computed while a single request is being evaluated:
        self.needs_reviewteam = False            # add a review-team review
        self.pending_factory_submission = False  # matching Factory request still open
        self.source_in_factory = None            # True / False / None (unknown)
        self.needs_release_manager = False       # add a release-manager review
        self.release_manager_group = 'leap-reviewers'
        self.review_team_group = 'opensuse-review-team'
        self.must_approve_version_updates = False
        self.must_approve_maintenance_updates = False
        self.needs_check_source = False
        self.check_source_group = None
        self.automatic_submission = False
        # project => package list (cache used by is_package_in_project)
        self.packages = {}
    def prepare_review(self):
        """Load the package-origin lookup tables before processing requests.

        On IBS only the SLE-12-SP3 table is needed; on OBS the Leap 42.3,
        42.2 and 42.1 tables are loaded in turn, each snapshot copied before
        the shared parser state is reset for the next project.
        """
        # update lookup information on every run
        if self.ibs:
            self.factory.parse_lookup('SUSE:SLE-12-SP3:GA')
            self.lookup_sp3 = self.factory.lookup.copy()
            return
        self.factory.parse_lookup('openSUSE:Leap:42.3')
        self.factory.parse_lookup('openSUSE:Leap:42.3:NonFree')
        self.lookup_423 = self.factory.lookup.copy()
        self.factory.reset_lookup()
        self.factory.parse_lookup('openSUSE:Leap:42.2:Update')
        self.factory.parse_lookup('openSUSE:Leap:42.2:NonFree:Update')
        self.lookup_422 = self.factory.lookup.copy()
        self.factory.reset_lookup()
        self.factory.parse_lookup('openSUSE:Leap:42.1:Update')
        self.lookup_421 = self.factory.lookup.copy()
        self.factory.reset_lookup()
def get_source_packages(self, project, expand=False):
"""Return the list of packages in a project."""
query = {'expand': 1} if expand else {}
root = ET.parse(osc.core.http_GET(osc.core.makeurl(self.apiurl,['source', project],
query=query))).getroot()
packages = [i.get('name') for i in root.findall('entry')]
return packages
def is_package_in_project(self, project, package):
if not project in self.packages:
self.packages[project] = self.get_source_packages(project)
return True if package in self.packages[project] else False
def rdiff_link(self, src_project, src_package, src_rev, target_project, target_package = None):
if target_package is None:
target_package = src_package
return '[%(target_project)s/%(target_package)s](/package/rdiff/%(src_project)s/%(src_package)s?opackage=%(target_package)s&oproject=%(target_project)s&rev=%(src_rev)s)'%{
'src_project': src_project,
'src_package': src_package,
'src_rev': src_rev,
'target_project': target_project,
'target_package': target_package,
}
def _check_same_origin(self, origin, project):
if origin == 'FORK':
return True
if origin.startswith('Devel;'):
(dummy, origin, dummy) = origin.split(';')
return project.startswith(origin)
    def check_source_submission(self, src_project, src_package, src_rev, target_project, target_package):
        """Review one submit request's sources against its expected origin.

        Returns True to accept, False to decline, or None to leave the
        review pending (e.g. while a matching Factory submission is still
        open).  Side effects: sets the needs_* / source_in_factory /
        pending_factory_submission flags consumed by the surrounding review
        machinery.
        """
        super(Leaper, self).check_source_submission(src_project, src_package, src_rev, target_project, target_package)
        if src_project == target_project and src_package == target_package:
            self.logger.info('self submission detected')
            self.needs_release_manager = True
            return True
        src_srcinfo = self.get_sourceinfo(src_project, src_package, src_rev)
        package = target_package
        origin = None
        if src_srcinfo is None:
            # source package does not exist?
            # handle here to avoid crashing on the next line
            self.logger.warn("Could not get source info for %s/%s@%s" % (src_project, src_package, src_rev))
            return False
        # --- IBS / SLE target: compare against the SP3 lookup and Factory ---
        if self.ibs and target_project.startswith('SUSE:SLE'):
            if package in self.lookup_sp3:
                origin = self.lookup_sp3[package]
            origin_same = True
            if origin:
                origin_same = self._check_same_origin(origin, src_project)
                self.logger.info("expected origin is '%s' (%s)", origin,
                                 "unchanged" if origin_same else "changed")
            prj = 'openSUSE.org:openSUSE:Factory'
            # True or None (open request) are acceptable for SLE.
            self.source_in_factory = self._check_factory(package, src_srcinfo, prj)
            if self.source_in_factory is None:
                self.pending_factory_submission = True
            if self.source_in_factory is not False:
                return self.source_in_factory
            # got false. could mean package doesn't exist or no match
            if self.is_package_in_project(prj, package):
                self.logger.info('different sources in {}'.format(self.rdiff_link(src_project, src_package, src_rev, prj, package)))
            prj = 'openSUSE.org:openSUSE:Leap:42.2'
            if self.is_package_in_project(prj, package):
                if self._check_factory(package, src_srcinfo, prj) is True:
                    self.logger.info('found source match in {}'.format(prj))
                else:
                    self.logger.info('different sources in {}'.format(self.rdiff_link(src_project, src_package, src_rev, prj, package)))
            # Last resort: compare against the Factory devel project.
            devel_project, devel_package = self.get_devel_project('openSUSE.org:openSUSE:Factory', package)
            if devel_project is not None:
                # specifying devel package is optional
                if devel_package is None:
                    devel_package = package
                if self.is_package_in_project(devel_project, devel_package):
                    if self.factory._check_project(devel_project, devel_package, src_srcinfo.verifymd5) == True:
                        self.logger.info('matching sources in {}/{}'.format(devel_project, devel_package))
                        return True
                    else:
                        self.logger.info('different sources in {}'.format(self.rdiff_link(src_project, src_package, src_rev, devel_project, devel_package)))
            else:
                self.logger.info('no devel project found for {}/{}'.format('openSUSE.org:openSUSE:Factory', package))
            self.logger.info('no matching sources in Factory, Leap:42.2, nor devel project')
            return origin_same
        # --- OBS / Leap target: decide based on the package's 42.3 origin ---
        if package in self.lookup_423:
            origin = self.lookup_423[package]
        is_fine_if_factory = False
        not_in_factory_okish = False
        if origin:
            origin_same = self._check_same_origin(origin, src_project)
            self.logger.info("expected origin is '%s' (%s)", origin,
                             "unchanged" if origin_same else "changed")
            if origin.startswith('Devel;'):
                if origin_same == False:
                    self.logger.debug("not submitted from devel project")
                    return False
                is_fine_if_factory = True
                not_in_factory_okish = True
                if self.must_approve_version_updates:
                    self.needs_release_manager = True
                # fall through to check history and requests
            elif origin.startswith('openSUSE:Factory'):
                # A large number of requests are created by hand that leaper
                # would have created via update_crawler.py. This applies to
                # other origins, but primary looking to let Factory submitters
                # know that there is no need to make manual submissions to both.
                # Since it has a lookup entry it is not a new package.
                self.automatic_submission = True
                if self.must_approve_version_updates:
                    self.needs_release_manager = True
                if origin == src_project:
                    self.source_in_factory = True
                    return True
                is_fine_if_factory = True
                # fall through to check history and requests
            elif origin == 'FORK':
                is_fine_if_factory = True
                not_in_factory_okish = True
                self.needs_release_manager = True
                self.needs_check_source = True
                # fall through to check history and requests
            elif origin.startswith('openSUSE:Leap:42.2'):
                if self.must_approve_maintenance_updates:
                    self.needs_release_manager = True
                # submitted from :Update
                if origin_same:
                    self.logger.debug("submission from 42.2 ok")
                    return True
                # switching to sle package might make sense
                if src_project.startswith('SUSE:SLE-12'):
                    self.needs_release_manager = True
                    return True
                # submitted from elsewhere but is in :Update
                else:
                    good = self.factory._check_project('openSUSE:Leap:42.2:Update', target_package, src_srcinfo.verifymd5)
                    if good:
                        self.logger.info("submission found in 42.2")
                        return good
                    # check release requests too
                    good = self.factory._check_requests('openSUSE:Leap:42.2:Update', target_package, src_srcinfo.verifymd5)
                    if good or good == None:
                        self.logger.debug("found request")
                        return good
                # let's see where it came from before
                if package in self.lookup_422:
                    oldorigin = self.lookup_422[package]
                    self.logger.debug("oldorigin {}".format(oldorigin))
                    # Factory. So it's ok to keep upgrading it to Factory
                    # TODO: whitelist packages where this is ok and block others?
                    self.logger.info("Package was from %s in 42.2", oldorigin)
                    if oldorigin.startswith('openSUSE:Factory'):
                        # check if an attempt to switch to SLE package is made
                        for sp in ('SP2:GA', 'SP2:Update', 'SP3:GA'):
                            good = self.factory._check_project('SUSE:SLE-12-{}'.format(sp), target_package, src_srcinfo.verifymd5)
                            if good:
                                self.logger.info("request sources come from SLE")
                                self.needs_release_manager = True
                                return good
                    elif oldorigin.startswith('openSUSE:Leap:42.1'):
                        o = self.lookup_421[package]
                        self.logger.info("Package was from %s in 42.1", o)
                        # the release manager needs to review attempts to upgrade to Factory
                        is_fine_if_factory = True
                        self.needs_release_manager = True
            elif origin.startswith('SUSE:SLE-12'):
                if self.must_approve_maintenance_updates:
                    self.needs_release_manager = True
                for v in ('42.3', '42.2'):
                    prj = 'openSUSE:Leap:{}:SLE-workarounds'.format(v)
                    if self.is_package_in_project(prj, target_package):
                        self.logger.info("found package in %s", prj)
                        if not self.factory._check_project(prj,
                                                           target_package,
                                                           src_srcinfo.verifymd5):
                            self.logger.info("sources in %s are NOT identical", prj)
                            self.needs_release_manager = True
                # submitted from :Update
                if origin == src_project:
                    self.logger.debug("submission origin ok")
                    return True
                elif origin.endswith(':GA') \
                        and src_project == origin[:-2]+'Update':
                    self.logger.debug("sle update submission")
                    return True
                # check if submitted from higher SP
                priolist = ['SUSE:SLE-12:', 'SUSE:SLE-12-SP1:', 'SUSE:SLE-12-SP2:', 'SUSE:SLE-12-SP3:']
                for i in range(len(priolist)-1):
                    if origin.startswith(priolist[i]):
                        for prj in priolist[i+1:]:
                            if src_project.startswith(prj):
                                self.logger.info("submission from higher service pack %s:* ok", prj)
                                return True
                self.needs_release_manager = True
                # the release manager needs to review attempts to upgrade to Factory
                is_fine_if_factory = True
            else:
                self.logger.error("unhandled origin %s", origin)
                return False
        else:  # no origin
            # submission from SLE is ok
            if src_project.startswith('SUSE:SLE-12'):
                return True
            is_fine_if_factory = True
            self.needs_release_manager = True
        # Informational only: note whether the package also exists in SLE.
        if origin is None or not origin.startswith('SUSE:SLE-'):
            for p in ('-SP3:GA', '-SP2:Update', '-SP2:GA',
                      '-SP1:Update', '-SP1:GA', ':Update', ':GA'):
                prj = 'SUSE:SLE-12' + p
                if self.is_package_in_project(prj, target_package):
                    self.logger.info('Package is in {}'.format(prj))
                    break
        # we came here because none of the above checks find it good, so
        # let's see if the package is in Factory at least
        is_in_factory = self._check_factory(target_package, src_srcinfo)
        if is_in_factory:
            self.source_in_factory = True
            self.needs_reviewteam = False
        elif is_in_factory is None:
            self.pending_factory_submission = True
            self.needs_reviewteam = False
        else:
            if src_project.startswith('SUSE:SLE-12') \
                    or src_project.startswith('openSUSE:Leap:42.'):
                self.needs_reviewteam = False
            else:
                self.needs_reviewteam = True
            self.source_in_factory = False
        if is_fine_if_factory:
            if self.source_in_factory:
                return True
            elif self.pending_factory_submission:
                return None
            elif not_in_factory_okish:
                self.needs_reviewteam = True
                return True
        return False
def _check_factory(self, target_package, src_srcinfo, target_project='openSUSE:Factory'):
| |
'''
MIT License
Copyright (c) 2017-2018 Cree-Py
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import discord
from discord.ext import commands
from bs4 import BeautifulSoup
from ext.paginator import PaginatorSession
import aiohttp
import datetime
import json
import pytz
class Brawl_Stars:
'''Brawl Stars stats.'''
    def __init__(self, bot):
        # Cog constructor: keep a reference to the bot (used for bot.db access).
        self.bot = bot
def emoji(self, emoji):
with open('data/emojis.json') as f:
emojis = json.load(f)
e = emojis[emoji]
return e
async def get_tag(self, userid):
result = await self.bot.db.brawlstars.find_one({'_id': userid})
if not result:
return 'None'
return result['tag']
    async def save_tag(self, userid, tag):
        # Upsert: first-time saves insert a document, later saves overwrite it.
        await self.bot.db.brawlstars.update_one({'_id': userid}, {'$set': {'_id': userid, 'tag': tag}}, upsert=True)
def check_tag(self, tag):
for char in tag:
if char.upper() not in '0289PYLQGRJCUV':
return False
return True
@commands.command()
async def bsprofile(self, ctx, id=None):
'''Get a brawl stars profile.'''
# ID is the player tag
await ctx.trigger_typing()
def get_attr(type: str, attr: str):
return soup.find(type, class_=attr).text
def get_all_attrs(type: str, attr: str):
return soup.find_all(type, class_=attr)
if not id:
id = await self.get_tag(str(ctx.message.author.id))
id = id.strip('#').replace('O', '0')
if id == 'None':
return await ctx.send(f"Please save your player tag using `{ctx.prefix}bssave <tag>`")
else:
id = await self.get_tag(str(ctx.author.id))
if self.check_tag(id):
try:
async with aiohttp.ClientSession() as session:
async with session.get(f'https://brawlstats.io/players/{id}') as resp:
data = await resp.read()
soup = BeautifulSoup(data, 'lxml')
except Exception as e:
await ctx.send(f'`{e}`')
else:
success = True
else:
return await ctx.send("You have an invalid tag.")
else:
id = id.strip('#').replace('O', '0')
if self.check_tag(id):
try:
async with aiohttp.ClientSession() as session:
async with session.get(f'https://brawlstats.io/players/{id}') as resp:
data = await resp.read()
soup = BeautifulSoup(data, 'lxml')
except Exception as e:
await ctx.send(e)
else:
success = True
else:
await ctx.send("Invalid tag. Tags can only contain the following characters: `0289PYLQGRJCUV`")
if success:
source = str(soup.find_all("img", class_="mr-2"))
src = source.split('src="')[1]
imgpath = src.split('" w')[0]
brawlers = get_all_attrs("div", "brawlers-brawler-slot d-inline-block")
top = str(brawlers[0])
name_after = top.split('brawlers/')[1]
highestbrawler = name_after.split('"')[0].title()
em = discord.Embed(color=discord.Color.green())
em.set_thumbnail(url=f'https://brawlstats.io{imgpath}')
em.title = f"{get_attr('div', 'player-name brawlstars-font')} (#{id})"
em.description = f"Band: {get_attr('div', 'band-name mr-2')} ({get_attr('div', 'band-tag')})"
em.add_field(name="Level", value=get_attr('div', 'experience-level'))
em.add_field(name="Experience", value=get_attr('div', 'progress-text'))
em.add_field(name="Trophies", value=get_all_attrs('div', 'trophies')[0].text)
em.add_field(name="Highest Trophies", value=get_all_attrs('div', 'trophies')[1].text)
em.add_field(name="Highest Brawler", value=highestbrawler)
em.add_field(name="Highest Brawler Trophies", value=get_all_attrs('div', 'trophies')[2].text)
em.add_field(name="Victories", value=get_attr('div', 'victories'))
em.add_field(name="Showdown Victories", value=get_attr('div', 'showdown-victories'))
em.add_field(name="Best time as boss", value=get_attr('div', 'boss-time'))
em.add_field(name="Best robo rumble time", value=get_attr('div', 'robo-time'))
em.set_footer(text='Stats made by Cree-Py | Powered by brawlstats',
icon_url='http://brawlstats.io/images/bs-stats.png')
await ctx.send(embed=em)
@commands.command()
async def bssave(self, ctx, id=None):
    '''Save a tag.'''
    # Guard clause: nothing to do without a tag argument.
    if not id:
        await ctx.send("Please specify a tag to save.")
        return
    # Normalise the tag: drop a leading '#' and map the letter O to zero.
    cleaned = id.strip('#').replace('O', '0')
    if not self.check_tag(cleaned):
        await ctx.send("Your tag is invalid. Please make sure you only have the characters `0289PYLQGRJCUV` in the tag.")
        return
    # Persist the tag against the invoking user's discord id.
    await self.save_tag(str(ctx.author.id), cleaned)
    await ctx.send(f'Your tag (#{cleaned}) has been successfully saved.')
@commands.command()
async def bsweburl(self, ctx, id=None):
    '''Get the url to your brawl stars profile'''
    await ctx.trigger_typing()
    em = discord.Embed(title='brawlstats.io URL')
    em.color = discord.Color.green()
    if id is None:
        # FIX: fetch the saved tag once instead of awaiting self.get_tag()
        # a second time after the sentinel check (two lookups in the
        # original).
        saved = await self.get_tag(str(ctx.author.id))
        if saved == 'None':
            return await ctx.send(f'No tag found. Please use `{ctx.prefix}brawlstars save <tag>` to save a tag to your discord profile.')
        id = saved
    else:
        # Normalise once and validate the caller-supplied tag.
        id = id.strip('#').replace('O', '0')
        if not self.check_tag(id):
            return await ctx.send('Invalid Tag. Please make sure your tag is correct.')
    em.url = f'http://brawlstats.io/players/{id}'
    em.title = ctx.author.name
    em.add_field(name='URL', value=f'http://brawlstats.io/players/{id}')
    em.set_footer(text='Stats made by Cree-Py | Powered by brawlstats',
                  icon_url='http://brawlstats.io/images/bs-stats.png')
    await ctx.send(embed=em)
@commands.command()
async def bsband(self, ctx, id=None):
    '''Get a brawl stars band's stats'''
    def get_attr(type: str, attr: str):
        # First matching element's text from the last-scraped page.
        return soup.find(type, class_=attr).text

    def get_all_attrs(type: str, attr: str):
        # All matching elements from the last-scraped page.
        return soup.find_all(type, class_=attr)

    await ctx.trigger_typing()
    # BUGFIX: 'success' was never initialised in the original, so every
    # error path that fell through hit a NameError at 'if success:'.
    success = False
    if not id:
        # No tag given: fall back to the tag saved for this user.
        id = await self.get_tag(str(ctx.message.author.id))
        id = id.strip('#').replace('O', '0')
        if id == 'None':
            return await ctx.send(f'Please save your player tag using `{ctx.prefix}bs save <tag>`')
        # (The original re-awaited self.get_tag() here, discarding the
        # normalisation just applied; that redundant call is removed.)
        if self.check_tag(id):
            # Scrape the player page first to discover the band tag.
            try:
                async with aiohttp.ClientSession() as session:
                    async with session.get(f'https://brawlstats.io/players/{id}') as resp:
                        data = await resp.read()
                        soup = BeautifulSoup(data, 'lxml')
            except Exception as e:
                # BUGFIX: bail out here; the original fell through and then
                # dereferenced an undefined 'soup'.
                return await ctx.send(f'`{e}`')
            bandtag = get_attr('div', 'band-tag').strip("#")
            try:
                async with aiohttp.ClientSession() as session:
                    async with session.get(f'https://brawlstats.io/bands/{bandtag}') as resp:
                        data = await resp.read()
                        soup = BeautifulSoup(data, 'lxml')
            except Exception as e:
                await ctx.send(f'`{e}`')
            else:
                success = True
        else:
            return await ctx.send("You have an invalid tag.")
    else:
        id = id.strip('#').replace('O', '0')
        if self.check_tag(id):
            bandtag = id.strip('#')
            try:
                async with aiohttp.ClientSession() as session:
                    async with session.get(f'https://brawlstats.io/bands/{bandtag}') as resp:
                        data = await resp.read()
                        soup = BeautifulSoup(data, 'lxml')
            except Exception as e:
                await ctx.send(f'`{e}`')
            else:
                success = True
        else:
            await ctx.send("Invalid tag. Tags can only contain the following characters: `0289PYLQGRJCUV`")
    if success:
        pages = []
        name = str(get_attr('div', 'name'))
        desc = str(get_attr('div', 'clan-description'))
        trophies = get_all_attrs('div', 'trophies')[0].text
        required = get_all_attrs('div', 'trophies')[1].text
        # Collect the top four members; names start at index 1 because
        # index 0 is the band's own name element.
        info = []
        for idx in range(4):
            player = {}
            player['name'] = get_all_attrs('div', 'name')[idx + 1].text
            player['role'] = get_all_attrs('div', 'clan')[idx].text
            player['trophies'] = get_all_attrs('div', 'trophy-count')[idx].text
            info.append(player)
        # Extract the badge image path from the raw tag markup.
        source = str(get_all_attrs('div', 'badge'))
        src = source.split('src="')[1]
        url = src.split('" w')[0]
        imgpath = url.split('"')[0]
        em = discord.Embed(color=discord.Color.green())
        em.title = f'{name} (#{bandtag})'
        em.description = desc
        em.set_thumbnail(url=f'https://brawlstats.io{imgpath}')
        em.add_field(name="Total trophies", value=trophies)
        em.add_field(name="Required trophies", value=required)
        em.set_footer(icon_url='http://brawlstats.io/images/bs-stats.png')
        pages.append(em)
        em = discord.Embed(color=discord.Color.green())
        em.title = "Top members"
        em.description = "This is calculated through total trophy count."
        em.set_thumbnail(url=f'https://brawlstats.io{imgpath}')
        em.set_footer(icon_url='http://brawlstats.io/images/bs-stats.png')
        for entry in info:
            em.add_field(name=entry['name'], value=f"{entry['role'].replace(' ', '-')}\n{entry['trophies']}")
        pages.append(em)
        p_session = PaginatorSession(ctx, footer=f'Stats made by Cree-Py | Powered by brawlstats', pages=pages)
        await p_session.run()
@commands.command()
async def bsevents(self, ctx, when=None):
'''Information about events.'''
url = 'https://brawlstats.io/events/'
def get_attr(type: str, attr: str):
return soup.find(type, class_=attr).text
def get_all_attrs(type: str, attr: str):
return soup.find_all(type, class_=attr)
now = datetime.datetime.now(pytz.UTC)
now = now.astimezone(pytz.timezone("US/Pacific"))
dayofwk = int(now.weekday()) + 1
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
data = await resp.read()
soup = BeautifulSoup(data, 'lxml')
if when not in ('current', 'upcoming', 'both'):
return await ctx.send(f'Usage: `{ctx.prefix}bsevents <current|upcoming|both>`')
if when == "current":
await ctx.trigger_typing()
em = discord.Embed(color=discord.Color.green())
em.set_footer(text='Stats made by Cree-Py | Powered by brawlstats', icon_url='http://brawlstats.io/images/bs-stats.png')
if dayofwk in [1, 2, 3, 4, 5]:
em.title = "Current events"
j = 0
for i in range(3):
val = str(get_all_attrs('h6', 'card-subtitle mb-2 text-muted')[i].text) + '\n'
val += str(get_all_attrs('div', 'card-map-coins')[j].text)
val += ' Coins\n'
j += 1
val += str(get_all_attrs('div', 'card-map-coins')[j].text)
val += ' Coins'
j += 1
em.add_field(name=str(get_all_attrs('h4', 'card-title')[i].text), value=val)
await ctx.send(embed=em)
else:
em = discord.Embed(color=discord.Color.green())
em.set_footer(text='Stats made by Cree-Py | Powered by brawlstats', icon_url='http://brawlstats.io/images/bs-stats.png')
em.title = "Current events"
j = 0
for i in range(3):
val = str(get_all_attrs('h6', 'card-subtitle mb-2 text-muted')[i].text) + '\n'
val += str(get_all_attrs('div', 'card-map-coins')[j].text)
val += ' Coins\n'
j += 1
val += str(get_all_attrs('div', 'card-map-coins')[j].text)
val += ' Coins'
j += 1
em.add_field(name=str(get_all_attrs('h4', 'card-title')[i].text), value=val)
em.add_field(name=str(get_all_attrs('h4', 'card-title')[3].text), value=str(get_all_attrs('h6', 'card-subtitle mb-2 text-muted')[3].text) + '\n' + str(get_attr('div', 'card-map-tickets')) + ' Tickets')
await ctx.send(embed=em)
elif when == "upcoming":
await ctx.trigger_typing()
em = discord.Embed(color=discord.Color.green())
em.set_footer(text='Stats made by Cree-Py | Powered by brawlstats', icon_url='http://brawlstats.io/images/bs-stats.png')
j = 6
if dayofwk in [1, 2, 3, 4, 5]:
em.title = "Upcoming events"
for i in range(3):
val = str(get_all_attrs('h6', 'card-subtitle mb-2 text-muted')[i + 3].text)
val += '\n'
val += str(get_all_attrs('div', 'card-map-coins')[j].text)
val += ' Coins\n'
j += 1
val += str(get_all_attrs('div', 'card-map-coins')[j].text)
val += ' Coins'
j += 1
em.add_field(name=str(get_all_attrs('h4', 'card-title')[i + 3].text), value=val)
em.add_field(name=str(get_all_attrs('h4', 'card-title')[6].text), value=str(get_all_attrs('h6', 'card-subtitle mb-2 text-muted')[6].text) + '\n' + str(get_attr('div', 'card-map-tickets')) + ' Tickets')
await ctx.send(embed=em)
else:
em.title = "Upcoming events"
for i in range(3):
val = str(get_all_attrs('h6', 'card-subtitle mb-2 text-muted')[i + 4].text)
val += '\n'
val += | |
information in the database
self.mainMenu.agents.set_agent_field_db("lost_limit", lostLimit, self.sessionID)
# task the agent with the new lostLimit
self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT",
"global lostLimit; lostLimit=%s; print('lostLimit set to %s')" % (
lostLimit, lostLimit))
# dispatch this event
message = "[*] Tasked agent to change lost limit {}".format(lostLimit)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
# update the agent log
msg = "Tasked agent to change lost limit " + str(lostLimit)
self.mainMenu.agents.save_agent_log(self.sessionID, msg)
def do_killdate(self, line):
    "Get or set an agent's killdate (01/01/2016)."
    killDate = line.strip().split(' ')[0]
    if killDate == "":
        # No argument: ask the agent to echo its current killDate.
        task = "global killDate; print('killDate = ' + str(killDate))"
        message = "[*] Tasked agent to display killDate"
        log_msg = "Tasked agent to display killDate"
    else:
        # Persist the new killDate server-side before pushing it out.
        self.mainMenu.agents.set_agent_field_db("kill_date", killDate, self.sessionID)
        task = "global killDate; killDate='%s'; print('killDate set to %s')" % (
            killDate, killDate)
        message = "[*] Tasked agent to set killDate to {}".format(killDate)
        log_msg = "Tasked agent to set killdate to %s" % (killDate)
    # Queue the task, announce it on the event bus, and record it in the log.
    self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT", task)
    signal = json.dumps({
        'print': False,
        'message': message
    })
    dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
    self.mainMenu.agents.save_agent_log(self.sessionID, log_msg)
def do_workinghours(self, line):
    "Get or set an agent's working hours (9:00-17:00)."
    hours = line.strip().split(' ')[0]
    if hours == "":
        # No argument: ask the agent to echo its current schedule.
        task = "global workingHours; print('workingHours = ' + str(workingHours))"
        message = "[*] Tasked agent to get working hours"
        log_msg = "Tasked agent to get working hours"
    else:
        # Persist the new schedule server-side before pushing it out.
        self.mainMenu.agents.set_agent_field_db("working_hours", hours, self.sessionID)
        task = "global workingHours; workingHours= '%s'" % (hours)
        message = "[*] Tasked agent to set working hours to {}".format(hours)
        log_msg = "Tasked agent to set working hours to: %s" % (hours)
    # Queue the task, announce it on the event bus, and record it in the log.
    self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT", task)
    signal = json.dumps({
        'print': False,
        'message': message
    })
    dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
    self.mainMenu.agents.save_agent_log(self.sessionID, log_msg)
def do_shell(self, line):
    "Task an agent to use a shell command."
    command = line.strip()
    # Silently ignore an empty command line (matches original behaviour).
    if command == "":
        return
    # Queue the shell command for the agent.
    self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SHELL", str(command))
    # Announce on the event bus.
    message = "[*] Tasked agent to run shell command: {}".format(command)
    signal = json.dumps({
        'print': False,
        'message': message,
        'command': command
    })
    dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
    # Record in the agent log.
    self.mainMenu.agents.save_agent_log(
        self.sessionID, "Tasked agent to run shell command: %s" % (command))
def do_python(self, line):
    "Task an agent to run a Python command."
    command = line.strip()
    # Silently ignore an empty command line (matches original behaviour).
    if command == "":
        return
    # Queue the Python command for the agent.
    self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT", str(command))
    # Announce on the event bus.
    message = "[*] Tasked agent to run Python command: {}".format(command)
    signal = json.dumps({
        'print': False,
        'message': message,
        'command': command
    })
    dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
    # Record in the agent log.
    self.mainMenu.agents.save_agent_log(
        self.sessionID, "Tasked agent to run Python command: %s" % (command))
def do_pythonscript(self, line):
    "Load and execute a python script"
    path = line.strip()
    if os.path.splitext(path)[-1] == '.py' and os.path.isfile(path):
        # BUGFIX: the original used basename(path).rstrip('.py'), which
        # strips *any* trailing '.', 'p' or 'y' characters (e.g.
        # 'copy.py' -> 'co'); splitext removes only the extension.
        filename = os.path.splitext(os.path.basename(path))[0]
        # Context manager guarantees the handle is closed even on error.
        with open(path, 'r') as open_file:
            script = open_file.read()
        # Normalise line endings before hashing/encoding.
        script = script.replace('\r\n', '\n')
        script = script.replace('\r', '\n')
        encScript = base64.b64encode(script)
        self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SCRIPT_COMMAND", encScript)
        # dispatch this event
        message = "[*] Tasked agent to execute Python script: {}".format(filename)
        signal = json.dumps({
            'print': True,
            'message': message,
            'script_name': filename,
            # note md5 is after replacements done on \r and \r\n above
            'script_md5': hashlib.md5(script).hexdigest()
        })
        dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
        # update the agent log
        msg = "[*] Tasked agent to execute python script: " + filename
        self.mainMenu.agents.save_agent_log(self.sessionID, msg)
    else:
        print(helpers.color("[!] Please provide a valid path", color="red"))
def do_sysinfo(self, line):
    "Task an agent to get system information."
    # Queue the sysinfo task (no arguments needed).
    self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_SYSINFO")
    # Announce on the event bus.
    message = "[*] Tasked agent to get system information"
    signal = json.dumps({
        'print': False,
        'message': message
    })
    dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
    # Record in the agent log.
    self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to get system information")
def do_download(self, line):
    "Task an agent to download a file into the C2."
    target = line.strip()
    # Silently ignore an empty path (matches original behaviour).
    if target == "":
        return
    # Queue the download task for the agent.
    self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_DOWNLOAD", target)
    # Announce on the event bus.
    message = "[*] Tasked agent to download: {}".format(target)
    signal = json.dumps({
        'print': False,
        'message': message,
        'download_filename': target
    })
    dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
    # Record in the agent log.
    self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to download: %s" % (target))
def do_upload(self, line):
    "Task the C2 to upload a file into an agent."
    # "upload /path/file.ext" or "upload /path/file/file.ext newfile.ext"
    # absolute paths accepted
    parts = line.strip().split(' ')
    uploadname = ""
    if len(parts) > 0 and parts[0] != "":
        if len(parts) == 1:
            # if we're uploading the file with its original name
            uploadname = os.path.basename(parts[0])
        else:
            # if we're uploading the file as a different name
            uploadname = parts[1].strip()
    if parts[0] != "" and os.path.exists(parts[0]):
        # IMPROVED: check the 1MB limit *before* reading the file into
        # memory (the original read the whole file first).
        size = os.path.getsize(parts[0])
        if size > 1048576:
            print(helpers.color("[!] File size is too large. Upload limit is 1MB."))
        else:
            # Context manager guarantees the handle is closed even on error.
            with open(parts[0], 'rb') as f:
                fileData = f.read()
            print(helpers.color(
                "[*] Original tasked size of %s for upload: %s" % (uploadname, helpers.get_file_size(fileData)),
                color="green"))
            original_md5 = hashlib.md5(fileData).hexdigest()
            # update the agent log with the filename and MD5
            msg = "Tasked agent to upload " + parts[0] + " : " + original_md5
            self.mainMenu.agents.save_agent_log(self.sessionID, msg)
            # compress data before we base64
            c = compress.compress()
            start_crc32 = c.crc32_data(fileData)
            comp_data = c.comp_data(fileData, 9)
            fileData = c.build_header(comp_data, start_crc32)
            # get final file size
            fileData = helpers.encode_base64(fileData)
            # upload packets -> "filename | script data"
            if isinstance(fileData, bytes):
                fileData = fileData.decode("utf-8")
            data = uploadname + "|" + fileData
            # dispatch this event
            message = "[*] Starting upload of {}, final size {}".format(uploadname,
                                                                        helpers.get_file_size(fileData))
            signal = json.dumps({
                'print': True,
                'message': message,
                'upload_name': uploadname,
                'upload_md5': original_md5,
                'upload_size': helpers.get_file_size(fileData)
            })
            dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
            self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_UPLOAD", data)
    else:
        print(helpers.color("[!] Please enter a valid file path to upload"))
def do_usemodule(self, line):
    "Use an Empire Python module."
    # Strip asterisks added by MainMenu.complete_usemodule()
    module = "python/%s" % (line.strip().rstrip("*"))
    if module in self.mainMenu.modules.modules:
        # Drop into the module's own interactive menu for this agent.
        module_menu = ModuleMenu(self.mainMenu, module, agent=self.sessionID)
        module_menu.cmdloop()
    else:
        print(helpers.color("[!] Error: invalid module"))
def do_searchmodule(self, line):
    "Search Empire module names/descriptions."
    term = line.strip()
    if term == "":
        print(helpers.color("[!] Please enter a search term."))
    else:
        # Delegate the actual search to the module manager.
        self.mainMenu.modules.search_modules(term)
def do_osx_screenshot(self, line):
    "Use the python-mss module to take a screenshot, and save the image to the server. Not opsec safe"
    mod_name = 'python/collection/osx/native_screenshot'
    if self.mainMenu.modules.modules[mod_name]:
        module = self.mainMenu.modules.modules[mod_name]
        # Point the module at this agent before executing it.
        module.options['Agent']['Value'] = self.mainMenu.agents.get_agent_name_db(self.sessionID)
        msg = "[*] Tasked agent to take a screenshot"
        module_menu = ModuleMenu(self.mainMenu, mod_name)
        print(helpers.color(msg, color="green"))
        self.mainMenu.agents.save_agent_log(self.sessionID, msg)
        # Announce on the event bus, then execute the screenshot module.
        signal = json.dumps({
            'print': False,
            'message': msg
        })
        dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
        module_menu.do_execute("")
    else:
        print(helpers.color("[!] python/collection/osx/screenshot module not loaded"))
def do_cat(self, line):
    "View the contents of a file"
    if line != "":
        # Python snippet executed on the agent: read the whole file and
        # print it (the target path is substituted into the template).
        cmd = """
try:
    output = ""
    with open("%s","r") as f:
        for line in f:
            output += line
    print(output)
except Exception as e:
    print(str(e))
""" % (line)
        # Queue the snippet as a command task.
        self.mainMenu.agents.add_agent_task_db(self.sessionID, "TASK_CMD_WAIT", str(cmd))
        # Announce on the event bus.
        message = "[*] Tasked agent to cat file: {}".format(line)
        signal = json.dumps({
            'print': False,
            'message': message,
            'file_name': line
        })
        dispatcher.send(signal, sender="agents/{}".format(self.sessionID))
        # Record in the agent log.
        self.mainMenu.agents.save_agent_log(self.sessionID, "Tasked agent to cat file %s" % (line))
def do_loadpymodule(self, line):
"Import zip file containing a .py module or package with an __init__.py"
path = line.strip()
# check the file ext and confirm that the path given is a file
if os.path.splitext(path)[-1] == '.zip' and os.path.isfile(path):
# open a handle to the file and save the data to a variable, zlib compress
filename = os.path.basename(path).rstrip('.zip')
open_file = open(path, 'rb')
module_data = open_file.read()
open_file.close()
# dispatch this event
message = "[*] Tasked | |
# Source: moibenko/enstore -- sbin/xfer_stats_monthly.py
#!/usr/bin/env python
###############################################################################
# $Author$
# $Date$
# $Id$
#
# public | xfer_by_day | table | enstore
# public | xfer_by_month | table | enstore
#
###############################################################################
import sys
import os
import string
import time
import math
import pg
import thread
import socket
import configuration_client
import enstore_constants
import histogram
import enstore_files
# Byte-size units (powers of 1024, floats so later divisions stay float).
KB=1024.
MB=KB*KB
GB=KB*MB
TB=KB*GB
PB=KB*TB
# Daily read/write byte totals for a date range (parameters: start, end).
SELECT_STMT="select date,sum(read),sum(write) from xfer_by_day where date between %s and %s group by date order by date desc"
# Daily read/write byte totals over the whole xfer_by_day table.
SELECT_STMT1="select date,sum(read),sum(write) from xfer_by_day group by date order by date" # was xferby_month
# Bytes on volumes marked DELETED, grouped by the time of the state change.
SELECT_DELETED_BYTES ="select to_char(state.time, 'YYYY-MM-DD HH:MM:SS'), sum(file.size)::bigint from file, state where state.volume=file.volume and state.value='DELETED' group by state.time"
# bfid-embedded timestamp, size and deleted flag for live files on live volumes.
SELECT_WRITTEN_BYTES ="select substr(bfid,5,10), size, deleted from file where file.deleted = 'n' and file.volume in (select volume.id from volume where volume.media_type != 'null' and system_inhibit_0 != 'DELETED' ) "
def bfid2time(bfid):
    """Extract the unix timestamp embedded in a bfid string.

    The digits between an optional leading prefix (0, 3 or 4 characters)
    and the trailing 5 characters (6 when the bfid ends in 'L') are the
    timestamp.
    """
    # Trailing 'L' bfids carry one extra suffix character.
    end = -6 if bfid[-1] == "L" else -5
    # Prefix length: none, three or four characters before the digits.
    if bfid[0].isdigit():
        start = 0
    elif bfid[3].isdigit():
        start = 3
    else:
        start = 4
    stamp = int(bfid[start:end])
    # Values above 1500000000 have a fixed offset subtracted
    # (site-specific epoch correction -- TODO confirm rationale).
    if stamp > 1500000000:
        stamp -= 619318800
    return stamp
def showError(msg):
    """Write *msg* to stderr, prefixed with 'Error: ' (no newline added)."""
    sys.stderr.write("Error: %s" % (msg,))
def usage():
    """Print a one-line usage summary for this script."""
    # FIX: the 'print' statement form is Python-2-only; the single-argument
    # print(...) form below produces identical output on Python 2 and 3.
    print("Usage: %s <file_family> " % (sys.argv[0],))
def decorate(h, color, ylabel, marker):
    """Apply the standard plot styling (time axis, labels, wide impulse
    markers) to histogram *h*."""
    for setter, value in (
        (h.set_time_axis, True),
        (h.set_ylabel, ylabel),
        (h.set_xlabel, "Date (year-month-day)"),
        (h.set_line_color, color),
        (h.set_line_width, 20),
        (h.set_marker_text, marker),
        (h.set_marker_type, "impulses"),
    ):
        setter(value)
def get_min_max(h):
    """Return (y_min, i_min, y_max, i_max) over the bins of histogram *h*.

    y_max/i_max consider every bin; y_min/i_min consider only bins whose
    content is strictly positive (empty bins are ignored for the minimum).
    """
    y_max, i_max = 0, 0
    y_min, i_min = 1.e+32, 0
    for idx in range(h.n_bins()):
        content = h.get_bin_content(idx)
        if content > y_max:
            y_max, i_max = content, idx
        if 0 < content < y_min:
            y_min, i_min = content, idx
    return y_min, i_min, y_max, i_max
def get_sum(h):
    """Return the sum of all bin contents of histogram *h*."""
    # IMPROVED: use the builtin instead of a manual accumulator (the
    # original also shadowed the builtin name 'sum' with a local).
    return sum(h.get_bin_content(i) for i in range(h.n_bins()))
# Per-thread completion flags for fill_histograms: each worker sets its
# slot to 1 when done; plot_bytes() polls "while 0 in exitmutexes".
exitmutexes=[]
def fill_histograms(i, server_name, server_port, hlist):
    """Fill hlist[i] with deleted TB per DELETED-state timestamp read from
    server *i*'s enstore database; set exitmutexes[i] to 1 when finished so
    the spawning thread can detect completion."""
    csc = configuration_client.ConfigurationClient((server_name, server_port))
    acc = csc.get("database", {})
    db = pg.DB(host=acc.get('db_host', "localhost"),
               dbname=acc.get('dbname', "enstoredb"),
               port=acc.get('db_port', 5432),
               user=acc.get('dbuser_reader', "enstore_reader"))
    hist = hlist[i]
    for row in db.query(SELECT_DELETED_BYTES).getresult():
        if row:
            # row = (timestamp string, bytes); convert bytes to TB.
            hist.fill(time.mktime(time.strptime(row[0], '%Y-%m-%d %H:%M:%S')),
                      row[1] / TB)
    db.close()
    exitmutexes[i] = 1
def fill_tape_histograms(i, server_name, server_port, hlist):
    """Populate *hlist* (dict keyed by storage group) with active TB on
    tape per date, read from server *i*'s historic_tape_bytes table.

    New Histogram1D entries are created lazily per storage group.
    """
    config_server_client = configuration_client.ConfigurationClient((server_name, server_port))
    acc = config_server_client.get("database", {})
    db = pg.DB(host = acc.get('db_host', "localhost"),
               dbname= acc.get('dbname', "enstoredb"),
               port = acc.get('db_port', 5432),
               user = acc.get('dbuser_reader', "enstore_reader"))
    now_time = time.time()
    Y, M, D, h, m, s, wd, jd, dst = time.localtime(now_time)
    # One bin per day from end of 2000 through the end of this year.
    start_day = time.mktime((2000, 12, 31, 23, 59, 59, 0, 0, 0))
    now_day = time.mktime((Y, 12, 31, 23, 59, 59, wd, jd, dst))
    nbins = int((now_day-start_day)/(24.*3600.)+0.5)
    q="select date,active_bytes,storage_group from historic_tape_bytes order by date asc"
    res = db.query(q)
    for row in res.getresult():
        sg = row[2]
        # FIX: 'in' instead of Python-2-only dict.has_key().
        if sg not in hlist:
            hlist[sg]=histogram.Histogram1D("on_tape_by_month_%s"%(sg,),"Total Bytes On Tape by Month By %s"%(sg,),nbins,float(start_day),float(now_day))
        h = hlist[sg]
        h.set_marker_text(sg)
        h.set_line_color(0)
        # row = (date string, active bytes, storage group); bytes -> TB.
        h.fill(time.mktime(time.strptime(row[0],'%Y-%m-%d %H:%M:%S')),row[1]/TB)
        h.set_marker_type("impulses")
        h.set_time_axis(True)
        h.set_line_width(8)
    db.close()
def plot_bpd():
    #
    # this function creates plots of bytes transferred per day and per month
    # based on data on accounting database (*ensrv6)
    #
    intf = configuration_client.ConfigurationClientInterface(user_mode=0)
    csc = configuration_client.ConfigurationClient((intf.config_host,
                                                    intf.config_port))
    # Dead branch kept from an earlier revision (never executes).
    if ( 0 ) :
        acc = csc.get(enstore_constants.ACCOUNTING_SERVER)
        inq = csc.get('inquisitor')
        inq_host=inq.get('www_host').split('/')[2]
    servers=[]
    servers=[]
    servers=csc.get('known_config_servers')
    histograms=[]
    # Monthly bins from end of 2001 through the end of next year.
    now_time = time.time()
    t = time.ctime(time.time())
    Y, M, D, h, m, s, wd, jd, dst = time.localtime(now_time)
    start_day = time.mktime((2001, 12, 31, 23, 59, 59, 0, 0, 0))
    now_day = time.mktime((Y+1, 12, 31, 23, 59, 59, wd, jd, dst))
    nbins = int((now_day-start_day)/(30.*24.*3600.)+0.5)
    color=1
    # Running totals: s = transfers, s_i = integrated transfers,
    # s1 = writes, s1_i = integrated writes. Each per-server histogram is
    # added onto the running total and the cumulative result is plotted.
    s = histogram.Histogram1D("xfers_total_by_month",
                              "Total Bytes Transferred per Month By Enstore",
                              nbins,float(start_day),float(now_day))
    s.set_time_axis(True)
    plotter=histogram.Plotter("xfers_total_by_month",
                              "Total TBytes Transferred per Month By Enstore")
    s_i = histogram.Histogram1D("integrated_xfers_total_by_month",
                                "Integarted total Bytes transferred per Month By Enstore",
                                nbins,float(start_day),float(now_day))
    s_i.set_time_axis(True)
    iplotter=histogram.Plotter("integrated_xfers_total_by_month",
                               "Integarted total Bytes transferred per Month By Enstore")
    s1 = histogram.Histogram1D("writes_total_by_month",
                               "Total bytes written per month to Enstore",
                               nbins,float(start_day),float(now_day))
    s1.set_time_axis(True)
    plotter1=histogram.Plotter("writes_total_by_month",
                               "Total TBytes written per month by Enstore")
    s1_i = histogram.Histogram1D("writes_total_by_month",
                                 "Integrated Total bytes written per month to Enstore",
                                 nbins,float(start_day),float(now_day))
    s1_i.set_time_axis(True)
    iplotter1=histogram.Plotter("integrated_writes_total_by_month",
                                "Integrated Total TBytes written per month by Enstore")
    # NOTE(review): these four accumulators are assigned but only t_month
    # is reused below (overwritten before use).
    w_month=0.
    r_month=0.
    t_month=0.
    n_month=0;
    for server in servers:
        server_name,server_port = servers.get(server)
        if ( server_port != None ):
            config_server_client = configuration_client.ConfigurationClient((server_name, server_port))
            acc = config_server_client.get(enstore_constants.ACCOUNTING_SERVER)
            db_server_name = acc.get('dbhost','localhost')
            name = db_server_name.split('.')[0]
            # Per-server transfer histogram (reads + writes).
            h = histogram.Histogram1D("xfers_total_by_month_%s"%(name,),
                                      "Total Bytes Transferred per Month By %s"
                                      %(server,),
                                      nbins,float(start_day),float(now_day))
            h.set_time_axis(True)
            h.set_ylabel("Bytes");
            h.set_xlabel("Date (year-month-day)")
            h.set_line_color(color)
            h.set_line_width(5)
            # Per-server write-only histogram.
            h1 = histogram.Histogram1D("writes_by_month_%s"%(server,),
                                       "Total Bytes Written by Month By %s"
                                       %(server,),
                                       nbins,float(start_day),float(now_day))
            decorate(h1,color,"TiB/month",server)
            histograms.append(h1)
            color=color+1
            # Query this server's accounting DB for daily totals.
            db = pg.DB(host = acc.get('dbhost', 'localhost'),
                       dbname= acc.get('dbname', 'accounting'),
                       port = acc.get('dbport', 5432),
                       user = acc.get('dbuser_reader', 'enstore_reader'))
            res=db.query(SELECT_STMT1)
            for row in res.getresult():
                if not row:
                    continue
                # row = (date, read bytes, write bytes); fill in TB.
                h.fill(time.mktime(time.strptime(row[0],'%Y-%m-%d')),
                       (row[1]+row[2])/TB)
                h1.fill(time.mktime(time.strptime(row[0],'%Y-%m-%d')),row[2]/TB)
            db.close()
            # Accumulate transfers: s becomes the running stacked total.
            tmp=s+h
            tmp.set_name("xfer_%s"%(server,))
            tmp.set_data_file_name(server)
            tmp.set_marker_text(server)
            tmp.set_time_axis(True)
            tmp.set_ylabel("TiB/month")
            tmp.set_marker_type("impulses")
            tmp.set_line_color(color)
            tmp.set_line_width(5)
            plotter.add(tmp)
            s=tmp
            # Accumulate the integrated (cumulative-sum) transfer curve.
            integral = h.integral()
            integral.set_marker_text(server)
            integral.set_marker_type("impulses")
            integral.set_ylabel("TiB");
            tmp=s_i+integral
            tmp.set_name("integrated_xfers_monthly_%s"%(integral.get_marker_text(),))
            tmp.set_data_file_name("integrated_xfers_monthly_%s"%
                                   (integral.get_marker_text(),))
            tmp.set_marker_text(integral.get_marker_text())
            tmp.set_time_axis(True)
            tmp.set_ylabel(integral.get_ylabel())
            tmp.set_marker_type(integral.get_marker_type())
            tmp.set_line_color(color)
            tmp.set_line_width(5)
            iplotter.add(tmp)
            s_i=tmp
            # Accumulate writes (histogram names say "deletes" here --
            # NOTE(review): looks like a copy/paste of the deletes code).
            tmp=s1+h1
            tmp.set_name("deletes_monthly_%s"%(h1.get_marker_text(),))
            tmp.set_data_file_name("deletes_monthly_%s"%(h1.get_marker_text(),))
            tmp.set_marker_text(h1.get_marker_text())
            tmp.set_time_axis(True)
            tmp.set_ylabel(h1.get_ylabel())
            tmp.set_marker_type(h1.get_marker_type())
            tmp.set_line_color(color)
            tmp.set_line_width(5)
            plotter1.add(tmp)
            s1=tmp
            # Accumulate the integrated write curve.
            integral1 = h1.integral()
            integral1.set_marker_text(h1.get_marker_text())
            integral1.set_marker_type("impulses")
            integral1.set_ylabel("TiB");
            tmp=s1_i+integral1
            tmp.set_name("integrated_deletes_monthly_%s"%(h1.get_marker_text(),))
            tmp.set_data_file_name("integrated_deletes_monthly_%s"%
                                   (h1.get_marker_text(),))
            tmp.set_marker_text(h1.get_marker_text())
            tmp.set_time_axis(True)
            tmp.set_ylabel(h1.get_ylabel())
            tmp.set_marker_type(h1.get_marker_type())
            tmp.set_line_color(color)
            tmp.set_line_width(5)
            iplotter1.add(tmp)
            s1_i=tmp
    # Annotate the topmost (grand-total) transfer histogram with max/total
    # labels and render each of the four plots.
    plotter.reshuffle()
    tmp=plotter.get_histogram_list()[0]
    t_month_min,i_month_min,t_month_max,i_month_max = get_min_max(tmp)
    t_month = get_sum(tmp)
    tmp.set_line_color(1)
    delta = tmp.binarray[i_month_max]*0.05
    tmp.add_text("set label \"%10d\" at \"%s\",%f right rotate font \"Helvetica,12\"\n"%(tmp.binarray[i_month_max]+0.5,
                 time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(tmp.get_bin_center(i_month_max))),
                 tmp.binarray[i_month_max]+delta,))
    tmp.add_text("set label \"Total : %5d TiB \" at graph .05,.9 font \"Helvetica,13\"\n"%(t_month+0.5,))
    tmp.add_text("set label \"Max : %5d TiB (on %s) \" at graph .05,.85 font \"Helvetica,13\"\n"%(t_month_max+0.5,
                 time.strftime("%Y-%m",time.localtime(tmp.get_bin_center(i_month_max))),))
    plotter.plot()
    iplotter.reshuffle()
    tmp=iplotter.get_histogram_list()[0]
    t_month_min,i_month_min,t_month_max,i_month_max = get_min_max(tmp)
    tmp.add_text("set label \"Total Transferred : %5d TiB \" at graph .1,.8 font \"Helvetica,13\"\n"%(t_month_max+0.5,))
    tmp.set_line_color(1)
    tmp.set_marker_type("impulses")
    iplotter.plot()
    plotter1.reshuffle()
    tmp=plotter1.get_histogram_list()[0]
    t_month_min,i_month_min,t_month_max,i_month_max = get_min_max(tmp)
    t_month = get_sum(tmp)
    tmp.set_line_color(1)
    delta = tmp.binarray[i_month_max]*0.05
    tmp.add_text("set label \"%10d\" at \"%s\",%f right rotate font \"Helvetica,12\"\n"%(tmp.binarray[i_month_max]+0.5,
                 time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(tmp.get_bin_center(i_month_max))),
                 tmp.binarray[i_month_max]+delta,))
    tmp.add_text("set label \"Total : %5d TiB \" at graph .05,.9 font \"Helvetica,13\"\n"%(t_month+0.5,))
    tmp.add_text("set label \"Max : %5d TiB (on %s) \" at graph .05,.85 font \"Helvetica,13\"\n"%(t_month_max+0.5,
                 time.strftime("%Y-%m",time.localtime(tmp.get_bin_center(i_month_max))),))
    plotter1.plot()
    iplotter1.reshuffle()
    tmp=iplotter1.get_histogram_list()[0]
    t_month_min,i_month_min,t_month_max,i_month_max = get_min_max(tmp)
    tmp.add_text("set label \"Total Written : %5d TiB \" at graph .1,.8 font \"Helvetica,13\"\n"%(t_month_max+0.5,))
    tmp.set_line_color(1)
    tmp.set_marker_type("impulses")
    iplotter1.plot()
def plot_bytes():
    #
    # This function plots bytes written/deleted to/from Enstore base on data in file and volume tables
    # from *ensrv0 postgres databases damn slow
    #
    intf = configuration_client.ConfigurationClientInterface(user_mode=0)
    csc = configuration_client.ConfigurationClient((intf.config_host, intf.config_port))
    servers=[]
    servers=[]
    servers=csc.get('known_config_servers')
    histograms=[]
    # Monthly bins from end of 2001 through the end of next year.
    now_time = time.time()
    t = time.ctime(time.time())
    Y, M, D, h, m, s, wd, jd, dst = time.localtime(now_time)
    start_day = time.mktime((2001, 12, 31, 23, 59, 59, 0, 0, 0))
    now_day = time.mktime((Y+1, 12, 31, 23, 59, 59, wd, jd, dst))
    nbins = int((now_day-start_day)/(30.*24.*3600.)+0.5)
    # Running totals: s1 = deletes, s1_i = integrated deletes.
    s1 = histogram.Histogram1D("deletes_total_by_month","Total bytes deleted per month from Enstore",nbins,float(start_day),float(now_day))
    s1.set_time_axis(True)
    plotter1=histogram.Plotter("deletes_total_by_month","Total TiB deleted per month from Enstore")
    s1_i = histogram.Histogram1D("deletes_total_by_month","Integrated Total bytes deleted per month from Enstore",nbins,float(start_day),float(now_day))
    s1_i.set_time_axis(True)
    iplotter1=histogram.Plotter("integrated_deletes_total_by_month","Integrated Total TiB deleted per month from Enstore")
    i = 0
    color=1
    # Fan out one worker thread per configured server; each fills its own
    # histogram and flips its exitmutexes slot to 1 when done.
    for server in servers:
        server_name,server_port = servers.get(server)
        if ( server_port != None ):
            h = histogram.Histogram1D("deletes_by_month_%s"%(server,),"Total Bytes Deleted by Month By %s"%(server,),nbins,float(start_day),float(now_day))
            decorate(h,color,"TiB/month",server)
            histograms.append(h)
            exitmutexes.append(0)
            thread.start_new(fill_histograms, (i,server_name,server_port,histograms))
            i=i+1
            color=color+1
    # Poll until every worker has reported completion.
    while 0 in exitmutexes:
        time.sleep(60)
        pass
    i = 0
    # Stack each per-server histogram onto the running totals.
    for i in range(len(histograms)):
        h1 = histograms[i]
        color = i + 2
        tmp=s1+h1
        tmp.set_name("deletes_monthly_%s"%(h1.get_marker_text(),))
        tmp.set_data_file_name("deletes_monthly_%s"%(h1.get_marker_text(),))
        tmp.set_marker_text(h1.get_marker_text())
        tmp.set_time_axis(True)
        tmp.set_ylabel(h1.get_ylabel())
        tmp.set_marker_type(h1.get_marker_type())
        tmp.set_line_color(color)
        tmp.set_line_width(5)
        plotter1.add(tmp)
        s1=tmp
        # Integrated (cumulative-sum) delete curve.
        integral1 = h1.integral()
        integral1.set_marker_text(h1.get_marker_text())
        integral1.set_marker_type("impulses")
        integral1.set_ylabel("TiB");
        tmp=s1_i+integral1
        tmp.set_name("integrated_deletes_monthly_%s"%(h1.get_marker_text(),))
        tmp.set_data_file_name("integrated_deletes_monthly_%s"%(h1.get_marker_text(),))
        tmp.set_marker_text(h1.get_marker_text())
        tmp.set_time_axis(True)
        tmp.set_ylabel(h1.get_ylabel())
        tmp.set_marker_type(h1.get_marker_type())
        tmp.set_line_color(color)
        tmp.set_line_width(5)
        iplotter1.add(tmp)
        s1_i=tmp
        i=i+1
    plotters=[]
    plotters.append(plotter1)
    iplotters=[]
    iplotters.append(iplotter1)
    # Annotate the topmost (grand-total) histogram and render each plot.
    for p in plotters:
        p.reshuffle()
        tmp=p.get_histogram_list()[0]
        tmp.set_line_color(1)
        t_day_min,i_day_min,t_day_max,i_day_max = get_min_max(tmp)
        t_day = get_sum(tmp)
        delta = tmp.binarray[i_day_max]*0.05
        tmp.add_text("set label \"%5d\" at \"%s\",%f right rotate font \"Helvetica,12\"\n"%(tmp.binarray[i_day_max]+0.5,
                     time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(tmp.get_bin_center(i_day_max))),
                     tmp.binarray[i_day_max]+delta,))
        tmp.add_text("set label \"Total : %5d TiB \" at graph .8,.8 font \"Helvetica,13\"\n"%(t_day+0.5,))
        tmp.add_text("set label \"Max : %5d TiB (on %s) \" at graph .8,.75 font \"Helvetica,13\"\n"%(t_day_max+0.5,
                     time.strftime("%m-%d",time.localtime(tmp.get_bin_center(i_day_max))),))
        tmp.set_marker_type("impulses")
        p.plot()
    for p in iplotters:
        p.reshuffle()
        tmp=p.get_histogram_list()[0]
        tmp.set_line_color(1)
        t_day_min,i_day_min,t_day_max,i_day_max = get_min_max(tmp)
        tmp.add_text("set label \"Total : %5d TiB \" at graph .1,.8 font \"Helvetica,13\"\n"%(t_day_max+0.5,))
        tmp.set_marker_type("impulses")
        p.plot()
def plot_tape_bytes():
#
# This function plots bytes written/deleted to/from Enstore base on data in file and volume tables
# from *ensrv0 postgres databases damn slow
#
intf = configuration_client.ConfigurationClientInterface(user_mode=0)
csc = configuration_client.ConfigurationClient((intf.config_host, intf.config_port))
servers=[]
servers=csc.get('known_config_servers')
histograms={}
now_time = time.time()
t = time.ctime(time.time())
Y, M, D, h, m, s, wd, jd, dst = time.localtime(now_time)
start_day = time.mktime((2000, 12, 31, 23, 59, 59, 0, 0, 0))
now_day = time.mktime((Y, 12, 31, 23, 59, 59, wd, jd, dst))
nbins = int((now_day-start_day)/(24.*3600.)+0.5)
s1 = histogram.Histogram1D("on_tape_total_by_month","Active bytes on tape in Enstore",nbins,float(start_day),float(now_day))
s1.set_time_axis(True)
plotter1=histogram.Plotter("on_tape_total_by_month","Active TiB on tape in Enstore")
i = 0
for server in servers:
server_name,server_port = servers.get(server)
if ( server_port != None ):
fill_tape_histograms(i,server_name,server_port,histograms)
i=i+1
values = histograms.values()
values.sort()
for h1 in values:
tmp=s1+h1
tmp.set_name("on_tape_monthly_%s"%(h1.get_marker_text(),))
tmp.set_data_file_name("on_tape_monthly_%s"%(h1.get_marker_text(),))
tmp.set_marker_text(h1.get_marker_text())
tmp.set_time_axis(True)
tmp.set_ylabel(h1.get_ylabel())
tmp.set_marker_type(h1.get_marker_type())
tmp.set_line_color(0)
tmp.set_line_width(8)
plotter1.add(tmp)
s1=tmp
i=i+1
plotters=[]
plotters.append(plotter1)
for p in plotters:
p.reshuffle()
p.add_command("set key outside width 2")
p.add_command("set xtics border nomirror rotate by | |
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from logging import getLogger
import operator as op
import re
from .._vendor.toolz import excepts
from ..common.compat import string_types, zip, zip_longest, text_type, with_metaclass
from ..exceptions import InvalidVersionSpec
log = getLogger(__name__)
# normalized_version() is needed by conda-env
# It is currently being pulled from resolve instead, but
# eventually it ought to come from here
def normalized_version(version):
    """Parse *version* into a comparable :class:`VersionOrder`.

    Kept at module level because conda-env imports it from here (see the
    comment above).
    """
    return VersionOrder(version)
def ver_eval(vtest, spec):
    """Return True if version string *vtest* satisfies spec string *spec*."""
    return VersionSpec(spec).match(vtest)
# Characters permitted anywhere in a (lower-cased) version string.
version_check_re = re.compile(r'^[\*\.\+!_0-9a-z]+$')
# Splits a version component into runs of digits, '*'s, and other characters.
version_split_re = re.compile('([0-9]+|[*]+|[^0-9*]+)')
# NOTE(review): not referenced in this chunk -- presumably a parse cache used
# further down the file; confirm before removing.
version_cache = {}
class SingleStrArgCachingType(type):
    """Metaclass that memoizes instances keyed by their single string argument.

    An instance of the class passed back in is returned unchanged, and a
    non-string argument bypasses the cache entirely; only string arguments
    are looked up in (and added to) the class's ``_cache_`` dict.
    """

    def __call__(cls, arg):
        if isinstance(arg, cls):
            # Already constructed: hand it straight back.
            return arg
        if not isinstance(arg, string_types):
            # Non-string inputs are never cached.
            return super(SingleStrArgCachingType, cls).__call__(arg)
        cache = cls._cache_
        if arg not in cache:
            cache[arg] = super(SingleStrArgCachingType, cls).__call__(arg)
        return cache[arg]
@with_metaclass(SingleStrArgCachingType)
class VersionOrder(object):
"""
This class implements an order relation between version strings.
Version strings can contain the usual alphanumeric characters
(A-Za-z0-9), separated into components by dots and underscores. Empty
segments (i.e. two consecutive dots, a leading/trailing underscore)
are not permitted. An optional epoch number - an integer
followed by '!' - can preceed the actual version string
(this is useful to indicate a change in the versioning
scheme itself). Version comparison is case-insensitive.
Conda supports six types of version strings:
* Release versions contain only integers, e.g. '1.0', '2.3.5'.
* Pre-release versions use additional letters such as 'a' or 'rc',
for example '1.0a1', '1.2.beta3', '2.3.5rc3'.
* Development versions are indicated by the string 'dev',
for example '1.0dev42', '2.3.5.dev12'.
* Post-release versions are indicated by the string 'post',
for example '1.0post1', '2.3.5.post2'.
* Tagged versions have a suffix that specifies a particular
property of interest, e.g. '1.1.parallel'. Tags can be added
to any of the preceding four types. As far as sorting is concerned,
tags are treated like strings in pre-release versions.
* An optional local version string separated by '+' can be appended
to the main (upstream) version string. It is only considered
in comparisons when the main versions are equal, but otherwise
handled in exactly the same manner.
To obtain a predictable version ordering, it is crucial to keep the
version number scheme of a given package consistent over time.
Specifically,
* version strings should always have the same number of components
(except for an optional tag suffix or local version string),
* letters/strings indicating non-release versions should always
occur at the same position.
Before comparison, version strings are parsed as follows:
* They are first split into epoch, version number, and local version
number at '!' and '+' respectively. If there is no '!', the epoch is
set to 0. If there is no '+', the local version is empty.
* The version part is then split into components at '.' and '_'.
* Each component is split again into runs of numerals and non-numerals
* Subcomponents containing only numerals are converted to integers.
* Strings are converted to lower case, with special treatment for 'dev'
and 'post'.
* When a component starts with a letter, the fillvalue 0 is inserted
to keep numbers and strings in phase, resulting in '1.1.a1' == 1.1.0a1'.
* The same is repeated for the local version part.
Examples:
1.2g.beta15.rc => [[0], [1], [2, 'g'], [0, 'beta', 15], [0, 'rc']]
1!2.15.1_ALPHA => [[1], [2], [15], [1, '_alpha']]
The resulting lists are compared lexicographically, where the following
rules are applied to each pair of corresponding subcomponents:
* integers are compared numerically
* strings are compared lexicographically, case-insensitive
* strings are smaller than integers, except
* 'dev' versions are smaller than all corresponding versions of other types
* 'post' versions are greater than all corresponding versions of other types
* if a subcomponent has no correspondent, the missing correspondent is
treated as integer 0 to ensure '1.1' == '1.1.0'.
The resulting order is:
0.4
< 0.4.0
< 0.4.1.rc
== 0.4.1.RC # case-insensitive comparison
< 0.4.1
< 0.5a1
< 0.5b3
< 0.5C1 # case-insensitive comparison
< 0.5
< 0.9.6
< 0.960923
< 1.0
< 1.1dev1 # special case 'dev'
< 1.1a1
< 1.1.0dev1 # special case 'dev'
== 1.1.dev1 # 0 is inserted before string
< 1.1.a1
< 1.1.0rc1
< 1.1.0
== 1.1
< 1.1.0post1 # special case 'post'
== 1.1.post1 # 0 is inserted before string
< 1.1post1 # special case 'post'
< 1996.07.12
< 1!0.4.1 # epoch increased
< 1!3.1.1.6
< 2!0.4.1 # epoch increased again
Some packages (most notably openssl) have incompatible version conventions.
In particular, openssl interprets letters as version counters rather than
pre-release identifiers. For openssl, the relation
1.0.1 < 1.0.1a => True # for openssl
holds, whereas conda packages use the opposite ordering. You can work-around
this problem by appending a dash to plain version numbers:
1.0.1a => 1.0.1post.a # ensure correct ordering for openssl
"""
_cache_ = {}
def __init__(self, vstr):
    """Parse version string *vstr* into comparable component lists.

    Populates:
      * ``self.norm_version`` -- the normalized (lower-cased, validated) string
      * ``self.version``      -- ``[epoch] +`` main version components
      * ``self.local``        -- local version components (after '+'), may be []

    Each component is a list starting with an int (``fillvalue`` is prepended
    when the component starts with a letter), followed by alternating
    int/str runs.  Raises ``InvalidVersionSpec`` for empty or malformed input.
    """
    # version comparison is case-insensitive
    version = vstr.strip().rstrip().lower()
    # basic validity checks
    if version == '':
        raise InvalidVersionSpec(vstr, "empty version string")
    invalid = not version_check_re.match(version)
    if invalid and '-' in version and '_' not in version:
        # Allow for dashes as long as there are no underscores
        # as well, by converting the former to the latter.
        version = version.replace('-', '_')
        invalid = not version_check_re.match(version)
    if invalid:
        raise InvalidVersionSpec(vstr, "invalid character(s)")
    # when fillvalue == 0 => 1.1 == 1.1.0
    # when fillvalue == -1 => 1.1 < 1.1.0
    self.norm_version = version
    self.fillvalue = 0
    # find epoch
    version = version.split('!')
    if len(version) == 1:
        # epoch not given => set it to '0'
        epoch = ['0']
    elif len(version) == 2:
        # epoch given, must be an integer
        if not version[0].isdigit():
            raise InvalidVersionSpec(vstr, "epoch must be an integer")
        epoch = [version[0]]
    else:
        raise InvalidVersionSpec(vstr, "duplicated epoch separator '!'")
    # find local version string
    version = version[-1].split('+')
    if len(version) == 1:
        # no local version
        self.local = []
    elif len(version) == 2:
        # local version given; '_' is treated like '.' as a separator
        self.local = version[1].replace('_', '.').split('.')
    else:
        raise InvalidVersionSpec(vstr, "duplicated local version separator '+'")
    # split version
    self.version = epoch + version[0].replace('_', '.').split('.')
    # split components into runs of numerals and non-numerals,
    # convert numerals to int, handle special strings
    for v in (self.version, self.local):
        for k in range(len(v)):
            c = version_split_re.findall(v[k])
            if not c:
                raise InvalidVersionSpec(vstr, "empty version component")
            for j in range(len(c)):
                if c[j].isdigit():
                    c[j] = int(c[j])
                elif c[j] == 'post':
                    # ensure number < 'post' == infinity
                    c[j] = float('inf')
                elif c[j] == 'dev':
                    # ensure '*' < 'DEV' < '_' < 'a' < number
                    # by upper-casing (all other strings are lower case)
                    c[j] = 'DEV'
            if v[k][0].isdigit():
                v[k] = c
            else:
                # components shall start with a number to keep numbers and
                # strings in phase => prepend fillvalue
                v[k] = [self.fillvalue] + c
def __str__(self):
    """Return the normalized version string."""
    return self.norm_version
def __repr__(self):
    """Return e.g. ``VersionOrder("1.2.3")``."""
    cls_name = self.__class__.__name__
    return '{0}("{1}")'.format(cls_name, self)
def _eq(self, t1, t2):
    """True when component lists *t1* and *t2* compare equal.

    Missing components are padded with ``[]`` and missing subcomponents
    with ``self.fillvalue`` so that '1.1' == '1.1.0'.
    """
    fill = self.fillvalue
    for v1, v2 in zip_longest(t1, t2, fillvalue=[]):
        if any(a != b for a, b in zip_longest(v1, v2, fillvalue=fill)):
            return False
    return True
def __eq__(self, other):
    """Versions are equal when both the main and local parts match."""
    if not self._eq(self.version, other.version):
        return False
    return self._eq(self.local, other.local)
def startswith(self, other):
    """True if this version "starts with" *other*.

    The version lists must match up to the last component of *other*;
    that final component may match by string prefix (``c1.startswith(c2)``
    below), so e.g. '1.2.alpha' starts with '1.2.a'.
    """
    # Tests if the version lists match up to the last element in "other".
    if other.local:
        # other carries a local version: the main versions must match
        # exactly and the prefix test moves to the local part.
        if not self._eq(self.version, other.version):
            return False
        t1 = self.local
        t2 = other.local
    else:
        t1 = self.version
        t2 = other.version
    # All components before the last must be exactly equal.
    nt = len(t2) - 1
    if not self._eq(t1[:nt], t2[:nt]):
        return False
    v1 = [] if len(t1) <= nt else t1[nt]
    v2 = t2[nt]
    # Within the last component, all subcomponents before the last must
    # also be exactly equal.
    nt = len(v2) - 1
    if not self._eq([v1[:nt]], [v2[:nt]]):
        return False
    c1 = self.fillvalue if len(v1) <= nt else v1[nt]
    c2 = v2[nt]
    if isinstance(c2, string_types):
        # Final subcomponent is a string: prefix match.
        return isinstance(c1, string_types) and c1.startswith(c2)
    return c1 == c2
def __ne__(self, other):
    """Negation of __eq__ (explicit for Python 2 compatibility)."""
    return not (self == other)
def __lt__(self, other):
for t1, t2 in zip([self.version, self.local], [other.version, other.local]):
for v1, v2 in zip_longest(t1, t2, fillvalue=[]):
for c1, c2 in zip_longest(v1, v2, fillvalue=self.fillvalue):
if c1 == c2:
continue
elif isinstance(c1, string_types):
if not isinstance(c2, string_types):
# str < int
return True
elif isinstance(c2, string_types):
# not (int < str)
return False
# | |
"""
Provide a standard protocol for asking graph oriented questions of Translator data sources.
"""
import argparse
import json
import logging
import os
import traceback
from pathlib import Path
import jsonschema
import requests
import yaml
from flasgger import Swagger
from flask import Flask, request, abort, Response, send_from_directory, render_template, make_response
from flask_cors import CORS
from flask_restx import Api as BaseApi, Resource
from tranql.concept import ConceptModel
from tranql.exception import TranQLException
from tranql.main import TranQL, TranQLIncompleteParser
from tranql.tranql_ast import SelectStatement
from tranql.tranql_schema import GraphTranslator
from tranql.exception import TranQLException
from tranql.config import Config as TranqlConfig
# Module-level logger.
logger = logging.getLogger(__name__)
# The built web app is served from web/build; its assets from web/build/static.
template_folder_path = Path(__file__).parent / "web" / "build"
template_folder = str(template_folder_path)
static_folder_path = template_folder_path / "static"
static_folder = str(static_folder_path)
# Optional URL prefix under which the app is mounted, normalized to either
# '' or '/<prefix>' with no trailing slash.
WEB_PREFIX = os.environ.get('WEB_PATH_PREFIX', '')
WEB_PREFIX = f"/{WEB_PREFIX.strip('/')}" if WEB_PREFIX else ''
app = Flask(__name__, template_folder=template_folder, static_folder=static_folder)
# Due to a bug with Flask-RESTPlus/RESTX, even when doc generation is disabled on the root path, you still can't serve to it.
# This is a workaround to manually override the portion that causes the problem.
class Api(BaseApi):
    """Flask-RESTX Api subclass that suppresses doc-route registration.

    Workaround for a Flask-RESTPlus/RESTX bug: even with doc generation
    disabled on the root path, the root path otherwise cannot be served to
    (see the comment above this class).
    """
    def _register_doc(self, app_or_blueprint):
        # Intentionally skip registering RESTX's documentation blueprint.
        pass

    @property
    def base_path(self):
        # Serve from the application root rather than RESTX's computed base.
        return ""
api = Api(app)
# Allow cross-origin requests from the web client.
CORS(app)
# flasgger (Swagger UI) top-level metadata.
app.config['SWAGGER'] = {
    'title': 'TranQL API',
    'description': 'Translator Query Language (TranQL) API.'
                   '<div><a href="https://github.com/NCATS-Tangerine/tranql">'
                   'TranQL Source Code and Documentation.'
                   '</a></div>'
                   '<div><a href="https://researchsoftwareinstitute.github.io/data-translator/apps/tranql">'
                   'TranQL Web Page'
                   '</a></div>'
    ,
    'uiversion': 3,
    'openapi': '3.0.1'
}
# Load the shared OpenAPI definitions referenced by the per-endpoint
# docstrings below.  Both YAML files ship alongside this module.
filename = 'translator_interchange.yaml'
filename = os.path.join(os.path.dirname(__file__), 'backplane', filename)
definitions_filename = 'definitions.yaml'
definitions_filename = os.path.join(os.path.dirname(__file__), definitions_filename)
with open(filename, 'r') as file_obj:
    # safe_load replaces the deprecated Loader-less yaml.load call; these are
    # trusted local files, but plain data needs no arbitrary-object loading.
    template = {
        "definitions": yaml.safe_load(file_obj)["definitions"],
        "tags": [
            {"name": "query"},
            {"name": "schema"},
            {"name": "util"},
            {"name": "configuration"},
            {"name": "webapp"}
        ]
    }
with open(definitions_filename, 'r') as definitions_file:
    template['definitions'].update(yaml.safe_load(definitions_file))
# Register flasgger with the merged template; the spec and UI routes honor
# the optional WEB_PREFIX mount point, and the UI assets are pinned to a CDN.
swagger = Swagger(app, template=template, config={
    "headers": [
    ],
    "specs": [
        {
            "endpoint": 'apispec_1',
            "route": f'{WEB_PREFIX}/apispec_1.json',
            "rule_filter": lambda rule: True,  # ?
            "model_filter": lambda tag: True,  # ?
        }
    ],
    "swagger_ui": True,
    "specs_route": f"{WEB_PREFIX}/apidocs/",
    "openapi": "3.0.1",
    'swagger_ui_bundle_js': 'https://rawcdn.githack.com/swagger-api/swagger-ui/v3.23.1/dist/swagger-ui-bundle.js',
    'swagger_ui_standalone_preset_js': 'https://rawcdn.githack.com/swagger-api/swagger-ui/v3.23.1/dist/swagger-ui-standalone-preset.js',
    'swagger_ui_css': 'https://rawcdn.githack.com/swagger-api/swagger-ui/v3.23.1/dist/swagger-ui.css',
    'swagger_ui_js': 'https://rawcdn.githack.com/swagger-api/swagger-ui/v3.23.1/dist/swagger-ui.js'
})
class StandardAPIResource(Resource):
    """Base class for TranQL endpoints.

    Provides request validation against the interchange schema, uniform
    error-payload construction, and response/status-code mapping.
    """

    @staticmethod
    def validate(request):
        """Validate request.json against the Message schema of the
        translator interchange spec; abort with HTTP 400 on failure."""
        with open(filename, 'r') as file_obj:
            # safe_load replaces the deprecated Loader-less yaml.load call.
            specs = yaml.safe_load(file_obj)
        to_validate = specs["components"]["schemas"]["Message"]
        to_validate["components"] = specs["components"]
        # Drop the Message entry to avoid a self-referential schema.
        to_validate["components"].pop("Message", None)
        try:
            jsonschema.validate(request.json, to_validate)
        except jsonschema.exceptions.ValidationError as error:
            # Use the module logger (was the root logger) so validation
            # failures follow this module's log routing.
            logger.error(f"ERROR: {str(error)}")
            abort(Response(str(error), 400))

    @staticmethod
    def handle_exception(e, warning=False):
        """Normalize *e* -- an exception, a list of exceptions, or a plain
        message string -- into {"status": ..., "errors": [...]}.

        Each error carries "message" and "details"; TranQLException
        instances contribute their .details payload.
        """
        result = {"errors": []}
        if isinstance(e, list):
            for exception in e:
                result["errors"].extend(
                    StandardAPIResource.handle_exception(exception)["errors"])
        elif isinstance(e, TranQLException):
            result["errors"].append({
                "message": str(e),
                "details": str(e.details) if e.details else ''
            })
        elif isinstance(e, Exception):
            result["errors"].append({
                "message": str(e),
                "details": ''
            })
        elif isinstance(e, str):
            result["errors"].extend(
                StandardAPIResource.handle_exception(Exception(e))["errors"])
        traceback.print_exc()
        result["status"] = "Warning" if warning else "Error"
        return result

    @staticmethod
    def response(data):
        """Pair *data* with HTTP 200, or 500 when it reports status "Error"."""
        status_code = 200
        if isinstance(data, dict) and data.get('status', None) == "Error":
            status_code = 500
        return (data, status_code)
class WebAppRoot(Resource):
    # Serves the built web app's entry point.  The docstring below is parsed
    # by flasgger as the endpoint's OpenAPI spec -- treat it as behavior.
    def get(self):
        """
        Web app root
        ---
        tags: [webapp]
        consumes': [ 'text/plain' ]
        """
        # NOTE(review): "consumes'" above carries a stray quote; confirm
        # whether flasgger tolerates or silently drops this key.
        return send_from_directory(template_folder, 'index.html')
# api.add_resource(WebAppRoot, '/', endpoint='webapp_root')
class WebAppPath(Resource):
    # Serves arbitrary static assets of the built web app.
    def get(self, path, web_prefix):
        """
        Web app path
        ---
        tags: [webapp]
        parameters:
            - in: path
              name: path
              schema:
                type: string
              required: true
              description: Resource path.
        """
        logger.debug(f"........................PATH: {path}")
        if path.endswith('index.html'):
            # index.html is rendered as a template so the mount prefix can
            # be injected into the page.
            return make_response(render_template(path, web_prefix=web_prefix), 200)
        if path != "" and os.path.exists(template_folder + "/" + path):
            # NOTE(review): `filename=` was renamed to `path=` in Flask 2.x;
            # confirm the pinned Flask version still accepts this kwarg.
            return send_from_directory(template_folder, filename=path)
        else:
            abort(404)
# api.add_resource(WebAppPath, '/<path:path>', endpoint='webapp_path')
# api.add_resource(WebAppPath, '/', endpoint='webapp_root', defaults={'path': 'index.html'})
@app.errorhandler(404)
def page_not_found(e):
    """Return the unmatched request path with a 404 status."""
    return request.path, 404
class Configuration(StandardAPIResource):
    """ Configuration """
    def get(self):
        """
        TranQL Configuration
        ---
        tags: [configuration]
        description: TranQL Configuration
        responses:
            '200':
                description: Message
                content:
                    application/json:
                        schema:
                            type: object
        """
        # NOTE(review): `config` is a module-level mapping defined elsewhere
        # in this file (not visible in this chunk) -- confirm its keys.
        return self.response({
            "api_url": config['API_URL'],
            "robokop_url": config['ROBOKOP_URL']
        })
class DecorateKG(StandardAPIResource):
    """ Exposes an endpoint that allows for the decoration of a KGS 0.1.0 knowledge graph with TranQL's decorate method. """
    def post(self):
        """
        Decorate a Knowledge Graph
        ---
        tags: [util]
        description: Decorates a knowledge graph's elements with given data using TranQL's decorate method.
        requestBody:
            name: knowledge_graph
            description: A KGS 0.1.0 compliant KGraph
            required: true
            content:
                application/json:
                    schema:
                        $ref: '#/definitions/KGraph'
                    example:
                        nodes:
                          - id: n0
                            type: chemical_substance
                          - id: n1
                            type: gene
                        edges:
                          - id: e0
                            type: targets
                            source_id: n0
                            target_id: n1
        parameters:
            - in: query
              name: reasoners
              schema:
                type: array
                items:
                    type: string
                example:
                  - rtx
                  - robokop
              required: false
              description: The reasoner that the knowledge graph originates from.
        responses:
            '200':
                description: Knowledge Graph
                content:
                    application/json:
                        schema:
                            $ref: '#/definitions/KGraph'
            '500':
                description: An error was encountered
                content:
                    application/json:
                        schema:
                            $ref: '#/definitions/Error'
        """
        message = {"knowledge_graph": request.json}
        # NOTE(review): MultiDict.getlist returns a list (possibly empty),
        # never None, so the `!= None` test below is always true and an empty
        # list still sets options["schema"].  Presumably `if reasoners:` was
        # intended -- confirm against decorate_result's handling of "schema".
        reasoners = request.args.getlist('reasoners', None)
        options = {}
        if reasoners != None:
            options["schema"] = reasoners
        SelectStatement.decorate_result(message, options)
        return self.response(message["knowledge_graph"])
class MergeMessages(StandardAPIResource):
    """ Exposes an endpoint that allows for the merging of an arbitrary amount of messages """
    def post(self):
        """
        Merge Messages
        ---
        tags: [util]
        description: Merge Message objects together using TranQL's merge_results method.
        requestBody:
            name: messages
            description: An array of KGS 0.1.0 compliant message objects
            required: true
            content:
                application/json:
                    schema:
                        type: array
                        items:
                            $ref: '#/definitions/Message'
                    example:
                      - knowledge_graph:
                            nodes:
                              - id: TEST:CS1
                                type: chemical_substance
                              - id: TEST:G1
                                type: gene
                            edges:
                              - type: targets
                                source_id: TEST:CS1
                                target_id: TEST:G1
                      - knowledge_graph:
                            nodes:
                              - id: TEST:merged
                                type:
                                  - chemical_substance
                                  - Drug
                                equivalent_identifiers:
                                  - TEST:CS1
                              - id: TEST:CS2
                                type: chemical_substance
                              - id: TEST:G2
                                type: gene
                            edges:
                              - type: interacts_with
                                source_id: TEST:CS2
                                target_id: TEST:G2
        parameters:
            - in: query
              name: name_based_merging
              schema:
                type: boolean
                default: true
              required: false
              description: Tells the merger whether or not to merge elements with identical `name` properties.
            - in: query
              name: resolve_names
              schema:
                type: boolean
                default: false
              required: false
              description: >
                (Experimental) Tells the merger to invoke the Bionames API on nodes in order to get more equivalent identifiers.
                Ideally, this should result in a more thoroughly merged graph, as fewer equivalent nodes will fail to be detected.
                This currently should not be used on large queries (1000+ nodes), or it will end up flooding the Bionames API.
            - in: query
              name: question_graph
              schema:
                type: string
              description: The JSON serialized question graph that the result should retain
            - in: query
              name: root_order
              schema:
                type: array
                items:
                    type: string
              required: false
              description: >
                If merging messages with separate paths, e.g. population_of_individual_organisms->chemical_substance and chemical_substance->disease,
                the root order (["population_of_individual_organisms", "chemical_substance", "disease"]) of the two messages must be known in order to
                successfully merge their knowledge maps together. If every message has the same order, you don't care about their knowledge maps, or
                there is only one response, then this parameter is not required. If the parameter is not provided, then it will concatenate all each
                response's knowledge map.
        responses:
            '200':
                description: Message
                content:
                    application/json:
                        schema:
                            $ref: '#/definitions/Message'
            '500':
                description: An error was encountered
                content:
                    application/json:
                        schema:
                            $ref: '#/definitions/Error'
        """
        messages = request.json
        # Delegate all merging logic to TranQL's result merger.
        return self.response(SelectStatement.merge_results(messages))
class TranQLQuery(StandardAPIResource):
    """ TranQL Resource. """
    def post(self):
        """
        Query TranQL
        ---
        tags: [query]
        description: Execute a TranQL query.
        requestBody:
            name: query
            description: A valid TranQL program
            required: true
            content:
                text/plain:
                    schema:
                        type: string
                    example: >
                        select chemical_substance->gene->disease
                        from \"/graph/gamma/quick\"
                        where disease=\"asthma\"
        parameters:
            - in: query
              name: dynamic_id_resolution
              schema:
                type: boolean
              required: false
              default: false
              description: Specifies if dynamic id lookup of curies will be performed
            - in: query
              name: asynchronous
              schema:
                type: boolean
              required: false
              default: true
              description: Specifies if requests made by TranQL will be asynchronous.
        responses:
            '200':
                description: Message
                content:
                    application/json:
                        schema:
                            $ref: '#/definitions/Message'
            '500':
                description: An error was encountered
                content:
                    application/json:
                        schema:
                            $ref: '#/definitions/Error'
        """
        # self.validate (request)
        result = {}
        logging.debug(request.data)
        # The raw request body is the TranQL program text.
        query = request.data.decode('utf-8')
        # Boolean query flags arrive as strings; compare case-insensitively.
        dynamic_id_resolution = request.args.get('dynamic_id_resolution', 'False').upper() == 'TRUE'
        asynchronous = request.args.get('asynchronous', 'True').upper() == 'TRUE'
        logging.debug(f"--> query: {query}")
        tranql = TranQL(options={
            "dynamic_id_resolution": dynamic_id_resolution,
            "asynchronous": asynchronous,
            "registry": app.config.get('registry', False),
            # when testing new schema should be created as per the test case
            "recreate_schema": app.config.get('TESTING', True)
        })
        try:
            context = tranql.execute(query)  # , cache=True)
            result = context.mem.get('result', {})
            logger.debug(f" -- backplane: {context.mem.get('backplane', '')}")
            # Non-fatal request errors are surfaced as warnings on the result.
            if len(context.mem.get('requestErrors', [])) > 0:
                errors = self.handle_exception(context.mem['requestErrors'], warning=True)
                result.update(errors)
        except Exception as e:
            traceback.print_exc()
            # Combine the raised exception with any accumulated request errors.
            errors = [e, *tranql.context.mem.get('requestErrors', [])]
            result = self.handle_exception(errors)
        return self.response(result)
class AnnotateGraph(StandardAPIResource):
""" Request the message object to be annotated by the backplane and return the annotated message """
def post(self):
"""
Annotate Graph
---
tags: [query]
description: Annotate a message's knowledge graph via the GNBR decorator.
requestBody:
name: message
description: A KGS 0.1.0 compliant Message
required: true
content:
application/json:
schema:
$ref: '#/definitions/Message'
responses:
'200':
description: Message
content:
application/json:
schema:
$ref: '#/definitions/Message'
'500':
description: An error was encountered
content:
application/json:
schema:
$ref: '#/definitions/Error'
"""
tranql = TranQL()
messageObject = request.json
url = tranql.context.mem.get('backplane') + '/graph/gnbr/decorate'
logger.info(url)
resp = requests.post(
url=url,
json=messageObject,
headers={
'accept': 'text/plain'
}
)
data = resp.json()
for result in data['results']:
type = | |
contains regulators that are not accounted for in the selected kinetics."
anti_dir = os.path.join(directory, group_name, 'antimony', group_name + '_' + str(ind) + '.txt')
with open(anti_dir, 'w') as f:
f.write(ant_str)
else:
rl = []
with open(os.path.join(directory, group_name, 'networks', net[1])) as nl:
for i, line in enumerate(nl):
if i == 0:
rl.append(int(line.strip()))
else:
rl.append([])
line_split = line.strip().split(',')
for j, each in enumerate(line_split):
if j == 0:
rl[-1].append(int(each))
elif j == 5:
rl[-1].append([])
each_split = each[1:-1].split(':')
for elem in each_split:
if elem:
rl[-1][-1].append(elem)
else:
rl[-1].append([])
each_split = each[1:-1].split(':')
for elem in each_split:
if elem:
rl[-1][-1].append(int(elem))
ant_str = buildNetworks.get_antimony_script(rl, ic_params, kinetics, rev_prob, add_enzyme)
anti_dir = os.path.join(directory, group_name, 'antimony', group_name + '_' + str(ind) + '.txt')
with open(anti_dir, 'w') as f:
f.write(ant_str)
sbml_dir = os.path.join(directory, group_name, 'sbml', group_name + '_' + str(ind) + '.sbml')
antimony.loadAntimonyString(ant_str)
sbml = antimony.getSBMLString()
with open(sbml_dir, 'w') as f:
f.write(sbml)
antimony.clearPreviousLoads()
def _ensure_dir(path, clear=False):
    """Create *path* if absent; when *clear* is True, delete and recreate it."""
    if os.path.exists(path):
        if clear:
            shutil.rmtree(path)
            os.makedirs(path)
    else:
        os.makedirs(path)


def _existing_models(path):
    """Return the names of the regular files directly inside *path*."""
    return [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]


def linear(verbose_exceptions=False, output_dir='models', group_name='linear', overwrite=True, n_models=1, n_species=10,
           kinetics=None, add_enzyme=False, rev_prob=0, ic_params=None, net_plots=False):
    """
    Generates a collection of linear models.

    :param verbose_exceptions: Traceback for input errors are suppressed.
    :param output_dir: Output directory.
    :param group_name: Name of the group the models belong too and the directory they will be placed in.
    :param overwrite: Overwrite the models in output_dir/models/group_name.
    :param n_models: Number of models to produce.
    :param n_species: Number of species per model.
    :param kinetics: Describes the desired rate-laws and parameter ranges. Ultimately defaults to
        ['mass_action', 'loguniform', ['kf', 'kr', 'kc'], [[0.01, 100], [0.01, 100], [0.01, 100]]]
    :param add_enzyme: Add a multiplicative parameter to the rate-law that may be used for perturbation
        analysis.
    :param rev_prob: Describes the probability that a reaction is reversible.
    :param ic_params: Describes the initial condition sampling distributions. Ultimately defaults to ['uniform', 0, 10]
    :param net_plots: Generate network plots.
    """
    if net_plots and not found_pydot:
        print('The pydot package was not found and network figures will not be produced.')
    if kinetics is None:
        kinetics = ['mass_action', 'loguniform', ['kf', 'kr', 'kc'], [[0.01, 100], [0.01, 100], [0.01, 100]]]
    if rev_prob < 0 or rev_prob > 1:
        if not verbose_exceptions:
            sys.tracebacklimit = 0
        raise Exception('Your reversibility probability is not between 0 and 1')

    group_dir = os.path.join(output_dir, group_name)
    net_files = []
    anti_files = []
    sbml_files = []
    if overwrite:
        for sub in ('antimony', 'sbml', 'networks', 'net_figs', 'dot_files'):
            _ensure_dir(os.path.join(group_dir, sub), clear=True)
    else:
        for sub in ('antimony', 'sbml', 'networks', 'net_figs', 'dot_files'):
            _ensure_dir(os.path.join(group_dir, sub))
        # Only the antimony/sbml/networks directories hold per-model artifacts
        # whose indices must stay in sync.  (Bug fix: the net_figs and
        # dot_files listings previously clobbered net_files, so the
        # consistency check below compared the wrong file sets.)
        anti_files = _existing_models(os.path.join(group_dir, 'antimony'))
        sbml_files = _existing_models(os.path.join(group_dir, 'sbml'))
        net_files = _existing_models(os.path.join(group_dir, 'networks'))
    net_inds = [int(nf.split('_')[-1].split('.')[0]) for nf in net_files]
    anti_inds = [int(nf.split('_')[-1].split('.')[0]) for nf in anti_files]
    sbml_inds = [int(nf.split('_')[-1].split('.')[0]) for nf in sbml_files]
    if set(net_inds) != set(anti_inds) or set(anti_inds) != set(sbml_inds) or set(net_inds) != set(sbml_inds):
        if not verbose_exceptions:
            sys.tracebacklimit = 0
        raise Exception("There exists a discrepancy between the network, antimony, and sbml files.\n"
                        "Consider starting over and overwriting them all.")
    for i in range(n_models):
        rl, el = buildNetworks.generate_simple_linear(n_species)
        if not rl[0]:
            # Construction failed: record a placeholder so the index is not lost.
            ant_str = "Network construction failed on this attempt, consider revising your settings."
            anti_dir = os.path.join(output_dir, group_name, 'antimony', group_name + '_' + str(i) + '.txt')
            with open(anti_dir, 'w') as f:
                f.write(ant_str)
        else:
            # Persist the reaction list: first line is the species count, then
            # one row per reaction with parenthesized comma-separated groups.
            net_dir = os.path.join(output_dir, group_name, 'networks', group_name + '_' + str(i) + '.csv')
            with open(net_dir, 'w') as f:
                for j, each in enumerate(rl):
                    if j == 0:
                        f.write(str(each))
                    else:
                        for k, item in enumerate(each):
                            if k == 0:
                                f.write(str(item))
                            else:
                                f.write(',(')
                                f.write(','.join(str(every) for every in item))
                                f.write(')')
                        f.write('\n')
            if net_plots and found_pydot:
                edges = [('S' + str(each[0]), 'S' + str(each[1])) for each in el]
                graph = pydot.Dot(graph_type="digraph")
                graph.set_node_defaults(color='black', style='filled', fillcolor='#4472C4')
                for source, target in edges:
                    graph.add_edge(pydot.Edge(source, target))
                graph.write_png(os.path.join(output_dir, group_name, 'net_figs', group_name + '_' + str(i)
                                             + '.png'))
                graph.write(os.path.join(output_dir, group_name, 'dot_files', group_name + '_' + str(i) + '.dot'),
                            format='dot')
            ant_str = buildNetworks.get_antimony_script(rl, ic_params, kinetics, rev_prob, add_enzyme)
            anti_dir = os.path.join(output_dir, group_name, 'antimony', group_name + '_' + str(i) + '.txt')
            with open(anti_dir, 'w') as f:
                f.write(ant_str)
            # Round-trip through antimony to produce the SBML form.
            sbml_dir = os.path.join(output_dir, group_name, 'sbml', group_name + '_' + str(i) + '.sbml')
            antimony.loadAntimonyString(ant_str)
            sbml = antimony.getSBMLString()
            with open(sbml_dir, 'w') as f:
                f.write(sbml)
            antimony.clearPreviousLoads()
def cyclic(verbose_exceptions=False, output_dir='models', group_name='cyclic', overwrite=True, min_species=10,
max_species=20, n_cycles=1, n_models=1, kinetics=None, add_enzyme=False, rev_prob=0, ic_params=None,
net_plots=False):
"""
Generates a collection of cyclic models.
:param verbose_exceptions: Traceback for input errors are suppressed.
:param output_dir: Output directory.
:param group_name: Name of the group the models belong too and the directory they will be placed in.
:param overwrite: Overwrite the models in output_dir/models/group_name.
:param min_species: Minimum number of species per cycle.
:param max_species: Maximum number of species per cycle.
:param n_cycles: Number of cycles per model.
:param n_models: Number of models to produce.
:param kinetics: Describes the desired rate-laws and parameter ranges. Ultimately defaults to
['mass_action', 'loguniform', ['kf', 'kr', 'kc'], [[0.01, 100], [0.01, 100], [0.01, 100]]]
:param add_enzyme: Add a multiplicative parameter to the rate-law that may be used for perturbation
analysis.
:param rev_prob: Describes the probability that a reaction is reversible.
:param ic_params: Describes the initial condition sampling distributions. Ultimately defaults to ['uniform', 0, 10]
:param net_plots: Generate network plots.
"""
if net_plots and not found_pydot:
print('The pydot package was not found and network figures will not be produced.')
if kinetics is None:
kinetics = ['mass_action', 'loguniform', ['kf', 'kr', 'kc'], [[0.01, 100], [0.01, 100], [0.01, 100]]]
if rev_prob < 0 or rev_prob > 1:
if not verbose_exceptions:
sys.tracebacklimit = 0
raise Exception('Your reversibility probability is not between 0 and 1')
net_files = []
anti_files = []
sbml_files = []
if overwrite:
if os.path.exists(os.path.join(output_dir, group_name, 'antimony')):
shutil.rmtree(os.path.join(output_dir, group_name, 'antimony'))
os.makedirs(os.path.join(output_dir, group_name, 'antimony'))
else:
os.makedirs(os.path.join(output_dir, group_name, 'antimony'))
if os.path.exists(os.path.join(output_dir, group_name, 'sbml')):
shutil.rmtree(os.path.join(output_dir, group_name, 'sbml'))
os.makedirs(os.path.join(output_dir, group_name, 'sbml'))
else:
os.makedirs(os.path.join(output_dir, group_name, 'sbml'))
if os.path.exists(os.path.join(output_dir, group_name, 'networks')):
shutil.rmtree(os.path.join(output_dir, group_name, 'networks'))
os.makedirs(os.path.join(output_dir, group_name, 'networks'))
else:
os.makedirs(os.path.join(output_dir, group_name, 'networks'))
if os.path.exists(os.path.join(output_dir, group_name, 'net_figs')):
shutil.rmtree(os.path.join(output_dir, group_name, 'net_figs'))
os.makedirs(os.path.join(output_dir, group_name, 'net_figs'))
else:
os.makedirs(os.path.join(output_dir, group_name, 'net_figs'))
if os.path.exists(os.path.join(output_dir, group_name, 'dot_files')):
shutil.rmtree(os.path.join(output_dir, group_name, 'dot_files'))
os.makedirs(os.path.join(output_dir, group_name, 'dot_files'))
else:
os.makedirs(os.path.join(output_dir, group_name, 'dot_files'))
else:
if os.path.exists(os.path.join(output_dir, group_name, 'antimony')):
anti_files = [f for f in os.listdir(os.path.join(output_dir, group_name, 'antimony'))
if os.path.isfile(os.path.join(output_dir, group_name, 'antimony', f))]
else:
os.makedirs(os.path.join(output_dir, group_name, 'antimony'))
if os.path.exists(os.path.join(output_dir, group_name, 'sbml')):
sbml_files = [f for f in os.listdir(os.path.join(output_dir, group_name, 'sbml'))
if os.path.isfile(os.path.join(output_dir, group_name, 'sbml', f))]
else:
os.makedirs(os.path.join(output_dir, group_name, 'sbml'))
if os.path.exists(os.path.join(output_dir, group_name, 'networks')):
net_files = [f for f in os.listdir(os.path.join(output_dir, group_name, 'networks'))
if os.path.isfile(os.path.join(output_dir, group_name, 'networks', f))]
else:
os.makedirs(os.path.join(output_dir, group_name, 'networks'))
if os.path.exists(os.path.join(output_dir, group_name, 'net_figs')):
net_files = [f for f in os.listdir(os.path.join(output_dir, group_name, 'net_figs'))
if os.path.isfile(os.path.join(output_dir, group_name, 'net_figs', f))]
else:
os.makedirs(os.path.join(output_dir, group_name, 'net_figs'))
if os.path.exists(os.path.join(output_dir, group_name, 'dot_files')):
net_files = [f for f in os.listdir(os.path.join(output_dir, group_name, 'dot_files'))
if os.path.isfile(os.path.join(output_dir, group_name, 'dot_files', f))]
else:
os.makedirs(os.path.join(output_dir, group_name, 'dot_files'))
net_inds = [int(nf.split('_')[-1].split('.')[0]) for nf in net_files]
anti_inds = [int(nf.split('_')[-1].split('.')[0]) for nf in anti_files]
sbml_inds = [int(nf.split('_')[-1].split('.')[0]) for nf in sbml_files]
if set(net_inds) != set(anti_inds) or set(anti_inds) != set(sbml_inds) or set(net_inds) != set(sbml_inds):
if not verbose_exceptions:
sys.tracebacklimit = 0
raise Exception("There exists a discrepancy between the network, antimony, and sbml files.\n"
"Consider starting over and overwriting them all.")
for i in range(n_models):
rl, el = buildNetworks.generate_simple_cyclic(min_species, max_species, n_cycles)
if not rl[0]:
ant_str = "Network construction failed on this attempt, consider revising your settings."
anti_dir = os.path.join(output_dir, group_name, 'antimony', | |
wait for here, but that doesn't seem to work reliably.
t.join(0.0001)
for attr in ('stdout', 'stderr'):
s = getattr(self, attr, None)
if isinstance(s, Capture):
s.add_stream(getattr(p, attr))
if not async_:
logger.debug('about to wait for process')
p.wait()
finally:
self.process_ready.set()
logger.debug('returning %s (%s)', self, self.process)
return self
def wait(self, timeout=None):
"""
Wait for a command's underlying sub-process to complete. The timeout
parameter only applies for Python >= 3.3 and has no effect otherwise.
"""
self.process_ready.wait()
p = self.process
if not p: # pragma: no cover
logger.warning('No process found for %s', self)
result = None
else:
if _wait_has_timeout:
result = p.wait(timeout)
else:
result = p.wait()
return result
def terminate(self):
"""
Terminate a command's underlying subprocess.
.. versionadded:: 0.1.1
"""
self.process_ready.wait()
p = self.process
if not p: # pragma: no cover
raise ValueError('There is no subprocess')
p.terminate()
def kill(self):
"""
Kill a command's underlying subprocess.
.. versionadded:: 0.1.1
"""
self.process_ready.wait()
p = self.process
if not p: # pragma: no cover
raise ValueError('There is no subprocess')
p.kill()
def poll(self):
"""
Poll a command's underlying subprocess.
.. versionadded:: 0.1.1
"""
self.process_ready.wait()
p = self.process
if not p: # pragma: no cover
raise ValueError('There is no subprocess')
return p.poll()
@property
def returncode(self):
self.process_ready.wait()
return self.process.returncode
class Node(object):
    """
    A node in the AST built while parsing command lines: a plain attribute
    container whose repr is tuned to make the parser easier to debug.
    """

    def __init__(self, **kwargs):
        # Every keyword argument becomes an instance attribute.
        self.__dict__.update(kwargs)

    def __repr__(self):  # pragma: no cover
        attrs = dict(self.__dict__)
        kind = attrs.pop('kind')
        rendered = ['%s=%s' % (key, value) for key, value in sorted(attrs.items())]
        return '%sNode(%s)' % (kind.title(), ' '.join(rendered))
class CommandLineParser(object):
    """
    This class implements a fairly unsophisticated recursive descent parser for
    shell command lines as used in sh, bash and dash. On Windows, the cmd.exe
    command shell has limited compatibility
    """
    # The only multi-character control operators recognised by the grammar.
    permitted_tokens = ('&&', '||', '|&', '>>')

    def next_token(self):
        # Fetch the next token from the lexer and normalise it into a
        # (token_type, token_text, preceding_whitespace) triple.
        t = self.lex.get_token()
        if not t:
            tt = None
        else:
            tt = self.lex.token_type
            if tt in ('"', "'"):
                # A quoted string becomes a plain word with quotes stripped.
                tt = 'word'
                t = t[1:-1]
            elif tt == 'a':
                # Alphanumeric run: a token that parses as an int is a 'number'.
                try:
                    int(t)
                    tt = 'number'
                except ValueError:
                    tt = 'word'
            elif tt == 'c':
                # the shlex parser will return arbitrary runs of 'control'
                # characters, but only some runs will be valid for us. We
                # split into the valid runs and push all the others back,
                # keeping just one. Since all will have a token_type of
                # 'c', we don't need to worry about pushing the type back.
                if len(t) > 1:
                    valid = self.get_valid_controls(t)
                    t = valid.pop(0)
                    if valid:
                        for other in reversed(valid):
                            self.lex.push_token(other)
                # For control tokens, the token type is the token text itself.
                tt = t
        return tt, t, self.lex.preceding

    def get_valid_controls(self, t):
        # Split a run of control characters into operators, pairing adjacent
        # characters when the pair is a permitted two-character token.
        if len(t) == 1:
            result = [t]
        else:
            result = []
            last = None
            for c in t:
                if last is not None:
                    combined = last + c
                    if combined in self.permitted_tokens:
                        result.append(combined)
                    else:
                        result.append(last)
                        result.append(c)
                    last = None
                elif c not in ('>', '&', '|'):
                    result.append(c)
                else:
                    # Possible start of a two-character operator; decide on
                    # the next iteration.
                    last = c
            if last:
                result.append(last)
            # logger.debug('%s -> %s', t, result)
        return result

    def peek_token(self):
        # Return the type of the lookahead token, fetching it if necessary.
        if self.peek is None:
            self.peek = self.next_token()
        return self.peek[0]

    def consume(self, tt):
        # Advance one token; the consumed token must have the expected type.
        self.token = self.peek
        self.peek = self.next_token()
        if self.token[0] != tt:
            # BUG FIX: the message was previously built as
            # ValueError('consume: expected %r', tt) -- logging style --
            # which left the argument unformatted in the exception.
            raise ValueError('consume: expected %r' % tt)

    def parse(self, source, posix=None):
        """
        Parse a command line into an AST of Node objects and return its root.

        :param source: The command line to parse.
        :param posix: Whether to lex with posix conventions; defaults to
                      True on posix systems.
        """
        self.source = source
        parse_logger.debug('starting parse of %r', source)
        if posix is None:
            posix = os.name == 'posix'
        self.lex = shell_shlex(source, posix=posix, control=True)
        self.token = None
        self.peek = None
        self.peek_token()
        result = self.parse_list()
        return result

    def parse_list(self):
        # list := pipeline ((';' | '&') pipeline)*
        parts = [self.parse_pipeline()]
        tt = self.peek_token()
        while tt in (';', '&'):
            self.consume(tt)
            part = self.parse_pipeline()
            parts.append(Node(kind='sync', sync=tt))
            parts.append(part)
            tt = self.peek_token()
        if len(parts) == 1:
            node = parts[0]
        else:
            node = Node(kind='list', parts=parts)
        parse_logger.debug('returning %r', node)
        return node

    def parse_pipeline(self):
        # pipeline := logical (('&&' | '||') logical)*
        parts = [self.parse_logical()]
        tt = self.peek_token()
        while tt in ('&&', '||'):
            self.consume(tt)
            part = self.parse_logical()
            parts.append(Node(kind='check', check=tt))
            parts.append(part)
            tt = self.peek_token()
        if len(parts) == 1:
            node = parts[0]
        else:
            node = Node(kind='pipeline', parts=parts)
        parse_logger.debug('returning %r', node)
        return node

    def parse_logical(self):
        # logical := '(' list ')' | command (('|' | '|&') command)*
        tt = self.peek_token()
        if tt == '(':
            self.consume(tt)
            node = self.parse_list()
            self.consume(')')
        else:
            parts = [self.parse_command()]
            tt = self.peek_token()
            while tt in ('|', '|&'):
                last_part = parts[-1]
                # A stream may not be both piped and redirected, except for
                # the special stdout/stderr swap combination.
                if ((tt == '|' and 1 in last_part.redirects) or
                        (tt == '|&' and 2 in last_part.redirects)):
                    if last_part.redirects != SWAP_OUTPUTS:
                        raise ValueError(
                            'semantics: cannot redirect and pipe the '
                            'same stream')
                self.consume(tt)
                part = self.parse_command()
                parts.append(Node(kind='pipe', pipe=tt))
                parts.append(part)
                tt = self.peek_token()
            if len(parts) == 1:
                node = parts[0]
            else:
                node = Node(kind='logical', parts=parts)
        parse_logger.debug('returning %r', node)
        return node

    def add_redirection(self, node, fd, kind, dest):
        # Record a redirection of file descriptor fd on the command node.
        if fd in node.redirects:
            raise ValueError('semantics: cannot redirect stream %d twice' % fd)
        node.redirects[fd] = (kind, dest)

    def parse_command(self):
        # command := command_part+, merging words and redirections into one node.
        node = self.parse_command_part()
        tt = self.peek_token()
        while tt in ('word', 'number'):
            part = self.parse_command_part()
            node.command.extend(part.command)
            for fd, v in part.redirects.items():
                self.add_redirection(node, fd, v[0], v[1])
            tt = self.peek_token()
        parse_logger.debug('returning %r', node)
        if node.redirects != SWAP_OUTPUTS:
            # Only stdout (1) and stderr (2) may be redirected.
            d = dict(node.redirects)
            d.pop(1, None)
            d.pop(2, None)
            if d:
                raise ValueError('semantics: can only redirect stdout and '
                                 'stderr, not %s' % list(d.keys()))
        if sys.platform == 'win32':  # pragma: no cover
            # Resolve the executable (and possibly an interpreter) on Windows.
            from .utils import find_command
            cmd = find_command(node.command[0])
            if cmd:
                exe, cmd = cmd
                node.command[0] = cmd
                if exe:
                    node.command.insert(0, exe)
        return node

    def parse_command_part(self):
        # command_part := (word | number) redirection*
        node = Node(kind='command', command=[self.peek[1]], redirects={})
        if self.peek[0] == 'word':
            self.consume('word')
        else:
            self.consume('number')
        tt = self.peek_token()
        while tt in ('>', '>>'):
            num = 1  # default: redirect stdout
            if self.peek[2] == '':
                # > or >> seen without preceding whitespace. So see if the
                # last token is a positive integer. If it is, assume it's
                # an fd to redirect and pop it, else leave it in as part of
                # the command line.
                try:
                    try_num = int(node.command[-1])
                    if try_num > 0:
                        num = try_num
                        node.command.pop()
                except ValueError:
                    pass
            redirect_kind = tt
            self.consume(tt)
            tt = self.peek_token()
            if tt not in ('word', '&'):
                raise ValueError('syntax: expecting filename or &')
            if tt == 'word':
                redirect_target = self.peek[1]
                self.consume(tt)
            else:
                # '&n' duplicates another file descriptor.
                self.consume('&')
                if self.peek_token() != 'number':
                    raise ValueError('syntax: number expected after &')
                n = int(self.peek[1])
                redirect_target = ('&', n)
                self.consume('number')
            self.add_redirection(node, num, redirect_kind, redirect_target)
            tt = self.peek_token()
        parse_logger.debug('returning %r', node)
        return node
class Pipeline(WithMixin):
"""
This class represents a pipeline of commands.
:param source: The command line.
:type source: str
:param posix: Whether Posix conventions are used in the lexer.
:type posix: bool
:param kwargs: Whatever you might pass to :class:`subprocess.Popen'.
"""
def __init__(self, source, posix=None, **kwargs):
if posix is None:
posix = os.name == 'posix'
is_shell = kwargs.get('shell', False)
if isinstance(source, (list, tuple)) or is_shell:
if is_shell:
self.source = source
else:
self.source = ' '.join(source)
t = Node(kind='command', command=source, redirects={})
else:
self.source = source
t = CommandLineParser().parse(source, posix=posix)
self.tree = t
self.last = self.find_last_command(t)
self.events = []
self.kwargs = kwargs
self.stdout = kwargs.pop('stdout', None)
self.stderr = kwargs.pop('stderr', None)
self.lock = threading.RLock()
self.commands = []
def find_last_command(self, node):
"""
Find the last command node in a parse sub-tree.
:param node: The root of the sub-tree to search.
:type node: An AST node from the parser.
"""
if not hasattr(node, 'parts'):
result = node
else:
result = self.find_last_command(node.parts[-1])
assert result.kind == 'command'
return result
def run_node_in_thread(self, node, input, async_):
"""
Run a node in a separate thread.
A thread is created and the run_node method is run with the
specified arguments in that thread.
"""
# When the node is run in a separate thread, we need
# a sync point for when all the commands have been created
# for that node - even when there are delays because of e.g.
# sleep commands or other time-consuming commands. That's
# what these events are for - they're set at the end of
# run_node, and waited on in the pipeline's wait and run
# methods.
e = threading.Event()
with self.lock:
self.events.append(e)
t = threading.Thread(target=self.run_node, args=(node, input,
async_, e))
t.daemon = True
logger.debug('thread %s started to run node: %s', t.name, node)
t.start()
def | |
from matplotlib.pyplot import plot
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
import os
import csv
from tripedal_kinematics import TripedalKinematics
# Precomputed sine/cosine of +120 and -120 degrees: the rotations that map
# leg A's direction onto legs B and C (see InitProperty below).
COS120 = math.cos(math.pi * 2 / 3)
SIN120 = math.sin(math.pi * 2 / 3)
COS240 = math.cos(-math.pi * 2 / 3)
SIN240 = math.sin(-math.pi * 2 / 3)
class WalkGenerator():
    def __init__(self):
        # No state is initialised here; SetWalkParameter() and
        # InitProperty() must be called before generating points.
        super().__init__()
    def SetWalkParameter(self,
                         moveDirection: float,
                         bodyMovePointsCount: int,
                         legMovePointsCount: int,
                         stepLength: float,
                         stepHeight: float,
                         legXYDistanceFromCenter: float,
                         sit: float,
                         swayShift: int,
                         swayRadiusMin: float,
                         swayRadiusMax: float,
                         liftPush=0.4,
                         landPull=0.6,
                         damping=0,
                         incline=0):
        """
        Store the walk-generation parameters on the instance.

        I recommend adjusting these values while checking the graph.
        This is not an algorithm created through any research, it is just an
        implementation of my idea.
        """
        self._moveDirection = moveDirection  # angle of direction. leg A direction is 0, leg B direction is pi * 2/3
        # (the angle along which the feet move)
        self._bodyMoveCount = bodyMovePointsCount  # number of points when the legs are not floated.
        # (number of points during three-point support: all three feet on the
        # ground while only the body moves)
        self._legMoveCount = legMovePointsCount  # number of points when one leg is floating
        # (number of points while a foot is in the air)
        self._l = stepLength  # The distance of one step.
        # (stride length)
        self._h = stepHeight  # The height of the step.
        # Height the foot is lifted. If the value will be adjusted after the
        # motion is generated (via real-time sensor values or a separate
        # algorithm), setting a height here is fine; if this code is used
        # as-is, a value of 0 is recommended.
        self._legToCenter = legXYDistanceFromCenter  # The value of how far the foot is from the central axis.
        # If 0, the three feet are clustered in the middle (of course not recommended).
        # Increasing the value increases the distance of three feet.
        self._sit = sit  # TODO(review): purpose undocumented in the original;
        # it is used as the base Z value of the foot/body vectors — verify.
        self._swayShift = swayShift  # Adjust the timing of sway and foot lift.
        # If this value is 1, the foot is lifted when the body is swayed to the maximum
        # opposite direction of the moving foot.
        # If this value is 0, the foot is floated when the body is swayed maximum.
        # around 0.5 is recommended. do not set under -0.5 and over 1.5
        # (if 0, body sway peaks as the foot lifts; if 1, as it lands)
        self._liftPush = liftPush  # push the lifting foot backward when lifting the foot to gain momentum. 0.2 ~ 1.0 is recommended.
        # (increasing this pulls the foot slightly backward right after
        # lift-off; 0 gives a pure sine trajectory, larger values round it out)
        self._landPull = landPull  # Before put the foot down, go forward more and pull back when landing.
        # (increasing this pushes the foot forward just before landing;
        # 0 lands in a pure sine shape, larger values round it out;
        # 0.2 ~ 1.0 is recommended)
        self._swayRadiusMin = swayRadiusMin  # minimum length to sway
        self._swayRadiusMax = swayRadiusMax  # maximum length to sway in the opposite direction of the moving foot.
        self._damping = damping  # // not implemented yet
        self._incline = incline  # tangent angle of incline // not implemented yet
        # To be removed:
        '''
        self._swayRadiusMin
        self._swayRadiusMax
        '''
def InitProperty(self):
cosMov = math.cos(self._moveDirection)
sinMov = math.sin(self._moveDirection)
self._InitialPointA = [self._legToCenter, 0, 0]
rx = COS120 * self._legToCenter
ry = SIN120 * self._legToCenter
self._InitialPointB = [rx, ry, 0]
rx = COS240 * self._legToCenter
ry = SIN240 * self._legToCenter
self._InitialPointC = [rx, ry, 0]
self._cycleLength = (self._bodyMoveCount * 3 + self._legMoveCount * 3)
self._cycleCount = int(0)
self._notWalkPoitCount = (self._bodyMoveCount * 3 + self._legMoveCount * 2)
self._liftedVectorA = [0.0, 0.0, 0.0]
self._liftedVectorB = [0.0, 0.0, 0.0]
self._liftedVectorC = [0.0, 0.0, 0.0]
self._puttedVectorA = [0.0, 0.0, 0.0]
self._puttedVectorB = [0.0, 0.0, 0.0]
self._puttedVectorC = [0.0, 0.0, 0.0]
self._targetToPutVectorA = [0.0, 0.0, 0.0]
self._targetToPutVectorB = [0.0, 0.0, 0.0]
self._targetToPutVectorC = [0.0, 0.0, 0.0]
self._moveVectorA = [0.0, 0.0, 0.0]
self._moveVectorB = [0.0, 0.0, 0.0]
self._moveVectorC = [0.0, 0.0, 0.0]
self._resultVectorA = [0.0, 0.0, 0.0]
self._resultVectorB = [0.0, 0.0, 0.0]
self._resultVectorC = [0.0, 0.0, 0.0]
self._swayVector = [0.0, 0.0, self._sit]
self._swayLength = 0.0
self._dragVectorChangeSpeed = 0
self._dragVectorChangeSpeedMax = 3.0
self._dragVectorChanged = False
self._dragVectorMult = (3 * self._bodyMoveCount + 2 * self._legMoveCount) / (2 * self._bodyMoveCount +
2 * self._legMoveCount)
self._dragVectorX = 0.0
self._dragVectorY = 0.0
self._dragVectorX_target = 0.0
self._dragVectorY_target = 0.0
def MakeNextPoint(self):
isThreeSupport = None # bool
progThreeSupport = None # float
progFloatX = None # float
progDragA = None # float
progDragB = None # float
progDragC = None # float
FloatingLegVectorX = None # float
FloatingLegVectorZ = None # float
i = self._cycleCount % (self._legMoveCount + self._bodyMoveCount)
if i >= self._bodyMoveCount:
# foot lift
progFloatX = (i + 1 - self._bodyMoveCount) / self._legMoveCount # 0 ~ 1
isThreeSupport = False
else:
# three foots suport
progThreeSupport = (i + 1) / self._bodyMoveCount # 0 ~ 1
isThreeSupport = True
cyclecountA = (self._cycleCount + self._bodyMoveCount * 2 + self._legMoveCount * 2) % self._cycleLength
cyclecountB = (self._cycleCount + self._bodyMoveCount + self._legMoveCount) % self._cycleLength
cyclecountC = (self._cycleCount) % self._cycleLength
cosMov = math.cos(self._moveDirection)
sinMov = math.sin(self._moveDirection)
difX = self._dragVectorX_target - self._dragVectorX
difY = self._dragVectorY_target - self._dragVectorY
distXY = math.sqrt(difX * difX + difY * difY)
if (isThreeSupport == True):
if progThreeSupport < 0.7 and progThreeSupport > 0.3:
dragVecX = -self._l * self._dragVectorMult * cosMov
dragVecY = -self._l * self._dragVectorMult * sinMov
# if target drag vector changed
if (self._dragVectorX_target != dragVecX or self._dragVectorY_target != dragVecY):
#change target drag vector
self._dragVectorX_target = -self._l * self._dragVectorMult * cosMov
self._dragVectorY_target = -self._l * self._dragVectorMult * sinMov
difX = self._dragVectorX_target - self._dragVectorX
difY = self._dragVectorY_target - self._dragVectorY
distXY = math.sqrt(difX * difX + difY * difY)
if (distXY > 0):
count = math.ceil(
(distXY / self._dragVectorChangeSpeedMax) / (self._bodyMoveCount + self._legMoveCount))
self._dragVectorChangeSpeed = distXY / ((self._bodyMoveCount + self._legMoveCount) * count)
else:
self._dragVectorChangeSpeed = 0.0
else:
t = progFloatX # 0 ~ 1
sin_tpi = math.sin(t * math.pi)
x = (2 * t + (1 - t) * self._liftPush * -sin_tpi + t * self._landPull * sin_tpi) / 2 #0~1
FloatingLegVectorX = x
FloatingLegVectorZ = sin_tpi * self._h
if (distXY > 0):
if (distXY >= self._dragVectorChangeSpeed):
if difX != 0.0:
vecx = self._dragVectorChangeSpeed * difX / distXY
self._dragVectorX = self._dragVectorX + vecx
if difY != 0.0:
vecy = self._dragVectorChangeSpeed * difY / distXY
self._dragVectorY = self._dragVectorY + vecy
else:
self._dragVectorX = self._dragVectorX_target
self._dragVectorY = self._dragVectorY_target
# A
if (cyclecountA < self._notWalkPoitCount):
# drag
progDragA = float(cyclecountA + 1) / self._notWalkPoitCount
self._moveVectorA[0] = self._moveVectorA[0] + self._dragVectorX / self._notWalkPoitCount
self._moveVectorA[1] = self._moveVectorA[1] + self._dragVectorY / self._notWalkPoitCount
self._moveVectorA[2] = self._sit
if (progDragA == 1.0):
# ready to float
self._liftedVectorA[0] = self._moveVectorA[0]
self._liftedVectorA[1] = self._moveVectorA[1]
self._targetToPutVectorA[0] = -(self._dragVectorX / 2)
self._targetToPutVectorA[1] = -(self._dragVectorY / 2)
else:
# float
progFloatA = progFloatX
self._moveVectorA[0] = self._targetToPutVectorA[0] * FloatingLegVectorX + (
1 - FloatingLegVectorX) * self._liftedVectorA[0]
self._moveVectorA[1] = self._targetToPutVectorA[1] * FloatingLegVectorX + (
1 - FloatingLegVectorX) * self._liftedVectorA[1]
self._moveVectorA[2] = self._sit + FloatingLegVectorZ
if (progFloatX == 1.0):
# put
self._puttedVectorA[0] = self._moveVectorA[0]
self._puttedVectorA[1] = self._moveVectorA[1]
self._puttedVectorA[2] = self._moveVectorA[2]
# B
if (cyclecountB < self._notWalkPoitCount):
# drag
progDragB = float(cyclecountB + 1) / self._notWalkPoitCount
self._moveVectorB[0] = self._moveVectorB[0] + self._dragVectorX / self._notWalkPoitCount
self._moveVectorB[1] = self._moveVectorB[1] + self._dragVectorY / self._notWalkPoitCount
self._moveVectorB[2] = self._sit
if (progDragB == 1.0):
# ready to float
self._liftedVectorB[0] = self._moveVectorB[0]
self._liftedVectorB[1] = self._moveVectorB[1]
self._targetToPutVectorB[0] = -(self._dragVectorX / 2)
self._targetToPutVectorB[1] = -(self._dragVectorY / 2)
else:
# float
progFloatB = progFloatX
self._moveVectorB[0] = self._targetToPutVectorB[0] * FloatingLegVectorX + (
1 - FloatingLegVectorX) * self._liftedVectorB[0]
self._moveVectorB[1] = self._targetToPutVectorB[1] * FloatingLegVectorX + (
1 - FloatingLegVectorX) * self._liftedVectorB[1]
self._moveVectorB[2] = self._sit + FloatingLegVectorZ
if (progFloatX == 1.0):
# put
self._puttedVectorB[0] = self._moveVectorB[0]
self._puttedVectorB[1] = self._moveVectorB[1]
self._puttedVectorB[2] = self._moveVectorB[2]
# C
if (cyclecountC < self._notWalkPoitCount):
# drag
progDragC = float(cyclecountC + 1) / self._notWalkPoitCount
self._moveVectorC[0] = self._moveVectorC[0] + self._dragVectorX / self._notWalkPoitCount
self._moveVectorC[1] = self._moveVectorC[1] + self._dragVectorY / self._notWalkPoitCount
self._moveVectorC[2] = self._sit
if (progDragC == 1.0):
# ready to float
self._liftedVectorC[0] | |
<filename>dbcArchives/2021/000_0-sds-3-x-projects/student-project-17_group-TowardsScalableTDA/00_introduction.py
# Databricks notebook source
# MAGIC %md
# MAGIC ScaDaMaLe Course [site](https://lamastex.github.io/scalable-data-science/sds/3/x/) and [book](https://lamastex.github.io/ScaDaMaLe/index.html)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC # Density Estimation via Voronoi Diagrams in High Dimensions
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC <NAME> and <NAME>
# MAGIC
# MAGIC [Video of project presentation](https://drive.google.com/file/d/14E_igECN6hDZieWNn9VVTepCo5mu-rzy/view?usp=sharing)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Introduction
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC **Density estimation** is a wide sub-area of statistics, tasked with understanding an underlying probability distribution of a given set of points, sampled from an unknown distribution. It can be used as a way of data investigation, like determining the location of low- and high-density regions in data, clusters and outliers, as well as for visualization purposes.
# MAGIC
# MAGIC A histogram can be considered as a simple density estimator. Other well-known methods include:
# MAGIC - a k-nearest-neighbor density estimator, which describes the density *p()* at a point *x* as $$p(x) \cong \frac{1}{d_k(x)}$$
# MAGIC where d_k(x) is the distance to the *k*th nearest neighbor of *x*;
# MAGIC - a kernel density estimator, which requires a selection of a kernel probability distribution *K* and a bandwidth *h* and essentially places the distributions at the data points, giving the density estimation
# MAGIC $$p(x) \cong \sum_i K(\frac{x - x_i}{h})$$
# MAGIC
# MAGIC All of the mentioned methods are sensitive to parameter selection, such as choosing the right number of neighbors or a fitting bandwidth.
# COMMAND ----------
# MAGIC %md
# MAGIC **Voronoi diagrams** are widely used in many areas, including computer science, and provide a natural cell decomposition of space based on the nearest-neighbor rule. For a given data point *x*, its corresponding cell contains all the points of the metric space, for which *x* is the closest point among all in the dataset.
# MAGIC
# MAGIC An example of a 2D Voronoi diagram built over a set of points sampled from a normal distribution can be seen below in the methodology part.
# MAGIC
# MAGIC One of the biggest drawbacks of Voronoi diagrams is their geometric complexity, which grows exponentially with dimensionality and essentially prevents their exact computation in dimensions above 6 for a reasonable number of points. In the worst case, the number of geometric elements of the diagram (such as Voronoi vertices, edges and polyhedra of different dimensions that arise on the cell boundaries) grows as
# MAGIC
# MAGIC $$O(n^{\lceil{d/2}\rceil})$$
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC **Our method.**
# MAGIC In this work, we use some intuition about the Voronoi diagrams to develop a new method of density estimation. In addition, we apply a methodology from our previous work which allows one to work with Voronoi diagrams in high dimensions without their explicit construction.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Methodology
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC **Intuition:** if we construct a Voronoi diagram over a set of points sampled from an unknown distribution then Voronoi cells in regions with higher density will be of a smaller *size*.
# MAGIC
# MAGIC Consider the image below, which depicts a Voronoi diagram in a two-dimensional space built over points sampled from a Gaussian distribution. Voronoi cells in the center of the distribution appear naturally smaller in comparison with other cells, and the cell size increases when we move away from the center.
# MAGIC
# MAGIC <img width=400pt src="files/group17/images/voronoi_gaussian.png"/>
# MAGIC
# MAGIC This intuition follows, in a way, a one-nearest-neighbor density estimator: the distance *d* to the nearest neighbor is inversely proportional to the estimated density of the point, and at the same time, a ball of radius *d/2* centered at the query point always fits into (and touches the boundary of) the Voronoi cell.
# MAGIC
# MAGIC On the discussed image, one of the cells is marked with a blue color. Assume that the point inside that cell is our query point, at which we want to understand the density, and all other points are the training (unlabeled) data that provides information about the density. Then, let us try to find a reasonable approximation of the density in a form of
# MAGIC
# MAGIC $$p(x) = \frac{c}{size(Cell(x))}$$
# MAGIC
# MAGIC where *c* is some constant, *Cell* denotes the Voronoi cell of *x*, and *size* is some measure of a cell.
# MAGIC
# MAGIC Note: at any moment, the Voronoi diagram consists of only one query point and all dataset points.
# COMMAND ----------
# MAGIC %md
# MAGIC **Volume function**
# MAGIC
# MAGIC Let us assume for a while that cell's geometry is known to us. What would be a natural way to describe the size of the cell?
# MAGIC
# MAGIC Perhaps, one of the first ideas that comes to mind is to use the cell's *volume* as a size measure. Here we run into an issue of infinite cells, whose volume would also be infinite. Potentially, this could be resolved by computing a weighted volume with an integrable weight function that rapidly decays at infinity.
# MAGIC
# MAGIC However, instead, we propose a way to describe the size via *volume functions*, inspired by how alpha-complexes are motivated and constructed in the area of topological data analysis, where we consider a set of balls of increasing radius intersected with the Voronoi cells:
# MAGIC
# MAGIC <img width=250pt src="files/group17/images/alpha_1.png"/>
# MAGIC <img width=250pt src="files/group17/images/alpha_2.png"/>
# MAGIC <img width=250pt src="files/group17/images/alpha_3.png"/>
# MAGIC
# MAGIC We define the volume function as follows:
# MAGIC
# MAGIC $$\overline{Vol}_d(x)(r) = \frac{Vol_d(Cell(x) \cap B_r(x))}{Vol_d(B_r)}$$
# MAGIC
# MAGIC Here, *r* is a positive radius, *Vol()* denotes the standard d-dimensional volume, and *B_r(x)* is a d-dimensional ball of radius *r* centered at *x*. The volume function of *x* returns a function that takes a radius *r* and returns a ratio of the volume of the intersection of the ball with the cell to the whole volume of the ball. Clearly, at the limit to zero, the ratio is equal to 1 (when the ball fully fits inside the cell), but starts to decrease as soon as parts of the ball start to leave the boundary.
# MAGIC
# MAGIC Below are two images. On the left, a simple rectangular Voronoi cell with a point, generating it. On the right, a depiction of the volume function for this cell.
# MAGIC
# MAGIC <img width=300pt src="files/group17/images/rect.png"/>
# MAGIC <img width=300pt src="files/group17/images/rect_vol.png"/>
# MAGIC
# MAGIC If we go into higher dimensions, we will not be able to see the steps that the function makes anymore. Below is an example, which we approximated (with a method described below) on MNIST data (784-dimensional) some time ago of volume functions for different data points:
# MAGIC
# MAGIC <img width=400pt src="files/group17/images/mnist_vol.png"/>
# MAGIC
# MAGIC On the picture above, we can guess that, for example, the point with the light-blue volume curve is located in a lower-density region than other given points, based on the fact that its volume function is greater than other functions at every radius.
# MAGIC
# MAGIC A couple of things to consider here.
# MAGIC 1. If a cell is infinite, then its volume function will not tend to 0 at infinity. Instead, it will tend to the angular size of this infinity.
# MAGIC 2. If one cell can be placed inside another cell (identifying their generator points and allowing arbitrary rotation), then the first cell's volume function will lie below the second cell's volume function.
# MAGIC
# MAGIC The second bullet point provides an idea that maybe we want to integrate this volume functions and compare them: a function with a larger integral would denote a lower-density region. At the same time, the first bullet point tells us that the functions are not always integrable. Thus, in this project we do the following modifications: we do not consider the directions of the balls which end | |
import hashlib
import json
import os
import shutil
import time
from collections import Counter
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple, Union
import numpy as np
import pandas as pd
from fire import Fire
from pydantic import BaseModel
from pydantic.main import Extra
from tqdm import tqdm
Span = Tuple[int, int]  # (start, end) token span; end is exclusive, python-slice style (see check_overlap)
BasicValue = Union[str, int, bool, float]  # scalar JSON-compatible leaf value
def train_test_split(*args, **kwargs) -> list:
    """Minimal stand-in for ``sklearn.model_selection.train_test_split``.

    The previous version was an unimplemented stub that raised
    NotImplementedError, which broke ``RelationData.train_test_split``.
    This version splits each given sequence into a train and a test part
    using one shared shuffled index order, so parallel sequences stay
    aligned after the split.

    :param args: one or more sequences, all of the same length.
    :param kwargs: supports ``test_size`` (int count, or float fraction of
        the data rounded up, default 0.25), ``random_state`` (seed for a
        deterministic shuffle) and ``shuffle`` (default True).
    :return: flat list ``[train_0, test_0, train_1, test_1, ...]``.
    :raises ValueError: if no sequences are given or lengths differ.
    """
    # Local imports: the module header does not import math/random.
    import math
    import random

    if not args:
        raise ValueError("at least one sequence is required")
    size = len(args[0])
    if any(len(seq) != size for seq in args):
        raise ValueError("all sequences must have the same length")
    test_size = kwargs.get("test_size", 0.25)
    # Mirror sklearn: a float test_size is a fraction of the data (ceil).
    n_test = test_size if isinstance(test_size, int) else int(math.ceil(size * test_size))
    order = list(range(size))
    if kwargs.get("shuffle", True):
        random.Random(kwargs.get("random_state")).shuffle(order)
    test_idx, train_idx = order[:n_test], order[n_test:]
    result = []
    for seq in args:
        items = list(seq)
        result.append([items[i] for i in train_idx])
        result.append([items[i] for i in test_idx])
    return result
def find_sublist_index(items: list, query: list):
    """Return the start index of the first occurrence of ``query`` as a
    contiguous sublist of ``items``, or -1 when it never occurs."""
    width = len(query)
    candidates = (
        start
        for start in range(len(items) - width + 1)
        if items[start : start + width] == query
    )
    return next(candidates, -1)
def test_find_sublist_query():
    """Smoke-print find_sublist_index over a few sample queries."""
    sample = [1, 6, 3, 5, 7]
    print(dict(items=sample))
    queries = [[6], [7], [6, 3], [3, 5, 7], [7, 5]]
    for q in queries:
        print(dict(query=q, i=find_sublist_index(sample, q)))
def find_sublist_indices(items: list, query: list) -> List[int]:
    """Indices covered by the first occurrence of ``query`` inside
    ``items``; empty list when ``query`` does not occur."""
    start = find_sublist_index(items, query)
    if start < 0:
        return []
    return [start + offset for offset in range(len(query))]
def test_find_sublist_indices():
    """Assert the documented example for find_sublist_indices."""
    sample = [1, 6, 3, 5, 7]
    expected = [1, 2, 3]
    assert find_sublist_indices(sample, [6, 3, 5]) == expected
    print(dict(test_find_sublist_indices=True))
class WikiProperty(BaseModel):
    """One Wikidata property row, as returned by the SPARQL query below.

    # https://query.wikidata.org
    # All properties with descriptions and aliases and types
    SELECT ?p ?pType ?pLabel ?pDescription ?pAltLabel WHERE {
        ?p wikibase:propertyType ?pType .
        SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
    }
    ORDER BY ASC(xsd:integer(STRAFTER(STR(?p), 'P')))
    """

    p: str  # full entity URI, e.g. "http://www.wikidata.org/entity/P6"
    pType: str
    pLabel: str
    pDescription: str
    pAltLabel: str  # comma-separated aliases; may be empty

    @property
    def id(self) -> str:
        """Short property id (e.g. "P6"), taken from the tail of the URI."""
        return self.p.split("/")[-1]

    @property
    def aliases(self) -> List[str]:
        """Sorted unique aliases.

        Fix: blank entries are dropped — previously an empty ``pAltLabel``
        produced the single bogus alias ``""`` (since ``"".split(",")``
        yields ``[""]``).
        """
        names = [n.strip() for n in self.pAltLabel.split(",")]
        return sorted({n for n in names if n})
def load_wiki_relation_map(path: str) -> Dict[str, WikiProperty]:
    """Read the property CSV at ``path`` and index its rows by property id."""
    rows = pd.read_csv(path).to_dict(orient="records")
    properties = (WikiProperty(**row) for row in rows)
    return {prop.id: prop for prop in properties}
def load_label_to_properties(
    path: str, use_alias: bool = True
) -> Dict[str, WikiProperty]:
    """Map surface names to Wikidata properties.

    Primary labels are inserted first and always win; aliases (when
    ``use_alias`` is True) only fill names not already claimed. Behavior is
    unchanged — the manual ``not x in mapping.keys()`` membership tests are
    replaced with the idiomatic ``dict.setdefault``.

    :param path: CSV file readable by :func:`load_wiki_relation_map`.
    :param use_alias: also index properties under each of their aliases.
    """
    relation_map = load_wiki_relation_map(path)
    mapping: Dict[str, WikiProperty] = {}
    for p in relation_map.values():
        mapping.setdefault(p.pLabel, p)
    if use_alias:
        for p in relation_map.values():
            for alias in p.aliases:
                mapping.setdefault(alias, p)
    return mapping
def test_load_wiki():
    """Print the first few properties (and their aliases) from the CSV."""
    mapping = load_wiki_relation_map("data/wiki_properties.csv")
    sample = list(mapping.items())[:3]
    for key, prop in sample:
        print(dict(k=key, v=prop, aliases=prop.aliases))
class DynamicModel(BaseModel):
    """Pydantic base that allows arbitrary (non-pydantic) field types and
    re-validates fields on assignment."""
    class Config:
        arbitrary_types_allowed = True
        validate_assignment = True
class StrictModel(BaseModel):
    """Strict pydantic base: unknown fields are rejected and instances are
    frozen (immutable) after construction."""
    class Config:
        extra = Extra.forbid
        frozen = True
        validate_assignment = True
def compute_macro_PRF(
    predicted_idx: np.ndarray, gold_idx: np.ndarray, i=-1, empty_label=None
) -> Tuple[float, float, float]:
    # https://github.com/dinobby/ZS-BERT/blob/master/model/evaluation.py
    """
    Macro-averaged precision, recall and F1 over relation labels.

    Follows Sorokin and Gurevych (https://www.aclweb.org/anthology/D17-1188.pdf);
    code borrowed from:
    https://github.com/UKPLab/emnlp2017-relation-extraction/blob/master/relation_extraction/evaluation/metrics.py

    Only the first ``i`` predictions are scored (all of them when i == -1);
    ``empty_label`` is excluded from the label set.
    """
    if i == -1:
        i = len(predicted_idx)
    labels = set(gold_idx) - {empty_label}
    total_prec = 0.0
    total_rec = 0.0
    for label in labels:
        mask = predicted_idx[:i] == label
        true_pos = len((predicted_idx[:i][mask] == gold_idx[:i][mask]).nonzero()[0])
        pred_count = len(mask.nonzero()[0])
        gold_count = len((gold_idx == label).nonzero()[0])  # counted over the FULL gold array
        total_prec += (true_pos / pred_count) if pred_count > 0 else 0
        total_rec += true_pos / gold_count
    # Precision is averaged over the distinct predicted labels, recall over
    # the gold label set — mirroring the borrowed reference implementation.
    macro_prec = total_prec / len(set(predicted_idx[:i]))
    macro_rec = total_rec / len(labels)
    f1 = 0.0
    if (macro_rec + macro_prec) > 0:
        f1 = 2.0 * macro_prec * macro_rec / (macro_prec + macro_rec)
    return macro_prec, macro_rec, f1
def test_compute_prf():
    """Sanity-run compute_macro_PRF on a tiny hand-made example."""
    predicted = np.array([0, 0, 0, 0, 0])
    gold = np.array([0, 0, 1, 1, 0])
    print(compute_macro_PRF(predicted, gold))
def glob_rmtree(folder: str, pattern: str, verbose=True):
    """Recursively delete every path under ``folder`` matching the glob
    ``pattern``, optionally printing each deletion."""
    for match in Path(folder).glob(pattern):
        shutil.rmtree(match)
        if not verbose:
            continue
        print(dict(rmtree=match))
def test_glob_rmtree():
    """Create a scratch folder and verify glob_rmtree removes it."""
    scratch = "tmp/test_glob_rmtree"
    Path(scratch).mkdir(exist_ok=False, parents=True)
    glob_rmtree("tmp", "test_glob*")
def hash_text(x: str) -> str:
    """Hex MD5 digest of the UTF-8 encoding of ``x`` (stable content key,
    not for security)."""
    digest = hashlib.md5(x.encode())
    return digest.hexdigest()
def check_overlap(a: Span, b: Span) -> bool:
    """True when spans ``a`` and ``b`` share at least one position.

    Ends are exclusive, python-slice style. Each clause tests whether one
    span's first or last position falls inside the other span (this keeps
    the original's handling of zero-length spans intact).
    """
    clauses = [
        a[0] <= b[0] < a[1],
        a[0] <= b[1] - 1 < a[1],
        b[0] <= a[0] < b[1],
        b[0] <= a[1] - 1 < b[1],
    ]
    return any(clauses)
class RelationSentence(BaseModel):
    """A sentence annotated with one (head, label, tail) relation, where
    ``head`` and ``tail`` are lists of token indices into ``tokens``."""

    tokens: List[str]
    head: List[int]
    tail: List[int]
    label: str
    head_id: str = ""
    tail_id: str = ""
    label_id: str = ""
    error: str = ""
    raw: str = ""
    score: float = 0.0
    zerorc_included: bool = True

    def as_tuple(self) -> Tuple[str, str, str]:
        """(head text, label, tail text), spans joined with spaces."""
        head_text = " ".join(self.tokens[i] for i in self.head)
        tail_text = " ".join(self.tokens[i] for i in self.tail)
        return head_text, self.label, tail_text

    def as_line(self) -> str:
        """Serialize to a single newline-terminated JSON line."""
        return self.json() + "\n"

    def is_valid(self) -> bool:
        """Valid when tokens/head/tail/label are all non-empty and neither
        span contains the -1 "not found" marker."""
        required = [self.tokens, self.head, self.tail, self.label]
        if any(len(part) == 0 for part in required):
            return False
        return all(-1 not in span for span in (self.head, self.tail))

    @property
    def text(self) -> str:
        """The plain sentence: tokens joined by single spaces."""
        return " ".join(self.tokens)

    @classmethod
    def from_spans(cls, text: str, head: str, tail: str, label: str, strict=True):
        """Build a sentence by locating ``head`` and ``tail`` inside ``text``."""
        words = text.split()
        sentence = cls(
            tokens=words,
            head=find_span(head, words),
            tail=find_span(tail, words),
            label=label,
        )
        if strict:
            assert sentence.is_valid(), (head, label, tail, text)
        return sentence

    def as_marked_text(self) -> str:
        """Render the sentence with [H ...] / [T ...] span markers wrapped
        around the head and tail tokens."""
        marked = list(self.tokens)
        # Order matters: open-markers before close-markers so a single-token
        # span (head[0] == head[-1]) ends up as "[H tok ]".
        edits = [
            (self.head[0], "[H {}"),
            (self.head[-1], "{} ]"),
            (self.tail[0], "[T {}"),
            (self.tail[-1], "{} ]"),
        ]
        for position, template in edits:
            marked[position] = template.format(marked[position])
        return " ".join(marked)
def align_span_to_tokens(span: str, tokens: List[str]) -> Tuple[int, int]:
    """Locate ``span`` inside ``tokens`` ignoring whitespace differences.

    E.g. align("John R. Allen , Jr.", ['John', 'R.', 'Allen', ',', 'Jr.']).
    Returns (start, end) token indices with an exclusive end; asserts that
    the whitespace-stripped span occurs in the concatenated tokens.
    """
    # Map each non-whitespace character position to the token owning it.
    owner = {}
    position = 0
    for token_index, token in enumerate(tokens):
        for _ in token:
            owner[position] = token_index
            position += 1
    owner[position] = len(tokens)  # sentinel one past the last character
    compact_query = span.replace(" ", "")
    compact_text = "".join(tokens)
    assert compact_query in compact_text
    hit = compact_text.find(compact_query)
    start = owner[hit]
    end = owner[hit + len(compact_query) - 1]
    assert 0 <= start <= end
    return start, end + 1
def test_align_span(
    span: str = "<NAME>, Jr.",
    tokens=("The", "John", "R.", "Allen", ",", "Jr.", "is", "here"),
):
    """Demo align_span_to_tokens on the default example.

    NOTE(review): the default ``span`` contains an anonymized "<NAME>"
    placeholder that does not occur in ``tokens``, so calling this with the
    defaults will trip the containment assert — pass a real span.
    """
    begin, stop = align_span_to_tokens(span, tokens)
    print(dict(start=begin, end=stop, span=tokens[begin:stop]))
def find_span(span: str, tokens: List[str]) -> List[int]:
    """Token indices covering ``span`` within ``tokens`` ([] for "").

    Tries exact sublist matching first, then falls back to the
    whitespace-insensitive alignment in align_span_to_tokens.
    """
    if span == "":
        return []
    words = span.split()
    first = find_sublist_index(tokens, words)
    if first < 0:
        start, end = align_span_to_tokens(span, tokens)
        return list(range(start, end))
    return [first + offset for offset in range(len(words))]
def test_find_span(
    span: str = "Hohenzollern",
    text: str = "<NAME> ( born 26 March 1949",
):
    """Demo find_span on the default example.

    NOTE(review): the default ``text`` contains an anonymized "<NAME>"
    placeholder, so the default span is absent and the alignment fallback
    will fail its assert — pass matching inputs.
    """
    words = text.split()
    hits = find_span(span, words)
    print(dict(test_find_span=[words[i] for i in hits]))
class QualifierSentence(RelationSentence):
    """RelationSentence extended with a qualifier relation and the token
    span of its value."""

    qualifier: str = ""
    qualifier_id: str
    value: List[int]
    value_type: str

    def as_tuple(self) -> Tuple[str, str, str, str, str]:
        """(head, label, tail, qualifier, value) as space-joined texts."""
        head_text = " ".join(self.tokens[i] for i in self.head)
        tail_text = " ".join(self.tokens[i] for i in self.tail)
        value_text = " ".join(self.tokens[i] for i in self.value)
        return head_text, self.label, tail_text, self.qualifier, value_text
class RelationData(BaseModel):
    """A collection of RelationSentence records with load/save, splitting,
    grouping, filtering and summary utilities."""
    sents: List[RelationSentence]
    @classmethod
    def load(cls, path: Path):
        """Read one-JSON-object-per-line sentences from ``path``."""
        with open(path) as f:
            lines = f.readlines()
        sents = [
            RelationSentence(**json.loads(x))
            for x in tqdm(lines, desc="RelationData.load")
        ]
        return cls(sents=sents)
    def save(self, path: Path):
        """Write sentences as JSON lines, creating parent folders as needed."""
        path.parent.mkdir(exist_ok=True, parents=True)
        with open(path, "w") as f:
            f.write("".join([s.as_line() for s in self.sents]))
    @property
    def unique_labels(self) -> List[str]:
        """Sorted set of relation labels present in the data."""
        return sorted(set([s.label for s in self.sents]))
    def train_test_split(
        self, test_size: Union[int, float], random_seed: int, by_label: bool = False
    ):
        """Split into (train, test) RelationData.

        With ``by_label=True`` the split is over relation labels (zero-shot
        style; ``test_size`` must then be an integer label count, enforced
        by the assert below); otherwise over unique sentence texts. Either
        way, train sentences whose text also appears in test are dropped.
        NOTE(review): delegates to the module-level ``train_test_split``.
        """
        if by_label:
            labels_train, labels_test = train_test_split(
                self.unique_labels, test_size=test_size, random_state=random_seed
            )
            train = [s for s in self.sents if s.label in labels_train]
            test = [s for s in self.sents if s.label in labels_test]
        else:
            groups = self.to_sentence_groups()
            keys_train, keys_test = train_test_split(
                sorted(groups.keys()), test_size=test_size, random_state=random_seed
            )
            train = [s for k in keys_train for s in groups[k]]
            test = [s for k in keys_test for s in groups[k]]
        # Enforce no sentence overlap
        texts_test = set([s.text for s in test])
        train = [s for s in train if s.text not in texts_test]
        data_train = RelationData(sents=train)
        data_test = RelationData(sents=test)
        if by_label:
            # Only meaningful when test_size is an integer count of labels.
            assert len(data_test.unique_labels) == test_size
            assert not set(data_train.unique_labels).intersection(
                data_test.unique_labels
            )
        info = dict(
            sents_train=len(data_train.sents),
            sents_test=len(data_test.sents),
            labels_train=len(data_train.unique_labels),
            labels_test=len(data_test.unique_labels),
        )
        print(json.dumps(info, indent=2))
        return data_train, data_test
    def to_sentence_groups(self) -> Dict[str, List[RelationSentence]]:
        """Group sentences by their plain text."""
        groups = {}
        for s in self.sents:
            groups.setdefault(s.text, []).append(s)
        return groups
    def to_label_groups(self) -> Dict[str, List[RelationSentence]]:
        """Group sentences by relation label."""
        groups = {}
        for s in self.sents:
            groups.setdefault(s.label, []).append(s)
        return groups
    def filter_group_sizes(self, min_size: int = 0, max_size: int = 999):
        """Keep only sentences whose text-group size lies within bounds."""
        groups = self.to_sentence_groups()
        sents = [
            s
            for k, lst in groups.items()
            for s in lst
            if min_size <= len(lst) <= max_size
        ]
        return RelationData(sents=sents)
    def filter_errors(self):
        """Drop invalid sentences and those with non-contiguous spans.

        Prints the fraction of sentences that survived.
        """
        def check_valid_span(span: List[int]) -> bool:
            # A span must be a contiguous ascending run of indices.
            start = sorted(span)[0]
            end = sorted(span)[-1] + 1
            return span == list(range(start, end))
        sents = []
        for s in self.sents:
            if s.is_valid():
                if check_valid_span(s.head) and check_valid_span(s.tail):
                    sents.append(s)
        print(dict(filter_errors_success=len(sents) / len(self.sents)))
        return RelationData(sents=sents)
    def analyze(self, header: Optional[str] = None):
        """Print and return summary statistics for the dataset."""
        labels = self.unique_labels
        groups = self.to_sentence_groups()
        spans = []
        words = []
        for s in self.sents:
            head, label, tail = s.as_tuple()
            spans.append(head)
            spans.append(tail)
            words.extend(s.tokens)
        info = dict(
            header=header,
            sents=len(self.sents),
            labels=str([len(labels), labels]),
            unique_texts=len(groups.keys()),
            unique_spans=len(set(spans)),
            unique_words=len(set(words)),
            group_sizes=str(Counter([len(lst) for lst in groups.values()])),
        )
        print(json.dumps(info, indent=2))
        return info
def wiki_uri_to_id(uri: str) -> str:
    """Extract a Wikidata id ("Q..." or "P...") from an entity URI.

    Returns "" for anything that is not a well-formed id. Fix: a URI that
    ends in "/" produced an empty tail and the old ``i[0]`` lookup raised
    IndexError; the empty tail is now rejected explicitly.
    """
    tail = uri.split("/")[-1]
    if tail and tail[0] in "QP" and tail[1:].isdigit():
        return tail
    return ""
def split_common_prefix(texts: List[str]) | |
# pythoms/spectrum.py
"""
This class is designed to efficiently combine, add to, or otherwise manipulate
spectra together whose dimensions are not equal.
For example, combining mass spectra together where resolution to the 3rd
decimal (not to the 10th decimal) is desired.
Upon initialization, specify the number of decimal places desired.
Start and end values for the x bounds may also be specified, and
an input spectrum can be provided (this spectrum will be added to
the object on initialization).
When adding a value to the Spectrum object, it will find the closest x value
with the decimal place specified and add the y value to that x in the object.
e.g. if the decimal place is 3, adding x=545.34898627,y=10 will add 10 to x=545.349
Once the desired spectrum has been constructed, calling Spectrum.trim() will return
an [[x values],[y values]] list with only the x values that have intensities. Other
manipulations are available, see below for details.
IGNORE:
CHANGELOG
---2.5---
- added the ability to not provide start and end points for an unfilled spectrum
---2.6
- added applycharge function to apply the charge to a mass list
IGNORE
"""
import numpy as np
from random import random
from bisect import bisect_left as bl
def weighted_average(xvals, yvals):
    """
    Determines the weighted average of a group of masses and abundances.

    :param list xvals: x values
    :param list yvals: y values (weights)
    :return: weighted average, summed intensity
    :rtype: tuple of float
    """
    total_intensity = sum(yvals)
    if total_intensity == 0:  # no intensity: fall back to the unweighted mean
        return sum(xvals) / len(xvals), 0.
    weighted = sum(x * y for x, y in zip(xvals, yvals)) / total_intensity
    return weighted, total_intensity
def full_spectrum_list(start, end, decpl, filler=None):
    """
    Generates two paired lists (one m/z, one filler) from start to end with
    a spacing of 10^-``decpl``.

    :param float start: The start value for the x list.
    :param float end: The end value for the x list.
    :param int decpl: The decimal places to use for the generated list.
    :param filler: The filler value for the y list
    :return: A list of x values and a y list of equal length filled with
        ``filler``.
    :rtype: tuple of lists

    **Notes**

    The stop value passed to ``numpy.arange`` is ``end`` plus one step so
    that ``end`` itself is included in the x list.
    """
    step = 10 ** -decpl
    xvals = np.arange(start, end + step, step)
    yvals = [filler] * len(xvals)
    return xvals.tolist(), yvals
class Spectrum(object):
    # Class-level defaults; __init__ overrides these on the instance when
    # the caller supplies explicit bounds.
    _start = -np.inf  # lower x (m/z) bound; -inf means unbounded
    _end = np.inf  # upper x (m/z) bound; inf means unbounded
    _charge = 1  # charge state (presumably consumed by the charge-applying methods outside this view — TODO confirm)
    def __init__(self,
                 decpl,
                 start=50.,
                 end=2000.,
                 empty=False,
                 filler=None,
                 specin=None,
                 ):
        """
        A class for subtracting, combining, adding-to, and otherwise manipulating spectra with non-equal dimensions.
        The object will track *x* values to a specified decimal place and can efficiently add a new value to a growing
        list of values. e.g. adding two spectra together that do not have an intensity value for every *x* value
        (a common operation for combining mass spectra). On initialization, specify the number of decimal places to
        track using the ``decpl`` argument. Other behaviour of the class can be tweaked with the keyword arguments.
        :param int decpl: The decimal places to track the *x* values to. e.g. a value of 3 will track
            *x* values to the nearest 0.001.
        :param float,None start: The minimum *x* value to track. Attempts to add an *x* value less than this
            will be ignored by the instance.
        :param float,None end: The maximum *x* value to track. Attempts to add an *x* value greater than this will be
            ignored by the instance.
        :param list specin: A spectrum to be added to the object on initialization. The format should be
            ``[[x values],[y values]]``.
        :param bool empty: Whether the spectrum object should be filled or empty. An empty spectrum will have no *x*
            or *y* values on initialization, and will add values with each call of ``Spectrum.addvalue()``. A filled
            spectrum will generate an *x* list from *start* to *end* with spacing 10^-``decpl`` and a *y* list of equal
            length filled with the value specified by the ``filler`` kwarg. If the number of items to be contained in
            the spectrum is substantially less than ``(end-start)*10^decpl`` it can be more efficient to set this to
            ``True``. If not, then set this to False to reduce computational overhead.
        :param filler: The y value to use if there is no y value. This can affect the functionality of some of the
            functions in this class. If ``Spectrum.addelement()`` is to be used (e.g. by the Molecule class),
            filler must be ``0.``.
        **Basic Examples**
        Specify the number of decimal places to track on initialization.
        >>> spec = Spectrum(3)
        *x*, *y* pairs may be added using the ``add_value`` method
        >>> spec.add_value(55.67839, 100)
        When the spectrum has been manipulated to the user's satisfaction, it may be easily converted to
        ``[[x values], [y values]]`` format using the ``trim()`` method.
        >>> spec.trim()
        [[55.678], [100]]
        The incoming x value will be compared to the current x list for equivalent x values. If a matching x value is
        found, the y value is added to the existing value.
        >>> spec.add_value(55.67799, 100) # equivalent to 55.678
        >>> spec.trim()
        [[55.678], [200]]
        >>> spec.add_value(55.67744, 99) # equivalent to 55.677
        >>> spec.trim()
        [[55.677, 55.678], [99, 200]]
        **y-value manipulation**
        The y values may be manipulated in a variety of ways.
        - The ``normalize()`` method will normalize the y values in the instance to the specified value.
        - The ``threshold()`` method will drop y values below a certain value (either relative or absolute).
        - The ``keep_top_n()`` method keeps the top n peaks.
        - The ``consolidate()`` method groups values together using a weighted average algorithm to keep the lowest
          y value above a given threshold but still retain the information in the spectrum.
        **spectrum constraint methods**
        - Values below a certain x value may be dropped by calling the ``drop_below()`` method.
        - Values above a certain x value may be dropped by calling the ``drop_above()`` method.
        """
        self.x = []
        self.y = []
        self.decpl = decpl
        self.empty = empty
        self.filler = filler
        # A filled spectrum needs concrete bounds to generate its x list.
        if empty is False and any([val is None for val in [start, end]]):
            raise ValueError(f'A start and end value must be specified for a filled '
                             f'{self.__class__.__name__} instance. ')
        # set start and end values for the spectrum
        if start is not None:
            self._start = start
        if end is not None:
            self._end = end
        if self.empty is False:
            self.x, self.y = full_spectrum_list(  # m/z and intensity lists
                self.start,
                self.end,
                decpl=decpl,
                filler=filler,
            )
        if specin is not None:
            self.add_spectrum(specin[0], specin[1])
    def __str__(self):
        """Human-readable summary of the spectrum's range and precision."""
        return f'Full spectrum from {self.start} to {self.end} keeping {self.decpl} decimal places'
    def __repr__(self):
        """Representation of the instance.

        NOTE(review): the argument order shown here (start, end, decpl)
        differs from ``__init__`` which takes (decpl, start, end) — confirm
        whether this is intentional before relying on eval-ability.
        """
        return f'{self.__class__.__name__}({self.start}, {self.end}, {self.decpl})'
def __getinitargs__(self):
return (
self.decpl,
self.start,
self.end,
self.empty,
self.filler,
[self.x, self.y]
)
def __reduce__(self):
return (
self.__class__,
self.__getinitargs__()
)
def __copy__(self):
return Spectrum(
*self.__getinitargs__()
)
def __deepcopy__(self, memodict={}):
return self.__copy__()
    def __len__(self):
        """Number of tracked x values."""
        return len(self.x)
def __getitem__(self, ind):
"""
if supplied index is an integer, return the x and y value of that index in the list
if a float, return the intensity of that x value
"""
if type(ind) is int:
return [self.x[ind], self.y[ind]]
elif type(ind) is float: # returns the intensity value of the specified m/z
if ind < self.start or ind > self.end:
raise IndexError(
'The supplied float %f is outside of the m/z range of this Spectrum instance (%.3f -%.3f)' % (
ind, self.start, self.end))
return self.y[self.index(ind)]
def __add__(self, x):
"""
Since addition to this class requires generating a complete copy of the class then addition,
using the built in addition methods is recommended
e.g. to add a single value use .addvalue()
to add a spectrum use .addspectrum()
"""
kwargs = {
'empty': self.empty,
'filler': self.filler,
}
if isinstance(x, self.__class__) is True: # if it is another Spectrum instance
if x.decpl != self.decpl:
raise ValueError(
'The decimal places of the two spectra to be added are not | |
if 66 - 66: I11i + iII111i
iI1IIII1ii1 += I111IoOo0oOOO0o . encode ( )
I111IoOo0oOOO0o . print_record ( " " , True )
if 50 - 50: IiII
if ( OOOoo0ooooo0 == 0 ) : return ( iI1IIII1ii1 )
if 33 - 33: OOooOOo % I1IiiI - I1IiiI / IiII
for ooOiiI1Ii11 in ddt_entry . delegation_set :
i1iIiII = lisp_rloc_record ( )
i1iIiII . rloc = ooOiiI1Ii11 . delegate_address
i1iIiII . priority = ooOiiI1Ii11 . priority
i1iIiII . weight = ooOiiI1Ii11 . weight
i1iIiII . mpriority = 255
i1iIiII . mweight = 0
i1iIiII . reach_bit = True
iI1IIII1ii1 += i1iIiII . encode ( )
i1iIiII . print_record ( " " )
if 22 - 22: ooOoO0o * ooOoO0o % o0oOOo0O0Ooo * Ii1I . OoO0O00
return ( iI1IIII1ii1 )
if 55 - 55: OoOoOO00 - I1ii11iIi11i + iIii1I11I1II1 - i11iIiiIii / i1IIi / II111iiii
if 37 - 37: Ii1I + o0oOOo0O0Ooo
if 74 - 74: Oo0Ooo / O0 + i1IIi . I1IiiI + OoO0O00 / Oo0Ooo
if 13 - 13: o0oOOo0O0Ooo / Ii1I . II111iiii
if 8 - 8: I11i - I11i % IiII
if 8 - 8: I1IiiI . IiII * O0 * o0oOOo0O0Ooo
if 17 - 17: I1IiiI . oO0o + Oo0Ooo + I11i / o0oOOo0O0Ooo
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
                                 ttl):
    """Handle a Map-Request received by an ETR.

    Looks up the local database-mapping for the requested EID (or
    (source, group) pair for multicast) and answers with a Map-Reply. For
    an RLOC-probe aimed at a NATed RTR, the reply is encapsulated instead
    of sent directly.
    """
    # Locate the database-mapping entry for the requested EID.
    if map_request.target_group.is_null():
        db_entry = lisp_db_for_lookups.lookup_cache(map_request.target_eid,
                                                    False)
    else:
        db_entry = lisp_db_for_lookups.lookup_cache(map_request.target_group,
                                                    False)
        if db_entry:
            db_entry = db_entry.lookup_source_cache(map_request.target_eid,
                                                    False)

    eid_str = map_request.print_prefix()

    if db_entry == None:
        lprint("Database-mapping entry not found for requested EID {}".format(
            green(eid_str, False)))
        return

    prefix_str = db_entry.print_eid_tuple()
    lprint("Found database-mapping EID-prefix {} for requested EID {}".format(
        green(prefix_str, False), green(eid_str, False)))

    # Reply to the first ITR-RLOC; when it is behind a NAT, use the outer
    # packet source instead.
    reply_dest = map_request.itr_rlocs[0]
    if reply_dest.is_private_address() and lisp_nat_traversal:
        reply_dest = source

    nonce = map_request.nonce
    echo_nonce = lisp_nonce_echoing
    keys = map_request.keys

    db_entry.map_replies_sent += 1

    packet = lisp_build_map_reply(db_entry.eid, db_entry.group,
                                  db_entry.rloc_set, nonce, LISP_NO_ACTION,
                                  1440, map_request.rloc_probe, keys,
                                  echo_nonce, True, ttl)

    # An RLOC-probe destined to a global RTR address must be encapsulated
    # so it traverses the NAT path (only when all 4 sockets are present).
    if map_request.rloc_probe and len(lisp_sockets) == 4:
        is_global = (reply_dest.is_private_address() == False)
        dest_str = reply_dest.print_address_no_iid()
        if is_global and lisp_rtr_list.has_key(dest_str):
            lisp_encapsulate_rloc_probe(lisp_sockets, reply_dest, None,
                                        packet)
            return

    lisp_send_map_reply(lisp_sockets, packet, reply_dest, sport)
    return
if 19 - 19: ooOoO0o + i1IIi / Oo0Ooo * II111iiii * I1Ii111 / ooOoO0o
if 23 - 23: I1Ii111
if 76 - 76: Ii1I + Ii1I / i1IIi % o0oOOo0O0Ooo . iIii1I11I1II1 . OoOoOO00
if 75 - 75: I11i . Ii1I / I1ii11iIi11i
if 99 - 99: Ii1I
if 85 - 85: I1Ii111 + I1Ii111 + OoOoOO00 / ooOoO0o / o0oOOo0O0Ooo . Oo0Ooo
if 41 - 41: i1IIi % Ii1I . i1IIi * OoooooooOO % Ii1I
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
                                 ttl):
    """Handle a Map-Request received by an RTR.

    Replies with this RTR's own local RLOCs (at low priority 254) so the
    requester encapsulates traffic through this RTR.
    """
    reply_dest = map_request.itr_rlocs[0]
    if reply_dest.is_private_address():
        reply_dest = source
    nonce = map_request.nonce

    eid = map_request.target_eid
    group = map_request.target_group

    # Build an RLOC-set from our local IPv4/IPv6 addresses.
    rloc_set = []
    for local_addr in [lisp_myrlocs[0], lisp_myrlocs[1]]:
        if local_addr == None:
            continue
        rloc_entry = lisp_rloc()
        rloc_entry.rloc.copy_address(local_addr)
        rloc_entry.priority = 254
        rloc_set.append(rloc_entry)

    echo_nonce = lisp_nonce_echoing
    keys = map_request.keys

    packet = lisp_build_map_reply(eid, group, rloc_set, nonce,
                                  LISP_NO_ACTION, 1440, True, keys,
                                  echo_nonce, True, ttl)
    lisp_send_map_reply(lisp_sockets, packet, reply_dest, sport)
    return
if 69 - 69: i11iIiiIii . IiII + o0oOOo0O0Ooo % Ii1I - OoO0O00
if 46 - 46: OoOoOO00 + iII111i * o0oOOo0O0Ooo - I1ii11iIi11i / oO0o + IiII
if 1 - 1: iIii1I11I1II1 / OoooooooOO + Oo0Ooo . Ii1I
if 25 - 25: I1ii11iIi11i / i1IIi * oO0o - II111iiii * i1IIi
if 57 - 57: OoO0O00 % OoO0O00
if 67 - 67: O0 . i11iIiiIii + iIii1I11I1II1
if 86 - 86: iIii1I11I1II1
if 81 - 81: OOooOOo / I11i / OoooooooOO
if 74 - 74: I11i + OoooooooOO % II111iiii % o0oOOo0O0Ooo
if 27 - 27: OoO0O00 * Oo0Ooo
def lisp_get_private_rloc_set ( target_site_eid , seid , group ) :
oOo0oOOOoOoo = target_site_eid . registered_rlocs
if 80 - 80: i11iIiiIii . OoO0O00 - I11i % I11i
Ii1 = lisp_site_eid_lookup ( seid , group , False )
if ( Ii1 == None ) : return ( oOo0oOOOoOoo )
if 45 - 45: oO0o * | |
# gh_stars: 1-10
from __future__ import division
import ConfigParser
import numpy as np
from scipy.optimize import minimize
import csv
import cPickle as pickle
import timeit
import os
import multiprocessing
import sys
import math
import shutil
sys.path.insert(0,os.path.realpath('../reactions'))
import parent
import hairpin
import helix
import bubble
import three_waystranddisplacement
import four_waystrandexchange
import myenums
DATASET_PATH = '../dataset'  # root folder holding the experimental CSV datasets
PATH_AUXILIARY= "simplifiedstatespace"  # auxiliary output tag (presumably a folder/file suffix — TODO confirm)
use_all_data = False  # presumably: fit on all rows instead of a split — TODO confirm against usage
for_plot= False  # when True, per-iteration predictions are recorded (see multi_process)
iter = 0  # global iteration counter; NOTE(review): shadows the builtin `iter`
class ForMultiProcess(object):
    """Bundles a worker function with its argument tuple so the pair can be
    dispatched through ``multiprocessing.Pool.apply_async``."""
    def __init__(self, function_name, arguments):
        # Attribute names mirror the constructor parameters; both are read
        # directly by the dispatcher.
        self.arguments = arguments
        self.function_name = function_name
def open_document(document) :
    """Open a csv file and return its rows as a list of lists.

    The file is opened in binary mode ('rb'), matching the Python 2 csv
    convention used throughout this module. Fix: the handle is now closed
    deterministically via ``with`` — the previous version leaked it.
    """
    with open(document, 'rb') as handle:
        return list(csv.reader(handle))
#Note that for each dataset a separate function is used, started with read_, since the data sets have different fields!
def read_DabbyThesis(ss , counter_cell , document, theta , done_queue , row, dataset_name , docID, name ) :
    """Parse one Dabby-thesis row, run the four-way strand-exchange model
    and queue (name, error, cell, doc, predicted, real, counters).
    NOTE(review): column indices are dataset-specific — confirm against the CSV header."""
    docID = name +docID
    [ error , predicted_log_10_rate, real_log_10_rate , stuctureCounterUniLocal, half_context_biLocal] = four_waystrandexchange.main(ss, float( row[counter_cell][8]) , float( row[counter_cell][13]) , int(row[counter_cell][1]) , int(row[counter_cell][2]) , row[counter_cell][3] , row[counter_cell][4] , row[counter_cell][5] , 6,6 , theta, 1000/ float (row[counter_cell][6] )- 273.15 , np.max ( ( float (row[counter_cell][16] ), float (row[counter_cell][17] ) ) ) , float (row[counter_cell][11]) ,float (row[counter_cell][12]) , dataset_name, docID , name)
    done_queue.put( ( name , error , counter_cell, document, predicted_log_10_rate, real_log_10_rate , stuctureCounterUniLocal, half_context_biLocal ) )
def read_Machinek ( ss, counter_cell , document, theta , done_queue , row, dataset_name , docID, name) :
    """Parse one Machinek row, run the three-way strand-displacement model
    and queue the prediction result.
    NOTE(review): column indices are dataset-specific — confirm against the CSV header."""
    docID = name + docID
    real_log_10_rate = float( row[counter_cell][9])
    [ error , predicted_log_10_rate, real_log_10_rate , stuctureCounterUniLocal, half_context_biLocal] = three_waystranddisplacement.main(ss, True, row[counter_cell][3] , real_log_10_rate, int(row[counter_cell][1]) , "" , "", theta, float ( row[counter_cell][7]) , np.max(( float (row[counter_cell][16]) , float (row[counter_cell][17]) )), float(row[counter_cell][13]) , float (row[counter_cell][14]), "" , dataset_name, docID , name, row[counter_cell][4][16:] , row[counter_cell][5], row[counter_cell][6], int( row[counter_cell][2]))
    done_queue.put( ( name , error ,counter_cell, document, predicted_log_10_rate, real_log_10_rate, stuctureCounterUniLocal, half_context_biLocal ) )
def read_Zhang(ss, counter_cell , document, theta , done_queue , row,dataset_name , docID, name) :
    """Parse one Zhang row (rate stored as log10, hence ``math.pow``), run
    the three-way strand-displacement model and queue the result.
    NOTE(review): column indices are dataset-specific — confirm against the CSV header."""
    docID = name + docID
    real_log_10_rate = math.pow(10, float( row[counter_cell][7]) )
    [ error , predicted_log_10_rate, real_log_10_rate , stuctureCounterUniLocal, half_context_biLocal] = three_waystranddisplacement.main(ss, True, row[counter_cell][2], real_log_10_rate, int ( row[counter_cell][1] ) ,row[counter_cell][3], row[counter_cell][4], theta, 1000/ float (row[counter_cell][5]) - 273.15 , np.max ( ( float (row[counter_cell][16] ), float (row[counter_cell][17] ) ) ) , float (row[counter_cell][9]) , float (row[counter_cell][10]) , "" , dataset_name, docID , name, "" , "", "", "" )
    done_queue.put( ( name , error ,counter_cell, document, predicted_log_10_rate, real_log_10_rate, stuctureCounterUniLocal, half_context_biLocal ) )
def read_ReyanldoSequential(ss, counter_cell , document, theta , done_queue , row, dataset_name , docID, name ) :
    """Parse one Reynaldo-sequential row, run the three-way
    strand-displacement model (non-strict mode) and queue the result.
    NOTE(review): column indices are dataset-specific — confirm against the CSV header."""
    docID = name +docID
    real_log_10_rate =float( row[counter_cell][5])
    [ error , predicted_log_10_rate, real_log_10_rate , stuctureCounterUniLocal, half_context_biLocal] = three_waystranddisplacement.main(ss, False ,"" , real_log_10_rate, 0 ,row[counter_cell][2] ,"" , theta, float( row[counter_cell][3]) , np.max ( ( float (row[counter_cell][16] ), float (row[counter_cell][17] ) ) ) , float (row[counter_cell][7]) , float( row[counter_cell][8]) , row[counter_cell][9], dataset_name, docID , name , "" , "", "", "" )
    done_queue.put( ( name , error , counter_cell, document , predicted_log_10_rate, real_log_10_rate , stuctureCounterUniLocal, half_context_biLocal) )
def read_AltanBonnet(ss, counter_cell , document, theta , done_queue,row, dataset_name, docID, name) :
    """Parse one Altan-Bonnet row (rate stored as a time, hence 1/x), run
    the bubble-closing model and queue the result.
    NOTE(review): column indices are dataset-specific — confirm against the CSV header."""
    docID = name + docID
    flurPosition = 17  # fluorophore position passed to bubble.main
    real_log_10_rate = 1 / float( row[counter_cell][5])
    [ error , predicted_log_10_rate, real_log_10_rate , stuctureCounterUniLocal, half_context_biLocal] = bubble.main(ss, real_log_10_rate , theta, row[counter_cell][1].rstrip(),row[counter_cell][2].rstrip(),row[counter_cell][3].rstrip(), (1000/ float (row[counter_cell][4] ))-273.15, float (row[counter_cell][8] ), float (row[counter_cell][9] ), 0, flurPosition, dataset_name, docID )
    done_queue.put( ( name , error , counter_cell, document , predicted_log_10_rate, real_log_10_rate , stuctureCounterUniLocal, half_context_biLocal) )
def read_Morrison(ss, counter_cell , document, theta , done_queue, _zip, row, dataset_name , docID, name ) :
    """Parse one Morrison row (rate stored as log10), run the helix
    hybridization/dissociation model and queue the result.
    NOTE(review): column indices are dataset-specific — confirm against the CSV header."""
    docID = name + docID
    [ error , predicted_log_10_rate, real_log_10_rate, stuctureCounterUniLocal, half_context_biLocal] = helix.main(ss, math.pow(10, float (row[counter_cell][5] )) , theta, row[counter_cell][1].rstrip(), _zip, 1000/ float (row[counter_cell][3] ) - 273.15, np.max ( ( float (row[counter_cell][16] ), float (row[counter_cell][17] ) ) ) , float (row[counter_cell][8] ), 0, "" , dataset_name, docID , name )
    done_queue.put( ( name , error , counter_cell, document, predicted_log_10_rate, real_log_10_rate, stuctureCounterUniLocal, half_context_biLocal ) )
def read_ReynaldoDissociate(ss, counter_cell , document, theta , done_queue, _zip, row, dataset_name , docID, name) :
    """Parse one Reynaldo-dissociate row, run the helix model and queue
    the result.
    NOTE(review): column indices are dataset-specific — confirm against the CSV header."""
    docID = name + docID
    [ error , predicted_log_10_rate, real_log_10_rate,stuctureCounterUniLocal, half_context_biLocal] = helix.main(ss, float( row[counter_cell][5] ) , theta, row[counter_cell][2].rstrip(), _zip, float (row[counter_cell][3] ), np.max ( ( float (row[counter_cell][16] ), float (row[counter_cell][17] ) ) ) , float (row[counter_cell][7] ), float (row[counter_cell][8] ),row[counter_cell][9] , dataset_name, docID , name )
    done_queue.put( ( name , error , counter_cell, document, predicted_log_10_rate, real_log_10_rate , stuctureCounterUniLocal, half_context_biLocal) )
def read_Bonnet(ss, counter_cell , document,theta , done_queue, _zip , row, dataset_name, docID, name ):
    """Parse one Bonnet row, run the hairpin open/close model and queue
    the result.
    NOTE(review): column indices are dataset-specific — confirm against the CSV header."""
    docID = name +docID
    magnesium = 0  # no Mg2+ in this dataset's buffer conditions — TODO confirm
    [ error , predicted_log_10_rate, real_log_10_rate, stuctureCounterUniLocal, half_context_biLocal] = hairpin.main(ss, float (row[counter_cell][5]) , theta, row[counter_cell][1].rstrip(),row[counter_cell][2].rstrip(), _zip, 1000/ float( row[counter_cell][3] )- 273.15, float ( row[counter_cell][7] ) , float ( row[counter_cell][8] ) , magnesium , dataset_name, docID )
    done_queue.put( ( name , error , counter_cell, document, predicted_log_10_rate, real_log_10_rate, stuctureCounterUniLocal, half_context_biLocal ) )
def read_Kim(ss, counter_cell , document,theta , done_queue, _zip , row, dataset_name, docID ,name):
    """Evaluate the hairpin model on one Kim dataset row and queue the result.

    NOTE(review): the body is identical to read_Bonnet; the two could be
    merged into one shared helper — confirm no divergence is planned first.
    Column meanings are inferred from usage — TODO confirm.
    """
    docID = name +docID
    # Magnesium concentration is fixed at zero for this dataset.
    magnesium = 0
    # hairpin.main receives: state dict, concentration (col 5), parameter set,
    # two sequences (cols 1 and 2), zip flag, temperature in Celsius
    # (col 3 presumably stores 1000/T Kelvin — TODO confirm), cols 7 and 8,
    # magnesium, and identification strings.
    [ error , predicted_log_10_rate, real_log_10_rate, stuctureCounterUniLocal, half_context_biLocal] = hairpin.main(ss, float (row[counter_cell][5]) , theta, row[counter_cell][1].rstrip(),row[counter_cell][2].rstrip(), _zip, 1000/ float( row[counter_cell][3] )- 273.15, float ( row[counter_cell][7] ) , float ( row[counter_cell][8] ) , magnesium , dataset_name, docID )
    # Hand the result back to the parent process via the shared queue.
    done_queue.put( ( name , error , counter_cell, document, predicted_log_10_rate, real_log_10_rate, stuctureCounterUniLocal, half_context_biLocal ) )
def read_BonnetThesis(ss, counter_cell , document, theta , done_queue, _zip,row, dataset_name, docID , name):
    """Evaluate the hairpin model on one Bonnet-thesis row and queue the result.

    Column meanings are inferred from usage — TODO confirm.
    """
    docID = name +docID
    # NOTE(review): this local first holds the reciprocal of column 4
    # (presumably a measured time constant — confirm), is passed to
    # hairpin.main as its rate argument, and is then immediately overwritten
    # by hairpin.main's returned value. The reuse of the name is misleading.
    real_log_10_rate = 1 / float( row[counter_cell][4])
    [ error , predicted_log_10_rate, real_log_10_rate, stuctureCounterUniLocal, half_context_biLocal] = hairpin.main(ss, real_log_10_rate, theta, row[counter_cell][1].rstrip(),row[counter_cell][2].rstrip(), _zip, 1000/ float (row[counter_cell][3] ) - 273.15, float (row[counter_cell][7] ), float (row[counter_cell][8] ), 0 , dataset_name, docID )
    # Hand the result back to the parent process via the shared queue.
    done_queue.put( ( name , error ,counter_cell, document, predicted_log_10_rate, real_log_10_rate, stuctureCounterUniLocal, half_context_biLocal ) )
def multi_process(done_queue , dataset_list , iter , countS , local_context_uni, local_context_bi) :
    """Run every dataset evaluation in a worker pool and aggregate the results.

    Each entry of dataset_list carries a reader function and its arguments;
    the readers push result tuples onto done_queue. Returns the accumulated
    error over all rows. Mutates countS (per-dataset error totals) and, on
    the first iteration of the Arrhenius model, the local-context counters.

    Relies on module-level globals defined outside this block:
    n_processors, for_plot, parent, and the two rate-constant arrays.
    """
    global predicted_logreactionrateconstants , experimental_logreactionrateconstants
    error = 0
    pool = multiprocessing.Pool( processes = n_processors)
    for ds in dataset_list:
        # apply_async so all rows evaluate concurrently; the AsyncResult is
        # not awaited here — worker results arrive via done_queue instead.
        compile_error = pool.apply_async( ds.function_name , ds.arguments )
        #print "Errors: " + str(compile_error.get())
    pool.close( )
    pool.join ()
    # Drain the queue only after join(), so every worker has finished.
    while not done_queue.empty():
        (name, s , counter_cell, document , predictedRate ,real_log_10_rate , local_context_uni_l, local_context_bi_l )= done_queue.get()
        if for_plot== True :
            # Record per-row predictions for later plotting.
            predicted_logreactionrateconstants[iter, counter_cell, document ] = predictedRate
            experimental_logreactionrateconstants [ iter, counter_cell, document ] = real_log_10_rate
        error += s
        # Local-context histograms are only accumulated once (iteration 0)
        # and only for the Arrhenius model.
        if iter == 0 and parent.rate_method == myenums.ModelName.ARRHENIUSMODELNAME.value:
            for i in local_context_uni:
                local_context_uni [i] += local_context_uni_l[i]
            for i in local_context_bi:
                local_context_bi[i] += local_context_bi_l[i]
        # Per-dataset running total of the error contribution.
        if name in countS :
            countS [name ] += s
        else :
            countS[name ] = s
    return error
def check_directories(directories):
    """Ensure every path in *directories* exists, creating missing ones.

    Args:
        directories: iterable of directory path strings; intermediate
            path components are created as needed.
    """
    for directory in directories:
        # exist_ok avoids the TOCTOU race of the previous
        # "if not exists: makedirs" pattern (and `directory` no longer
        # shadows the builtin `dir`).
        os.makedirs(directory, exist_ok=True)
def objective_function(thetap):
"""For the MCMC approach, receives an parameter set and returns an approximation of the log posterior. For the MAP approach it returns an approximation of the negative log posterior"""
global iter
start_time = timeit.default_timer()
theta =[]
for x in thetap :
theta.append(x)
if parent.rate_method == myenums.ModelName.ARRHENIUSMODELNAME.value:
theta = [thetap[0] , thetap[1] , thetap[2], thetap[3] , thetap[4] , thetap[5], thetap[6] ,thetap[7], thetap[8], thetap[9], thetap[10] , thetap[11], thetap[12] , thetap[13], thetap[14] ]
alpha = theta [14]
elif parent.rate_method == myenums.ModelName.METROPOLISMODELNAME.value :
theta = [thetap[0] , thetap[1]]
alpha =1
else:
raise ValueError('Error: Please specify rate_method to be Arrhenius or Metropolis!')
sigma = thetap[len(thetap)-1]
if alpha <= 0 or sigma <= 0 or (parent.rate_method ==myenums.ModelName.METROPOLISMODELNAME.value and ( theta[0] <= 0 or theta[1] <= 0 ) ) :
if METHOD == myenums.MethodName.MCMCNAME.value:
return -np.inf
elif METHOD ==myenums.MethodName.MAPNAME.value:
return np.inf
parameter_file = open(parameter_file_name, 'a')
parameter_file.write("Iteration " + str(iter) +" "+str(theta) + " " + str(sigma) + '\n')
error = 0
n = 0
done_queue = multiprocessing.Manager().Queue()
dataset_list = []
directories =[]
if use_all_data == False :
set = [myenums.SetName.TRAIN.value]
elif use_all_data == True :
set = [myenums.SetName.TRAIN.value, myenums.SetName.TEST.value]
# Zhang
my_name = '/three_waystranddisplacement/Fig3b'
dataset_name, document, row = initconf(my_name, directories)
for set_type in set:
for counter_cell in traintestset[document, set_type]:
ss = {myenums.Permanent_Folder.PSD.value:dict() , myenums.Permanent_Folder.TRANSITION_STRUCTURE.value:dict( )}
dataset_list.append(ForMultiProcess(read_Zhang, ( ss, counter_cell, document,theta, done_queue, row, dataset_name , str(counter_cell) , myenums.DatasetName.ZHANG.value ) ))
n +=1
#Dabby
my_name= '/four_waystrandexchange/Table5.2'
dataset_name, document , row = initconf(my_name , directories)
for set_type in set:
for counter_cell in traintestset[document, set_type]:
ss = {myenums.Permanent_Folder.PSD.value:dict() , myenums.Permanent_Folder.TRANSITION_STRUCTURE.value:dict( )}
dataset_list.append(ForMultiProcess(read_DabbyThesis ,(ss, counter_cell , document, theta, done_queue, row , dataset_name , str(counter_cell) , myenums.DatasetName.DABBY.value )))
n +=1
#Reynaldo
my_name = '/three_waystranddisplacement1/Fig6b'
dataset_name, document , row = initconf(my_name , directories)
for set_type in set:
for counter_cell in traintestset[document, set_type]:
ss = {myenums.Permanent_Folder.PSD.value:dict() , myenums.Permanent_Folder.TRANSITION_STRUCTURE.value:dict( )}
dataset_list.append(ForMultiProcess(read_ReyanldoSequential, (ss, counter_cell , document,theta, done_queue, row, dataset_name, str(counter_cell) , myenums.DatasetName.REYNALDOSEQUENTIAL.value) ))
n +=1
#ReynaldoDissociate
my_name = '/helix1/Fig6a'
dataset_name, document , row = initconf(my_name , directories)
for _zip in [False]:
for set_type in set:
for counter_cell in traintestset [document, set_type ] :
ss = {myenums.Permanent_Folder.PSD.value:dict() , myenums.Permanent_Folder.TRANSITION_STRUCTURE.value:dict( )}
dataset_list.append( ForMultiProcess( read_ReynaldoDissociate, (ss , counter_cell , document, theta , done_queue, _zip , row , dataset_name ,str(_zip) + str(counter_cell) , myenums.DatasetName.REYNALDODISSOCIATE.value)))
n +=1
#Morrison
for _zip in [True , False ]:
my_name = '/helix/Fig6_' +str(int(_zip))
dataset_name, document , row = initconf(my_name , directories)
for set_type in set:
for counter_cell in traintestset[document, set_type]:
ss = {myenums.Permanent_Folder.PSD.value:dict() , myenums.Permanent_Folder.TRANSITION_STRUCTURE.value:dict( )}
dataset_list.append( ForMultiProcess( read_Morrison, (ss, counter_cell , document, theta , done_queue, _zip , row, dataset_name , str(_zip) | |
<filename>Lib/defcon/test/objects/test_glyph.py
import unittest
from defcon import Font, Glyph, Contour, Component, Anchor, Guideline, Layer
from defcon.test.testTools import getTestFontPath
class GlyphTest(unittest.TestCase):
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
    def test_identifiers(self):
        """Identifiers are tracked, must be unique, and are released on removal."""
        glyph = Glyph()
        pointPen = glyph.getPointPen()
        # Build two contours, two components and two guidelines, each with
        # an explicit identifier.
        pointPen.beginPath(identifier="contour 1")
        pointPen.addPoint((0, 0), identifier="point 1")
        pointPen.addPoint((0, 0), identifier="point 2")
        pointPen.endPath()
        pointPen.beginPath(identifier="contour 2")
        pointPen.endPath()
        pointPen.addComponent("A", (1, 1, 1, 1, 1, 1),
                              identifier="component 1")
        pointPen.addComponent("A", (1, 1, 1, 1, 1, 1),
                              identifier="component 2")
        guideline = Guideline()
        guideline.identifier = "guideline 1"
        glyph.appendGuideline(guideline)
        guideline = Guideline()
        guideline.identifier = "guideline 2"
        glyph.appendGuideline(guideline)
        # The identifiers round-trip through the respective containers.
        self.assertEqual([contour.identifier for contour in glyph],
                         ["contour 1", "contour 2"])
        self.assertEqual([point.identifier for point in glyph[0]],
                         ["point 1", "point 2"])
        self.assertEqual(
            [component.identifier for component in glyph.components],
            ["component 1", "component 2"])
        # Reusing an existing identifier raises, whatever the object type.
        with self.assertRaises(AssertionError):
            pointPen.beginPath(identifier="contour 1")
        pointPen.endPath()
        pointPen.beginPath()
        pointPen.addPoint((0, 0))
        with self.assertRaises(AssertionError):
            pointPen.addPoint((0, 0), identifier="point 1")
        pointPen.endPath()
        with self.assertRaises(AssertionError):
            pointPen.addComponent("A", (1, 1, 1, 1, 1, 1),
                                  identifier="component 1")
        g = Guideline()
        g.identifier = "guideline 1"
        with self.assertRaises(AssertionError):
            glyph.appendGuideline(g)
        self.assertEqual(
            sorted(glyph.identifiers),
            ["component 1", "component 2", "contour 1", "contour 2",
             "guideline 1", "guideline 2", "point 1", "point 2"])
        # Removing objects frees their identifiers (and their points').
        glyph.removeContour(glyph[0])
        self.assertEqual(
            sorted(glyph.identifiers),
            ["component 1", "component 2", "contour 2",
             "guideline 1", "guideline 2"])
        glyph.removeComponent(glyph.components[0])
        self.assertEqual(
            sorted(glyph.identifiers),
            ["component 2", "contour 2", "guideline 1", "guideline 2"])
        glyph.removeGuideline(glyph.guidelines[0])
        self.assertEqual(
            sorted(glyph.identifiers),
            ["component 2", "contour 2", "guideline 2"])
    def test_name_set(self):
        """Renaming a glyph updates the font's key set; a no-op rename stays clean."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.name = "RenamedGlyph"
        self.assertEqual(glyph.name, "RenamedGlyph")
        self.assertEqual(sorted(font.keys()), ["B", "C", "RenamedGlyph"])
        # Setting the same name again must not mark the glyph dirty.
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.name = "A"
        self.assertFalse(glyph.dirty)
    def test_name_get(self):
        """The glyph name round-trips from the font."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.name, "A")
    def test_unicodes_get(self):
        """unicodes returns the code-point list from the UFO."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.unicodes, [65])
    def test_unicodes_set(self):
        """Assigning unicodes replaces the list and dirties the glyph."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.unicodes = [123, 456]
        self.assertEqual(glyph.unicodes, [123, 456])
        self.assertTrue(glyph.dirty)
    def test_unicode_get(self):
        """unicode is the primary (first) code point."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.unicode, 65)
    def test_unicode_set(self):
        """Assigning unicode replaces the whole unicodes list and dirties the glyph."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.unicode = 123
        self.assertEqual(glyph.unicodes, [123])
        glyph.unicode = 456
        self.assertEqual(glyph.unicodes, [456])
        self.assertTrue(glyph.dirty)
    def test_bounds(self):
        """bounds covers outline extrema for contour and component glyphs alike."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.bounds, (0, 0, 700, 700))
        glyph = font["B"]
        self.assertEqual(glyph.bounds, (0, 0, 700, 700))
        # "C" is built from components, hence the float coordinates.
        glyph = font["C"]
        self.assertEqual(glyph.bounds, (0.0, 0.0, 700.0, 700.0))
def test_controlPointBounds(self):
from defcon.test.testTools import getTestFontPath
from defcon.objects.font import Font
font = Font(getTestFontPath())
glyph = font["A"]
self.assertEqual(glyph.controlPointBounds, (0, 0, 700, 700))
glyph = font["B"]
self.assertEqual(glyph.controlPointBounds, (0, 0, 700, 700))
glyph = font["C"]
self.assertEqual(glyph.controlPointBounds, (0.0, 0.0, 700.0, 700.0))
    def test_leftMargin_get(self):
        """leftMargin is the distance from x=0 to the outline's xMin."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.leftMargin, 0)
        glyph = font["B"]
        self.assertEqual(glyph.leftMargin, 0)
    def test_leftMargin_set(self):
        """Growing leftMargin shifts the outline and widens the advance."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.leftMargin = 100
        self.assertEqual(glyph.leftMargin, 100)
        # Width grows by the added margin: 700 + 100.
        self.assertEqual(glyph.width, 800)
        self.assertTrue(glyph.dirty)
def test_rightMargin_get(self):
from defcon.test.testTools import getTestFontPath
from defcon.objects.font import Font
font = Font(getTestFontPath())
glyph = font["A"]
self.assertEqual(glyph.rightMargin, 0)
def test_rightMargin_set(self):
from defcon.test.testTools import getTestFontPath
from defcon.objects.font import Font
font = Font(getTestFontPath())
glyph = font["A"]
glyph.rightMargin = 100
self.assertEqual(glyph.rightMargin, 100)
self.assertEqual(glyph.width, 800)
self.assertTrue(glyph.dirty)
    def test_bottomMargin_get(self):
        """bottomMargin reads from the outline; an empty glyph has none."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.bottomMargin, 0)
        glyph = font["B"]
        self.assertEqual(glyph.bottomMargin, 0)
        # empty glyph
        glyph = font.newGlyph("D")
        self.assertIsNone(glyph.bottomMargin)
    def test_bottomMargin_set(self):
        """Setting bottomMargin adjusts height, pins verticalOrigin, and dirties."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.bottomMargin = 100
        self.assertEqual(glyph.bottomMargin, 100)
        self.assertEqual(glyph.height, 600)
        self.assertEqual(glyph.verticalOrigin, 500)
        self.assertTrue(glyph.dirty)
        # now glyph.verticalOrigin is defined
        glyph.bottomMargin = 50
        self.assertEqual(glyph.bottomMargin, 50)
        self.assertEqual(glyph.height, 550)
        self.assertEqual(glyph.verticalOrigin, 500)
        self.assertTrue(glyph.dirty)
        # empty glyph: setting the margin must be a silent no-op.
        glyph = font.newGlyph("D")
        glyph.dirty = False
        glyph.bottomMargin = 10
        self.assertIsNone(glyph.bottomMargin)
        self.assertEqual(glyph.height, 0)
        self.assertIsNone(glyph.verticalOrigin)
        self.assertFalse(glyph.dirty)
def test_topMargin_get(self):
from defcon.test.testTools import getTestFontPath
from defcon.objects.font import Font
font = Font(getTestFontPath())
glyph = font["A"]
self.assertEqual(glyph.topMargin, -200)
# empty glyph
glyph = font.newGlyph("D")
self.assertIsNone(glyph.topMargin)
def test_topMargin_set(self):
from defcon.test.testTools import getTestFontPath
from defcon.objects.font import Font
font = Font(getTestFontPath())
glyph = font["A"]
glyph.topMargin = 100
self.assertEqual(glyph.topMargin, 100)
self.assertEqual(glyph.height, 800)
self.assertEqual(glyph.verticalOrigin, 800)
self.assertTrue(glyph.dirty)
# now glyph.verticalOrigin is defined
glyph.topMargin = 50
self.assertEqual(glyph.topMargin, 50)
self.assertEqual(glyph.height, 750)
self.assertEqual(glyph.verticalOrigin, 750)
self.assertTrue(glyph.dirty)
# empty glyph
glyph = font.newGlyph("D")
glyph.dirty = False
glyph.topMargin = 10
self.assertIsNone(glyph.topMargin)
self.assertEqual(glyph.height, 0)
self.assertIsNone(glyph.verticalOrigin)
self.assertFalse(glyph.dirty)
    def test_width_get(self):
        """width is the advance width from the UFO."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.width, 700)
    def test_width_set(self):
        """Assigning width stores the value and dirties the glyph."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.width = 100
        self.assertEqual(glyph.width, 100)
        self.assertTrue(glyph.dirty)
    def test_height_get(self):
        """height is the advance height from the UFO."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(glyph.height, 500)
    def test_height_set(self):
        """Assigning height stores the value, leaves verticalOrigin unset, dirties."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.height = 100
        self.assertEqual(glyph.height, 100)
        self.assertEqual(glyph.verticalOrigin, None)
        self.assertTrue(glyph.dirty)
def test_markColor(self):
from defcon.objects.font import Font
font = Font()
font.newGlyph("A")
glyph = font["A"]
self.assertIsNone(glyph.markColor)
glyph.markColor = "1,0,1,0"
self.assertEqual(glyph.markColor, "1,0,1,0")
glyph.markColor = "1,0,1,0"
self.assertEqual(glyph.markColor, "1,0,1,0")
glyph.markColor = None
self.assertIsNone(glyph.markColor)
def test_verticalOrigin(self):
from defcon.test.testTools import getTestFontPath
from defcon.objects.font import Font
font = Font()
font.newGlyph("A")
glyph = font["A"]
self.assertIsNone(glyph.verticalOrigin)
self.assertEqual(glyph.height, 0)
glyph.verticalOrigin = 1000
self.assertEqual(glyph.verticalOrigin, 1000)
self.assertEqual(glyph.height, 0)
glyph.verticalOrigin = 0
self.assertEqual(glyph.verticalOrigin, 0)
self.assertEqual(glyph.height, 0)
glyph.verticalOrigin = -10
self.assertEqual(glyph.verticalOrigin, -10)
self.assertEqual(glyph.height, 0)
glyph.verticalOrigin = None
self.assertIsNone(glyph.verticalOrigin)
self.assertEqual(glyph.height, 0)
font = Font(getTestFontPath())
glyph = font["A"]
self.assertIsNone(glyph.verticalOrigin)
self.assertEqual(glyph.height, 500)
glyph.verticalOrigin = 1000
self.assertEqual(glyph.verticalOrigin, 1000)
self.assertEqual(glyph.height, 500)
glyph.verticalOrigin = 0
self.assertEqual(glyph.verticalOrigin, 0)
self.assertEqual(glyph.height, 500)
glyph.verticalOrigin = -10
self.assertEqual(glyph.verticalOrigin, -10)
self.assertEqual(glyph.height, 500)
glyph.verticalOrigin = None
self.assertIsNone(glyph.verticalOrigin)
self.assertEqual(glyph.height, 500)
    def test_appendContour(self):
        """Appending a contour reparents it and dirties the glyph."""
        glyph = Glyph()
        glyph.dirty = False
        contour = Contour()
        glyph.appendContour(contour)
        self.assertEqual(len(glyph), 1)
        self.assertTrue(glyph.dirty)
        self.assertEqual(contour.getParent(), glyph)
    def test_removeContour(self):
        """Removing a contour detaches it from the glyph and its parent link."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        contour = glyph[0]
        glyph.removeContour(contour)
        self.assertFalse(contour in glyph._contours)
        self.assertIsNone(contour.getParent())
    def test_contourIndex(self):
        """contourIndex reports a contour's position within the glyph."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        contour = glyph[0]
        self.assertEqual(glyph.contourIndex(contour), 0)
        contour = glyph[1]
        self.assertEqual(glyph.contourIndex(contour), 1)
    def test_clearContours(self):
        """clearContours empties the glyph's contour list."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.clearContours()
        self.assertEqual(len(glyph), 0)
    def test_components(self):
        """components exposes the component list ("C" is built from two)."""
        font = Font(getTestFontPath())
        glyph = font["C"]
        self.assertEqual(len(glyph.components), 2)
    def test_appendComponent(self):
        """Appending a component reparents it and dirties the glyph."""
        glyph = Glyph()
        glyph.dirty = False
        component = Component()
        glyph.appendComponent(component)
        self.assertEqual(len(glyph.components), 1)
        self.assertTrue(glyph.dirty)
        self.assertEqual(component.getParent(), glyph)
    def test_removeComponent(self):
        """Removing a component detaches it from the glyph and its parent link."""
        font = Font(getTestFontPath())
        glyph = font["C"]
        component = glyph.components[0]
        glyph.removeComponent(component)
        self.assertFalse(component in glyph.components)
        self.assertIsNone(component.getParent())
    def test_componentIndex(self):
        """componentIndex reports a component's position within the glyph."""
        font = Font(getTestFontPath())
        glyph = font["C"]
        component = glyph.components[0]
        self.assertEqual(glyph.componentIndex(component), 0)
        component = glyph.components[1]
        self.assertEqual(glyph.componentIndex(component), 1)
    def test_clearComponents(self):
        """clearComponents empties the glyph's component list."""
        font = Font(getTestFontPath())
        glyph = font["C"]
        glyph.clearComponents()
        self.assertEqual(len(glyph.components), 0)
    def test_decomposeComponent(self):
        """Decomposing replaces components with copies of the base outlines."""
        font = Font()
        font.newGlyph("baseGlyph")
        baseGlyph = font["baseGlyph"]
        pointPen = baseGlyph.getPointPen()
        # A simple open square with identified contour and first point.
        pointPen.beginPath(identifier="contour1")
        pointPen.addPoint((0, 0), "move", identifier="point1")
        pointPen.addPoint((0, 100), "line")
        pointPen.addPoint((100, 100), "line")
        pointPen.addPoint((100, 0), "line")
        pointPen.addPoint((0, 0), "line")
        pointPen.endPath()
        font.newGlyph("referenceGlyph")
        referenceGlyph = font["referenceGlyph"]
        pointPen = referenceGlyph.getPointPen()
        pointPen.addComponent("baseGlyph", (1, 0, 0, 1, 0, 0))
        self.assertEqual(len(referenceGlyph.components), 1)
        self.assertEqual(len(referenceGlyph), 0)
        # Bulk decomposition: the identity-transformed copy keeps the
        # original identifiers (no clash exists yet).
        referenceGlyph.decomposeAllComponents()
        self.assertEqual(len(referenceGlyph.components), 0)
        self.assertEqual(len(referenceGlyph), 1)
        self.assertEqual(referenceGlyph[0].identifier, "contour1")
        self.assertEqual(referenceGlyph[0][0].identifier, "point1")
        # Single-component decomposition with an offset transform.
        pointPen.addComponent("baseGlyph", (1, 0, 0, 1, 100, 100))
        self.assertEqual(len(referenceGlyph.components), 1)
        self.assertEqual(len(referenceGlyph), 1)
        component = referenceGlyph.components[0]
        referenceGlyph.decomposeComponent(component)
        self.assertEqual(len(referenceGlyph.components), 0)
        self.assertEqual(len(referenceGlyph), 2)
        self.assertEqual(referenceGlyph[0].identifier, "contour1")
        self.assertEqual(referenceGlyph[0][0].identifier, "point1")
        # The second copy must carry (fresh) identifiers without raising.
        referenceGlyph[1].identifier
        referenceGlyph[1][0].identifier
    def test_decomposeComponent_nested_components(self):
        """Decomposition recurses through nested components, composing offsets."""
        font = Font()
        font.newGlyph("baseGlyph")
        baseGlyph = font["baseGlyph"]
        pointPen = baseGlyph.getPointPen()
        pointPen.beginPath(identifier="contour1")
        pointPen.addPoint((0, 0), "move", identifier="point1")
        pointPen.addPoint((0, 100), "line")
        pointPen.addPoint((100, 100), "line")
        pointPen.addPoint((100, 0), "line")
        pointPen.addPoint((0, 0), "line")
        pointPen.endPath()
        font.newGlyph("referenceGlyph1")
        referenceGlyph1 = font["referenceGlyph1"]
        pointPen = referenceGlyph1.getPointPen()
        pointPen.addComponent("baseGlyph", (1, 0, 0, 1, 3, 6))
        font.newGlyph("referenceGlyph2")
        referenceGlyph2 = font["referenceGlyph2"]
        pointPen = referenceGlyph2.getPointPen()
        pointPen.addComponent("referenceGlyph1", (1, 0, 0, 1, 10, 20))
        referenceGlyph2.decomposeAllComponents()
        self.assertEqual(len(referenceGlyph2.components), 0)
        # The intermediate glyph is left untouched.
        self.assertEqual(len(referenceGlyph1.components), 1)
        self.assertEqual(len(referenceGlyph2), 1)
        # Offsets compose: (3, 6) + (10, 20) applied to the 100x100 square.
        self.assertEqual(referenceGlyph2.bounds, (13, 26, 113, 126))
    def test_anchors(self):
        """anchors exposes the anchor list loaded from the UFO."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(len(glyph.anchors), 2)
    def test_appendAnchor(self):
        """Appending an anchor reparents it and dirties the glyph."""
        glyph = Glyph()
        glyph.dirty = False
        anchor = Anchor()
        glyph.appendAnchor(anchor)
        self.assertEqual(len(glyph.anchors), 1)
        self.assertTrue(glyph.dirty)
        self.assertEqual(anchor.getParent(), glyph)
    def test_removeAnchor(self):
        """Removing an anchor detaches it from the glyph and its parent link."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        anchor = glyph.anchors[0]
        glyph.removeAnchor(anchor)
        self.assertFalse(anchor in glyph.anchors)
        self.assertIsNone(anchor.getParent())
    def test_anchorIndex(self):
        """anchorIndex reports an anchor's position within the glyph."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        anchor = glyph.anchors[0]
        self.assertEqual(glyph.anchorIndex(anchor), 0)
        anchor = glyph.anchors[1]
        self.assertEqual(glyph.anchorIndex(anchor), 1)
    def test_clearAnchors(self):
        """clearAnchors empties the glyph's anchor list."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.clearAnchors()
        self.assertEqual(len(glyph.anchors), 0)
    def test_duplicatedAnchors(self):
        """Appending an anchor that is already owned raises."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        anchor = glyph.anchors[0]
        with self.assertRaises(AssertionError):
            glyph.appendAnchor(anchor)
    def test_appendGuideline(self):
        """Appending a guideline reparents it and dirties the glyph."""
        glyph = Glyph()
        glyph.dirty = False
        guideline = Guideline()
        glyph.appendGuideline(guideline)
        self.assertEqual(len(glyph.guidelines), 1)
        self.assertTrue(glyph.dirty)
        self.assertEqual(guideline.getParent(), glyph)
    def test_removeGuideline(self):
        """Removing a guideline detaches it from the glyph and its parent link."""
        font = Font(getTestFontPath())
        glyph = font.layers["Layer 1"]["A"]
        guideline = glyph.guidelines[0]
        glyph.removeGuideline(guideline)
        self.assertFalse(guideline in glyph.guidelines)
        self.assertIsNone(guideline.getParent())
    def test_clearGuidelines(self):
        """clearGuidelines empties the glyph's guideline list."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        glyph.clearGuidelines()
        self.assertEqual(len(glyph.guidelines), 0)
    def test_duplicatedGuideline(self):
        """Appending a guideline that is already owned raises."""
        font = Font(getTestFontPath())
        glyph = font.layers["Layer 1"]["A"]
        guideline = glyph.guidelines[0]
        with self.assertRaises(AssertionError):
            glyph.appendGuideline(guideline)
    def test_len(self):
        """len(glyph) counts contours."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual(len(glyph), 2)
    def test_iter(self):
        """Iterating a glyph yields its contours in order."""
        font = Font(getTestFontPath())
        glyph = font["A"]
        self.assertEqual([len(contour) for contour in glyph], [4, 4])
def test_copyDataFromGlyph(self):
source = Glyph()
source.name = "a"
source.width = 1
source.height = 2
source.unicodes = [3, 4]
source.note = "test image"
source.image = dict(fileName="test image", xScale=1, xyScale=1,
yxScale=1, yScale=1, xOffset=0, yOffset=0,
color=None)
source.anchors = [dict(x=100, y=200, name="test anchor")]
source.guidelines = [dict(x=10, y=20, name="test guideline")]
source.lib = {"foo": "bar"}
pen = source.getPointPen()
pen.beginPath()
pen.addPoint((100, 200), segmentType="line")
pen.addPoint((300, 400), segmentType="line")
pen.endPath()
component = Component()
component.base = "b"
source.appendComponent(component)
dest = Glyph()
dest.copyDataFromGlyph(source)
self.assertNotEqual(source.name, dest.name)
self.assertEqual(source.width, dest.width)
self.assertEqual(source.height, dest.height)
self.assertEqual(source.unicodes, dest.unicodes)
self.assertEqual(source.note, dest.note)
self.assertEqual(source.image.items(), dest.image.items())
self.assertEqual([g.items() for | |
"""Django-like (in the sense of "class-based") forms for tkinter"""
import inspect
import copy
import tkinter as tk
import tkinter.messagebox as tk_msg
from .. import tkstuff as mtk
try:
import typing
except ImportError:
typing = None
import enum
import collections
class FormWidget(mtk.ContainingWidget):
    """Provide a subclass of ContainingWidget for forms

    Provide a way to validate contents and automatically display errors

    The widgets are expected to have a .validate() method returning
    a tuple consisting of a boolean indicating the validity of the data
    and the data itself or an error message,
    such as the one provided by misc.Validator
    Alternatively, the widgets may provide a .get() method returning the data.

    Additional validation (e.g. checking if entries match)
    can be done by overriding the .clean_data() method

    To validate the data, call the .validate() method.
    This will use the .clean_data() method for getting data
    and display any errors in the way specified in __init__
    It returns a boolean indicating if all data is valid
    After .validate() returned True, the data is available under .data

    By default, the submit action calls onsubmit (passed as argument)
    with the data if validation succeeds"""
    class ErrorHandle(enum.Flag):
        """Flags for how to display errors to the user

        LABEL: attach a Label to the widget
        POPUP: show a messagebox with the errors
        CUSTOM: call the .custom_error_handle() method

        The flags may be combined, execution order is not guaranteed"""
        LABEL = enum.auto()
        POPUP = enum.auto()
        CUSTOM = enum.auto()

    class SubmitOnReturn(enum.Enum):
        """Information regarding submit bindings on <Return> for elements

        NONE: do not bind
        LAST: bind to the last form element
        ALL: bind to all form elements
        NOT_FIRST: bind to all form elements except the first
        """
        NONE = enum.auto()
        LAST = enum.auto()
        ALL = enum.auto()
        NOT_FIRST = enum.auto()

    def __init__(self, master, *widgets,
                 error_handle=ErrorHandle.LABEL | ErrorHandle.POPUP,
                 error_display_options=None,
                 submit_button=True,
                 onsubmit=lambda data: None,
                 default_content=None,
                 take_focus=False,
                 submit_on_return=SubmitOnReturn.NONE,
                 **container_options):
        """Create a form.

        `widgets` are (<key>, (<class>, <kwargs>)) of the contained widgets
            The key is used in self.widget_dict, self.data and self.errors
            Note the default implementation of self.clean_data() ignores
            widgets whose keys start with ignore
        `error_handle` is a flag from FormWidget.ErrorHandle.
            See its __doc__ for details
        `error_display_options` are options for error display (None means
            no overrides); the following keys will be used:
            'label_font' the font used for error messages on labels
            'label_fg' the foreground color for labels, default red
            'label_position' the position of the label relative to the widget
            'popup_title' the title for the popup
            'popup_intro' the introducing text on the popup
            'popup_field_name_resolver' callable to get the display name for a particular field
        `submit_button` may be a dictionary containing options for an
            automatically generated one, any other truthy value to
            automatically generate a default one and a falsey value to
            suppress automatic generation of a button.
        `onsubmit` is a callable taking the forms data if the submit_action
            is triggered and self.validate() returned True.
            If finer-grained control over the process is wished,
            overriding `.submit_action` may be more appropriate.
        `default_content` is a mapping from field names to
            their default content for this form (None means empty). The
            fields must have a setting method recognized by
            misc.tkstuff.get_setter. Nonexistent fields are ignored
        `take_focus` specifies if the first form element should take focus
        `submit_on_return` is an element of FormWidget.SubmitOnReturn.
            see its __doc__ for details
        `container_options` are passed along to ContainingWidget
            By default, the direction for the ContainingWidget is set to `tk.BOTTOM`
            See ContainingWidget.__init__ for more details
        """
        self.ERROR_LABEL_ID = object()
        self.error_handle = error_handle
        self.onsubmit = onsubmit
        self.error_display_options = {'label_fg': 'red',
                                      'label_position': tk.RIGHT}
        # `None` default replaces the previous mutable `={}` default
        # argument, which was shared across all instances.
        self.error_display_options.update(error_display_options or {})
        widget_keys = []
        pass_widgets = []
        for key, widget in widgets:
            if self.ErrorHandle.LABEL & error_handle:
                # Wrap each element so an error label can be attached later.
                widget = (mtk.LabeledWidget,
                          {'widget': widget,
                           'text': '',
                           'position': self.error_display_options['label_position'],
                           'label_id': self.ERROR_LABEL_ID})
            widget_keys.append(key)
            pass_widgets.append(widget)
        if submit_button:
            sb_options = {'text': 'Submit', 'command': self.submit_action}
            if isinstance(submit_button, dict):
                sb_options.update(submit_button)
            pass_widgets.append((tk.Button, sb_options))
        options = {'direction': (tk.BOTTOM, tk.RIGHT)}
        options.update(container_options)
        super().__init__(master, *pass_widgets, **options)
        self.widget_dict = {k: w for k, w in zip(widget_keys, self.widgets)}
        # `None` default replaces another shared mutable `={}` default.
        default_content = default_content or {}
        for k in default_content.keys() & self.widget_dict.keys():
            mtk.get_setter(self.widget_dict[k])(default_content[k])
        if submit_on_return is FormWidget.SubmitOnReturn.LAST:
            self.widgets[-1].bind('<Return>', self.submit_action)
        elif submit_on_return is FormWidget.SubmitOnReturn.ALL:
            for w in self.widgets:
                w.bind('<Return>', self.submit_action)
        elif submit_on_return is FormWidget.SubmitOnReturn.NOT_FIRST:
            for w in self.widgets[1:]:
                w.bind('<Return>', self.submit_action)
        if take_focus:
            self.widgets[0].focus()

    def validate(self):
        """Validate the form data and, if applicable,
        display errors according to self.error_handle

        Return a boolean indicating the validity of the entered data
        After the call, the processed data is available under self.data"""
        self.data = {}
        self.errors = collections.defaultdict(set)
        self.clean_data()
        if self.ErrorHandle.LABEL & self.error_handle:
            options = {'fg': self.error_display_options['label_fg']}
            if self.error_display_options.get('label_font'):
                options['font'] = self.error_display_options['label_font']
            # Write each field's (possibly empty) error text onto its label.
            for k, w in self.widget_dict.items():
                w.labels[self.ERROR_LABEL_ID].config(
                    text='\n'.join(self.errors[k]), **options)
        if self.ErrorHandle.POPUP & self.error_handle:
            text = [self.error_display_options.get('popup_intro', '')]
            for k, v in self.errors.items():
                if v:
                    text.append('{}: {}'.format(
                        self.error_display_options.get('popup_field_name_resolver',
                                                       lambda txt: txt)(k),
                        '\n'.join(v)))
            # Only pop up when at least one field actually has errors.
            if len(text) > 1:
                tk_msg.showerror(self.error_display_options.get('popup_title'),
                                 '\n\n'.join(text))
        if self.ErrorHandle.CUSTOM & self.error_handle:
            self.custom_error_handle()
        return not any(e for e in self.errors.values())

    def clean_data(self):
        """Use the .validate() methods of elements to validate form data.
        Override to validate in a finer-grained way

        Ignore elements whose keys start with 'ignore'"""
        for k, w in self.widget_dict.items():
            if k.startswith('ignore'):
                continue
            try:
                validator = w.validate
            except AttributeError:
                # Fall back to treating whatever .get() returns as valid.
                def validator(): return True, w.get()
            valid, data = validator()
            if valid:
                self.data[k] = data
            else:
                self.data[k] = None
                self.errors[k].add(data)

    def custom_error_handle(self):
        """Hook for ErrorHandle.CUSTOM; default does nothing."""
        pass

    def submit_action(self, event=None):
        """Validate and, on success, pass the collected data to onsubmit."""
        if self.validate():
            self.onsubmit(self.data)
class ProtoWidget(tuple):
    """Widget specification tuple carrying per-element form options.

    Attributes (taken from `options`, falling back to DEFAULT_DATA):
    groups -- set of group names this element belongs to
    opt    -- 'out': included unless explicitly excluded;
              anything else (conventionally 'in'): excluded unless selected
    """
    DEFAULT_DATA = {'groups': (), 'opt': 'out'}

    def __new__(cls, iterable=(), options=None):
        self = super().__new__(cls, iterable)
        data = cls.DEFAULT_DATA.copy()
        # `None` default replaces the previous mutable `={}` default
        # argument; `options` itself is never mutated either way.
        data.update(options or {})
        data['groups'] = set(data['groups'])
        for key, value in data.items():
            setattr(self, key, value)
        return self

    def use(self, widgets, groups):
        """Return whether this element is selected.

        A hit is: the element key (self[0]) appears in `widgets`, or its
        groups intersect `groups`. The result is XOR-ed with the opt-out
        flag, so 'out' elements are used unless hit and others only when hit.
        """
        return bool(self[0] in widgets or self.groups & groups
                    ) ^ (self.opt == 'out')
class Form:
"""Factory for FormWidget.
May be created by subclassing.
See __init_subclass__ for more information
The FormWidget is created by calling the subclass passing the master widget
Templates are supported. They are created by passing `template=True`
to the class creation. A template may contain elements
and helper methods like any other form, but cannot be used as a
factory. It can be used as a superclass for more secialised forms.
In this case, it is not neccessary fot the form to explicitly
inherit from `Form`.
By default, the elements of a template are positioned after
the elements of the using form. To override this, place the
assignment `_position_over_ = True` in the template body.
If templates are used for other templates, the `template=True`
argument must be passed for each class definition.
"""
def __new__(cls, master, elements=(), groups=(), **options):
"""Create a new form.
`options` override the options defined in the form class
`elements` is a container of widget keys to opt in/out*
`groups` is an iterable of widget groups to opt in/out*
* depending on setting in element definition
"""
groups = set(groups)
kwargs = cls.__formwidget_options.copy()
kwargs.update(options)
widgets = [copy.deepcopy(w) for w in cls.__widgets if w.use(elements, groups)]
return cls.__form_class(master, *widgets, **kwargs)
def __init_subclass__(cls, autogen_names=True, template=False):
"""Prepare a new form.
elements are marked by Element (as annotation or type)
Store the elements internally for use.
If the `template` argument is true, only store the widgets for the
given form elements. They will be used as soon as a
non-template subclass is created. See Form.__doc__
options for the FormWidget may be stored in a FormWidget nested class
this applies to initialisation options and method overriding
all data is available in the methods
Note: the ** keyword arguments should be stored
in a mapping with the corresponding name, not separately
the FormWidget nested class is used as class for the widget;
inheritance is added if not already present.
An element is a class and will be initialised with its master
widget. To add custom arguments (such as colors or fonts),
create a subclass and add your customisations in __init__
If an element does not have a .validate() method, it is converted
to a ValidatedWidget with an empty validator. This only
work if it has a .get() or a .curselection() method
If `autogen_names` (argument) is True (default),
the elements are created as LabeledWidget instances.
The user-facing name is chosen by the `get_name()` method,
if it is not present, the variable name is used.
if present, the `get_name` method is also used as
error_display_options['popup_field_name_resolver'] | |
included. Rather
# than include a complicated search, this is a
# hard-coded path. It could bail out if X11 libs are
# not found...
# tk_include_dirs.append('/usr/X11R6/include')
frameworks = ['-framework', 'Tcl', '-framework', 'Tk']
ext.include_dirs.extend(tk_include_dirs)
ext.extra_link_args.extend(frameworks)
ext.extra_compile_args.extend(frameworks)
# you're still here? ok we'll try it this way...
else:
# There are 3 methods to try, in decreasing order of "smartness"
#
# 1. Parse the tclConfig.sh and tkConfig.sh files that have
# all the information we need
#
# 2. Guess the include and lib dirs based on the location of
# Tkinter's 'tcl_library' and 'tk_library' variables.
#
# 3. Use some hardcoded locations that seem to work on a lot
# of distros.
# Query Tcl/Tk system for library paths and version string
try:
tcl_lib_dir, tk_lib_dir, tk_ver = self.query_tcltk()
except:
tk_ver = ''
result = self.hardcoded_tcl_config()
else:
result = self.parse_tcl_config(tcl_lib_dir, tk_lib_dir)
if result is None:
result = self.guess_tcl_config(
tcl_lib_dir, tk_lib_dir, tk_ver)
if result is None:
result = self.hardcoded_tcl_config()
# Add final versions of directories and libraries to ext lists
(tcl_lib_dir, tcl_inc_dir, tcl_lib,
tk_lib_dir, tk_inc_dir, tk_lib) = result
ext.include_dirs.extend([tcl_inc_dir, tk_inc_dir])
ext.library_dirs.extend([tcl_lib_dir, tk_lib_dir])
ext.libraries.extend([tcl_lib, tk_lib])
class BackendGtk(OptionalBackendPackage):
    """Optional GTK2 backend (pygtk-based)."""

    name = "gtk"

    def check_requirements(self):
        """Verify pygtk >= 2.2 is importable and its C headers are reachable.

        Returns a version-summary string; raises CheckFailed otherwise.
        """
        try:
            import gtk
        except ImportError:
            raise CheckFailed("Requires pygtk")
        except RuntimeError:
            # pygtk can raise RuntimeError on import (e.g. no display).
            raise CheckFailed('pygtk present, but import failed.')
        else:
            version = (2, 2, 0)
            if gtk.pygtk_version < version:
                # (version + gtk.pygtk_version) is a 6-tuple feeding six %d's
                raise CheckFailed(
                    "Requires pygtk %d.%d.%d or later. "
                    "Found %d.%d.%d" % (version + gtk.pygtk_version))

        ext = self.get_extension()
        self.add_flags(ext)
        # make sure the gtk/pygtk headers are actually present
        check_include_file(ext.include_dirs,
                           os.path.join("gtk", "gtk.h"),
                           'gtk')
        check_include_file(ext.include_dirs,
                           os.path.join("pygtk", "pygtk.h"),
                           'pygtk')

        return 'Gtk: %s pygtk: %s' % (
            ".".join(str(x) for x in gtk.gtk_version),
            ".".join(str(x) for x in gtk.pygtk_version))

    def get_package_data(self):
        # Glade UI definitions shipped with mpl-data.
        return {'matplotlib': ['mpl-data/*.glade']}

    def get_extension(self):
        """Build the _backend_gdk C extension."""
        sources = [
            'src/_backend_gdk.c'
        ]
        ext = make_extension('matplotlib.backends._backend_gdk', sources)
        self.add_flags(ext)
        Numpy().add_flags(ext)
        return ext

    def add_flags(self, ext):
        """Populate *ext* with GTK include/library/link flags.

        Uses hard-coded C:/GTK paths plus pkg-config on win32, and plain
        pkg-config elsewhere.
        """
        if sys.platform == 'win32':
            def getoutput(s):
                # NOTE: os.popen-based; no error checking on the command.
                ret = os.popen(s).read().strip()
                return ret

            if 'PKG_CONFIG_PATH' not in os.environ:
                # If Gtk+ is installed, pkg-config is required to be installed
                os.environ['PKG_CONFIG_PATH'] = 'C:\\GTK\\lib\\pkgconfig'

            # popen broken on my win32 plaform so I can't use pkgconfig
            ext.library_dirs.extend(
                ['C:/GTK/bin', 'C:/GTK/lib'])
            ext.include_dirs.extend(
                ['win32_static/include/pygtk-2.0',
                 'C:/GTK/include',
                 'C:/GTK/include/gobject',
                 'C:/GTK/include/gext',
                 'C:/GTK/include/glib',
                 'C:/GTK/include/pango',
                 'C:/GTK/include/atk',
                 'C:/GTK/include/X11',
                 'C:/GTK/include/cairo',
                 'C:/GTK/include/gdk',
                 'C:/GTK/include/gdk-pixbuf',
                 'C:/GTK/include/gtk',
                 ])

            pygtkIncludes = getoutput(
                'pkg-config --cflags-only-I pygtk-2.0').split()
            gtkIncludes = getoutput(
                'pkg-config --cflags-only-I gtk+-2.0').split()
            includes = pygtkIncludes + gtkIncludes
            # strip the leading '-I' from each flag
            ext.include_dirs.extend([include[2:] for include in includes])

            pygtkLinker = getoutput('pkg-config --libs pygtk-2.0').split()
            gtkLinker = getoutput('pkg-config --libs gtk+-2.0').split()
            linkerFlags = pygtkLinker + gtkLinker

            # -l<name> -> libraries, -L<dir> -> library dirs, rest passed through
            ext.libraries.extend(
                [flag[2:] for flag in linkerFlags if flag.startswith('-l')])

            ext.library_dirs.extend(
                [flag[2:] for flag in linkerFlags if flag.startswith('-L')])

            ext.extra_link_args.extend(
                [flag for flag in linkerFlags if not
                 (flag.startswith('-l') or flag.startswith('-L'))])

            # visual studio doesn't need the math library
            # (the sys.platform test below is redundant inside this branch)
            if (sys.platform == 'win32' and
                    win32_compiler == 'msvc' and
                    'm' in ext.libraries):
                ext.libraries.remove('m')

        elif sys.platform != 'win32':
            pkg_config.setup_extension(ext, 'pygtk-2.0')
            pkg_config.setup_extension(ext, 'gtk+-2.0')
class BackendGtkAgg(BackendGtk):
    """GTKAgg backend: GTK2 GUI with Agg rendering."""

    name = "gtkagg"

    def check(self):
        """Run the base Gtk check; on success, force building the Agg backend.

        Bug fix: the original wrapped ``return super().check()`` in a
        ``try``/``except``/``else`` where the ``try`` body returned, so the
        ``else`` clause setting ``BackendAgg.force = True`` was unreachable.
        The flag is now set after a successful check (exceptions still
        propagate unchanged before it is set).
        """
        result = super(BackendGtkAgg, self).check()
        BackendAgg.force = True
        return result

    def get_package_data(self):
        # Glade UI definitions shipped with mpl-data.
        return {'matplotlib': ['mpl-data/*.glade']}

    def get_extension(self):
        """Build the _gtkagg C++ extension bridging GTK and Agg."""
        sources = [
            'src/agg_py_transforms.cpp',
            'src/_gtkagg.cpp',
            'src/mplutils.cpp'
        ]
        ext = make_extension('matplotlib.backends._gtkagg', sources)
        self.add_flags(ext)
        LibAgg().add_flags(ext)
        CXX().add_flags(ext)
        Numpy().add_flags(ext)
        return ext
def backend_gtk3agg_internal_check(x):
    """Probe (in a subprocess) whether the gtk3agg backend can be built.

    Returns a ``(success, message)`` tuple.  The argument is unused; it
    exists so the function can be dispatched through ``Pool.map``.

    Bug fix: the success message formatted the version as
    major.micro.minor; the minor/micro calls are now in the right order.
    """
    try:
        import gi
    except ImportError:
        return (False, "Requires pygobject to be installed.")

    try:
        gi.require_version("Gtk", "3.0")
    except ValueError:
        return (False, "Requires gtk3 development files to be installed.")
    except AttributeError:
        return (False, "pygobject version too old.")

    try:
        from gi.repository import Gtk, Gdk, GObject
    except (ImportError, RuntimeError):
        return (False, "Requires pygobject to be installed.")

    return (True, "version %s.%s.%s" % (
        Gtk.get_major_version(),
        Gtk.get_minor_version(),
        Gtk.get_micro_version()))
class BackendGtk3Agg(OptionalBackendPackage):
    """GTK3 backend rendered with Agg; availability probed in a subprocess."""

    name = "gtk3agg"

    def check_requirements(self):
        """Check whether gtk3agg can be built; return a status string.

        Raises CheckFailed on Travis, on Python 3, or when the
        out-of-process probe reports failure.
        """
        if 'TRAVIS' in os.environ:
            raise CheckFailed("Can't build with Travis")

        if PY3:
            raise CheckFailed("gtk3agg backend does not work on Python 3")

        # This check needs to be performed out-of-process, because
        # importing gi and then importing regular old pygtk afterward
        # segfaults the interpreter.
        try:
            p = multiprocessing.Pool()
        except:
            # best-effort: no multiprocessing available on this platform
            return "unknown (can not use multiprocessing to determine)"
        try:
            success, msg = p.map(backend_gtk3agg_internal_check, [0])[0]
        except:
            # treat any probe failure as "cannot build"
            success = False
            msg = "Could not determine"
        finally:
            # always reclaim the worker processes
            p.close()
            p.join()
        if success:
            BackendAgg.force = True

            return msg
        else:
            raise CheckFailed(msg)

    def get_package_data(self):
        # Glade UI definitions shipped with mpl-data.
        return {'matplotlib': ['mpl-data/*.glade']}
def backend_gtk3cairo_internal_check(x):
try:
import cairocffi
except ImportError:
try:
import cairo
except ImportError:
return (False, "Requires cairocffi or pycairo to be installed.")
try:
import gi
except ImportError:
return (False, "Requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except ValueError:
return (False, "Requires gtk3 development files to be installed.")
except AttributeError:
return (False, "pygobject version too old.")
try:
from gi.repository import Gtk, Gdk, GObject
except (RuntimeError, ImportError):
return (False, "Requires pygobject to be installed.")
return (True, "version %s.%s.%s" % (
Gtk.get_major_version(),
Gtk.get_micro_version(),
Gtk.get_minor_version()))
class BackendGtk3Cairo(OptionalBackendPackage):
    """GTK3 backend rendered with cairo; availability probed in a subprocess."""

    name = "gtk3cairo"

    def check_requirements(self):
        """Check whether gtk3cairo can be built; return a status string.

        The probe must run in a child process because importing gi and
        then importing regular old pygtk afterward segfaults the
        interpreter.

        Bug fix: the original never closed/joined the pool if ``p.map``
        raised, leaking worker processes; cleanup now runs in a
        ``finally`` block, matching BackendGtk3Agg.
        """
        if 'TRAVIS' in os.environ:
            raise CheckFailed("Can't build with Travis")

        try:
            p = multiprocessing.Pool()
        except Exception:
            # best-effort: no multiprocessing available on this platform
            return "unknown (can not use multiprocessing to determine)"
        try:
            success, msg = p.map(backend_gtk3cairo_internal_check, [0])[0]
        finally:
            # always reclaim the worker processes
            p.close()
            p.join()

        if success:
            BackendAgg.force = True
            return msg
        else:
            raise CheckFailed(msg)

    def get_package_data(self):
        # Glade UI definitions shipped with mpl-data.
        return {'matplotlib': ['mpl-data/*.glade']}
class BackendWxAgg(OptionalBackendPackage):
    """wxAgg backend; requires wxPython >= 2.8."""

    name = "wxagg"

    def check_requirements(self):
        """Verify wxPython >= 2.8 is available; return a version string."""
        try:
            import wxversion
        except ImportError:
            raise CheckFailed("requires wxPython")

        try:
            _wx_ensure_failed = wxversion.AlreadyImportedError
        except AttributeError:
            # older wxversion modules have no AlreadyImportedError
            _wx_ensure_failed = wxversion.VersionError

        try:
            wxversion.ensureMinimal('2.8')
        except _wx_ensure_failed:
            pass

        try:
            import wx
            backend_version = wx.VERSION_STRING
        except ImportError:
            raise CheckFailed("requires wxPython")

        # Extra version check in case wxversion lacks AlreadyImportedError;
        # then VersionError might have been raised and ignored when
        # there really *is* a problem with the version.
        major, minor = [int(n) for n in backend_version.split('.')[:2]]
        if major < 2 or (major < 3 and minor < 8):
            # rejects anything below 2.8; 3.x and later always pass
            raise CheckFailed(
                "Requires wxPython 2.8, found %s" % backend_version)

        BackendAgg.force = True

        return "version %s" % backend_version
class BackendMacOSX(OptionalBackendPackage):
    """Native Mac OS X backend (Cocoa + Agg)."""

    name = 'macosx'

    def check_requirements(self):
        """Only buildable on darwin; raise CheckFailed elsewhere."""
        if sys.platform != 'darwin':
            raise CheckFailed("Mac OS-X only")

        return 'darwin'

    def get_extension(self):
        """Build the _macosx extension from its Objective-C and C++ sources."""
        srcs = ['src/_macosx.m',
                'src/agg_py_transforms.cpp',
                'src/path_cleanup.cpp']
        extension = make_extension('matplotlib.backends._macosx', srcs)
        # flag order matters for link flags: Numpy, then LibAgg, then CXX
        for pkg in (Numpy(), LibAgg(), CXX()):
            pkg.add_flags(extension)
        extension.extra_link_args.extend(['-framework', 'Cocoa'])
        return extension
class Windowing(OptionalBackendPackage):
    """
    Builds the windowing extension.
    """
    name = "windowing"

    def check_requirements(self):
        """Only buildable on win32, and only when not disabled in config."""
        if sys.platform != 'win32':
            raise CheckFailed("Microsoft Windows only")
        if self.get_config() is False:
            raise CheckFailed("skipping due to configuration")
        return "installing"

    def get_extension(self):
        """Build the matplotlib._windowing extension."""
        extension = make_extension('matplotlib._windowing',
                                   ["src/_windowing.cpp"])
        extension.include_dirs.extend(['C:/include'])
        extension.libraries.extend(['user32'])
        extension.library_dirs.extend(['C:/lib'])
        extension.extra_link_args.append("-mwindows")
        return extension
class BackendQtBase(OptionalBackendPackage):
    """Shared logic for the Qt4/Qt5/PySide backend checks.

    Subclasses assign ``self.callback`` to a module-level probe function
    that imports the binding and returns a version-summary string.
    """

    def convert_qt_version(self, version):
        """Convert Qt's packed hex version int (0xMMNNPP) to 'M.N.P'."""
        version = '%x' % version
        temp = []
        while len(version) > 0:
            # peel two hex digits at a time from the least-significant end
            version, chunk = version[:-2], version[-2:]
            temp.insert(0, str(int(chunk, 16)))
        return '.'.join(temp)

    def check_requirements(self):
        '''
        If PyQt4/PyQt5 is already imported, importing PyQt5/PyQt4 will fail
        so we need to test in a subprocess (as for Gtk3).
        '''
        try:
            p = multiprocessing.Pool()

        except:
            # Can't do multiprocessing, fall back to normal approach ( this will fail if importing both PyQt4 and PyQt5 )
            try:
                # Try in-process
                msg = self.callback(self)
            except RuntimeError:
                raise CheckFailed("Could not import: are PyQt4 & PyQt5 both installed?")
            except:
                # Raise any other exceptions
                raise

        else:
            # Multiprocessing OK
            try:
                msg = p.map(self.callback, [self])[0]
            except:
                # If we hit an error on multiprocessing raise it
                raise
            finally:
                # Tidy up multiprocessing
                p.close()
                p.join()

        return msg
def backend_qt4_internal_check(self):
    """Probe for a usable PyQt4 and return a Qt/PyQt version summary.

    Raises CheckFailed when PyQt4 is missing or broken; on success also
    forces the Agg backend to be built.
    """
    try:
        from PyQt4 import QtCore
    except ImportError:
        raise CheckFailed("PyQt4 not found")

    try:
        qt_version = QtCore.QT_VERSION
        pyqt_version_str = QtCore.QT_VERSION_STR
    except AttributeError:
        raise CheckFailed('PyQt4 not correctly imported')

    BackendAgg.force = True
    return "Qt: %s, PyQt: %s" % (self.convert_qt_version(qt_version), pyqt_version_str)
class BackendQt4(BackendQtBase):
    """Qt4Agg backend check; delegates to backend_qt4_internal_check."""

    name = "qt4agg"

    def __init__(self, *args, **kwargs):
        super(BackendQt4, self).__init__(*args, **kwargs)
        # probe run (possibly in a subprocess) by BackendQtBase
        self.callback = backend_qt4_internal_check
def backend_qt5_internal_check(self):
    """Probe for a usable PyQt5 and return a Qt/PyQt version summary.

    Raises CheckFailed when PyQt5 is missing or broken; on success also
    forces the Agg backend to be built.
    """
    try:
        from PyQt5 import QtCore
    except ImportError:
        raise CheckFailed("PyQt5 not found")

    try:
        qt_version = QtCore.QT_VERSION
        pyqt_version_str = QtCore.QT_VERSION_STR
    except AttributeError:
        raise CheckFailed('PyQt5 not correctly imported')

    BackendAgg.force = True
    return "Qt: %s, PyQt: %s" % (self.convert_qt_version(qt_version), pyqt_version_str)
class BackendQt5(BackendQtBase):
    """Qt5Agg backend check; delegates to backend_qt5_internal_check."""

    name = "qt5agg"

    def __init__(self, *args, **kwargs):
        super(BackendQt5, self).__init__(*args, **kwargs)
        # probe run (possibly in a subprocess) by BackendQtBase
        self.callback = backend_qt5_internal_check
def backend_pyside_internal_check(self):
    """Probe for PySide and return a Qt/PySide version summary.

    Raises CheckFailed when PySide is missing; on success also forces
    the Agg backend to be built.
    """
    try:
        from PySide import __version__
        from PySide import QtCore
    except ImportError:
        raise CheckFailed("PySide not found")

    BackendAgg.force = True
    return "Qt: %s, PySide: %s" % (QtCore.__version__, __version__)
class BackendPySide(BackendQtBase):
    """PySide backend check; delegates to backend_pyside_internal_check."""

    name = "pyside"

    def __init__(self, *args, **kwargs):
        super(BackendPySide, self).__init__(*args, **kwargs)
        # probe run (possibly in a subprocess) by BackendQtBase
        self.callback = backend_pyside_internal_check
class BackendCairo(OptionalBackendPackage):
name = "cairo"
def check_requirements(self):
try:
import cairocffi
except ImportError:
try:
import cairo
except ImportError:
raise CheckFailed("cairocffi or pycairo not found")
else:
return "pycairo version %s" % cairo.version
else:
| |
import abc
import random
from . import diceast as ast
from . import errors
__all__ = (
"Number", "Expression", "Literal", "UnOp", "BinOp", "Parenthetical", "Set", "Dice", "Die",
"SetOperator", "SetSelector"
)
# ===== ast -> expression models =====
class Number(abc.ABC, ast.ChildMixin):  # num
    """
    The base class for all expression objects.

    A Number knows whether it is still *kept* (counted towards totals) and
    may carry a user annotation.  Numbers also implement all the methods of
    a :class:`~d20.ast.ChildMixin`.
    """
    __slots__ = ("kept", "annotation")

    def __init__(self, kept=True, annotation=None):
        self.kept = kept
        self.annotation = annotation

    @property
    def number(self):
        """
        The numerical value of this object.

        :rtype: int or float
        """
        return sum(child.number for child in self.keptset)

    @property
    def total(self):
        """
        The numerical value of this object with respect to whether it's kept.
        Prefer this over ``number`` when accumulating results, as it is 0
        for dropped nodes.

        :rtype: int or float
        """
        if not self.kept:
            return 0
        return self.number

    @property
    def set(self):
        """
        The set representation of this object.

        :rtype: list[Number]
        """
        raise NotImplementedError

    @property
    def keptset(self):
        """
        Like ``set``, restricted to children whose values were not dropped.

        :rtype: list[Number]
        """
        return [child for child in self.set if child.kept]

    def drop(self):
        """Exclude this node's value from any totals."""
        self.kept = False

    def __int__(self):
        return int(self.total)

    def __float__(self):
        return float(self.total)

    def __repr__(self):
        return "<Number total={} kept={}>".format(self.total, self.kept)

    # overridden methods for typechecking
    def set_child(self, index, value):
        """
        Sets the ith child of this Number.

        :param int index: Which child to set.
        :param value: The Number to set it to.
        :type value: Number
        """
        super().set_child(index, value)

    @property
    def children(self):
        """:rtype: list[Number]"""
        raise NotImplementedError
class Expression(Number):
    """The root of a parsed roll tree: pairs a roll with its trailing comment."""
    __slots__ = ("roll", "comment")

    def __init__(self, roll, comment, **kwargs):
        """
        :type roll: Number
        """
        super().__init__(**kwargs)
        self.roll = roll
        self.comment = comment

    @property
    def number(self):
        return self.roll.number

    @property
    def set(self):
        return self.roll.set

    @property
    def children(self):
        return [self.roll]

    def set_child(self, index, value):
        self._child_set_check(index)
        self.roll = value

    def __repr__(self):
        return "<Expression roll={} comment={}>".format(self.roll, self.comment)
class Literal(Number):
    """A literal integer or float.

    The full history of values is retained so the mi/ma operators can
    rewrite the effective value without losing the original.
    """
    __slots__ = ("values", "exploded")

    def __init__(self, value, **kwargs):
        """
        :type value: int or float
        """
        super().__init__(**kwargs)
        self.values = [value]  # history is tracked to support mi/ma op
        self.exploded = False

    @property
    def number(self):
        # the most recent value is the effective one
        return self.values[-1]

    @property
    def set(self):
        return [self]

    @property
    def children(self):
        return []

    def explode(self):
        """Mark this literal as having exploded."""
        self.exploded = True

    def update(self, value):
        """
        Push a new effective value onto the history.

        :type value: int or float
        """
        self.values.append(value)

    def __repr__(self):
        return "<Literal {}>".format(self.number)
class UnOp(Number):
    """A unary operation applied to a single operand."""
    __slots__ = ("op", "value")

    UNARY_OPS = {
        "-": lambda operand: -operand,
        "+": lambda operand: +operand
    }

    def __init__(self, op, value, **kwargs):
        """
        :type op: str
        :type value: Number
        """
        super().__init__(**kwargs)
        self.op = op
        self.value = value

    @property
    def number(self):
        apply_op = self.UNARY_OPS[self.op]
        return apply_op(self.value.total)

    @property
    def set(self):
        return [self]

    @property
    def children(self):
        return [self.value]

    def set_child(self, index, value):
        self._child_set_check(index)
        self.value = value

    def __repr__(self):
        return "<UnOp op={} value={}>".format(self.op, self.value)
class BinOp(Number):
    """A binary operation over a left and right operand."""
    __slots__ = ("op", "left", "right")

    BINARY_OPS = {
        "+": lambda lhs, rhs: lhs + rhs,
        "-": lambda lhs, rhs: lhs - rhs,
        "*": lambda lhs, rhs: lhs * rhs,
        "/": lambda lhs, rhs: lhs / rhs,
        "//": lambda lhs, rhs: lhs // rhs,
        "%": lambda lhs, rhs: lhs % rhs,
        "<": lambda lhs, rhs: int(lhs < rhs),
        ">": lambda lhs, rhs: int(lhs > rhs),
        "==": lambda lhs, rhs: int(lhs == rhs),
        ">=": lambda lhs, rhs: int(lhs >= rhs),
        "<=": lambda lhs, rhs: int(lhs <= rhs),
        "!=": lambda lhs, rhs: int(lhs != rhs),
    }

    def __init__(self, left, op, right, **kwargs):
        """
        :type op: str
        :type left: Number
        :type right: Number
        """
        super().__init__(**kwargs)
        self.op = op
        self.left = left
        self.right = right

    @property
    def number(self):
        apply_op = self.BINARY_OPS[self.op]
        try:
            return apply_op(self.left.total, self.right.total)
        except ZeroDivisionError:
            # surface a library error instead of a raw ZeroDivisionError
            raise errors.RollValueError("Cannot divide by zero.")

    @property
    def set(self):
        return [self]

    @property
    def children(self):
        return [self.left, self.right]

    def set_child(self, index, value):
        self._child_set_check(index)
        # identity check so negative indices still pick the right slot
        if self.children[index] is self.left:
            self.left = value
        else:
            self.right = value

    def __repr__(self):
        return "<BinOp left={} op={} right={}>".format(self.left, self.op, self.right)
class Parenthetical(Number):
    """A value wrapped in parentheses, optionally with set operations."""
    __slots__ = ("value", "operations")

    def __init__(self, value, operations=None, **kwargs):
        """
        :type value: Number
        :type operations: list[SetOperator]
        """
        super().__init__(**kwargs)
        self.value = value
        self.operations = [] if operations is None else operations

    @property
    def total(self):
        if not self.kept:
            return 0
        return self.value.total

    @property
    def set(self):
        return self.value.set

    @property
    def children(self):
        return [self.value]

    def set_child(self, index, value):
        self._child_set_check(index)
        self.value = value

    def __repr__(self):
        return "<Parenthetical value={} operations={}>".format(self.value, self.operations)
class Set(Number):
    """A collection of Numbers, optionally with set operations attached."""
    __slots__ = ("values", "operations")

    def __init__(self, values, operations=None, **kwargs):
        """
        :type values: list[Number]
        :type operations: list[SetOperator]
        """
        super().__init__(**kwargs)
        self.values = values
        self.operations = [] if operations is None else operations

    @property
    def set(self):
        return self.values

    @property
    def children(self):
        return self.values

    def set_child(self, index, value):
        self._child_set_check(index)
        self.values[index] = value

    def __repr__(self):
        return "<Set values={} operations={}>".format(self.values, self.operations)
class Dice(Set):
    """A Set whose members are Die rolled from one specification."""
    __slots__ = ("num", "size", "_context")

    def __init__(self, num, size, values, operations=None, context=None, **kwargs):
        """
        :type num: int
        :type size: int|str
        :type values: list of Die
        :type operations: list[SetOperator]
        :type context: dice.RollContext
        """
        super().__init__(values, operations, **kwargs)
        self.num = num
        self.size = size
        self._context = context

    @classmethod
    def new(cls, num, size, context=None):
        """Roll *num* fresh dice of the given *size*."""
        rolled = [Die.new(size, context=context) for _ in range(num)]
        return cls(num, size, rolled, context=context)

    def roll_another(self):
        """Roll one additional die and append it to the set."""
        self.values.append(Die.new(self.size, context=self._context))

    @property
    def children(self):
        # dice are deliberately opaque to tree traversal
        return []

    def __repr__(self):
        return "<Dice num={} size={} values={} operations={}>".format(
            self.num, self.size, self.values, self.operations)
class Die(Number):  # part of diceexpr
    """A single die; its value history is a list of Literals."""
    __slots__ = ("size", "values", "_context")

    def __init__(self, size, values, context=None):
        """
        :type size: int
        :type values: list of Literal
        :type context: dice.RollContext
        """
        super().__init__()
        self.size = size
        self.values = values
        self._context = context

    @classmethod
    def new(cls, size, context=None):
        """Create a die of the given size and roll it once."""
        die = cls(size, [], context=context)
        die._add_roll()
        return die

    @property
    def number(self):
        return self.values[-1].total

    @property
    def set(self):
        return [self.values[-1]]

    @property
    def children(self):
        return []

    def _add_roll(self):
        # '%' is the percentile die; any numeric size must be positive
        if self.size != '%' and self.size < 1:
            raise errors.RollValueError("Cannot roll a 0-sided die.")
        if self._context:
            self._context.count_roll()
        if self.size == '%':
            rolled = Literal(random.randrange(0, 100, 10))
        else:
            # 200ns faster than randint(1, self._size)
            rolled = Literal(random.randrange(self.size) + 1)
        self.values.append(rolled)

    def reroll(self):
        """Drop the current value and roll again."""
        if not self.values:
            return
        self.values[-1].drop()
        self._add_roll()

    def explode(self):
        """Mark the current value as exploded."""
        if not self.values:
            return
        self.values[-1].explode()
        # another Die is added by the explode operator

    def force_value(self, new_value):
        """Override the current value (used by the mi/ma operators)."""
        if self.values:
            self.values[-1].update(new_value)

    def __repr__(self):
        return "<Die size={} values={}>".format(self.size, self.values)
# noinspection PyUnresolvedReferences
# selecting on Dice will always return Die
class SetOperator: # set_op, dice_op
"""Represents an operation on a set."""
__slots__ = ("op", "sels")
    def __init__(self, op, sels):
        """
        :param op: The operation code (e.g. ``"k"``, ``"p"``, ``"rr"``, ``"e"``).
        :type op: str
        :param sels: The selectors that choose the operands.
        :type sels: list[SetSelector]
        """
        self.op = op
        self.sels = sels
@classmethod
def from_ast(cls, node):
return cls(node.op, [SetSelector.from_ast(n) for n in node.sels])
def select(self, target, max_targets=None):
"""
Selects the operands in a target set.
:param target: The source of the operands.
:type target: Number
:param max_targets: The maximum number of targets to select.
:type max_targets: Optional[int]
"""
out = set()
for selector in self.sels:
batch_max = None
if max_targets is not None:
batch_max = max_targets - len(out)
if batch_max == 0:
break
out.update(selector.select(target, max_targets=batch_max))
return out
def operate(self, target):
"""
Operates in place on the values in a target set.
:param target: The source of the operands.
:type target: Number
"""
operations = {
"k": self.keep,
"p": self.drop,
# dice only
"rr": self.reroll,
"ro": self.reroll_once,
"ra": self.explode_once,
"e": self.explode,
"mi": self.minimum,
"ma": self.maximum
}
operations[self.op](target)
def keep(self, target):
"""
:type target: Set
"""
for value in target.keptset:
if value not in self.select(target):
value.drop()
def drop(self, target):
"""
:type target: Set
"""
for value in self.select(target):
value.drop()
def reroll(self, target):
"""
:type target: Dice
"""
to_reroll = self.select(target)
while to_reroll:
for die in to_reroll:
die.reroll()
to_reroll = self.select(target)
def reroll_once(self, target):
"""
:type target: Dice
"""
for die in self.select(target):
die.reroll()
def explode(self, target):
"""
:type target: Dice
"""
to_explode = self.select(target)
already_exploded = set()
while to_explode:
for die in to_explode:
die.explode()
target.roll_another()
already_exploded.update(to_explode)
to_explode = self.select(target).difference(already_exploded)
def explode_once(self, target):
"""
:type target: Dice
"""
for die in self.select(target, max_targets=1):
die.explode()
target.roll_another()
def minimum(self, target): # immediate
"""
:type target: Dice
"""
selector = self.sels[-1]
if selector.cat is not | |
# Source: abs_templates_ec/analog_mos/planar.py
# -*- coding: utf-8 -*-
from typing import TYPE_CHECKING, Dict, Any, List, Optional, Union, Tuple
import math
from collections import namedtuple
from bag.math import lcm
from bag.util.search import BinaryIterator
from bag.layout.util import BBox
from bag.layout.routing import WireArray, TrackID
from bag.layout.template import TemplateBase
from bag.layout.routing.fill import fill_symmetric_min_density_info, fill_symmetric_interval
from .core import MOSTech
if TYPE_CHECKING:
from bag.layout.tech import TechInfoConfig
# Per-row geometry record; od_x is the OD X interval (presumably in
# source/drain-column units -- see usage with od_x=(0, fg)), od_y/po_y are
# Y intervals, and od_type is a (row OD type, substrate type) pair.
RowInfo = namedtuple('RowInfo', [
    'od_x',
    'od_y',
    'od_type',
    'po_y',
])

# Like RowInfo but for adjacent (boundary) rows; no X interval is recorded.
AdjRowInfo = namedtuple('AdjRowInfo', [
    'od_y',
    'od_type',
    'po_y',
])

# Information recorded on a block edge (currently just the OD type there).
EdgeInfo = namedtuple('EdgeInfo', [
    'od_type',
])
class ExtInfo(
        namedtuple('ExtInfoBase', [
            'margins', 'od_h', 'imp_min_h', 'm1_sub_h', 'mtype', 'thres', 'po_types', 'edgel_info',
            'edger_info'
        ])):
    """Extension-region information record.

    Immutable; :meth:`reverse` produces a left-right mirrored copy for
    flipped placement.
    """
    __slots__ = ()

    def reverse(self):
        """Return a copy with po_types reversed and the edge infos swapped."""
        flipped_po = tuple(reversed(self.po_types))
        return self._replace(
            po_types=flipped_po,
            edgel_info=self.edger_info,
            edger_info=self.edgel_info,
        )
class MOSTechPlanarGeneric(MOSTech):
"""A generic implementation of MOSTech for planar technologies.
Parameters
----------
config : Dict[str, Any]
the technology configuration dictionary.
tech_info : TechInfo
the TechInfo object.
mos_entry_name : str
name of the entry that contains technology parameters for transistors in
the given configuration dictionary.
"""
    def __init__(self, config, tech_info, mos_entry_name='mos'):
        # type: (Dict[str, Any], TechInfoConfig, str) -> None
        """Initialize this planar MOSTech implementation.

        See the class docstring for parameter descriptions.
        """
        MOSTech.__init__(self, config, tech_info, mos_entry_name=mos_entry_name)
    def get_mos_yloc_info(self, lch_unit, w, **kwargs):
        # type: (int, float, **kwargs) -> Dict[str, Any]
        """Compute vertical (Y) layout information for one transistor row.

        All coordinates are integers in resolution units.  Returns a dict
        with the block/PO/OD Y intervals, top/bottom spacing margins per
        layer, and gate/drain wire Y intervals (``g_y_list``/``d_y_list``).

        NOTE(review): assumes `w` is the transistor width in layout units
        (converted below via layout_unit/res) -- confirm against callers.
        """
        # get transistor constants
        mos_constants = self.get_mos_tech_constants(lch_unit)
        od_spy = mos_constants['od_spy']
        po_spy = mos_constants['po_spy']
        mx_gd_spy = mos_constants['mx_gd_spy']
        od_gd_spy = mos_constants['od_gd_spy']
        po_od_exty = mos_constants['po_od_exty']
        g_via_info = mos_constants['g_via']
        d_via_info = mos_constants['d_via']

        g_drc_info = self.get_conn_drc_info(lch_unit, 'g')
        g_m1_w = g_drc_info[1]['w']

        drc_info = self.get_conn_drc_info(lch_unit, 'd')
        # worst-case line-end spacing over all drain connection layers
        mx_spy = max((info['sp_le'] for info in drc_info.values()))

        # convert w to resolution units
        layout_unit = self.config['layout_unit']
        res = self.res
        w_unit = int(round(w / layout_unit / res))

        # get minimum metal lengths
        md_min_len = self.get_md_min_len(lch_unit)

        # compute gate location, based on PO-PO spacing
        po_yb = po_spy // 2
        g_co_yb = po_yb + g_via_info['bot_enc_le'][0]
        g_co_yt = g_co_yb + g_via_info['dim'][0][1]
        g_co_yc = (g_co_yb + g_co_yt) // 2
        g_m1_yb = g_co_yc - g_m1_w // 2
        g_m1_yt = g_m1_yb + g_m1_w
        g_mx_yt = g_co_yc + g_via_info['dim'][1][1] + g_via_info['top_enc_le'][1]
        g_mx_yb = g_mx_yt - md_min_len

        # compute drain/source location
        # first, get OD location from od_gd_spy
        od_yb = g_m1_yt + od_gd_spy
        od_yt = od_yb + w_unit
        od_yc = (od_yb + od_yt) // 2
        # get number of vias
        d_v0_h = d_via_info['dim'][0][1]
        d_v0_sp = d_via_info['sp'][0]
        d_v0_od_ency = d_via_info['bot_enc_le'][0]
        d_v0_m1_ency = d_via_info['top_enc_le'][0]
        # how many V0 vias (with spacing) fit inside the OD, given enclosure
        d_v0_n = (w_unit - 2 * d_v0_od_ency + d_v0_sp) // (d_v0_h + d_v0_sp)
        d_v0_arrh = d_v0_n * (d_v0_h + d_v0_sp) - d_v0_sp
        # get metal length and bottom metal coordinate
        mx_h = max(md_min_len, d_v0_arrh + 2 * d_v0_m1_ency)
        d_mx_yb = od_yc - mx_h // 2
        # check sp_gd_m1 spec, move everything up if necessary
        delta = mx_gd_spy - (d_mx_yb - g_mx_yt)
        if delta > 0:
            d_mx_yb += delta
            od_yt += delta
            od_yb += delta
            od_yc += delta

        # compute final locations
        d_mx_yt = d_mx_yb + mx_h

        # find PO and block top Y coordinate
        po_yt = od_yt + po_od_exty
        blk_yt = po_yt + po_spy // 2
        arr_y = 0, blk_yt

        # compute extension information
        g_y_list = [(g_m1_yb, g_m1_yt), (g_mx_yb, g_mx_yt)]
        d_y_list = [(od_yb, od_yt), (d_mx_yb, d_mx_yt)]
        return dict(
            blk=arr_y,
            po=(po_yb, po_yt),
            od=(od_yb, od_yt),
            top_margins=dict(
                od=(blk_yt - od_yt, od_spy),
                po=(blk_yt - po_yt, po_spy),
                m1=(blk_yt - d_mx_yt, mx_spy),
                mx=(blk_yt - d_mx_yt, mx_spy),
            ),
            bot_margins=dict(
                od=(od_yb, od_spy),
                po=(po_yb, po_spy),
                m1=(g_m1_yb, mx_spy),
                mx=(g_mx_yb, mx_spy),
            ),
            fill_info={},
            g_y_list=g_y_list,
            d_y_list=d_y_list,
        )
    def get_sub_yloc_info(self, lch_unit, w, **kwargs):
        # type: (int, float, **kwargs) -> Dict[str, Any]
        """Compute vertical (Y) layout information for a substrate-contact row.

        Keyword arguments used: ``dnw_mode`` (deep N-well mode string,
        empty for none) and ``blk_pitch`` (block height quantization).
        Returns a dict analogous to :meth:`get_mos_yloc_info`, with the
        same interval used for both gate and drain connections.
        """
        dnw_mode = kwargs.get('dnw_mode', '')
        blk_pitch = kwargs.get('blk_pitch', 1)

        mos_pitch = self.get_mos_pitch(unit_mode=True)
        md_min_len = self.get_md_min_len(lch_unit)
        mos_constants = self.get_mos_tech_constants(lch_unit)
        od_spy = mos_constants['od_spy']
        imp_od_ency = mos_constants['imp_od_ency']
        po_spy = mos_constants['po_spy']
        d_via_info = mos_constants['d_via']
        nw_dnw_ovl = mos_constants['nw_dnw_ovl']
        nw_dnw_ext = mos_constants['nw_dnw_ext']
        sub_m1_enc_le = mos_constants['sub_m1_enc_le']

        layout_unit = self.config['layout_unit']
        res = self.res
        # round to an even number of resolution units so the OD can be centered
        od_h = int(round(w / layout_unit / (2 * res))) * 2

        # step 0: figure out implant/OD enclosure
        if dnw_mode:
            imp_od_ency = max(imp_od_ency, (nw_dnw_ovl + nw_dnw_ext - od_h) // 2)

        # step 1: find OD coordinate
        od_yb = imp_od_ency
        od_yt = od_yb + od_h
        blk_yt = od_yt + imp_od_ency
        # fix substrate height quantization, then recenter OD location
        blk_pitch = lcm([blk_pitch, mos_pitch])
        blk_yt = -(-blk_yt // blk_pitch) * blk_pitch  # ceil to pitch
        od_yb = (blk_yt - od_h) // 2
        od_yt = od_yb + od_h
        od_yc = (od_yb + od_yt) // 2

        # step 2: find metal height
        drc_info = self.get_conn_drc_info(lch_unit, 'd')
        mx_spy = max((info['sp_le'] for info in drc_info.values()))
        d_v0_h = d_via_info['dim'][0][1]
        d_v0_sp = d_via_info['sp'][0]
        d_v0_od_ency = d_via_info['bot_enc_le'][0]
        # how many V0 vias (with spacing) fit inside the OD, given enclosure
        d_v0_n = (od_h - 2 * d_v0_od_ency + d_v0_sp) // (d_v0_h + d_v0_sp)
        d_v0_arrh = d_v0_n * (d_v0_h + d_v0_sp) - d_v0_sp
        mx_h = max(md_min_len, d_v0_arrh + 2 * sub_m1_enc_le)
        d_mx_yb = od_yc - mx_h // 2
        d_mx_yt = d_mx_yb + mx_h
        mx_y = (d_mx_yb, d_mx_yt)

        return dict(
            blk=(0, blk_yt),
            po=(od_yb, od_yb),
            od=(od_yb, od_yt),
            top_margins=dict(
                od=(blk_yt - od_yt, od_spy),
                po=(blk_yt, po_spy),
                mx=(blk_yt - d_mx_yt, mx_spy),
                m1=(blk_yt - d_mx_yt, mx_spy),
            ),
            bot_margins=dict(
                od=(od_yb, od_spy),
                po=(blk_yt, po_spy),
                mx=(d_mx_yb, mx_spy),
                m1=(d_mx_yb, mx_spy),
            ),
            fill_info={},
            g_conn_y=mx_y,
            d_conn_y=mx_y,
        )
    def get_edge_info(self, lch_unit, guard_ring_nf, is_end, **kwargs):
        # type: (int, int, bool, **kwargs) -> Dict[str, Any]
        """Compute edge-block finger counts and outer margin.

        :raises ValueError: if ``guard_ring_nf`` is positive but below the
            technology minimum, or non-positive inside a substrate ring.

        Returns a dict with the total edge finger count, the outer margin
        (only applied when ``is_end``), and the individual finger counts
        (outer dummies, guard-ring substrate, guard-ring separation,
        OD-to-implant margin).
        """
        dnw_margins = self.config['dnw_margins']

        mos_constants = self.get_mos_tech_constants(lch_unit)
        imp_od_encx = mos_constants['imp_od_encx']
        nw_dnw_ovl = mos_constants['nw_dnw_ovl']
        nw_dnw_ext = mos_constants['nw_dnw_ext']
        sd_pitch = mos_constants['sd_pitch']
        edge_margin = mos_constants['edge_margin']
        fg_gr_min = mos_constants['fg_gr_min']
        fg_outer_min = mos_constants['fg_outer_min']
        po_od_extx = mos_constants['po_od_extx']

        is_sub_ring = kwargs.get('is_sub_ring', False)
        dnw_mode = kwargs.get('dnw_mode', '')

        if 0 < guard_ring_nf < fg_gr_min:
            raise ValueError('guard_ring_nf = %d < %d' % (guard_ring_nf, fg_gr_min))
        if is_sub_ring and guard_ring_nf <= 0:
            raise ValueError('guard_ring_nf = %d must be positive '
                             'in substrate ring' % guard_ring_nf)

        # step 0: figure out implant/OD enclosure and outer edge margin
        outer_margin = edge_margin
        if dnw_mode:
            od_w = (fg_gr_min - 1) * sd_pitch + lch_unit + 2 * po_od_extx
            imp_od_encx = max(imp_od_encx, (nw_dnw_ovl + nw_dnw_ext - od_w) // 2)
            outer_margin = dnw_margins[dnw_mode] - nw_dnw_ext

        # calculate implant left X coordinate distance from right edge
        od_delta = po_od_extx - (sd_pitch - lch_unit) // 2
        imp_delta = od_delta + imp_od_encx

        # compute number of finger needed to have correct implant enclosure
        # (-(-a // b) is ceiling division)
        fg_od_margin = -(-imp_delta // sd_pitch)
        fg_outer = max(fg_od_margin, fg_outer_min)

        if guard_ring_nf == 0:
            # no guard ring: only outer dummy fingers
            fg_gr_sub = 0
            fg_gr_sep = 0
        else:
            if is_sub_ring:
                fg_gr_sep = -(-edge_margin // sd_pitch)
            else:
                fg_gr_sep = fg_outer
            # guard ring replaces the outer dummies entirely
            fg_outer = 0
            fg_gr_sub = guard_ring_nf + 2 * fg_od_margin

        return dict(
            edge_num_fg=fg_outer + fg_gr_sub + fg_gr_sep,
            edge_margin=outer_margin if is_end else 0,
            fg_outer=fg_outer,
            fg_gr_sub=fg_gr_sub,
            fg_gr_sep=fg_gr_sep,
            fg_od_margin=fg_od_margin,
        )
    def get_md_min_len(self, lch_unit):
        # type: (int) -> int
        """Returns minimum drain wire length.

        Takes the worst-case (maximum) 'min_len' over all drain
        connection layers' DRC rules.
        """
        drc_info = self.get_conn_drc_info(lch_unit, 'd')
        return max((info['min_len'] for info in drc_info.values()))
    def get_mos_info(self, lch_unit, w, mos_type, threshold, fg, **kwargs):
        # type: (int, int, str, str, int, **kwargs) -> Dict[str, Any]
        """Get transistor layout information

        Layout placement strategy:

        1. find gate Y coordinates from PO spacing and CO enclosure rule.
        2. find drain and OD coordinates by using gate-drain metal and gate-drain OD
           spacing constraints.
        3. get top PO Y coordinates and wrap up.

        Parameters
        ----------
        lch_unit : int
            channel length, in resolution units.
        w : int
            transistor width (units per process convention; forwarded to
            get_mos_yloc_info).
        mos_type : str
            transistor type (e.g. 'nch'); 'nch' rows get a 'ptap' substrate
            type, everything else 'ntap'.
        threshold : str
            threshold flavor; used with mos_type to pick implant layers.
        fg : int
            number of fingers.
        **kwargs :
            'ds_dummy' (bool) marks a drain/source dummy device whose OD is
            not drawn; all kwargs are also forwarded to get_mos_yloc_info.

        Returns
        -------
        mos_info : Dict[str, Any]
            dictionary with 'layout_info', extension info for top/bottom,
            left/right edge info, the source/drain Y center 'sd_yc', and
            PO/OD/gate/drain connection Y coordinates.
        """
        ds_dummy = kwargs.get('ds_dummy', False)
        mos_constants = self.get_mos_tech_constants(lch_unit)
        sd_pitch = mos_constants['sd_pitch']
        yloc_info = self.get_mos_yloc_info(lch_unit, w, **kwargs)
        blk_yb, blk_yt = blk_y = yloc_info['blk']
        od_yb, od_yt = od_y = yloc_info['od']
        po_y = yloc_info['po']
        g_y_list = yloc_info['g_y_list']
        d_y_list = yloc_info['d_y_list']
        # source/drain Y center coordinate
        od_yc = (od_yb + od_yt) // 2
        mtype = mos_type, mos_type
        lay_info_list = []
        # implant/threshold layers all span the full block height
        for imp_name in self.get_mos_layers(mos_type, threshold):
            lay_info_list.append((imp_name, 0, blk_yb, blk_yt))
        od_type = 'mos_fake' if ds_dummy else 'mos'
        lr_edge_info = EdgeInfo(od_type=od_type)
        od_h = od_yt - od_yb
        po_types = ('PO', ) * fg
        ext_top_info = ExtInfo(
            margins=yloc_info['top_margins'],
            od_h=od_h,
            imp_min_h=0,
            m1_sub_h=0,
            mtype=mtype,
            thres=threshold,
            po_types=po_types,
            edgel_info=lr_edge_info,
            edger_info=lr_edge_info,
        )
        ext_bot_info = ExtInfo(
            margins=yloc_info['bot_margins'],
            od_h=od_h,
            imp_min_h=0,
            m1_sub_h=0,
            mtype=mtype,
            thres=threshold,
            po_types=po_types,
            edgel_info=lr_edge_info,
            edger_info=lr_edge_info,
        )
        sub_type = 'ptap' if mos_type == 'nch' else 'ntap'
        layout_info = dict(
            blk_type='mos',
            lch_unit=lch_unit,
            sd_pitch=sd_pitch,
            fg=fg,
            arr_y=blk_y,
            # dummy devices do not draw OD
            draw_od=not ds_dummy,
            row_info_list=[
                RowInfo(
                    od_x=(0, fg),
                    od_y=od_y,
                    od_type=('mos', sub_type),
                    po_y=po_y,
                )
            ],
            lay_info_list=lay_info_list,
            # edge parameters
            sub_type=sub_type,
            imp_params=[(mos_type, threshold, 0, blk_yt, 0, blk_yt)],
            is_sub_ring=False,
            dnw_mode='',
            # MosConnection parameters
            g_y_list=g_y_list,
            d_y_list=d_y_list,
        )
        lr_edge_info_row = (lr_edge_info, [])
        return dict(
            layout_info=layout_info,
            ext_top_info=ext_top_info,
            ext_bot_info=ext_bot_info,
            left_edge_info=lr_edge_info_row,
            right_edge_info=lr_edge_info_row,
            sd_yc=od_yc,
            po_y=po_y,
            od_y=od_y,
            # topmost entry of each list is the connection Y interval
            g_conn_y=g_y_list[-1],
            d_conn_y=d_y_list[-1],
        )
def get_valid_extension_widths(self, lch_unit, top_ext_info, bot_ext_info, **kwargs):
# type: (int, ExtInfo, ExtInfo) -> List[int]
"""Compute a list of valid extension widths.
The DRC rules that we consider are:
1. wire line-end space
# implant/threshold layers minimum width.
#. max OD space
#. implant/threshold layers to draw
Of these | |
this
operator's fit method.
"""
return self.get_schema('input_fit')
def input_schema_predict(self):
"""Returns the schema for predict method's input.
Returns
-------
dict
Logical schema describing input required by this
operator's predict method.
"""
return self.get_schema('input_predict')
def input_schema_predict_proba(self):
"""Returns the schema for predict proba method's input.
Returns
-------
dict
Logical schema describing input required by this
operator's predict proba method.
"""
sch = self.get_schema_maybe('input_predict_proba')
if sch is None:
return self.input_schema_predict()
else:
return sch
def input_schema_transform(self):
"""Returns the schema for transform method's input.
Returns
-------
dict
Logical schema describing input required by this
operator's transform method.
"""
return self.input_schema_predict()
def output_schema(self):
"""Returns the schema for predict/transform method's output.
Returns
-------
dict
Logical schema describing output of this
operator's predict/transform method.
"""
return self.get_schema('output')
def output_schema_predict_proba(self):
"""Returns the schema for predict proba method's output.
Returns
-------
dict
Logical schema describing output of this
operator's predict proba method.
"""
sch = self.get_schema_maybe('output_predict_proba')
if sch is None:
return self.output_schema()
else:
return sch
def hyperparam_schema(self, name:Optional[str]=None):
"""Returns the hyperparameter schema for the operator.
Parameters
----------
name : string, optional
Name of the hyperparameter.
Returns
-------
dict
Full hyperparameter schema for this operator or part of the schema
corresponding to the hyperparameter given by parameter `name`.
"""
hp_schema = self.get_schema('hyperparams')
if name is None:
return hp_schema
else:
params = next(iter(hp_schema.get('allOf',[])))
return params.get('properties', {}).get(name)
def hyperparam_defaults(self):
"""Returns the default values of hyperparameters for the operator.
Returns
-------
dict
A dictionary with names of the hyperparamers as keys and
their default values as values.
"""
if not hasattr(self, '_hyperparam_defaults'):
schema = self.hyperparam_schema()
props = next(iter(schema.get('allOf',[])), {}).get('properties', {})
defaults = { k: props[k].get('default') for k in props.keys() }
self._hyperparam_defaults = defaults
return self._hyperparam_defaults
def get_param_ranges(self)->Tuple[Dict[str,Any], Dict[str,Any]]:
"""Returns two dictionaries, ranges and cat_idx, for hyperparameters.
The ranges dictionary has two kinds of entries. Entries for
numeric and Boolean hyperparameters are tuples of the form
(min, max, default). Entries for categorical hyperparameters
are lists of their values.
The cat_idx dictionary has (min, max, default) entries of indices
into the corresponding list of values.
"""
hyperparam_obj = next(iter(self.hyperparam_schema().get('allOf',[])))
original = hyperparam_obj.get('properties')
def is_relevant(hp, s):
if 'relevantToOptimizer' in hyperparam_obj:
return hp in hyperparam_obj['relevantToOptimizer']
return True
relevant = {hp: s for hp, s in original.items() if is_relevant(hp, s)}
def pick_one_type(schema):
if 'anyOf' in schema:
def by_type(typ):
for s in schema['anyOf']:
if 'type' in s and s['type'] == typ:
if ('forOptimizer' not in s) or s['forOptimizer']:
return s
return None
for typ in ['number', 'integer', 'string']:
s = by_type(typ)
if s:
return s
return schema['anyOf'][0]
return schema
unityped = {hp: pick_one_type(relevant[hp]) for hp in relevant}
def add_default(schema):
if 'type' in schema:
minimum, maximum = 0.0, 1.0
if 'minimumForOptimizer' in schema:
minimum = schema['minimumForOptimizer']
elif 'minimum' in schema:
minimum = schema['minimum']
if 'maximumForOptimizer' in schema:
maximum = schema['maximumForOptimizer']
elif 'maximum' in schema:
maximum = schema['maximum']
result = {**schema}
if schema['type'] in ['number', 'integer']:
if 'default' not in schema:
schema['default'] = None
if 'minimumForOptimizer' not in schema:
result['minimumForOptimizer'] = minimum
if 'maximumForOptimizer' not in schema:
result['maximumForOptimizer'] = maximum
return result
elif 'enum' in schema:
if 'default' in schema:
return schema
return {'default': schema['enum'][0], **schema}
return schema
defaulted = {hp: add_default(unityped[hp]) for hp in unityped}
def get_range(hp, schema):
if 'enum' in schema:
default = schema['default']
non_default = [v for v in schema['enum'] if v != default]
return [*non_default, default]
elif schema['type'] == 'boolean':
return (False, True, schema['default'])
else:
def get(schema, key):
return schema[key] if key in schema else None
keys = ['minimumForOptimizer', 'maximumForOptimizer', 'default']
return tuple([get(schema, key) for key in keys])
def get_cat_idx(schema):
if 'enum' not in schema:
return None
return (0, len(schema['enum'])-1, len(schema['enum'])-1)
autoai_ranges = {hp: get_range(hp, s) for hp, s in defaulted.items()}
autoai_cat_idx = {hp: get_cat_idx(s)
for hp, s in defaulted.items() if 'enum' in s}
return autoai_ranges, autoai_cat_idx
    def _enum_to_strings(self, arg:enum.Enum)->Tuple[str, Any]:
        """Split an enum member into its class name and value.

        Parameters
        ----------
        arg : enum.Enum
            Enum member, typically encoding a hyperparameter choice.

        Raises
        ------
        ValueError
            If `arg` is not an enum member.

        Returns
        -------
        Tuple[str, Any]
            Pair of the enum's class name and the member's value.
        """
        if not isinstance(arg, enum.Enum):
            raise ValueError('Missing keyword on argument {}.'.format(arg))
        return arg.__class__.__name__, arg.value
    def name(self)->str:
        """Name of this operator.

        Returns
        -------
        str
            The operator name stored on the instance.
        """
        return self._name
def class_name(self)->str:
module = self._impl.__module__
if module is None or module == str.__class__.__module__:
class_name = self.name()
else:
class_name = module + '.' + self._impl.__class__.__name__
return class_name
    def auto_arrange(self, planner):
        # `planner` is currently unused; the operator is wrapped as a
        # planned individual operator unchanged.
        return PlannedIndividualOp(self._name, self._impl, self._schemas)
    def arrange(self, *args, **kwargs):
        # All arguments are currently ignored; the operator is wrapped as a
        # planned individual operator unchanged.
        return PlannedIndividualOp(self._name, self._impl, self._schemas)
def to_json(self):
json = { 'class': self.class_name(),
'state': 'planned',
'operator': self.name()}
if 'documentation_url' in self._schemas:
json = {**json,
'documentation_url': self._schemas['documentation_url']}
return json
    def __str__(self)->str:
        """Human-readable representation: the operator name."""
        return self.name()
def has_same_impl(self, other:Operator)->bool:
"""Checks if the type of the operator imnplementations are compatible
"""
if not isinstance(other, IndividualOp):
return False
return type(self._impl) == type(other._impl)
    def customize_schema(self, **kwargs: Schema) -> 'IndividualOp':
        """Return a new operator with a customized schema

        Parameters
        ----------
        schema : Schema
            A dictionary of json schemas for the operator. Override the entire schema and ignore other arguments
        input : Schema
            (or `input_*`) override the input schema for method `*`.
            `input_*` must be an existing method (already defined in the schema for lale operators, existing method for external operators)
        output : Schema
            (or `output_*`) override the output schema for method `*`.
            `output_*` must be an existing method (already defined in the schema for lale operators, existing method for external operators)
        constraint : Schema
            Add a constraint in JSON schema format.
        relevantToOptimizer : String list
            update the set parameters that will be optimized.
        param : Schema
            Override the schema of the hyperparameter.
            `param` must be an existing parameter (already defined in the schema for lale operators, __init__ parameter for external operators)

        Returns
        -------
        IndividualOp
            Copy of the operator with a customized schema
        """
        # Work on a deep copy so the original operator's schemas are never
        # mutated.
        op = copy.deepcopy(self)
        for arg in kwargs:
            value = kwargs[arg]
            # NOTE(review): the keyword checked here is 'schemas', while the
            # docstring documents it as 'schema' — confirm the intended name.
            if arg == 'schemas':
                value.schema['$schema'] = 'http://json-schema.org/draft-04/schema#'
                helpers.validate_is_schema(value.schema)
                op._schemas = value.schema
                # 'schemas' replaces the entire schema; any remaining kwargs
                # are silently ignored because of this break.
                break
            elif arg.startswith('input') or arg.startswith('output'):
                # multiple input types (e.g., fit, predict)
                value.schema['$schema'] = 'http://json-schema.org/draft-04/schema#'
                helpers.validate_method(op, arg)
                helpers.validate_is_schema(value.schema)
                op._schemas['properties'][arg] = value.schema
            elif arg == 'constraint':
                op._schemas['properties']['hyperparams']['allOf'].append(value.schema)
            elif arg == 'relevantToOptimizer':
                assert isinstance(value, list)
                op._schemas['properties']['hyperparams']['allOf'][0]['relevantToOptimizer'] = value
            elif arg in helpers.get_hyperparam_names(op):
                op._schemas['properties']['hyperparams']['allOf'][0]['properties'][arg] = value.schema
            else:
                assert False, "Unkown method or parameter."
        return op
def validate(self, X, y=None):
if not lale.helpers.is_schema(X):
X = lale.datasets.data_schemas.to_schema(X)
obj_X = {
'type': 'object',
'additionalProperties': False,
'required': ['X'],
'properties': {'X': X}}
if y is not None:
if not lale.helpers.is_schema(y):
y = lale.datasets.data_schemas.to_schema(y)
obj_Xy = {
'type': 'object',
'additionalProperties': False,
'required': ['X', 'y'],
'properties': {'X': X, 'y': y}}
fit_actual = obj_X if y is None else obj_Xy
fit_formal = self.input_schema_fit()
lale.helpers.validate_subschema(fit_actual, fit_formal,
'to_schema(data)', f'{self.name()}.input_schema_fit()')
predict_actual = obj_X
predict_formal = self.input_schema_predict()
lale.helpers.validate_subschema(predict_actual, predict_formal,
'to_schema(data)', f'{self.name()}.input_schema_predict()')
    def transform_schema(self, s_X):
        # `s_X` is unused: the declared output schema is returned as-is.
        return self.output_schema()
class PlannedIndividualOp(IndividualOp, PlannedOperator):
"""
This is a concrete class that returns a trainable individual
operator through its __call__ method. A configure method can use
an optimizer and return the best hyperparameter combination.
"""
_hyperparams:Optional[Dict[str,Any]]
    def __init__(self, _name:str, _impl, _schemas) -> None:
        """Initialize with name, implementation object, and schemas; no
        hyperparameters are bound yet (`_hyperparams` starts as None)."""
        super(PlannedIndividualOp, self).__init__(_name, _impl, _schemas)
        self._hyperparams = None
def _configure(self, *args, **kwargs)->'TrainableIndividualOp':
module_name:str = self._impl.__module__
class_name:str = self._impl.__class__.__name__
module = importlib.import_module(module_name)
class_ = getattr(module, class_name)
hyperparams = { }
for arg in args:
k, v = self._enum_to_strings(arg)
hyperparams[k] = v
for k, v in fixup_hyperparams_dict(kwargs).items():
if k in hyperparams:
raise ValueError('Duplicate argument {}.'.format(k))
v = helpers.val_wrapper.unwrap(v)
if isinstance(v, enum.Enum):
k2, v2 = self._enum_to_strings(v)
if k != k2:
raise ValueError(
'Invalid keyword {} for argument {}.'.format(k2, v2))
else:
v2 = v
hyperparams[k] = v2
#using params_all instead of hyperparams to ensure the construction is consistent with schema
trainable_to_get_params = TrainableIndividualOp(_name=self.name(), _impl=None, _schemas=self._schemas)
trainable_to_get_params._hyperparams = hyperparams
params_all = trainable_to_get_params.get_params_all()
try:
helpers.validate_schema(params_all, self.hyperparam_schema())
except jsonschema.ValidationError as e:
lale.helpers.validate_is_schema(e.schema)
schema = lale.pretty_print.to_string(e.schema)
if [*e.schema_path][:3] == ['allOf', 0, 'properties']:
arg = e.schema_path[3]
reason = f'invalid value {arg}={e.instance}'
schema_path = f'argument {arg}'
elif [*e.schema_path][:3] == ['allOf', 0, 'additionalProperties']:
pref, suff = 'Additional properties are not allowed (', ')'
assert e.message.startswith(pref) and e.message.endswith(suff)
reason = 'argument ' + e.message[len(pref):-len(suff)]
schema_path = 'arguments and their | |
# Loss functions for training DeepChem models.
class Loss:
    """A loss function for use in training models."""

    def _compute_tf_loss(self, output, labels):
        """Compute the loss function for TensorFlow tensors.

        The inputs are tensors containing the model's outputs and the labels for a
        batch. The return value should be a tensor of shape (batch_size) or
        (batch_size, tasks) containing the value of the loss function on each
        sample or sample/task.

        Parameters
        ----------
        output: tensor
            the output of the model
        labels: tensor
            the expected output

        Returns
        -------
        The value of the loss function on each sample or sample/task pair
        """
        raise NotImplementedError("Subclasses must implement this")

    def _create_pytorch_loss(self):
        """Create a PyTorch loss function.

        Returns
        -------
        A callable computing this loss on PyTorch tensors (argument list
        varies by subclass; most take (output, labels)).
        """
        raise NotImplementedError("Subclasses must implement this")
class L1Loss(Loss):
    """Elementwise absolute difference between predictions and labels."""

    def _compute_tf_loss(self, output, labels):
        import tensorflow as tf
        output, labels = _make_tf_shapes_consistent(output, labels)
        output, labels = _ensure_float(output, labels)
        delta = output - labels
        return tf.abs(delta)

    def _create_pytorch_loss(self):
        import torch
        loss_fn = torch.nn.L1Loss(reduction='none')
        return loss_fn
class HuberLoss(Loss):
    """Huber (smooth L1) loss.

    Quadratic for small residuals and linear for large ones, so it is less
    sensitive to small errors than L2 while remaining robust to large
    outliers, unlike plain L1. Uses delta = 1.0 and no reduction.
    """

    def _compute_tf_loss(self, output, labels):
        import tensorflow as tf
        output, labels = _make_tf_shapes_consistent(output, labels)
        huber = tf.keras.losses.Huber(reduction='none')
        return huber(output, labels)

    def _create_pytorch_loss(self):
        import torch
        return torch.nn.SmoothL1Loss(reduction='none')
class L2Loss(Loss):
    """Elementwise squared difference between predictions and labels."""

    def _compute_tf_loss(self, output, labels):
        import tensorflow as tf
        output, labels = _make_tf_shapes_consistent(output, labels)
        output, labels = _ensure_float(output, labels)
        delta = output - labels
        return tf.square(delta)

    def _create_pytorch_loss(self):
        import torch
        loss_fn = torch.nn.MSELoss(reduction='none')
        return loss_fn
class HingeLoss(Loss):
    """Hinge loss.

    The 'output' argument should contain logits; every element of 'labels'
    should equal 0 or 1.
    """

    def _compute_tf_loss(self, output, labels):
        import tensorflow as tf
        output, labels = _make_tf_shapes_consistent(output, labels)
        return tf.keras.losses.hinge(labels, output)

    def _create_pytorch_loss(self):
        import torch

        def loss(output, labels):
            output, labels = _make_pytorch_shapes_consistent(output, labels)
            margins = torch.clamp(1 - labels * output, min=0)
            return torch.mean(margins, dim=-1)
        return loss
class PoissonLoss(Loss):
    """Poisson loss: mean of `y_pred - y_true * log(y_pred)` for inputs
    (y_true, y_pred).

    Generally used for regression tasks where the data follows a Poisson
    distribution.
    """

    def _compute_tf_loss(self, output, labels):
        import tensorflow as tf
        output, labels = _make_tf_shapes_consistent(output, labels)
        poisson = tf.keras.losses.Poisson(reduction='auto')
        return poisson(labels, output)

    def _create_pytorch_loss(self):
        import torch

        def loss(output, labels):
            output, labels = _make_pytorch_shapes_consistent(output, labels)
            terms = output - labels * torch.log(output)
            return torch.mean(terms)
        return loss
class BinaryCrossEntropy(Loss):
    """Cross entropy between pairs of probabilities.

    Both arguments should have shape (batch_size) or (batch_size, tasks)
    and hold probabilities.
    """

    def _compute_tf_loss(self, output, labels):
        import tensorflow as tf
        output, labels = _make_tf_shapes_consistent(output, labels)
        output, labels = _ensure_float(output, labels)
        return tf.keras.losses.binary_crossentropy(labels, output)

    def _create_pytorch_loss(self):
        import torch
        bce = torch.nn.BCELoss(reduction='none')

        def loss(output, labels):
            output, labels = _make_pytorch_shapes_consistent(output, labels)
            per_element = bce(output, labels)
            return torch.mean(per_element, dim=-1)
        return loss
class CategoricalCrossEntropy(Loss):
    """Cross entropy between two probability distributions.

    Each argument has shape (batch_size, classes) or
    (batch_size, tasks, classes) and represents a probability distribution
    over classes.
    """

    def _compute_tf_loss(self, output, labels):
        import tensorflow as tf
        output, labels = _make_tf_shapes_consistent(output, labels)
        output, labels = _ensure_float(output, labels)
        return tf.keras.losses.categorical_crossentropy(labels, output)

    def _create_pytorch_loss(self):
        import torch

        def loss(output, labels):
            output, labels = _make_pytorch_shapes_consistent(output, labels)
            log_probs = torch.log(output)
            return -torch.sum(labels * log_probs, dim=-1)
        return loss
class SigmoidCrossEntropy(Loss):
    """Cross entropy between label probabilities and output logits.

    Both arguments have shape (batch_size) or (batch_size, tasks). Labels
    hold probabilities; outputs hold logits that are converted to
    probabilities with a sigmoid.
    """

    def _compute_tf_loss(self, output, labels):
        import tensorflow as tf
        output, labels = _make_tf_shapes_consistent(output, labels)
        output, labels = _ensure_float(output, labels)
        return tf.nn.sigmoid_cross_entropy_with_logits(labels, output)

    def _create_pytorch_loss(self):
        import torch
        logits_bce = torch.nn.BCEWithLogitsLoss(reduction='none')

        def loss(output, labels):
            output, labels = _make_pytorch_shapes_consistent(output, labels)
            return logits_bce(output, labels)
        return loss
class SoftmaxCrossEntropy(Loss):
    """Cross entropy between two probability distributions.

    Each argument has shape (batch_size, classes) or
    (batch_size, tasks, classes). Labels hold probabilities; outputs hold
    logits that are converted to probabilities with a softmax.
    """

    def _compute_tf_loss(self, output, labels):
        import tensorflow as tf
        output, labels = _make_tf_shapes_consistent(output, labels)
        output, labels = _ensure_float(output, labels)
        return tf.nn.softmax_cross_entropy_with_logits(labels, output)

    def _create_pytorch_loss(self):
        import torch
        log_softmax = torch.nn.LogSoftmax(dim=1)

        def loss(output, labels):
            output, labels = _make_pytorch_shapes_consistent(output, labels)
            return -torch.sum(labels * log_softmax(output), dim=-1)
        return loss
class SparseSoftmaxCrossEntropy(Loss):
    """Cross entropy between integer class labels and logits.

    Labels have shape (batch_size) or (batch_size, tasks) and hold integer
    class indices. Outputs have shape (batch_size, classes) or
    (batch_size, tasks, classes) and hold logits that are converted to
    probabilities with a softmax.
    """

    def _compute_tf_loss(self, output, labels):
        import tensorflow as tf
        if len(labels.shape) == len(output.shape):
            # drop the trailing singleton class dimension from the labels
            labels = tf.squeeze(labels, axis=-1)
        labels = tf.cast(labels, tf.int32)
        return tf.nn.sparse_softmax_cross_entropy_with_logits(labels, output)

    def _create_pytorch_loss(self):
        import torch
        ce_loss = torch.nn.CrossEntropyLoss(reduction='none')

        def loss(output, labels):
            # CrossEntropyLoss expects (batch_size, classes, tasks), so for
            # 3-d inputs move the class axis forward for API consistency.
            if len(output.shape) == 3:
                output = output.permute(0, 2, 1)
            if len(labels.shape) == len(output.shape):
                labels = labels.squeeze(-1)
            return ce_loss(output, labels.long())
        return loss
class VAE_ELBO(Loss):
    """The Variational AutoEncoder loss, KL Divergence Regularize + marginal log-likelihood.

    This loss is based on [1]_.
    ELBO (Evidence lower bound) lexically replaced Variational lower bound.
    BCE means marginal log-likelihood, and KLD means KL divergence with normal distribution.
    Added hyper parameter 'kl_scale' for KLD.

    The logvar and mu should have shape (batch_size, hidden_space).
    The x and reconstruction_x should have (batch_size, attribute).
    The kl_scale should be float.

    Examples
    --------
    Examples for calculating loss using constant tensor.

    batch_size = 2,
    hidden_space = 2,
    num of original attribute = 3
    >>> import numpy as np
    >>> import torch
    >>> import tensorflow as tf
    >>> logvar = np.array([[1.0,1.3],[0.6,1.2]])
    >>> mu = np.array([[0.2,0.7],[1.2,0.4]])
    >>> x = np.array([[0.9,0.4,0.8],[0.3,0,1]])
    >>> reconstruction_x = np.array([[0.8,0.3,0.7],[0.2,0,0.9]])

    Case tensorflow
    >>> VAE_ELBO()._compute_tf_loss(tf.constant(logvar), tf.constant(mu), tf.constant(x), tf.constant(reconstruction_x))
    <tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.70165154, 0.76238271])>

    Case pytorch
    >>> (VAE_ELBO()._create_pytorch_loss())(torch.tensor(logvar), torch.tensor(mu), torch.tensor(x), torch.tensor(reconstruction_x))
    tensor([0.7017, 0.7624], dtype=torch.float64)

    References
    ----------
    .. [1] Kingma, Diederik P., and Max Welling. "Auto-encoding variational bayes." arXiv preprint arXiv:1312.6114 (2013).
    """

    def _compute_tf_loss(self, logvar, mu, x, reconstruction_x, kl_scale=1):
        import tensorflow as tf
        x, reconstruction_x = _make_tf_shapes_consistent(x, reconstruction_x)
        x, reconstruction_x = _ensure_float(x, reconstruction_x)
        BCE = tf.keras.losses.binary_crossentropy(x, reconstruction_x)
        KLD = VAE_KLDivergence()._compute_tf_loss(logvar, mu)
        # kl_scale weights the KL regularizer against the reconstruction term
        return BCE + kl_scale * KLD

    def _create_pytorch_loss(self):
        import torch
        bce = torch.nn.BCELoss(reduction='none')

        def loss(logvar, mu, x, reconstruction_x, kl_scale=1):
            x, reconstruction_x = _make_pytorch_shapes_consistent(x, reconstruction_x)
            BCE = torch.mean(bce(reconstruction_x, x), dim=-1)
            KLD = (VAE_KLDivergence()._create_pytorch_loss())(logvar, mu)
            return BCE + kl_scale * KLD
        return loss
class VAE_KLDivergence(Loss):
    """The KL divergence between the hidden distribution and a normal distribution.

    This loss represents KL divergence losses between normal distributions (using
    parameters of the distributions) based on [1]_.

    The logvar should have shape (batch_size, hidden_space) and each term represents
    the standard deviation of the hidden distribution. The mean should have shape
    (batch_size, hidden_space) and each term represents the mean of the hidden
    distribution.

    Examples
    --------
    Examples for calculating loss using constant tensor.

    batch_size = 2,
    hidden_space = 2,
    >>> import numpy as np
    >>> import torch
    >>> import tensorflow as tf
    >>> logvar = np.array([[1.0,1.3],[0.6,1.2]])
    >>> mu = np.array([[0.2,0.7],[1.2,0.4]])

    Case tensorflow
    >>> VAE_KLDivergence()._compute_tf_loss(tf.constant(logvar), tf.constant(mu))
    <tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.17381787, 0.51425203])>

    Case pytorch
    >>> (VAE_KLDivergence()._create_pytorch_loss())(torch.tensor(logvar), torch.tensor(mu))
    tensor([0.1738, 0.5143], dtype=torch.float64)

    References
    ----------
    .. [1] Kingma, Diederik P., and Max Welling. "Auto-encoding variational bayes." arXiv preprint arXiv:1312.6114 (2013).
    """

    def _compute_tf_loss(self, logvar, mu):
        import tensorflow as tf
        logvar, mu = _make_tf_shapes_consistent(logvar, mu)
        logvar, mu = _ensure_float(logvar, mu)
        # NOTE(review): despite its name, `logvar` is squared here, i.e.
        # treated as a standard deviation rather than a log-variance —
        # confirm against the encoder's output convention.
        return 0.5 * tf.reduce_mean(
            tf.square(mu) + tf.square(logvar) -
            tf.math.log(1e-20 + tf.square(logvar)) - 1, -1)

    def _create_pytorch_loss(self):
        import torch

        def loss(logvar, mu):
            logvar, mu = _make_pytorch_shapes_consistent(logvar, mu)
            # NOTE(review): same as the TF path — `logvar` is squared, i.e.
            # treated as a standard deviation; confirm intended semantics.
            return 0.5 * torch.mean(
                torch.square(mu) + torch.square(logvar) -
                torch.log(1e-20 + torch.square(logvar)) - 1, -1)
        return loss
class ShannonEntropy(Loss):
"""The ShannonEntropy of discrete-distribution.
This loss represents shannon entropy based on _[1].
The inputs should have shape (batch size, num of variable) and represents
probabilites distribution.
Examples
--------
Examples for calculating loss using constant tensor.
| |
"""Implements an algorithm for stacking velocity computation"""
import numpy as np
import networkx as nx
from numba import njit
@njit(nogil=True)
def get_closest_index_by_val(val, array):
    """Return indices of the array elements, closest to the values from `val`."""
    # Linear scan per requested value; `array` is presumably a small grid
    # (times or velocities) so O(len(val) * len(array)) is acceptable.
    return np.array([np.argmin(np.abs(v - array)) for v in val])
@njit(nogil=True)
def interpolate_indices(x0, y0, x1, y1, x):
    """Linearly interpolate an int-valued function between points (`x0`, `y0`) and (`x1`, `y1`) and calculate its
    values at `x`."""
    # Integer floor division keeps the result usable as array indices.
    return (y1 * (x - x0) + y0 * (x1 - x)) // (x1 - x0)
@njit(nogil=True)
def create_edges(semblance, times, velocities, start_velocity_range, end_velocity_range, max_vel_step,
                 n_times, n_velocities):
    """Return edges of the graph for stacking velocity computation with their weights.

    Parameters
    ----------
    semblance : 2d np.ndarray
        An array with calculated vertical velocity semblance values.
    times : 1d np.ndarray
        Recording time for each seismic trace value for which semblance was calculated. Measured in milliseconds.
    velocities : 1d np.ndarray
        Range of velocity values for which semblance was calculated. Measured in meters/seconds.
    start_velocity_range : 1d np.ndarray with 2 elements
        Valid range for stacking velocity for the first timestamp. Both velocities are measured in meters/seconds.
    end_velocity_range : 1d np.ndarray with 2 elements
        Valid range for stacking velocity for the last timestamp. Both velocities are measured in meters/seconds.
    max_vel_step : int
        Maximal allowed velocity increase for nodes with adjacent times. Measured in samples.
    n_times : int
        The number of evenly spaced points to split time range into to generate graph vertices.
    n_velocities : int
        The number of evenly spaced points to split velocity range into for each time to generate graph vertices.

    Returns
    -------
    edges : tuple with 3 elements: start_nodes, end_nodes, weights
        start_nodes : list of tuples with 2 elements
            Identifiers of start nodes of the edges.
        end_nodes : list of tuples with 2 elements
            Identifiers of end nodes of the edges, corresponding to `start_nodes`. Matches `start_nodes` length.
        weights : list of floats
            Weights of corresponding edges. Matches `start_nodes` length.
    start_node : tuple with 2 elements
        An identifier of an auxiliary node, connected to all of the actual starting nodes to run the path search from.
    end_nodes : list of tuples with 2 elements
        Identifiers of possible end nodes of the path.
    """
    start_nodes = []
    end_nodes = []
    weights = []

    # Switch from time and velocity values to their indices in semblance
    # to further use them as node identifiers in the graph
    times_ix = np.linspace(0, len(times) - 1, n_times).astype(np.int32)
    start_vel_min_ix, start_vel_max_ix = get_closest_index_by_val(start_velocity_range, velocities)
    end_vel_min_ix, end_vel_max_ix = get_closest_index_by_val(end_velocity_range, velocities)
    # Left and right borders of the velocity trapezoid, per selected time
    start_vels_ix = interpolate_indices(times_ix[0], start_vel_min_ix, times_ix[-1], end_vel_min_ix, times_ix)
    end_vels_ix = interpolate_indices(times_ix[0], start_vel_max_ix, times_ix[-1], end_vel_max_ix, times_ix)

    # Instead of running the path search for each of starting nodes iteratively, create an auxiliary node,
    # connected to all of them, and run the search from it
    start_node = (np.int32(-1), np.int32(0))
    prev_nodes = [start_node]
    for time_ix, start_vel_ix, end_vel_ix in zip(times_ix, start_vels_ix, end_vels_ix):
        # np.unique also sorts and deduplicates the velocity indices
        curr_vels_ix = np.unique(np.linspace(start_vel_ix, end_vel_ix, n_velocities).astype(np.int32))
        curr_nodes = [(time_ix, vel_ix) for vel_ix in curr_vels_ix]
        for prev_time_ix, prev_vel_ix in prev_nodes:
            for curr_time_ix, curr_vel_ix in curr_nodes:
                # Connect two nodes only if:
                # 1. they are a starting node and an auxilliary one
                # 2. current velocity is no less then the previous one, but also does not exceed it by more than
                #    max_vel_step, determined by max_acceleration provided
                if not ((prev_time_ix == -1) or (prev_vel_ix <= curr_vel_ix <= prev_vel_ix + max_vel_step)):
                    continue

                # Calculate the edge weight: sum of (1 - semblance_value) for each value along the path between nodes
                times_indices = np.arange(prev_time_ix + 1, curr_time_ix + 1, dtype=np.int32)
                velocity_indices = interpolate_indices(prev_time_ix, prev_vel_ix, curr_time_ix, curr_vel_ix,
                                                       times_indices)
                # weight = len - sum(semblance) == sum(1 - semblance) over the path,
                # so minimal-weight paths follow semblance maxima
                weight = len(times_indices)
                for ti, vi in zip(times_indices, velocity_indices):
                    weight -= semblance[ti, vi]

                start_nodes.append((prev_time_ix, prev_vel_ix))
                end_nodes.append((curr_time_ix, curr_vel_ix))
                weights.append(weight)
        prev_nodes = curr_nodes
    edges = (start_nodes, end_nodes, weights)
    # curr_nodes now holds the nodes of the last time row: the valid path end nodes
    return edges, start_node, curr_nodes
def calculate_stacking_velocity(semblance, times, velocities, start_velocity_range, end_velocity_range,
max_acceleration=None, n_times=25, n_velocities=25):
"""Calculate stacking velocity by given semblance.
Stacking velocity is the value of the seismic velocity obtained from the best fit of the traveltime curve by a
hyperbola for each timestamp. It is used to correct the arrival times of reflection events in the traces for their
varying offsets prior to stacking.
If calculated by semblance, stacking velocity must meet the following conditions:
1. It should be monotonically increasing
2. Its gradient should be bounded above to avoid gather stretching after NMO correction
3. It should pass through local energy maxima on the semblance
In order for these conditions to be satisfied, the following algorithm is proposed:
1. Stacking velocity is being found inside a trapezoid whose vertices at first and last time are defined by
`start_velocity_range` and `end_velocity_range` respectively.
2. An auxiliary directed graph is constructed so that:
1. `n_times` evenly spaced points are generated to cover the whole semblance time range. For each of these
points `n_velocities` evenly spaced points are generated to cover the whole range of velocities inside the
trapezoid from its left to right border. All these points form a set of vertices of the graph.
2. An edge from a vertex A to a vertex B exists only if:
1. Vertex B is located at the very next timestamp after vertex A,
2. Velocity at vertex B is no less than at A,
3. Velocity at vertex B does not exceed that of A by a value determined by `max_acceleration` provided.
3. Edge weight is defined as sum of semblance values along its path.
3. A path with maximal semblance sum along it between any of starting and ending nodes is found using Dijkstra
algorithm and is considered to be the required stacking velocity.
Parameters
----------
semblance : 2d np.ndarray
An array with calculated vertical velocity semblance values.
times : 1d np.ndarray
Recording time for each seismic trace value for which semblance was calculated. Measured in milliseconds.
velocities : 1d np.ndarray
Range of velocity values for which semblance was calculated. Measured in meters/seconds.
start_velocity_range : tuple with 2 elements
Valid range for stacking velocity for the first timestamp. Both velocities are measured in meters/seconds.
end_velocity_range : tuple with 2 elements
Valid range for stacking velocity for the last timestamp. Both velocities are measured in meters/seconds.
max_acceleration : None or float, defaults to None
Maximal acceleration allowed for the stacking velocity function. If `None`, equals to
2 * (mean(end_velocity_range) - mean(start_velocity_range)) / total_time. Measured in meters/seconds^2.
n_times : int, defaults to 25
The number of evenly spaced points to split time range into to generate graph vertices.
n_velocities : int, defaults to 25
The number of evenly spaced points to split velocity range into for each time to generate graph vertices.
Returns
-------
stacking_times : 1d np.ndarray
Times for which stacking velocities were picked. Measured in milliseconds.
stacking_velocities : 1d np.ndarray
Picked stacking velocities. Matches the length of `stacking_times`. Measured in meters/seconds.
metric : float
Sum of semblance values along the stacking velocity path.
Raises
------
ValueError
If no path was found for given parameters.
"""
times = np.asarray(times, dtype=np.float32)
velocities = np.asarray(velocities, dtype=np.float32)
start_velocity_range = np.array(start_velocity_range, dtype=np.float32)
end_velocity_range = np.array(end_velocity_range, dtype=np.float32)
# Calculate maximal velocity growth (in samples) between two adjacent timestamps,
# for which graph nodes are created
total_time = (times[-1] - times[0]) / 1000 # from ms to s
if max_acceleration is None:
max_acceleration = 2 * (np.mean(end_velocity_range) - np.mean(start_velocity_range)) / total_time
max_vel_step = np.ceil((max_acceleration * total_time / n_times) / np.mean(velocities[1:] - velocities[:-1]))
max_vel_step = np.int32(max_vel_step)
# Create a graph and find paths with maximal semblance sum along them to all reachable nodes
edges, start_node, end_nodes = create_edges(semblance, times, velocities, start_velocity_range,
end_velocity_range, max_vel_step, n_times, n_velocities)
graph = nx.DiGraph()
graph.add_weighted_edges_from(zip(*edges))
paths = nx.shortest_path(graph, source=start_node, weight="weight") # pylint: disable=unexpected-keyword-arg
# Select only paths to the nodes at the last timestamp and choose the optimal one
path_weights = [(paths[end_node], nx.path_weight(graph, paths[end_node], weight="weight"))
for end_node in end_nodes if end_node in paths]
if not path_weights:
raise ValueError("No path was found for given parameters")
path, metric = min(path_weights, key=lambda x: x[1])
# Remove the | |
focus.
"""
focus_vals = [np.var(z) / (np.mean(z)+1e-8) for z in zstack]
best_focus_slice = np.argmax(focus_vals)
return best_focus_slice
def find_best_focus_stacks(zstacks):
    """ Collapses each z-stack in a series to its sharpest slice.

    Parameters
    ----------
    zstacks : numpy array
        an input (n_stacks x n_z x n_rows x n_cols) image.

    Returns
    -------
    best_focus_imgs : numpy array
        a new numpy array (n_stacks x n_rows x n_cols) composed of the
        best-focus slices only.
    best_focus_slices : numpy array
        index of the best-focus z-slice for each stack, kept so the slice can
        be retrieved from the original video.
    """
    # Pick the sharpest slice index for every stack first ...
    slice_ids = [find_best_focus(stack) for stack in zstacks]
    # ... then gather those slices, each with a leading singleton axis so
    # they concatenate into an (n_stacks x n_rows x n_cols) array.
    best_focus_imgs = np.concatenate(
        [stack[ix][None, :] for stack, ix in zip(zstacks, slice_ids)], axis=0)
    best_focus_slices = np.hstack(slice_ids)
    return best_focus_imgs, best_focus_slices
def locate_centroids_simple(mask):
    """ Locates the centroids of all connected components in an image.

    Note: This function inherently assumes a threshold of 0 and dilation with
    a disk kernel of radius 3.

    Parameters
    ----------
    mask : numpy array
        an input grayscale image.

    Returns
    -------
    centroids : numpy array
        an array of (y,x) coordinate pairs giving the peaks in the input image.
    """
    from skimage.measure import label, regionprops
    from skimage.morphology import binary_dilation, disk

    # Binarise at 0, smooth with a fixed disk(3) dilation, then label the
    # connected components.
    dilated = binary_dilation(mask > 0, disk(3))
    components = regionprops(label(dilated))
    # regionprops centroids are already in (y, x) order.
    return np.array([list(component.centroid) for component in components])
def produce_valid_img_mask(img, min_I=0.1, max_area=1000, dilation=3):
    """ Identifies large contiguous high-intensity regions to filter detections.

    Example Centriole images may have a ring of high pixel intensity belonging
    to a much larger structure. This function identifies such large continuous
    areas so that detections falling on them can be discarded.

    Parameters
    ----------
    img : numpy array
        an input grayscale image.
    min_I : float
        the lower threshold for identifying the bright intensity regions.
        Assumes normalised intensities, i.e. image intensities in [0,1].
    max_area : integer
        threshold for identifying a 'large' region based on counting the
        number of pixels within the area.
    dilation : int
        size of the disk kernel used to postprocess and smoothen the resulting
        binary segmentation.

    Returns
    -------
    invalid_regions : numpy array
        a binary image of 0/1 pixel intensities indicating the large regions
        of high intensity, i.e. invalid centriole zones.
    """
    # `scipy.ndimage.morphology` is a deprecated namespace (removed in
    # SciPy 1.15); import from `scipy.ndimage` directly instead.
    from scipy.ndimage import binary_fill_holes
    from skimage.filters import threshold_otsu
    from skimage.measure import label, regionprops
    from skimage.morphology import binary_dilation, disk

    thresh = threshold_otsu(img)  # determines an Otsu threshold.
    # Is there signal in the image? Pick the lower / better threshold to use.
    if np.mean(img[img > thresh]) > min_I:
        binary = img > thresh
    else:
        binary = img > min_I  # resort to the manual guidance.
    # connected component analysis to identify large areas of high intensity.
    labelled = label(binary)
    invalid_regions = np.zeros(labelled.shape)
    for reg in regionprops(labelled):
        # Mark components whose pixel count exceeds max_area as invalid;
        # reg.label indexes the component directly (more robust than relying
        # on the enumeration order matching label values).
        if reg.area > max_area:
            invalid_regions[labelled == reg.label] = 1
    # Fill holes, then dilate to smooth the mask edges.
    invalid_regions = binary_dilation(binary_fill_holes(invalid_regions > 0),
                                      disk(dilation))
    return invalid_regions
def filter_noise_centroids_detection(centroids, mask):
    """ Removes centroids that fall on masked (background) pixels.

    Given (y,x) coordinates and a binary mask of 0,1 of background regions,
    removes coordinates that lie in 1 areas (background).

    Parameters
    ----------
    centroids : numpy array
        array of (y,x) 2D coordinates.
    mask : numpy array
        boolean or integer mask with values 1 or 0 denoting invalid and valid
        spatial regions respectively.

    Returns
    -------
    filtered_centroids : numpy array
        array of only valid (y,x) 2D coordinates that lie in mask==0 regions.
    select : bool array
        a binary array either 0 or 1 indicating which centroids are valid.
    """
    # Look up the mask value under each centroid; centroids are stored (y, x).
    # `np.int` was removed in NumPy 1.24 -- use the builtin `int` instead.
    rows = centroids[:, 0].astype(int)
    cols = centroids[:, 1].astype(int)
    valid_mask = mask[rows, cols]  # (y,x) format
    select = valid_mask == 0  # keep centroids over valid (mask == 0) pixels
    filtered_centroids = centroids[select]
    return filtered_centroids, select
def filter_border_centroids_detection(centroids, size, limits):
    """ Drops detections lying within `size` pixels of the image border.

    Parameters
    ----------
    centroids : numpy array
        array of (y,x) 2D coordinates.
    size : int
        border size: how many pixels from the image edge count as border.
        An isotropic border is assumed.
    limits : tuple-like
        (y_max, x_max) pair giving the number of rows and columns of the image.

    Returns
    -------
    filtered_centroids : numpy array
        array of only the (y,x) 2D coordinates outside the border zone.
    select : bool array
        boolean array, True for centroids kept (i.e. outside the border zone).
    """
    y_max, x_max = limits[0], limits[1]
    # A centroid survives only if both its row and column coordinates lie
    # strictly inside the (size, limit - size) band.
    inside_y = (centroids[:, 0] > size) & (centroids[:, 0] < y_max - size)
    inside_x = (centroids[:, 1] > size) & (centroids[:, 1] < x_max - size)
    select = inside_y & inside_x
    return centroids[select], select
def filter_centrioles_BCV(centroids, max_slice_im, patch_size, CV_thresh=0.3):
    r""" Filters detections by local signal-to-noise ratio.

    Given (y,x) centroid coordinates and the maximum-slice whole frame image,
    filter detections based on signal-to-noise (SNR) ratio within local image
    crops. The SNR measure used is the coefficient of variation,
    :math:`\sigma/\mu`, where :math:`\sigma` and :math:`\mu` are the standard
    deviation and mean of the pixel intensities in the image patch.

    (Raw docstring: the LaTeX backslashes are not valid Python escapes.)

    Parameters
    ----------
    centroids : numpy array
        array of (y,x) 2D coordinates.
    max_slice_im : numpy array
        a grayscale 2D image
    patch_size : int (odd)
        width of the local area to crop around the given (y,x) centroid
    CV_thresh : float
        Signal-to-noise ratio cut-off where SNR is measured by CV, i.e.
        centroids are kept if :math:`CV \geq` CV_thresh

    Returns
    -------
    filtered_centroids : numpy array
        array of only valid (y,x) 2D coordinates with :math:`CV \geq` CV_thresh.
    select : bool array
        a binary array either 0 or 1 indicating which centroids pass the cut-off.
    filtered_CV : array
        array with the corresponding CV of filtered_centroids.
    """
    # signal (biological coefficient of variation filter): crop a patch
    # around every centroid and compute std/mean as a local SNR proxy.
    patches = crop_patches_from_img(max_slice_im, centroids, width=patch_size)
    snr_patches = np.hstack([np.std(p)/np.mean(p) for p in patches])
    # Keep only detections whose local contrast clears the threshold
    # (inclusive, matching the original behaviour).
    select = snr_patches >= CV_thresh
    filtered_centroids = centroids[select]
    filtered_CV = snr_patches[select]
    return filtered_centroids, select, filtered_CV
def remove_duplicate_centrioles(centroids, min_dist, lam=1000):
    """ Collapses duplicate detections, keeping one (y,x) instance per pair.

    Two (y,x) coordinates are considered duplicates if the Euclidean distance
    between them is less than `min_dist`.

    Parameters
    ----------
    centroids : numpy array
        array of (y,x) 2D coordinates.
    min_dist : float
        two (y,x) coordinates are a duplicate if the distance between them is
        less than min_dist.
    lam : float
        a very large float, typically just a number larger than the image
        diagonal, placed on the diagonal of the distance matrix so a point
        never pairs with itself.

    Returns
    -------
    filtered_centroids : numpy array
        array of unique (y,x) 2D coordinates.
    select : bool array
        a binary array either 0 or 1 indicating which centroids are taken as
        unique (y,x) instances.
    """
    # scipy's cdist computes the same Euclidean pairwise distance matrix as
    # sklearn's pairwise_distances without the extra scikit-learn dependency
    # (scipy is already used elsewhere in this module).
    from scipy.spatial.distance import cdist
    dist_matrix = cdist(centroids, centroids)
    dist_matrix += np.diag(lam*np.ones(len(centroids)))  # prevent self interaction.
    # Greedy scan: for every still-selected point, if its nearest neighbour
    # is closer than min_dist, drop that neighbour as a duplicate.
    select_filter = np.ones(len(centroids))
    for i in range(len(dist_matrix)):
        if select_filter[i] == 1:
            dist = dist_matrix[i]
            min_dist_arg = np.argmin(dist)
            if dist[min_dist_arg] < min_dist:
                select_filter[min_dist_arg] = 0  # set to false.
    select_filter = select_filter > 0  # make binary
    filtered_centroids = centroids[select_filter]
    return filtered_centroids, select_filter
def detect_centrioles_in_img( zstack_img, size, aniso_params, patch_size, CV_thresh=0.3, tslice=0, is_img_slice=False, filter_border=True, filter_high_intensity_bg=True, remove_duplicates=True, filter_CV=True, separation=5, invert=False, minmass=10, minoverlap=10, bg_min_I=0.2, bg_max_area=1000, bg_dilation=3, bg_invalid_check=0.5, debug=False):
""" Primary function that wraps various functions in this module into one API call to detect centrioles given an image or image stack.
Parameters
----------
zstack_img : numpy array
either
i) a temporal z-stack (n_frames x n_z x n_rows x n_cols),
ii) a z-stack (n_z x n_rows x n_cols) or
iii) a grayscale image (n_rows x n_cols)
size : float
Approximate expected width of centriole to detect in image pixels.
aniso_params : Python dict
A Python dictionary giving the parameters for running the anisotropic filtering of Perona-Malik [1]_. This dictionary should contain the following keys: 'iterations', 'delta', kappa', see :meth:`image_fn.perona_malik`
patch_size : int
size of the local image patch to crop for filtering by | |
from sympy import (Symbol, S, exp, log, sqrt, oo, E, zoo, pi, tan, sin, cos,
cot, sec, csc, Abs, symbols)
from sympy.calculus.util import (function_range, continuous_domain, not_empty_in,
periodicity, lcim, AccumBounds)
from sympy.core import Add, Mul, Pow
from sympy.sets.sets import Interval, FiniteSet, Complement, Union
from sympy.utilities.pytest import raises
from sympy.functions.special.gamma_functions import gamma
from sympy.abc import x
# Module-level real-valued symbol reused by the tests below (e.g. the
# AccumBounds addition and not_empty_in checks).
a = Symbol('a', real=True)
def test_function_range():
    """Regression tests for function_range over intervals and real domains."""
    x, y, a, b = symbols('x y a b')
    assert function_range(sin(x), x, Interval(-pi/2, pi/2)
        ) == Interval(-1, 1)
    assert function_range(sin(x), x, Interval(0, pi)
        ) == Interval(0, 1)
    assert function_range(tan(x), x, Interval(0, pi)
        ) == Interval(-oo, oo)
    assert function_range(tan(x), x, Interval(pi/2, pi)
        ) == Interval(-oo, 0)
    # Use exact rationals: the float literals 2/7 and 8/3 do not compare
    # structurally equal to the Rational endpoints function_range returns.
    assert function_range((x + 3)/(x - 2), x, Interval(-5, 5)
        ) == Union(Interval(-oo, S(2)/7), Interval(S(8)/3, oo))
    assert function_range(1/(x**2), x, Interval(-1, 1)
        ) == Interval(1, oo)
    assert function_range(exp(x), x, Interval(-1, 1)
        ) == Interval(exp(-1), exp(1))
    assert function_range(log(x) - x, x, S.Reals
        ) == Interval(-oo, -1)
    assert function_range(sqrt(3*x - 1), x, Interval(0, 2)
        ) == Interval(0, sqrt(5))
    assert function_range(x*(x - 1) - (x**2 - x), x, S.Reals
        ) == FiniteSet(0)
    assert function_range(x*(x - 1) - (x**2 - x) + y, x, S.Reals
        ) == FiniteSet(y)
    assert function_range(sin(x), x, Union(Interval(-5, -3), FiniteSet(4))
        ) == Union(Interval(-sin(3), 1), FiniteSet(sin(4)))
    assert function_range(cos(x), x, Interval(-oo, -4)
        ) == Interval(-1, 1)
    # Domains/functions that function_range cannot handle must raise.
    raises(NotImplementedError, lambda : function_range(
        exp(x)*(sin(x) - cos(x))/2 - x, x, S.Reals))
    raises(NotImplementedError, lambda : function_range(
        log(x), x, S.Integers))
    raises(NotImplementedError, lambda : function_range(
        sin(x)/2, x, S.Naturals))
def test_continuous_domain():
    """Regression tests for continuous_domain of trig and rational functions."""
    x = Symbol('x')
    assert continuous_domain(sin(x), x, Interval(0, 2*pi)) == Interval(0, 2*pi)
    assert continuous_domain(tan(x), x, Interval(0, 2*pi)) == \
        Union(Interval(0, pi/2, False, True), Interval(pi/2, 3*pi/2, True, True),
              Interval(3*pi/2, 2*pi, True, False))
    assert continuous_domain((x - 1)/((x - 1)**2), x, S.Reals) == \
        Union(Interval(-oo, 1, True, True), Interval(1, oo, True, True))
    # S(1)/4 keeps the endpoint an exact Rational; the float literal 1/4
    # would not compare structurally equal to the computed boundary.
    assert continuous_domain(log(x) + log(4*x - 1), x, S.Reals) == \
        Interval(S(1)/4, oo, True, True)
    assert continuous_domain(1/sqrt(x - 3), x, S.Reals) == Interval(3, oo, True, True)
    assert continuous_domain(1/x - 2, x, S.Reals) == \
        Union(Interval.open(-oo, 0), Interval.open(0, oo))
    assert continuous_domain(1/(x**2 - 4) + 2, x, S.Reals) == \
        Union(Interval.open(-oo, -2), Interval.open(-2, 2), Interval.open(2, oo))
def test_not_empty_in():
    """Checks for not_empty_in over finite-set / interval intersections."""
    FS = FiniteSet
    assert not_empty_in(FS(x, 2*x).intersect(Interval(1, 2, True, False)), x) == (
        Interval(S(1)/2, 2, True, False))
    assert not_empty_in(FS(x, x**2).intersect(Interval(1, 2)), x) == (
        Union(Interval(-sqrt(2), -1), Interval(1, 2)))
    assert not_empty_in(FS(x**2 + x, x).intersect(Interval(2, 4)), x) == (
        Union(Interval(-sqrt(17)/2 - S(1)/2, -2),
              Interval(1, -S(1)/2 + sqrt(17)/2), Interval(2, 4)))
    # Rational images of the whole real line exclude only the pole.
    assert not_empty_in(FS(x/(x - 1)).intersect(S.Reals), x) == (
        Complement(S.Reals, FS(1)))
    assert not_empty_in(FS(a/(a - 1)).intersect(S.Reals), a) == (
        Complement(S.Reals, FS(1)))
    assert not_empty_in(FS((x**2 - 3*x + 2)/(x - 1)).intersect(S.Reals), x) == (
        Complement(S.Reals, FS(1)))
    assert not_empty_in(FS(3, 4, x/(x - 1)).intersect(Interval(2, 3)), x) == (
        Union(Interval(S(3)/2, 2), FS(3)))
    assert not_empty_in(FS(x/(x**2 - 1)).intersect(S.Reals), x) == (
        Complement(S.Reals, FS(-1, 1)))
    assert not_empty_in(FS(x, x**2).intersect(Union(Interval(1, 3, True, True),
                                                    Interval(4, 5))), x) == (
        Union(Interval(-sqrt(5), -2), Interval(-sqrt(3), -1, True, True),
              Interval(1, 3, True, True), Interval(4, 5)))
    assert not_empty_in(FS(1).intersect(Interval(3, 4)), x) == S.EmptySet
    assert not_empty_in(FS(x**2/(x + 2)).intersect(Interval(1, oo)), x) == (
        Union(Interval(-2, -1, True, False), Interval(2, oo)))
def test_periodicity():
    """Regression tests for periodicity of trig, modular and aperiodic input."""
    x = Symbol('x')
    y = Symbol('y')
    assert periodicity(sin(2*x), x) == pi
    assert periodicity((-2)*tan(4*x), x) == pi/4
    assert periodicity(sin(x)**2, x) == 2*pi
    assert periodicity(3**tan(3*x), x) == pi/3
    assert periodicity(tan(x)*cos(x), x) == 2*pi
    assert periodicity(sin(x)**(tan(x)), x) == 2*pi
    assert periodicity(tan(x)*sec(x), x) == 2*pi
    assert periodicity(sin(2*x)*cos(2*x) - y, x) == pi/2
    assert periodicity(tan(x) + cot(x), x) == pi
    assert periodicity(sin(x) - cos(2*x), x) == 2*pi
    assert periodicity(sin(x) - 1, x) == 2*pi
    assert periodicity(sin(4*x) + sin(x)*cos(x), x) == pi
    assert periodicity(exp(sin(x)), x) == 2*pi
    assert periodicity(log(cot(2*x)) - sin(cos(2*x)), x) == pi
    assert periodicity(sin(2*x)*exp(tan(x) - csc(2*x)), x) == pi
    assert periodicity(cos(sec(x) - csc(2*x)), x) == 2*pi
    assert periodicity(tan(sin(2*x)), x) == pi
    assert periodicity(2*tan(x)**2, x) == pi
    assert periodicity(sin(x%4), x) == 4
    assert periodicity(sin(x)%4, x) == 2*pi
    # S(4)/3: the float literal 4/3 is not exactly Rational(4, 3), so the
    # comparison against the computed (rational) period would fail.
    assert periodicity(tan((3*x-2)%4), x) == S(4)/3
    assert periodicity((sqrt(2)*(x+1)+x) % 3, x) == 3 / (sqrt(2)+1)
    # `is None` is the idiomatic identity test for a None result.
    assert periodicity((x**2+1) % x, x) is None
    assert periodicity(sin(x)**2 + cos(x)**2, x) == S.Zero
    assert periodicity(tan(x), y) == S.Zero
    assert periodicity(exp(x), x) is None
    assert periodicity(log(x), x) is None
    assert periodicity(exp(x)**sin(x), x) is None
    assert periodicity(sin(x)**y, y) is None
    assert periodicity(Abs(sin(Abs(sin(x)))), x) == pi
    assert all(periodicity(Abs(f(x)), x) == pi for f in (
        cos, sin, sec, csc, tan, cot))
    assert periodicity(Abs(sin(tan(x))), x) == pi
    assert periodicity(Abs(sin(sin(x) + tan(x))), x) == 2*pi
    # `==`, not `is`: `2*pi` is not guaranteed to be the identical cached
    # object (e.g. with the sympy cache disabled), so an identity check is
    # fragile even when it happens to pass.
    assert periodicity(sin(x) > S.Half, x) == 2*pi
    assert periodicity(x > 2, x) is None
    assert periodicity(x**3 - x**2 + 1, x) is None
    assert periodicity(Abs(x), x) is None
    assert periodicity(Abs(x**2 - 1), x) is None
    assert periodicity((x**2 + 4)%2, x) is None
    assert periodicity((E**x)%3, x) is None
    assert periodicity(gamma(S(1)/7 + 1/x), x) is None
def test_periodicity_check():
    """periodicity() with check=True must verify the candidate period."""
    x, y = symbols('x y')
    # explicitly verified periods
    assert periodicity(tan(x), x, check=True) == pi
    assert periodicity(sin(x) + cos(x), x, check=True) == 2*pi
    # related cases exercised without verification
    assert periodicity(sec(x), x) == 2*pi
    assert periodicity(sin(x*y), x) == 2*pi/abs(y)
    assert periodicity(Abs(sec(sec(x))), x) == pi
def test_lcim():
    """lcim returns the least common integral multiple of the list, or None."""
    # rational inputs
    assert lcim([S(1)/2, S(2), S(3)]) == 6
    # irrational but commensurable inputs (pi is imported at module level)
    assert lcim([pi/2, pi/4, pi]) == pi
    assert lcim([2*pi, pi/2]) == 2*pi
    assert lcim([S(2) + 2*E, E/3 + S(1)/3, S(1) + E]) == S(2) + 2*E
    # incommensurable inputs have no lcim
    assert lcim([S(1), 2*pi]) is None
def test_AccumBounds():
    """AccumBounds construction, +/- arithmetic, scalar division and abs()."""
    B = AccumBounds
    assert B(1, 2).args == (1, 2)
    assert B(1, 2).delta == S(1)
    assert B(1, 2).mid == S(3)/2
    assert B(1, 3).is_real == True
    assert B(1, 1) == S(1)
    # scalar and interval addition/subtraction, both operand orders
    assert B(1, 2) + 1 == B(2, 3)
    assert 1 + B(1, 2) == B(2, 3)
    assert B(1, 2) + B(2, 3) == B(3, 5)
    assert -B(1, 2) == B(-2, -1)
    assert B(1, 2) - 1 == B(0, 1)
    assert 1 - B(1, 2) == B(-1, 0)
    assert B(2, 3) - B(1, 2) == B(0, 2)
    # a generic symbol stays unevaluated; a real symbol shifts the bounds
    assert x + B(1, 2) == Add(B(1, 2), x)
    assert a + B(1, 2) == B(1 + a, 2 + a)
    assert B(1, 2) - x == Add(B(1, 2), -x)
    # arithmetic with infinities
    assert B(-oo, 1) + oo == B(-oo, oo)
    assert B(1, oo) + oo == oo
    assert B(1, oo) - oo == B(-oo, oo)
    assert (-oo - B(-1, oo)) == -oo
    assert B(-oo, 1) - oo == -oo
    assert B(1, oo) - oo == B(-oo, oo)
    assert B(-oo, 1) - (-oo) == B(-oo, oo)
    assert (oo - B(1, oo)) == B(-oo, oo)
    assert (-oo - B(1, oo)) == -oo
    # division by scalars
    assert B(1, 2)/2 == B(S(1)/2, 1)
    assert 2/B(2, 3) == B(S(2)/3, 1)
    assert 1/B(-1, 1) == B(-oo, oo)
    # absolute value folds the bounds onto the non-negative axis
    assert abs(B(1, 2)) == B(1, 2)
    assert abs(B(-2, -1)) == B(1, 2)
    assert abs(B(-2, 1)) == B(0, 2)
    assert abs(B(-1, 2)) == B(0, 2)
def test_AccumBounds_mul():
    """AccumBounds multiplication by scalars, each other, zero and infinity."""
    B = AccumBounds
    assert B(1, 2)*2 == B(2, 4)
    assert 2*B(1, 2) == B(2, 4)
    assert B(1, 2)*B(2, 3) == B(2, 6)
    # zero collapses finite bounds but not semi-infinite ones
    assert B(1, 2)*0 == 0
    assert B(1, oo)*0 == B(0, oo)
    assert B(-oo, 1)*0 == B(-oo, 0)
    assert B(-oo, oo)*0 == B(-oo, oo)
    # a free symbol keeps the product unevaluated
    assert B(1, 2)*x == Mul(B(1, 2), x, evaluate=False)
    # multiplication by +/-oo tracks the signs of the bounds
    assert B(0, 2)*oo == B(0, oo)
    assert B(-2, 0)*oo == B(-oo, 0)
    assert B(0, 2)*(-oo) == B(-oo, 0)
    assert B(-2, 0)*(-oo) == B(0, oo)
    assert B(-1, 1)*oo == B(-oo, oo)
    assert B(-1, 1)*(-oo) == B(-oo, oo)
    assert B(-oo, oo)*oo == B(-oo, oo)
def test_AccumBounds_div():
assert AccumBounds(-1, 3)/AccumBounds(3, 4) == AccumBounds(-S(1)/3, 1)
assert AccumBounds(-2, 4)/AccumBounds(-3, 4) == AccumBounds(-oo, oo)
assert AccumBounds(-3, -2)/AccumBounds(-4, 0) == AccumBounds(S(1)/2, oo)
# these two tests can have a better answer
# after Union of AccumBounds is improved
assert AccumBounds(-3, -2)/AccumBounds(-2, 1) == AccumBounds(-oo, oo)
assert AccumBounds(2, 3)/AccumBounds(-2, 2) == AccumBounds(-oo, oo)
assert AccumBounds(-3, -2)/AccumBounds(0, 4) == AccumBounds(-oo, -S(1)/2)
assert AccumBounds(2, 4)/AccumBounds(-3, 0) == AccumBounds(-oo, -S(2)/3)
assert AccumBounds(2, 4)/AccumBounds(0, 3) == AccumBounds(S(2)/3, oo)
assert AccumBounds(0, 1)/AccumBounds(0, 1) == AccumBounds(0, oo)
assert AccumBounds(-1, 0)/AccumBounds(0, 1) == AccumBounds(-oo, 0)
assert AccumBounds(-1, 2)/AccumBounds(-2, 2) == AccumBounds(-oo, oo)
assert 1/AccumBounds(-1, 2) == AccumBounds(-oo, oo)
assert 1/AccumBounds(0, 2) == AccumBounds(S(1)/2, oo)
assert (-1)/AccumBounds(0, 2) == AccumBounds(-oo, -S(1)/2)
assert 1/AccumBounds(-oo, 0) == AccumBounds(-oo, 0)
assert 1/AccumBounds(-1, 0) == AccumBounds(-oo, -1)
assert (-2)/AccumBounds(-oo, 0) == AccumBounds(0, oo)
assert 1/AccumBounds(-oo, -1) == AccumBounds(-1, 0)
assert AccumBounds(1, 2)/a == Mul(AccumBounds(1, 2), 1/a, evaluate=False)
assert AccumBounds(1, 2)/0 == AccumBounds(1, 2)*zoo
assert AccumBounds(1, oo)/oo == AccumBounds(0, oo)
assert AccumBounds(1, | |
import numpy as np
import math
import bisect
import scipy.stats as stats
from typing import TypeVar, Callable
from gym_fabrikatioRL.envs.env_utils import UndefinedInputType
from copy import deepcopy
# Generic type variable used in annotations for parameters that accept
# heterogeneous specification formats (arrays, strings, etc.).
T = TypeVar('T')
class SchedulingDimensions:
    """
    Initializes and stores scheduling problem dimensions.

    The per-job operation count is resolved from ``n_operations``:
      * a numpy array is taken verbatim;
      * the string 'default_sampling' draws counts uniformly from
        [min_n_operations, max_n_operations) for every job;
      * otherwise, if the two bounds coincide, every job gets
        ``max_n_operations`` operations;
      * any other input raises ``UndefinedInputType``.
    """
    def __init__(self, n_jobs, n_machines, n_tooling_lvls, n_types,
                 n_operations, min_n_operations, max_n_operations,
                 max_n_failures, max_jobs_visible, n_jobs_initial):
        self.__n_jobs = n_jobs
        self.__n_machines = n_machines
        self.__n_tooling_lvls = n_tooling_lvls
        self.__n_types = n_types
        if isinstance(n_operations, np.ndarray):
            # Explicit per-job operation counts supplied by the caller.
            self.__n_operations = n_operations
        elif n_operations == 'default_sampling':
            assert min_n_operations < max_n_operations
            # np.random.randint samples from [low, high).
            self.__n_operations = np.random.randint(
                min_n_operations, max_n_operations, n_jobs)
        elif min_n_operations == max_n_operations:
            # Degenerate range: every job has the same number of operations.
            self.__n_operations = np.repeat(max_n_operations, n_jobs)
        else:
            raise UndefinedInputType(
                type(n_operations), " n_operations parameter.")
        self.__min_n_operations = min_n_operations
        self.__max_n_operations = max_n_operations
        self.__max_n_failures = max_n_failures
        self.__max_jobs_visible = max_jobs_visible
        self.__n_jobs_initial = n_jobs_initial
    # <editor-fold desc="Getters">
    @property
    def n_jobs(self):
        return self.__n_jobs
    @property
    def n_machines(self):
        return self.__n_machines
    @property
    def n_tooling_lvls(self):
        return self.__n_tooling_lvls
    @property
    def n_types(self):
        return self.__n_types
    @property
    def n_operations(self):
        return self.__n_operations
    @property
    def min_n_operations(self):
        return self.__min_n_operations
    @property
    def max_n_operations(self):
        return self.__max_n_operations
    @property
    def max_n_failures(self):
        return self.__max_n_failures
    @property
    def max_jobs_visible(self):
        return self.__max_jobs_visible
    @property
    def n_jobs_initial(self):
        # BUG FIX: previously returned self.__max_jobs_visible, so the
        # initially visible job count was reported instead of the number of
        # jobs released at the start.
        return self.__n_jobs_initial
    # </editor-fold>
class GraphUtils:
"""
Methods for precedence graph generation and and transformation.
"""
# <editor-fold desc="Transformation Functions">
@staticmethod
def graph_adjacency_list_to_matrix(graph_adjacency_list: dict,
max_n_ops: int,
current_job: T = -1) -> np.ndarray:
"""
Converts an adjacency list representation of the precedence constraints
graph to a matrix representation. The graph root given by current_job
parameter is ignored.
:param current_job: Representation of the job root; this node's
children represent the first eligible operations in a job.
:param graph_adjacency_list: The adjacency list representation of the
precedence constraints partial order graph.
:param max_n_ops: The size of the matrix; needed for consitency with the
other jobs.
:return: The matrix representation of the precedence constraints graph.
"""
graph_matrix = np.zeros((max_n_ops, max_n_ops))
for node in graph_adjacency_list.keys():
if node == current_job: # ignore job root
continue
for neighbor in graph_adjacency_list[node]:
graph_matrix[node][neighbor] = 1
return graph_matrix
@staticmethod
def graph_matrix_to_adjacency_list(matrix: np.ndarray, n_ops: int,
current_job: int) -> dict:
"""
Converts a n_operations x n_operations matrix containing the
an adjacency into a corresponding adjacency dictionary representation.
:param matrix: A precedence graph matrix.
:param n_ops: List of the number of operations in each job.
:param current_job: Current job number for the root label.
:return: The adjacency dictionary representation of the matrix.
"""
ingress_counts, job_adjacency = {}, {}
for node_out in range(n_ops):
for node_in in range(n_ops):
edge = matrix[node_out][node_in]
if edge == 0:
continue
if node_out not in job_adjacency:
job_adjacency[node_out] = {node_in}
else:
job_adjacency[node_out].add(node_in)
if node_in not in ingress_counts:
ingress_counts[node_in] = 1
else:
ingress_counts[node_in] += 1
job_adjacency[(current_job,)] = (set(range(n_ops)) -
set(ingress_counts.keys()))
return job_adjacency
# </editor-fold>
# <editor-fold desc="POm Precedence Generation">
@staticmethod
def get_random_precedence_relation(n_ops: int,
n_ops_max: int) -> (dict, np.ndarray):
"""
DEPRECATED COMMENT
Creates random hasse diagrams representing the operation precedence
relation. It work by iteratively sampling random integers smaller than
10e6 and adding their divisors (found in O(sqrt(n))) to a list until the
latter's length is between n_ops_min and n_ops_max. Then the adjacency
list representation corresponding to the 'divides' relation is computed
and transitively reduced.
The nodes (divisors) are renamed while building the adjacency lists to
sequential integers, wile maintaining the original relation.
It is ensured that 1 (renamed to 0) is always part of the relation,
such that the job root dummy can easily be inserted.
:param n_ops: The minimum number of operations.
:param n_ops_max: The maximum number of job operations.
:return: The Hasse diagram of the the job operation precedence
constraints in its matrix and adjacency list representation.
"""
divisors = set([])
while len(divisors) < n_ops + 1:
new_int = np.random.randint(1000000)
divisors |= set(GraphUtils.__get_divisors(new_int))
if len(divisors) > n_ops + 1:
while len(divisors) > n_ops + 1:
divisors.pop()
break
if 1 not in divisors:
divisors.pop()
divisors.add(1)
graph = GraphUtils.__get_adjacency(divisors)
al_hasse = GraphUtils.__transitive_reduction(graph)
am = GraphUtils.graph_adjacency_list_to_matrix(
al_hasse, n_ops_max, -1) # -1 is the generic job root node
return al_hasse, am
@staticmethod
def __get_divisors(n):
"""
Finds the divisors of a number. O(sqrt(n))
Source: https://github.com/tnaftali/hasse-diagram-processing-py
"""
divisors = []
limit = int(str(math.sqrt(n)).split('.')[0])
for i in range(1, limit + 1):
if n % i == 0:
bisect.insort(divisors, i)
if i != (n / i):
bisect.insort(divisors, n / i)
return divisors
@staticmethod
def __get_adjacency(divisors: set):
"""
Constructs Adjacency list repesentation for the division relation;
Renames the nodes sequentially; O(n^2).
"""
latest_node_nr = -1
sequential_names = {}
graph = {}
for i in divisors:
if i not in sequential_names:
sequential_names[i] = latest_node_nr
latest_node_nr += 1
neighbors = set([])
for j in divisors:
if j not in sequential_names:
sequential_names[j] = latest_node_nr
latest_node_nr += 1
if j % i == 0 and i != j:
neighbors.add(sequential_names[j])
graph[sequential_names[i]] = neighbors
return graph
@staticmethod
def __transitive_closure(graph, node, closure, visited):
"""
Adds all nodes reacheable from the node parameter in the graph
parameter to the closure parameter.
"""
if node in visited:
return
visited |= {node}
closure |= graph[node] # O(1)
for neighbor in graph[node]: # O(|V| + |E|)
GraphUtils.__transitive_closure(graph, neighbor, closure, visited)
@staticmethod
def __transitive_reduction(graph):
"""
Computes the transitive reduction by eliminating direct node
neighbors who are present in the union of all the direct neighbor
transitive clauses. O(N)
"""
reduced_graph = {}
for node in graph.keys():
neighbor_closure, visited = set({}), set({})
good_neighbors = set({})
for neighbor in graph[node]:
GraphUtils.__transitive_closure(
graph, neighbor, neighbor_closure, visited)
for neighbor in graph[node]:
if neighbor not in neighbor_closure:
good_neighbors.add(neighbor)
reduced_graph[node] = good_neighbors
return reduced_graph
# </editor-fold>
# <editor-fold desc="Jm/Fm Precedence Generation">
@staticmethod
def get_job_chain_precedence_graphs(n_jobs: int, n_ops: np.ndarray) -> list:
    """
    Build one linear (chain) precedence graph per job.

    Every job graph is rooted at the tuple ``(job_index,)``, which points
    at operation 0; each operation then points at its direct successor.

    Example for ``n_jobs == 2`` and ``n_ops == [3, 5]``::

        [{(0,): [0], 0: [1], 1: [2]},
         {(1,): [0], 0: [1], 1: [2], 2: [3], 3: [4]}]

    :param n_jobs: Number of jobs to build chain graphs for.
    :param n_ops: Per-job operation counts.
    :return: List of adjacency dicts, one per job.
    """
    chain_graphs = []
    for job_idx in range(n_jobs):
        adjacency = GraphUtils.__graph_chain_precedence(
            list(range(n_ops[job_idx])))
        # Dummy root entry tying the job node to its first operation.
        adjacency[(job_idx,)] = [0]
        chain_graphs.append(adjacency)
    return chain_graphs
@staticmethod
def __graph_chain_precedence(operations_range: list) -> dict:
adjacency_dict = {}
start_node = operations_range[0]
for node in operations_range[1:]:
adjacency_dict[start_node] = [node]
start_node = node
return adjacency_dict
# </editor-fold>
class JobMatrices:
def __init__(self, dims: SchedulingDimensions,
             job_pool: np.ndarray, op_types: T, op_durations: T,
             op_tool_sets: T, op_precedence: T, due_dates: T,
             time_inter_release: T,
             perturbation_processing_time: T):  # perturbation_due_date: T
    """
    Build the per-job matrices of a scheduling instance.

    When ``job_pool`` is given, jobs are drawn from it at random (with
    replacement) and the type/duration/tool-set/precedence columns are
    taken straight from the sampled rows. Otherwise each matrix is built
    from its own specification argument. Release times, due dates and
    processing-time perturbations are generated in both cases.

    NOTE(review): ``T`` is a project type alias — presumably
    "ndarray, callable or str spec"; confirm against its declaration.
    """
    if job_pool is not None:
        # Sample n_jobs rows from the pool, with replacement.
        job_idxs = np.random.choice(
            len(job_pool), dims.n_jobs, replace=True)
        jobs = job_pool[job_idxs]
        # todo: pool as an object
        # todo: check and report pool consistency (i.e. dims vs matrix dims)
        # Pool columns: 0=types, 1=durations, 2=tool sets, 3=precedence spec.
        self.__op_type = jobs[:, 0]
        self.__op_duration = jobs[:, 1]
        self.__op_tool_set = jobs[:, 2]
        als, ams = JobMatrices.__set_op_precedence_from_spec(
            dims, jobs[:, 3])
        self.__op_precedence_l, self.__op_precedence_m = als, ams
    else:
        # Build every matrix from its own spec; durations and tool sets
        # depend on the resolved operation types.
        self.__op_type = JobMatrices.__set_op_types(dims, op_types)
        self.__op_duration = JobMatrices.__set_op_duration(
            dims, op_durations, self.__op_type)
        self.__op_tool_set = JobMatrices.__set_op_tool_sets(
            dims, op_tool_sets, self.__op_type)
        als, ams = JobMatrices.__set_op_precedence(dims, op_precedence)
        self.__op_precedence_l, self.__op_precedence_m = als, ams
    self.__op_perturbations = JobMatrices.__set_op_perturbation(
        dims, perturbation_processing_time)
    # Release times precede due dates: due dates are offset from releases.
    self.__job_release_times = JobMatrices.__set_release_dates(
        dims, time_inter_release, self.operation_durations)
    self.__job_due_dates = JobMatrices.__set_due_dates(
        dims, due_dates, self.__job_release_times, self.operation_durations)
    # todo!
    # self.__job_due_date_perturbation = JobMatrices.__set_due_date_noise(
    #     perturbation_due_date)
# <editor-fold desc="Constructor Helpers">
@staticmethod
def __set_op_types(dims: SchedulingDimensions, op_types: T):
    """
    Resolve the (n_jobs x max_n_operations) matrix of operation types.

    Accepted ``op_types`` values:
      * ``np.ndarray`` — used directly, cast to int16;
      * callable — sampled via ``JobMatrices.__sample``;
      * ``'default_sampling'`` — uniform random types in [1, n_types];
      * ``'Jm'`` — each job is its own random permutation of all types;
      * ``'Fm'`` — one random permutation shared by all jobs (flow shop).

    :raises UndefinedInputType: For any other input.
    """
    if type(op_types) == np.ndarray:
        return np.array(op_types).astype('int16')
    else:
        n, o_max = dims.n_jobs, dims.max_n_operations
        if callable(op_types):
            set_op_types = JobMatrices.__sample(op_types, (n, o_max))
        elif op_types == 'default_sampling':  # op_types == '':
            set_op_types = np.random.choice(
                range(1, dims.n_types + 1), (n, o_max), replace=True)
        elif op_types == 'Jm':
            # Jm requires square, fixed-length jobs: one op per type.
            assert dims.n_types == dims.max_n_operations
            assert dims.max_n_operations == dims.min_n_operations
            set_op_types = np.zeros((n, o_max))
            for j in range(n):
                set_op_types[j, :] = np.random.permutation(dims.n_types) + 1
        elif op_types == 'Fm':
            # Fm: all jobs share the same machine visiting order.
            assert dims.n_types == dims.max_n_operations
            assert dims.max_n_operations == dims.min_n_operations
            set_op_types = np.zeros((n, o_max))
            job_structure = np.random.permutation(dims.n_types) + 1
            for j in range(n):
                set_op_types[j, :] = job_structure.copy()
        else:
            # NOTE(review): message typos — 'deafault_sampling' should read
            # 'default_sampling', and 'Pm' does not match the 'Jm' branch
            # actually implemented above.
            raise UndefinedInputType(
                type(op_types),
                " operation_types parameter. Accepted inputs are"
                "the 'deafault_sampling' string, a sampling function "
                "taking a shape as a parameter and returning a numpy array "
                "of corresponding size, the string 'Pm' or 'Fm'.")
| |
unreachable / succeeded.
The results are put in (Class).ansible_results, keyed by category name.
"""
# The json_end_idx reference below is important. The playbook run is in json output,
# however the text we're opening here is a mix of free-text and json.
# it's formatted like this.
# <optional> free text
# Giant Glob of JSON
# <optional> free text.
# The json_end_idx variable in this function defines the end of the json.
# Without it, JSON parsing will fail.
dt = datetime.datetime.now()
with open(jout_file, 'r') as f:
all_output = f.readlines()
if len(all_output) > 1:
json_start_idx = all_output.index('{\n')
json_end_idx, _ = max(enumerate(all_output), key=operator.itemgetter(1))
else:
if len(all_output) == 1:
cls.log.error("ansible output:")
cls.log.error(all_output[0])
else:
cls.log.error("ansible produced no output")
raise Exception('Failed to parse ansible output')
j = json.loads(''.join(all_output[json_start_idx:json_end_idx + 1]))['stats']
unreachable = []
failed = []
succeeded = []
if 'localhost' in j.keys():
del j['localhost']
for h in j.keys():
if j[h]['unreachable'] != 0:
unreachable.append(h)
elif j[h]['failures'] != 0:
failed.append(h)
else:
succeeded.append(h)
# ran into issues where etcd_prescale_down category key does not exist in the dict
if category not in cls.nodes_to_add.keys():
cls.nodes_to_add[category] = []
# Pruning down to category only.
cat_results = {
'succeeded': [x for x in succeeded if x in cls.nodes_to_add[category]],
'failed': [x for x in failed if x in cls.nodes_to_add[category]],
'unreachable': [x for x in unreachable if x in cls.nodes_to_add[category]]
}
cls.ansible_results[category] = cat_results
cls.log.info("- [{}] playbook run results: {}".format(category, cat_results))
final_logfile = "/var/log/aws-quickstart-openshift-scaling.{}-{}-{}-{}T{}{}{}".format(
category, dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second
)
os.rename(jout_file, final_logfile)
cls.log.info("The json output logfile has been moved to %s" % final_logfile)
@classmethod
def summarize_playbook_results(cls):
    """
    Log a per-category summary of the ansible runs and migrate the
    hosts that succeeded into their target inventory sections.
    """
    cls.log.debug("ansible_results: %s" % cls.ansible_results)
    for cat in cls.ansible_results.keys():
        cls.log.debug("running %s to see whether inventory must be updated" % cat)
        if cat.startswith("pre_"):
            # pre_* categories never move inventory entries.
            continue
        cjson = cls.ansible_results[cat]
        cls.log.debug("cjson: %s" % cjson)
        cls.log.info("Category: {}, Results: {} / {} / {}, ({} / {} / {})".format(
            cat, len(cjson['succeeded']), len(cjson['failed']), len(cjson['unreachable']), 'Succeeded', 'Failed',
            'Unreachable'))
        # Successful masters are additionally registered as nodes.
        additional_add = ['nodes'] if cat == 'masters' else []
        cls.log.debug(
            "running cls.migrate_nodes_between_section(%s, %s, %s)" % (cjson['succeeded'], cat, additional_add))
        cls.migrate_nodes_between_section(cjson['succeeded'], cat, additional_add=additional_add)
class LocalScalingActivity(object):
    """
    Wraps a single ASG scaling-activity document.

    Exposes ``start_time``, ``event_type`` ('launch', 'terminate', None
    for unrecognized activity, False for failed ones) and — for
    recognized, successful activities — the affected ``instance`` id.
    """

    def __init__(self, json_doc):
        self._json = json_doc
        self.start_time = self._json['StartTime']
        self._instance_pattern = 'i-[0-9a-z]+'
        self.event_type = self._determine_scale_type()
        if self.event_type:
            self.instance = self._determine_affected_instance()
            # The raw document is only needed while parsing; drop it.
            del self._json

    def _determine_affected_instance(self):
        """Return the instance id named in the activity description, or None."""
        match = re.compile(self._instance_pattern).search(self._json['Description'])
        return match.group() if match else None

    def _determine_scale_type(self):
        """
        Classify the activity: 'launch', 'terminate', None when the first
        word of the description is unrecognized, False when it failed.
        """
        if self._json['StatusCode'] == 'Failed':
            return False
        first_word = self._json['Description'].split()[0]
        if 'Launching' in first_word:
            return "launch"
        if 'Terminating' in first_word:
            return "terminate"
        return None
class LocalASG(object):
    """
    Class to objectify an ASG.

    Copies the group's CloudFormation tags onto the object, decides
    whether the group belongs to this OpenShift cluster and, if so,
    collects its instances, recent scaling events, and the Ansible host
    definitions used to provision the nodes.
    """
    def __init__(self, json_doc, version='3.9'):
        """
        :param json_doc: One ASG document from the autoscaling
            describe_auto_scaling_groups API.
        :param version: OpenShift version; '3.9' selects the legacy
            node-label layout, anything else the node-group layout.
        """
        self.log = LogUtil.get_root_logger()
        # id -> LocalASInstance, plus bookkeeping keys 'list' (all ids)
        # and 'scaling' (ids seen in recent scaling events).
        self._instances = {'list': [], "scaling": []}
        self._asg = boto3.client('autoscaling', InventoryConfig.region_name)
        self.name = json_doc['AutoScalingGroupName']
        self.private_ips = list()
        self.scaling_events = list()
        self.node_hostdefs = dict()
        self.scale_in_progress_instances = {'terminate': [], 'launch': []}
        self.cooldown = json_doc['DefaultCooldown']
        # Scaling events count as "current" for three cooldown periods...
        self._cooldown_upperlimit = self.cooldown * 3
        self.scale_override = False
        # Tag-derived attributes; the _grab_tags loop below may overwrite
        # these via self.__dict__.
        self.logical_name = None
        self.elb_name = None
        self.stack_id = None
        self.logical_id = None
        if self._cooldown_upperlimit <= 300:
            # ...but never less than five minutes.
            self._cooldown_upperlimit = 300
        for tag in self._grab_tags(json_doc['Tags']):
            self.__dict__[tag['key']] = tag['value']
        self.in_openshift_cluster = self._determine_cluster_membership()
        if self.in_openshift_cluster:
            self.openshift_config_category = self._determine_openshift_category(self.logical_id)
            # Set the logical_name
            self.logical_name = InventoryConfig.logical_names[self.logical_id]
            # Sanity check to verify they're in the API.
            # - and populate the InventoryConfig.all_instances dict as a result.
            # - working around edge cases.
            ilist = [i['InstanceId'] for i in json_doc['Instances']]
            InventoryScaling.wait_for_api(instance_id_list=ilist)
            # Grab instances
            for instance in self._grab_instance_metadata(json_doc['Instances']):
                self._instances[instance.InstanceId] = instance
                self._instances['list'].append(instance.InstanceId)
                self.private_ips += instance.private_ips
            # Grab scaling events. Anything newer than (self.cooldown * 3).
            # However, only do so if we're not populating the initial inventory.
            if not InventoryConfig.initial_inventory:
                for scaling_event in self._grab_current_scaling_events():
                    self.scaling_events.append(scaling_event)
                    # If the instance is not already in the config. Done to compensate for the self._
                    # cooldown_upperlimit var.
                    if (scaling_event.event_type == 'launch') and (
                            scaling_event.instance in InventoryConfig.known_instances.keys()):
                        continue
                    if (scaling_event.event_type == 'launch') and (
                            scaling_event.instance in self.scale_in_progress_instances['terminate']):
                        continue
                    self.scale_in_progress_instances[scaling_event.event_type].append(scaling_event.instance)
                    self._instances['scaling'].append(scaling_event.instance)
                for instance in self._instances['list']:
                    # Sanity check.
                    # - If the instance is not in the known_instances list, or defined in a recent scaling event,
                    #   but is in the ASG (we dont know about it otherwise)
                    # -- Add it to the scale_in_progress list, and set scale_override to True, so a scale-up occurs.
                    #    (See: scaler.scale_
                    if (instance not in InventoryConfig.known_instances.keys()) and (
                            instance not in self._instances['scaling']):
                        self.scale_in_progress_instances['launch'].append(instance)
                        self.scale_override = True
            # Grab Inventory host definitions
            for combined_hostdef in self.generate_asg_node_hostdefs(version):
                instance_id, hostdef = combined_hostdef
                InventoryConfig.id_to_ip_map[instance_id] = hostdef['ip_or_dns']
                # 'ip_or_dns' is transport metadata only; strip it before
                # the hostdef is stored for provisioning.
                del hostdef['ip_or_dns']
                InventoryConfig.provisioning_hostdefs[instance_id] = hostdef
                self.node_hostdefs.update(hostdef)

    @staticmethod
    def _grab_tags(tag_json):
        """
        Generator yielding the ASG's CloudFormation tags as
        {'key': ..., 'value': ...} dicts, with keys normalized to
        snake_case (e.g. logical-id -> logical_id).
        """
        i = 0
        while i < len(tag_json):
            if 'cloudformation' in tag_json[i]['Key']:
                # assumes keys shaped like 'aws:cloudformation:logical-id';
                # index 2 is the bare tag name — TODO confirm.
                _k = tag_json[i]['Key'].split(':')[2]
                yield {'key': _k.lower().replace('-', '_'), 'value': tag_json[i]['Value']}
            i += 1

    def _determine_cluster_membership(self):
        """
        Determines if the ASG is within the OpenShift Cluster, by comparing
        its CloudFormation stack id tag against the configured stack id.
        """
        if self.stack_id == InventoryConfig.stack_id:
            self.log.debug("{} matches {} for ASG: {}".format(self.stack_id, InventoryConfig.stack_id, self.name))
            self.log.info("Awesome! This ASG is in the openshift cluster:" + self.name)
            return True
        self.log.debug("{} != {} for ASG: {}".format(self.stack_id, InventoryConfig.stack_id, self.name))
        self.log.info("This ASG is not in the openshift cluster")
        return False

    def _grab_current_scaling_events(self):
        """
        Generator querying the autoscaling API for this ASG's scaling
        activities, yielding the relevant ones: terminations of known
        instances, plus any successful event newer than
        ``_cooldown_upperlimit`` seconds.
        """
        # tzlocal() makes _now timezone-aware; presumably StartTime from
        # the API is aware too, or the subtraction would raise — confirm.
        _now = datetime.datetime.now().replace(tzinfo=dateutil.tz.tzlocal())
        scaling_activities = self._asg.describe_scaling_activities(AutoScalingGroupName=self.name)['Activities']
        i = 0
        while i < len(scaling_activities):
            _se = LocalScalingActivity(scaling_activities[i])
            i += 1
            # If the scaling activity was not successful, move along.
            if not _se.event_type:
                continue
            _diff = _now - _se.start_time
            if (_se.event_type == 'terminate') and (_se.instance in InventoryConfig.known_instances.keys()):
                yield _se
            elif _diff.days == 0 and (_diff.seconds <= self._cooldown_upperlimit):
                yield _se

    @staticmethod
    def _grab_instance_metadata(json_doc):
        """
        Generator wrapping each ASG instance entry in a LocalASInstance.
        """
        i = 0
        while i < len(json_doc):
            yield LocalASInstance(json_doc[i]['InstanceId'])
            i += 1

    @staticmethod
    def _determine_openshift_category(logical_id):
        """
        Determine the openshift category (etcd/nodes/master) from the
        logical id, or None when the id is unknown.

        NOTE(review): this is the same lookup used for logical_name in
        __init__, so the two attributes end up identical.
        """
        try:
            openshift_category = InventoryConfig.logical_names[logical_id]
        except KeyError:
            return None
        return openshift_category

    def generate_asg_node_hostdefs(self, version='3.9'):
        # - ADD IN FILE TO READ FROM DISK FOR DYNAMIC NODE LABELS.
        """
        Generator producing (instance_id, hostdef) tuples for populating
        the Ansible inventory. Instances that are neither pending (0) nor
        running (16) are skipped. The hostdef layout depends on both the
        OpenShift version and this ASG's category (master/glusterfs/etcd/
        node).

        NOTE(review): the bare print() calls below bypass self.log.
        """
        i = 0
        while i < len(self._instances['list']):
            instance_id = self._instances['list'][i]
            node = self._instances[instance_id]
            # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceState.html
            if node.State['Code'] not in [0, 16]:
                i += 1
                continue
            _ihd = {'instance_id': instance_id}
            if version == '3.9':
                # 3.9 default: infra-style node labels (may be replaced or
                # deleted below depending on the category).
                _ihd.update({
                    'openshift_node_labels': {
                        'application_node': 'yes',
                        'registry_node': 'yes',
                        'router_node': 'yes',
                        'region': 'infra',
                        'zone': 'default'
                    }
                })
            if version != '3.9':
                # Newer releases use node groups instead of labels.
                if 'glusterfs' in self.openshift_config_category:
                    _ihd.update({'openshift_node_group_name': 'node-config-glusterfs'})
                else:
                    _ihd.update({'openshift_node_group_name': 'node-config-compute-infra'})
            if 'master' in self.openshift_config_category:
                print("making schedulable")
                _ihd.update({'openshift_schedulable': 'true'})
                if version == '3.9':
                    _ihd.update({
                        'openshift_node_labels': {
                            'region': 'primary',
                            'zone': 'default'
                        }
                    })
                else:
                    print('setting node group')
                    _ihd['openshift_node_group_name'] = 'node-config-master'
                if self.elb_name:
                    # openshift_public_hostname is only needed if we're dealing with masters, and an ELB is present.
                    _ihd['openshift_public_hostname'] = self.elb_name
            elif 'glusterfs' in self.openshift_config_category:
                _ihd.update({
                    'glusterfs_devices': ["/dev/xvdc"]
                })
            elif 'node' not in self.openshift_config_category:
                # Nodes don't need openshift_public_hostname (#3), or openshift_schedulable (#5)
                # etcd only needs hostname and node labes. doing the 'if not' above addresses both
                # of these conditions at once, as the remainder are default values prev. defined.
                if version == '3.9':
                    del _ihd['openshift_node_labels']
                else:
                    del _ihd['openshift_node_group_name']
            # 'ip_or_dns' is consumed (and removed) by the caller.
            hostdef = {node.PrivateDnsName: _ihd, 'ip_or_dns': node.PrivateDnsName}
            i += 1
            yield (instance_id, hostdef)
class LocalASInstance(object):
    """
    Thin wrapper around one EC2 instance belonging to an ASG.

    Attributes are copied from the cached describe-instances document in
    ``InventoryConfig.all_instances``; an unknown instance id leaves the
    object with its default (None/empty) attributes.
    """

    def __init__(self, instance_id):
        self.private_ips = []
        self.InstanceId = None
        self.State = None
        self.PrivateDnsName = None
        try:
            instance_object = InventoryConfig.all_instances[instance_id]
            self.private_ips.extend(
                self._extract_private_ips(instance_object['NetworkInterfaces']))
            self.__dict__.update(**instance_object)
        except KeyError:
            # Unknown instance id (or malformed document): keep defaults.
            pass

    @staticmethod
    def _extract_private_ips(network_json):
        """Yield the PrivateDnsName of every network interface."""
        for interface in network_json:
            yield interface['PrivateDnsName']
class ClusterGroups(object):
"""
Class around the ASGs within the Cluster
"""
groups = []
@classmethod
def setup(cls, version='3.9'):
    """Discover the cluster's ASGs and cache them on the class."""
    cls.groups.extend(cls._determine_cluster_groups(version))
@classmethod
def _determine_cluster_groups(cls, version):
"""
Generator that determines what ASGs are within the cluster.
"""
asg = boto3.client('autoscaling', InventoryConfig.region_name)
all_groups = asg.describe_auto_scaling_groups()['AutoScalingGroups']
i = 0
while i < len(all_groups):
_g = LocalASG(all_groups[i], version)
| |
<reponame>BubuLK/sfepy<gh_stars>100-1000
"""
python3 script/gen_serendipity_basis.py > sfepy/discrete/fem/_serendipity.py
"""
import sympy as sm
x, y, z = sm.symbols('x y z')
all_bfs = {
2: {
1 : [
0.25 * (1 - x) * (1 - y),
0.25 * (1 + x) * (1 - y),
0.25 * (1 + x) * (1 + y),
0.25 * (1 - x) * (1 + y),
],
2 : [
-0.25 * (1 - x) * (1 - y) * (1 + x + y),
-0.25 * (1 + x) * (1 - y) * (1 - x + y),
-0.25 * (1 + x) * (1 + y) * (1 - x - y),
-0.25 * (1 - x) * (1 + y) * (1 + x - y),
0.5 * (1 - x**2) * (1 - y),
0.5 * (1 + x) * (1 - y**2),
0.5 * (1 - x**2) * (1 + y),
0.5 * (1 - x) * (1 - y**2),
],
3 : [
0.03125 * (x - 1) * (y - 1) * (9 * (x**2 + y**2) - 10),
-0.03125 * (x + 1) * (y - 1) * (9 * (x**2 + y**2) - 10),
0.03125 * (x + 1) * (y + 1) * (9 * (x**2 + y**2) - 10),
-0.03125 * (x - 1) * (y + 1) * (9 * (x**2 + y**2) - 10),
0.28125 * (y - 1) * (-3 * x**3 + x**2 + 3 * x - 1),
-0.28125 * (y - 1) * (-3 * x**3 - x**2 + 3 * x + 1),
-0.28125 * (x + 1) * (-3 * y**3 + y**2 + 3 * y - 1),
0.28125 * (x + 1) * (-3 * y**3 - y**2 + 3 * y + 1),
0.28125 * (y + 1) * (-3 * x**3 - x**2 + 3 * x + 1),
-0.28125 * (y + 1) * (-3 * x**3 + x**2 + 3 * x - 1),
-0.28125 * (x - 1) * (-3 * y**3 - y**2 + 3 * y + 1),
0.28125 * (x - 1) * (-3 * y**3 + y**2 + 3 * y - 1),
],
},
3 : {
1 : [
0.125 * (1 - x) * (1 - y) * (1 - z),
0.125 * (1 + x) * (1 - y) * (1 - z),
0.125 * (1 + x) * (1 + y) * (1 - z),
0.125 * (1 - x) * (1 + y) * (1 - z),
0.125 * (1 - x) * (1 - y) * (1 + z),
0.125 * (1 + x) * (1 - y) * (1 + z),
0.125 * (1 + x) * (1 + y) * (1 + z),
0.125 * (1 - x) * (1 + y) * (1 + z),
],
2 : [
-0.125 * (1 - x) * (1 - y) * (1 - z) * (x + y + z + 2),
-0.125 * (1 + x) * (1 - y) * (1 - z) * (-x + y + z + 2),
-0.125 * (1 + x) * (1 + y) * (1 - z) * (-x - y + z + 2),
-0.125 * (1 - x) * (1 + y) * (1 - z) * (x - y + z + 2),
-0.125 * (1 - x) * (1 - y) * (1 + z) * (x + y - z + 2),
-0.125 * (1 + x) * (1 - y) * (1 + z) * (-x + y - z + 2),
-0.125 * (1 + x) * (1 + y) * (1 + z) * (-x - y - z + 2),
-0.125 * (1 - x) * (1 + y) * (1 + z) * (x - y - z + 2),
0.25 * (1 - x) * (1 + x) * (1 - y) * (1 - z),
0.25 * (1 - y) * (1 + y) * (1 + x) * (1 - z),
0.25 * (1 - x) * (1 + x) * (1 + y) * (1 - z),
0.25 * (1 - y) * (1 + y) * (1 - x) * (1 - z),
0.25 * (1 - x) * (1 + x) * (1 - y) * (1 + z),
0.25 * (1 - y) * (1 + y) * (1 + x) * (1 + z),
0.25 * (1 - x) * (1 + x) * (1 + y) * (1 + z),
0.25 * (1 - y) * (1 + y) * (1 - x) * (1 + z),
0.25 * (1 - z) * (1 + z) * (1 - x) * (1 - y),
0.25 * (1 - z) * (1 + z) * (1 + x) * (1 - y),
0.25 * (1 - z) * (1 + z) * (1 + x) * (1 + y),
0.25 * (1 - z) * (1 + z) * (1 - x) * (1 + y),
],
3 : [
0.015625 * (1 - x) * (1 - y) * (1 - z) * (9 * (x**2 + y**2 + z**2) - 19.),
0.015625 * (1 + x) * (1 - y) * (1 - z) * (9 * (x**2 + y**2 + z**2) - 19.),
0.015625 * (1 + x) * (1 + y) * (1 - z) * (9 * (x**2 + y**2 + z**2) - 19.),
0.015625 * (1 - x) * (1 + y) * (1 - z) * (9 * (x**2 + y**2 + z**2) - 19.),
0.015625 * (1 - x) * (1 - y) * (1 + z) * (9 * (x**2 + y**2 + z**2) - 19.),
0.015625 * (1 + x) * (1 - y) * (1 + z) * (9 * (x**2 + y**2 + z**2) - 19.),
0.015625 * (1 + x) * (1 + y) * (1 + z) * (9 * (x**2 + y**2 + z**2) - 19.),
0.015625 * (1 - x) * (1 + y) * (1 + z) * (9 * (x**2 + y**2 + z**2) - 19.),
-0.140625 * (1 - y) * (1 - z) * (-3 * x**3 + x**2 + 3 * x - 1),
0.140625 * (1 - y) * (1 - z) * (-3 * x**3 - x**2 + 3 * x + 1),
-0.140625 * (1 + x) * (1 - z) * (-3 * y**3 + y**2 + 3 * y - 1),
0.140625 * (1 + x) * (1 - z) * (-3 * y**3 - y**2 + 3 * y + 1),
0.140625 * (1 + y) * (1 - z) * (-3 * x**3 - x**2 + 3 * x + 1),
-0.140625 * (1 + y) * (1 - z) * (-3 * x**3 + x**2 + 3 * x - 1),
0.140625 * (1 - x) * (1 - z) * (-3 * y**3 - y**2 + 3 * y + 1),
-0.140625 * (1 - x) * (1 - z) * (-3 * y**3 + y**2 + 3 * y - 1),
-0.140625 * (1 - y) * (1 + z) * (-3 * x**3 + x**2 + 3 * x - 1),
0.140625 * (1 - y) * (1 + z) * (-3 * x**3 - x**2 + 3 * x + 1),
-0.140625 * (1 + x) * (1 + z) * (-3 * y**3 + y**2 + 3 * y - 1),
0.140625 * (1 + x) * (1 + z) | |
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributors:
# - <NAME>
# pylint: disable=too-many-lines
"""Datos de ejemplo."""
from datetime import date
from cacao_accounting.auth.roles import asigna_rol_a_usuario
from cacao_accounting.loggin import log
from cacao_accounting.transaccion import Transaccion
# pylint: disable=import-outside-toplevel, too-many-locals, too-many-statements
def _demo_usuarios():
    """
    Create the demo users and assign their roles.

    Each entry of ``demo_users`` is ``(username, plain password, roles)``;
    the user-creation transaction and the role assignments are executed in
    the same order as the original hand-unrolled version.
    """
    from cacao_accounting.auth.registros import RegistroUsuario
    from cacao_accounting.auth import proteger_passwd

    USUARIO = RegistroUsuario()
    log.debug("Creando usuarios de prueba.")
    # (usuario, clave, roles) — every user shares the same placeholder email.
    demo_users = [
        ("administrador", "<PASSWORD>", ["admin"]),
        ("auditor", "<PASSWORD>", ["comptroller"]),
        ("analista", "<PASSWORD>", ["business_analyst"]),
        ("contabilidad", "contabilidad", ["accounting_manager"]),
        ("contabilidadj", "contabilidadj", ["accounting_auxiliar"]),
        ("compras", "compras", ["purchasing_manager"]),
        ("comprasj", "<PASSWORD>", ["purchasing_auxiliar"]),
        ("ventas", "ventas", ["sales_manager"]),
        ("ventasj", "<PASSWORD>", ["sales_auxiliar"]),
        ("inventario", "<PASSWORD>", ["inventory_manager"]),
        ("inventarioj", "<PASSWORD>", ["inventory_auxiliar"]),
        ("tesoreria", "tesoreria", ["head_of_treasury"]),
        ("tesoreriaj", "<PASSWORD>", ["auxiliar_of_treasury"]),
        # The intern gets every auxiliar role.
        ("pasante", "<PASSWORD>", [
            "purchasing_auxiliar",
            "accounting_auxiliar",
            "auxiliar_of_treasury",
            "inventory_auxiliar",
            "sales_auxiliar",
        ]),
        # The generic user gets every plain user role.
        ("usuario", "usuario", [
            "purchasing_user",
            "accounting_user",
            "inventory_user",
            "user_of_treasury",
            "sales_user",
        ]),
    ]
    for usuario, clave, roles in demo_users:
        transaccion = Transaccion(
            registro="Usuario",
            tipo="principal",
            estatus_actual=None,
            nuevo_estatus=None,
            uuid=None,
            accion="crear",
            datos={
                "usuario": usuario,
                "correo_e": "<EMAIL>",
                "clave_acceso": proteger_passwd(clave),
            },
            datos_detalle=None,
            relaciones=None,
            relacion_id=None,
        )
        USUARIO.ejecutar_transaccion(transaccion)
        for rol in roles:
            asigna_rol_a_usuario(usuario, rol)
def _demo_entidad():
    """Create the three demo entities (default, active and inactive)."""
    from cacao_accounting.contabilidad.registros.entidad import RegistroEntidad

    log.debug("Creando entidades de prueba.")
    ENTIDAD = RegistroEntidad()
    datos_entidades = [
        {
            "entidad": "cacao",
            "razon_social": "<NAME> Sociedad Anonima",
            "nombre_comercial": "<NAME>",
            "id_fiscal": "J0310000000000",
            "moneda": "NIO",
            "tipo_entidad": "Sociedad",
            "correo_electronico": "<EMAIL>",
            "web": "chocoworld.com",
            "telefono1": "+505 8456 6543",
            "telefono2": "+505 8456 7543",
            "fax": "+505 8456 7545",
            "habilitada": True,
            "predeterminada": True,
            "status": "predeterminado",
        },
        {
            "entidad": "cafe",
            "razon_social": "Mundo Cafe Sociedad Anonima",
            "nombre_comercial": "<NAME>",
            "id_fiscal": "J0310000000001",
            "moneda": "USD",
            "tipo_entidad": "Sociedad",
            "correo_electronico": "<EMAIL>",
            "web": "m<EMAIL>",
            "telefono1": "+505 8456 6542",
            "telefono2": "+505 8456 7542",
            "fax": "+505 8456 7546",
            "habilitada": True,
            "predeterminada": False,
            "status": "activo",
        },
        {
            "entidad": "dulce",
            "razon_social": "Mundo Sabor Sociedad Anonima",
            "nombre_comercial": "<NAME>",
            "id_fiscal": "J0310000000002",
            "moneda": "NIO",
            "tipo_entidad": "Sociedad",
            "correo_electronico": "<EMAIL>",
            "web": "cho<EMAIL>",
            "telefono1": "+505 8456 6543",
            "telefono2": "+505 8456 7543",
            "fax": "+505 8456 7545",
            "habilitada": False,
            "predeterminada": False,
            "status": "inactivo",
        },
    ]
    # Build every transaction first, then execute them in order — this
    # mirrors the original construct-all-then-run sequence.
    transacciones = [
        Transaccion(
            registro="Entidad",
            tipo="principal",
            estatus_actual=None,
            nuevo_estatus=None,
            uuid=None,
            accion="crear",
            datos=datos,
            datos_detalle=None,
            relaciones=None,
            relacion_id=None,
        )
        for datos in datos_entidades
    ]
    for transaccion in transacciones:
        ENTIDAD.ejecutar_transaccion(transaccion)
def _demo_unidades():
    """Create the demo business units for the 'cacao' entity."""
    from cacao_accounting.contabilidad.registros.unidad import RegistroUnidad

    log.debug("Cargando unidades de negocio de prueba.")
    UNIDAD = RegistroUnidad()
    datos_unidades = [
        {
            "nombre": "<NAME>",
            "entidad": "cacao",
            "unidad": "matriz",
            "status": "activo",
        },
        {
            "nombre": "Movil",
            "entidad": "cacao",
            "unidad": "movil",
            "status": "activo",
        },
        {
            "nombre": "Masaya",
            "entidad": "cacao",
            "unidad": "masaya",
            "status": "inactivo",
        },
    ]
    # Each unit is created and executed immediately, as in the original.
    for datos in datos_unidades:
        UNIDAD.ejecutar_transaccion(
            Transaccion(
                registro="Unidad",
                tipo="principal",
                estatus_actual=None,
                nuevo_estatus=None,
                uuid=None,
                accion="crear",
                datos=datos,
                datos_detalle=None,
                relaciones=None,
                relacion_id=None,
            )
        )
def _catalogo():
    """Load the demo charts of accounts and a ten-level test account tree.

    Loads the base catalogue for each demo entity, then creates a chain of
    test accounts "6", "6.1", ..., "6.1.1.1.1.1.1.1.1.1" where each account
    is the parent of the next.  The original spelled out ten near-identical
    transaction bodies; they are generated in a loop here.
    """
    from cacao_accounting.contabilidad.ctas import base, cargar_catalogos
    from cacao_accounting.contabilidad.registros.cuenta import RegistroCuentaContable

    log.debug("Cargando catalogos de cuentas.")
    for entidad in ("cacao", "dulce", "cafe"):
        cargar_catalogos(base, entidad)

    cuentas = RegistroCuentaContable()
    codigo = "6"
    padre = None
    for nivel in range(10):
        cuentas.ejecutar_transaccion(
            Transaccion(
                # NOTE(review): the original labels these transactions
                # registro="Moneda" although they create accounts; preserved
                # verbatim — confirm whether it should be "Cuenta Contable".
                registro="Moneda",
                tipo="principal",
                estatus_actual=None,
                nuevo_estatus=None,
                uuid=None,
                accion="crear",
                datos={
                    "activa": True,
                    "habilitada": True,
                    "entidad": "cacao",
                    "codigo": codigo,
                    "nombre": "Cuenta Prueba Nivel {}".format(nivel),
                    # Only the deepest account (level 9) is a non-group
                    # (posting) account, as in the original data.
                    "grupo": nivel < 9,
                    "padre": padre,
                },
                datos_detalle=None,
                relaciones=None,
                relacion_id=None,
            )
        )
        padre = codigo
        codigo += ".1"
def _centros_de_costos():
from cacao_accounting.contabilidad.registros.ccosto import RegistroCentroCosto
CENTRO_DE_COSTO = RegistroCentroCosto()
log.debug("Cargando centros de costos.")
CENTRO_DE_COSTO.ejecutar_transaccion(
Transaccion(
registro="Centro de Costo",
tipo="principal",
estatus_actual=None,
nuevo_estatus=None,
uuid=None,
accion="crear",
datos={
"activa": True,
"predeterminado": True,
"habilitada": True,
"entidad": "cacao",
"grupo": False,
"codigo": "A00000",
"nombre": "Centro Costos Predeterminado",
"status": "activo",
},
datos_detalle=None,
relaciones=None,
relacion_id=None,
)
)
CENTRO_DE_COSTO.ejecutar_transaccion(
Transaccion(
registro="Centro de Costo",
tipo="principal",
estatus_actual=None,
nuevo_estatus=None,
uuid=None,
accion="crear",
datos={
"activa": True,
"predeterminado": True,
"habilitada": True,
"entidad": "cacao",
"grupo": True,
"codigo": "B00000",
"nombre": "Centro Costos Nivel 0",
"status": "activo",
},
datos_detalle=None,
relaciones=None,
relacion_id=None,
)
)
CENTRO_DE_COSTO.ejecutar_transaccion(
Transaccion(
registro="Centro de Costo",
tipo="principal",
estatus_actual=None,
nuevo_estatus=None,
uuid=None,
accion="crear",
datos={
"activa": True,
"predeterminado": True,
"habilitada": True,
"entidad": "cacao",
"grupo": True,
"codigo": "B00001",
"nombre": "Centro Costos Nivel 1",
"status": "activo",
"padre": "B00000",
},
datos_detalle=None,
relaciones=None,
relacion_id=None,
)
)
CENTRO_DE_COSTO.ejecutar_transaccion(
Transaccion(
registro="Centro de Costo",
tipo="principal",
estatus_actual=None,
nuevo_estatus=None,
uuid=None,
accion="crear",
datos={
"activa": True,
"predeterminado": True,
"habilitada": True,
"entidad": "cacao",
"grupo": True,
"codigo": "B00011",
"nombre": "Centro Costos Nivel 2",
"status": "activo",
"padre": "B00001",
},
datos_detalle=None,
relaciones=None,
relacion_id=None,
)
)
CENTRO_DE_COSTO.ejecutar_transaccion(
Transaccion(
registro="Centro de Costo",
tipo="principal",
estatus_actual=None,
nuevo_estatus=None,
uuid=None,
accion="crear",
datos={
"activa": True,
"predeterminado": True,
"habilitada": True,
"entidad": "cacao",
"grupo": True,
"codigo": "B00111",
"nombre": "Centro | |
else:
hc = convert(h["value"], h["unit"], "m")
wc = convert(w["value"], w["unit"], "kg")
if isinstance(hc, Left):
return hc
elif isinstance(wc, Left):
return wc
bmi = wc.value / (hc.value * hc.value)
return convert(bmi, "kg/m^2", unit).map(lambda bmic: {
"variableValue": {
"value": bmic,
"unit": unit
},
"certitutde": min(height["certitude"], weight["certitude"]),
"how": {
"computed_from": ["height", "weight"],
"height": height['how'],
"weight": weight['how']
}
})
def oxygen_saturation(records, unit, timestamp):
    """Closest-in-time oxygen saturation Observation (LOINC LP21258-6)."""
    codings = [{"system": "http://loinc.org", "code": "LP21258-6", "is_regex": False}]
    return query_records_closest(records, codings, unit, timestamp,
                                 "oxygen saturation", "Observation")
def address(patient, unit, timestamp):
    """Resolve the patient's postal address to a single formatted string.

    Prefers the address whose ``use`` is ``home``; otherwise falls back to
    the first address on record.  ``unit`` and ``timestamp`` are accepted
    for signature compatibility with the other resolvers and are unused.

    Fixes over the original: ``is None`` instead of ``== None``; addresses
    without a ``use`` key no longer raise KeyError; an empty address list is
    reported as "address not set" instead of raising IndexError; missing
    address sub-fields default to the empty string instead of raising.
    """
    if patient is None:
        return Right({
            "variableValue": {"value": None},
            "certitude": 0,
            "how": "record not found"
        })
    addresses = patient.get("address")
    if not addresses:
        return Right({
            "variableValue": {"value": None},
            "certitude": 0,
            "how": "address not set"
        })
    # Use the home-type address if available, otherwise just the first one.
    chosen = next((a for a in addresses if a.get("use") == "home"), addresses[0])
    formatted = "{line}, {city}, {state} {pc}, {country}".format(
        line=",".join(chosen.get("line", [])),
        city=chosen.get("city", ""),
        state=chosen.get("state", ""),
        pc=chosen.get("postalCode", ""),
        country=chosen.get("country", ""),
    )
    return Right({
        "variableValue": {"value": formatted},
        "certitude": 2,
        "how": f"FHIR resource 'Patient' field>'address' = {formatted}"
    })
def calculate_age2(born, timestamp):
    """Whole years elapsed between ``born`` and ``timestamp`` (birthday-aware)."""
    had_birthday = (timestamp.month, timestamp.day) >= (born.month, born.day)
    return Right(timestamp.year - born.year - (0 if had_birthday else 1))
def age(patient, unit, timestamp):
    """Patient age in whole years at ``timestamp``.

    Only the unit "year" (or None) is supported; any other unit yields a
    Left with an HTTP 400 payload.  Fixes ``== None`` to ``is None`` and
    flattens the original nested if/else into guard clauses.
    """
    if unit is not None and unit != "year":
        return Left((f"unsupported unit {unit}", 400))
    if patient is None:
        return Right({
            "variableValue": {"value": None},
            "certitude": 0,
            "how": "record not found"
        })
    if "birthDate" not in patient:
        return Right({
            "variableValue": {"value": None},
            "certitude": 0,
            "how": "birthDate not set"
        })
    birth_date = patient["birthDate"]
    date_of_birth = strtodate2(birth_date)
    today = timestamp.strftime("%Y-%m-%d")
    return calculate_age2(date_of_birth, timestamp).map(lambda years: {
        "variableValue": {
            "value": years,
            "unit": "year"
        },
        "certitude": 2,
        "how": {
            "request_timestamp": today,
            "computed_from": [
                "request_timestamp", "birthDate"
            ],
            "birthDate": {
                "computed_from": {
                    "resourceType": "Patient",
                    "field": "birthDate"
                },
                "value": birth_date
            }
        }
    })
def sex(patient, unit, timestamp):
    """Administrative gender from the Patient resource.

    ``unit`` and ``timestamp`` are unused; kept for resolver-signature
    compatibility.  Fixes ``== None`` to ``is None`` and uses guard clauses.
    """
    if patient is None:
        return Right({
            "variableValue": {"value": None},
            "certitude": 0,
            "how": "record not found"
        })
    gender = patient.get("gender")
    if gender is None:
        return Right({
            "variableValue": {"value": None},
            "certitude": 0,
            "how": "gender not set"
        })
    return Right({
        "variableValue": {"value": gender},
        "certitude": 2,
        "how": f"FHIR resource 'Patient' field>'gender' = {gender}"
    })
def demographic_extension(url):
    """Build a resolver extracting valueCodeableConcepts from a Patient
    extension identified by ``url`` (e.g. the US Core race/ethnicity URLs).

    The returned function has the (patient, unit, timestamp) resolver
    signature; ``unit`` and ``timestamp`` are unused.
    """
    def func(patient, unit, timestamp):
        if patient is None:
            return Right({
                "variableValue": {"value": None},
                "certitude": 0,
                "how": "record not found"
            })
        extension = patient.get("extension")
        if extension is None:
            return Right({
                "variableValue": {"value": None},
                "certitude": 0,
                "how": "extension not found"
            })
        # BUG FIX: the original called len() on a `filter` object, which
        # raises TypeError in Python 3; materialize the matches as a list.
        matches = [ext for ext in extension if ext["url"] == url]
        if len(matches) == 0:
            return Right({
                "variableValue": {"value": None},
                "certitude": 0,
                "how": f"extension not found url {url}"
            })
        certitude = 2
        value = []
        calculation = {
            "from": {
                "extension": {
                    "url": url
                }
            }
        }
        missing_concept = False
        for match in matches:
            concept = match.get("valueCodeableConcept")
            if concept is None:
                # BUG FIX: the original did `calculation += "..."` on a dict
                # (TypeError); record the caveat in a dedicated field instead.
                certitude = 1
                missing_concept = True
            else:
                value.append(concept)
        if not value:
            certitude = 0
        if missing_concept:
            # BUG FIX: the original initialized its tracking flag to True so
            # the "missing on some extension" note was unreachable.
            calculation["note"] = "valueCodeableConcept not found on some extension"
        return Right({
            "variableValue": {"value": value},
            "certitude": certitude,
            "how": calculation
        })
    return func
# Demographic resolvers built from the US Core race / ethnicity
# Patient-extension profiles.
race = demographic_extension("http://hl7.org/fhir/StructureDefinition/us-core-race")
ethnicity = demographic_extension("http://hl7.org/fhir/StructureDefinition/us-core-ethnicity")
def fever(records, unit, timestamp):
    """Closest-in-time fever Condition (LOINC 45701-0)."""
    codings = [{"system": "http://loinc.org", "code": "45701-0", "is_regex": False}]
    return query_records_closest(records, codings, unit, timestamp,
                                 "fever", "Condition")
def date_of_fever_onset(records, unit, timestamp):
    """Closest-in-time fever-onset-date Condition (LOINC LP212175-6)."""
    codings = [{"system": "http://loinc.org", "code": "LP212175-6", "is_regex": False}]
    return query_records_closest(records, codings, unit, timestamp,
                                 "date of fever onset", "Condition")
def cough(records, unit, timestamp):
    """Closest-in-time cough Condition (LOINC 64145-6)."""
    codings = [{"system": "http://loinc.org", "code": "64145-6", "is_regex": False}]
    return query_records_closest(records, codings, unit, timestamp,
                                 "cough", "Condition")
def date_of_cough_onset(records, unit, timestamp):
    """Closest-in-time cough-onset-date Condition (LOINC 85932-2)."""
    codings = [{"system": "http://loinc.org", "code": "85932-2", "is_regex": False}]
    return query_records_closest(records, codings, unit, timestamp,
                                 "date of cough onset", "Condition")
def shortness_of_breath(records, unit, timestamp):
    """Closest-in-time shortness-of-breath Condition (LOINC 54564-0)."""
    codings = [{"system": "http://loinc.org", "code": "54564-0", "is_regex": False}]
    return query_records_closest(records, codings, unit, timestamp,
                                 "shortness of breath", "Condition")
def autoimmune_disease(records, unit, timestamp):
    """Closest-in-time autoimmune-disease Condition (LOINC LP128504-0)."""
    codings = [{"system": "http://loinc.org", "code": "LP128504-0", "is_regex": False}]
    return query_records_closest(records, codings, unit, timestamp,
                                 "autoimmune disease", "Condition")
def pulmonary_disease(records, unit, timestamp):
    """Closest-in-time pulmonary-disease Condition (LOINC 54542-6)."""
    codings = [{"system": "http://loinc.org", "code": "54542-6", "is_regex": False}]
    return query_records_closest(records, codings, unit, timestamp,
                                 "pulmonary disease", "Condition")
def cardiovascular_disease(records, unit, timestamp):
    """Closest-in-time cardiovascular-disease Condition (LOINC LP172921-1)."""
    codings = [{"system": "http://loinc.org", "code": "LP172921-1", "is_regex": False}]
    return query_records_closest(records, codings, unit, timestamp,
                                 "cardiovascular disease", "Condition")
def serum_creatinine(records, unit, timestamp):
    """Closest-in-time serum creatinine Observation (LOINC 2160-0)."""
    codings = [{"system": "http://loinc.org", "code": "2160-0", "is_regex": False}]
    return query_records_closest(records, codings, unit, timestamp,
                                 "serum creatinine", "Observation")
def pregnancy(records, unit, timestamp):
    """Closest-in-time pregnancy Condition (ICD-10-CM Z34.* supervision codes)."""
    codings = [{
        "system": "http://hl7.org/fhir/sid/icd-10-cm",
        "code": "^Z34\\.",
        "is_regex": True
    }]
    return query_records_closest(records, codings, unit, timestamp,
                                 "pregnancy", "Condition")
# ICD-10-CM codings identifying bleeding events.  Expressed as
# (code, is_regex) pairs expanded into the coding dicts the query helpers
# expect; order and content match the original literal list exactly.
bleeding_patterns = [
    {"system": "http://hl7.org/fhir/sid/icd-10-cm", "code": code, "is_regex": is_regex}
    for code, is_regex in [
        ("I60\\..*", True),
        ("I61\\..*", True),
        ("I62\\..*", True),
        ("G95.19", False),
        ("T85.830", False),
        ("H11.3", False),
        ("H31.3", False),
        ("H43.1", False),
        ("H59.1", False),
        ("H59.3", False),
        ("I85.01", False),
        ("K22.11", False),
        ("H22.6", False),
        ("H25.0", False),
        ("H25.2", False),
        ("H25.4", False),
        ("H25.6", False),
        ("H26.0", False),
        ("H26.2", False),
        ("H26.4", False),
        ("H26.6", False),
        ("H27.0", False),
        ("H27.2", False),
        ("H27.4", False),
        ("H27.6", False),
        ("H28.0", False),
        ("H28.2", False),
        ("H28.4", False),
        ("H28.6", False),
        ("K29.01", False),
        ("K31.811", False),
        ("K92.0", False),
        ("K55.21", False),
        ("K57.01", False),
        ("K57.21", False),
        ("K57.31", False),
        ("K57.33", False),
        ("K57.41", False),
        ("K57.51", False),
        ("K57.53", False),
        ("K57.81", False),
        ("K57.91", False),
        ("K57.93", False),
        ("K62.5", False),
        ("K92.1", False),
        ("K92.2", False),
        ("K66.1", False),
        ("M25.0", False),
        ("I31.2", False),
        ("R58\\..*", True),
    ]
]
def bleeding(records, unit, timestamp):
    """Closest-in-time bleeding Condition; the unit argument is unused (None passed)."""
    return query_records_closest(
        records, bleeding_patterns, None, timestamp, "bleeding", "Condition"
    )
def bleeding2(records, unit, start, end):
    """Bleeding Conditions within [start, end]; the unit argument is unused (None passed)."""
    return query_records_interval(
        records, bleeding_patterns, None, start, end, "bleeding", "Condition"
    )
def kidney_dysfunction(records, unit, timestamp):
    """Closest-in-time kidney-dysfunction Condition over ICD-10-CM codes.

    The coding list is built from (code, is_regex) pairs; order and content
    match the original literal list exactly.
    """
    icd10 = "http://hl7.org/fhir/sid/icd-10-cm"
    codes = [
        ("N00\\..*", True), ("N10\\..*", True), ("N17\\..*", True), ("N14\\..*", True),
        ("N14.1", False), ("N14.2", False), ("T36.5X5", False), ("B52.0", False),
        ("D59.3", False), ("E10.2", False), ("E11.2", False), ("E13.2", False),
        ("I12\\..*", True), ("I13\\..*", True), ("I15.1", False), ("I15.2", False),
        ("N01\\..*", True), ("N02\\..*", True), ("N03\\..*", True), ("N04\\..*", True),
        ("N05\\..*", True), ("N06\\..*", True), ("N07\\..*", True), ("N08\\..*", True),
        ("N11\\..*", True), ("N13\\..*", True), ("N15\\..*", True), ("N16\\..*", True),
        ("N18\\..*", True), ("N19\\..*", True), ("N25\\..*", True), ("N26\\..*", True),
        ("N27\\..*", True), ("N28\\..*", True), ("N29\\..*", True), ("Q60\\..*", True),
        ("Q61\\..*", True), ("Q62\\..*", True), ("Q63\\..*", True), ("Z49\\..*", True),
        # NOTE(review): "Z99.2" carries is_regex=True in the original even
        # though it looks like a literal code (the unescaped "." would match
        # any character) — preserved as-is; confirm intent.
        ("Z99.2", True),
        ("N12\\..*", True),
    ]
    patterns = [{"system": icd10, "code": c, "is_regex": r} for c, r in codes]
    return query_records_closest(records, patterns, unit, timestamp,
                                 "kidney dysfunction", "Condition")
def DOAC2(records, start, end):
    """DOAC MedicationRequest records within [start, end]."""
    return query_records_interval(
        records, doac_event_code_maps, None, start, end, "DOAC", "MedicationRequest"
    )
def DOAC_Interventions(records, start, end):#for testing since records cannot find this code
| |
57, 45, 45, 45, 45, 45, 45), False, 0 , 6),
46 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 54, 0 , 0 , 0 , 0 , 0 ), True , 18, 1),
47 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 54, 0 , 0 , 0 , 0 ), True , 18, 1),
48 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 16, 1),
49 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 17, 1),
50 : ((0 , 0 , 0 , 0 , 0 , 0 , 55, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 30, 1),
51 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 19, 1),
52 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 18, 1),
53 : ((45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 56, 45, 45, 45, 56, 45, 45, 45, 45, 45, 45), False, 0 , 6),
54 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 23, 1),
55 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 30, 1),
56 : ((45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 53, 45, 45, 45, 57, 45, 45, 45, 45, 45, 45), False, 0 , 6),
57 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 21, 1),
58 : ((59, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 58, 59, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 0 , 7),
59 : ((59, 59, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 59, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 14, 1),
60 : ((60, 60, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 60, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 15, 1),
61 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 62, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), False, 0 , 9),
62 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 103, 1),
63 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 64, 0 , 0 , 0 ), True , 107, 1),
64 : ((0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 104, 1),
65 : ((65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 7 , 65, 67, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65), False, 0 , 5),
66 : ((66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 7 , 69, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66), False, 0 , 5),
67 : ((65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 70, 65, 70, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65), False, 0 , 5),
69 : ((66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 71, 71, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66), False, 0 , 5),
70 : ((65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 7 , 65, 67, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65), False, 0 , 5),
71 : ((66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 7 , 69, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66), False, 0 , 5),
72 : ((-11, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), True , 4 , 1),
-1 : ((0 , 0 , | |
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import functools
import os
import json
import math
from collections import defaultdict
import random
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from sg2im.data import imagenet_deprocess_batch
from sg2im.data.coco import CocoSceneGraphDataset, coco_collate_fn
from sg2im.data.vg import VgSceneGraphDataset, vg_collate_fn
from sg2im.discriminators import PatchDiscriminator, AcCropDiscriminator
from sg2im.losses import get_gan_losses
from sg2im.metrics import jaccard
from sg2im.model import Layout2ImModel
from sg2im.utils import int_tuple, float_tuple, str_tuple
from sg2im.utils import timeit, bool_flag, LossManager
from models.layout_model import *
from tqdm import tqdm
# Let cuDNN benchmark and cache the fastest conv algorithms (inputs here
# have fixed sizes, so this is a pure speed win).
torch.backends.cudnn.benchmark = True
VG_DIR = os.path.expanduser('datasets/vg')
COCO_DIR = os.path.expanduser('/mnt/xfs1/hassan2/projectdata/data/coco/val2017/')
# Command-line configuration for training.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='coco', choices=['vg', 'coco'])
# Optimization hyperparameters
parser.add_argument('--batch_size', default=50, type=int)
parser.add_argument('--num_iterations', default=1000000, type=int)
parser.add_argument('--learning_rate', default=1e-4, type=float)
# Switch the generator to eval mode after this many iterations
parser.add_argument('--eval_mode_after', default=100000, type=int)
# Dataset options common to both VG and COCO
parser.add_argument('--image_size', default='64,64', type=int_tuple)
parser.add_argument('--num_train_samples', default=None, type=int)
parser.add_argument('--num_val_samples', default=1024, type=int)
parser.add_argument('--shuffle_val', default=True, type=bool_flag)
parser.add_argument('--loader_num_workers', default=4, type=int)
parser.add_argument('--include_relationships', default=True, type=bool_flag)
# VG-specific options
parser.add_argument('--vg_image_dir', default=os.path.join(VG_DIR, 'images'))
parser.add_argument('--train_h5', default=os.path.join(VG_DIR, 'train.h5'))
parser.add_argument('--val_h5', default=os.path.join(VG_DIR, 'val.h5'))
parser.add_argument('--vocab_json', default=os.path.join(VG_DIR, 'vocab.json'))
parser.add_argument('--max_objects_per_image', default=8, type=int)
parser.add_argument('--vg_use_orphaned_objects', default=True, type=bool_flag)
# COCO-specific options
# NOTE: os.path.join discards COCO_DIR whenever the second argument is an
# absolute path, so each default below resolves to the literal /mnt/... path.
parser.add_argument('--coco_train_image_dir',
        default=os.path.join(COCO_DIR, '/mnt/xfs1/hassan2/projectdata/data/coco/train2017/'))
parser.add_argument('--coco_val_image_dir',
        default=os.path.join(COCO_DIR, '/mnt/xfs1/hassan2/projectdata/data/coco/val2017/'))
parser.add_argument('--coco_train_instances_json',
        default=os.path.join(COCO_DIR, '/mnt/xfs1/hassan2/projectdata/data/coco/annotations/instances_train2017.json'))
parser.add_argument('--coco_train_stuff_json',
        default=os.path.join(COCO_DIR, '/mnt/xfs1/hassan2/projectdata/data/coco/annotations/stuff_train2017.json'))
parser.add_argument('--coco_val_instances_json',
        default=os.path.join(COCO_DIR, '/mnt/xfs1/hassan2/projectdata/data/coco/annotations/instances_val2017.json'))
parser.add_argument('--coco_val_stuff_json',
        default=os.path.join(COCO_DIR, '/mnt/xfs1/hassan2/projectdata/data/coco/annotations/stuff_val2017.json'))
parser.add_argument('--instance_whitelist', default=None, type=str_tuple)
parser.add_argument('--stuff_whitelist', default=None, type=str_tuple)
parser.add_argument('--coco_include_other', default=False, type=bool_flag)
parser.add_argument('--min_object_size', default=0.02, type=float)
parser.add_argument('--min_objects_per_image', default=3, type=int)
parser.add_argument('--coco_stuff_only', default=True, type=bool_flag)
# Generator options
parser.add_argument('--mask_size', default=16, type=int) # Set this to 0 to use no masks
parser.add_argument('--embedding_dim', default=128, type=int)
parser.add_argument('--gconv_dim', default=128, type=int)
parser.add_argument('--gconv_hidden_dim', default=512, type=int)
parser.add_argument('--gconv_num_layers', default=5, type=int)
parser.add_argument('--mlp_normalization', default='none', type=str)
parser.add_argument('--refinement_network_dims', default='1024,512,256,128,64', type=int_tuple)
parser.add_argument('--normalization', default='batch')
parser.add_argument('--activation', default='leakyrelu-0.2')
parser.add_argument('--layout_noise_dim', default=32, type=int)
parser.add_argument('--use_boxes_pred_after', default=-1, type=int)
# Generator losses
parser.add_argument('--mask_loss_weight', default=0, type=float)
parser.add_argument('--l1_pixel_loss_weight', default=1.0, type=float)
parser.add_argument('--bbox_pred_loss_weight', default=10, type=float)
parser.add_argument('--predicate_pred_loss_weight', default=0, type=float) # DEPRECATED
# Generic discriminator options
parser.add_argument('--discriminator_loss_weight', default=0.01, type=float)
parser.add_argument('--gan_loss_type', default='gan')
parser.add_argument('--d_clip', default=None, type=float)
parser.add_argument('--d_normalization', default='batch')
parser.add_argument('--d_padding', default='valid')
parser.add_argument('--d_activation', default='leakyrelu-0.2')
# Object discriminator
parser.add_argument('--d_obj_arch',
    default='C4-64-2,C4-128-2,C4-256-2')
parser.add_argument('--crop_size', default=32, type=int)
parser.add_argument('--d_obj_weight', default=1.0, type=float) # multiplied by d_loss_weight
parser.add_argument('--ac_loss_weight', default=0.1, type=float)
# Image discriminator
parser.add_argument('--d_img_arch',
    default='C4-64-2,C4-128-2,C4-256-2')
parser.add_argument('--d_img_weight', default=1.0, type=float) # multiplied by d_loss_weight
# Output options
parser.add_argument('--print_every', default=10, type=int)
parser.add_argument('--timing', default=False, type=bool_flag)
parser.add_argument('--checkpoint_every', default=10000, type=int)
parser.add_argument('--output_dir', default=os.getcwd())
parser.add_argument('--checkpoint_name', default='checkpoint')
parser.add_argument('--checkpoint_start_from', default=None)
parser.add_argument('--restore_from_checkpoint', default=False, type=bool_flag)
def add_loss(total_loss, curr_loss, loss_dict, loss_name, weight=1):
    """Scale ``curr_loss`` by ``weight``, record its scalar value under
    ``loss_name`` in ``loss_dict``, and fold it into ``total_loss``.

    Returns the accumulated total (the weighted loss itself when
    ``total_loss`` is None).
    """
    weighted = curr_loss * weight
    loss_dict[loss_name] = weighted.item()
    if total_loss is None:
        return weighted
    total_loss += weighted
    return total_loss
def check_args(args):
    """Validate that the image is large enough for the refinement network.

    Each refinement stage after the first halves the spatial resolution;
    with too many stages the feature map collapses to zero.  The original
    only tracked the height; the width is now checked as well (no effect
    for the default square image sizes).

    Raises:
        ValueError: if either spatial dimension would reach 0.
    """
    H, W = args.image_size
    for _ in args.refinement_network_dims[1:]:
        H = H // 2
        W = W // 2
    if H == 0 or W == 0:
        raise ValueError("Too many layers in refinement network")
def build_model(args, vocab):
    """Construct the Layout2ImModel, optionally restoring checkpoint weights.

    Returns:
        (model, kwargs): the model and the kwargs it was constructed with
        (taken from the checkpoint when restoring).
    """
    if args.checkpoint_start_from is not None:
        checkpoint = torch.load(args.checkpoint_start_from)
        kwargs = checkpoint['model_kwargs']
        # BUG FIX: the original referenced `model` without constructing it
        # (the construction line was commented out), raising NameError on
        # any checkpoint restore.
        model = Layout2ImModel(**kwargs)
        raw_state_dict = checkpoint['model_state']
        # Strip the 'module.' prefix that nn.DataParallel adds to keys.
        state_dict = {}
        for k, v in raw_state_dict.items():
            if k.startswith('module.'):
                k = k[7:]
            state_dict[k] = v
        model.load_state_dict(state_dict)
    else:
        kwargs = {
            'vocab': vocab,
            'image_size': args.image_size,
            'embedding_dim': args.embedding_dim,
            'gconv_dim': args.gconv_dim,
            'gconv_hidden_dim': args.gconv_hidden_dim,
            'gconv_num_layers': args.gconv_num_layers,
            'mlp_normalization': args.mlp_normalization,
            'refinement_dims': args.refinement_network_dims,
            'normalization': args.normalization,
            'activation': args.activation,
            'mask_size': args.mask_size,
            'layout_noise_dim': args.layout_noise_dim,
        }
        model = Layout2ImModel(**kwargs)
    return model, kwargs
def build_obj_discriminator(args, vocab):
    """Construct the AC-GAN crop discriminator for per-object patches.

    Returns ``(None, {})`` when either the global discriminator weight or the
    object-discriminator weight is zero, i.e. the object D is disabled.
    """
    if args.discriminator_loss_weight == 0 or args.d_obj_weight == 0:
        return None, {}
    d_kwargs = {
        'vocab': vocab,
        'arch': args.d_obj_arch,
        'normalization': args.d_normalization,
        'activation': args.d_activation,
        'padding': args.d_padding,
        'object_size': args.crop_size,
    }
    return AcCropDiscriminator(**d_kwargs), d_kwargs
def build_img_discriminator(args, vocab):
    """Construct the patch discriminator for whole generated images.

    ``vocab`` is accepted for signature symmetry with
    build_obj_discriminator but is unused. Returns ``(None, {})`` when either
    the global discriminator weight or the image-discriminator weight is zero.
    """
    if args.discriminator_loss_weight == 0 or args.d_img_weight == 0:
        return None, {}
    d_kwargs = {
        'arch': args.d_img_arch,
        'normalization': args.d_normalization,
        'activation': args.d_activation,
        'padding': args.d_padding,
    }
    return PatchDiscriminator(**d_kwargs), d_kwargs
def build_coco_dsets(args):
    """Build the COCO train/val scene-graph datasets and return their vocab.

    The validation dataset reuses the training configuration with the image
    directory, annotation files and sample cap swapped for val counterparts.
    """
    train_kwargs = {
        'image_dir': args.coco_train_image_dir,
        'instances_json': args.coco_train_instances_json,
        'stuff_json': args.coco_train_stuff_json,
        'stuff_only': args.coco_stuff_only,
        'image_size': args.image_size,
        'mask_size': args.mask_size,
        'max_samples': args.num_train_samples,
        'min_object_size': args.min_object_size,
        'min_objects_per_image': args.min_objects_per_image,
        'instance_whitelist': args.instance_whitelist,
        'stuff_whitelist': args.stuff_whitelist,
        'include_other': args.coco_include_other,
        'include_relationships': args.include_relationships,
    }
    train_dset = CocoSceneGraphDataset(**train_kwargs)
    num_objs = train_dset.total_objects()
    num_imgs = len(train_dset)
    print('Training dataset has %d images and %d objects' % (num_imgs, num_objs))
    print('(%.2f objects per image)' % (float(num_objs) / num_imgs))

    val_kwargs = dict(train_kwargs)
    val_kwargs['image_dir'] = args.coco_val_image_dir
    val_kwargs['instances_json'] = args.coco_val_instances_json
    val_kwargs['stuff_json'] = args.coco_val_stuff_json
    val_kwargs['max_samples'] = args.num_val_samples
    val_dset = CocoSceneGraphDataset(**val_kwargs)

    assert train_dset.vocab == val_dset.vocab
    # Deep-copy the vocab via a JSON round trip so callers cannot mutate the
    # dataset's own copy.
    vocab = json.loads(json.dumps(train_dset.vocab))
    return vocab, train_dset, val_dset
def build_vg_dsets(args):
    """Build Visual Genome train/val datasets using an external vocab file."""
    with open(args.vocab_json, 'r') as f:
        vocab = json.load(f)
    common_kwargs = {
        'vocab': vocab,
        'h5_path': args.train_h5,
        'image_dir': args.vg_image_dir,
        'image_size': args.image_size,
        'max_samples': args.num_train_samples,
        'max_objects': args.max_objects_per_image,
        'use_orphaned_objects': args.vg_use_orphaned_objects,
        'include_relationships': args.include_relationships,
    }
    train_dset = VgSceneGraphDataset(**common_kwargs)
    print('There are %d iterations per epoch'
          % (len(train_dset) // args.batch_size))
    # The validation set reads a different h5 file and is never subsampled.
    common_kwargs['h5_path'] = args.val_h5
    del common_kwargs['max_samples']
    val_dset = VgSceneGraphDataset(**common_kwargs)
    return vocab, train_dset, val_dset
def build_loaders(args):
    """Build train/val DataLoaders for the configured dataset.

    Raises:
        ValueError: if ``args.dataset`` is neither 'vg' nor 'coco'. (The
        original fell through both branches and later crashed with an
        UnboundLocalError on 'collate_fn'.)
    """
    if args.dataset == 'vg':
        vocab, train_dset, val_dset = build_vg_dsets(args)
        collate_fn = vg_collate_fn
    elif args.dataset == 'coco':
        vocab, train_dset, val_dset = build_coco_dsets(args)
        collate_fn = coco_collate_fn
    else:
        raise ValueError('Unrecognized dataset: %r' % args.dataset)
    loader_kwargs = {
        'batch_size': args.batch_size,
        'num_workers': args.loader_num_workers,
        'shuffle': True,
        'collate_fn': collate_fn,
    }
    train_loader = DataLoader(train_dset, **loader_kwargs)
    # Validation order is only shuffled when explicitly requested.
    loader_kwargs['shuffle'] = args.shuffle_val
    val_loader = DataLoader(val_dset, **loader_kwargs)
    return vocab, train_loader, val_loader
def check_model(args, t, loader, model):
    """Validation pass: mean losses, sample images and box IoU over 'loader'.

    NOTE(review): the bare 'return' on the first line disables this entire
    function -- everything below it is dead code, presumably switched off
    deliberately for this training stage. Delete the early return to
    re-enable validation, but see the NOTE about calculate_model_losses
    below first.
    """
    return
    # ---- dead code below; kept verbatim for when validation is re-enabled ----
    float_dtype = torch.cuda.FloatTensor
    long_dtype = torch.cuda.LongTensor
    num_samples = 0
    all_losses = defaultdict(list)
    total_iou = 0
    total_boxes = 0
    with torch.no_grad():
        for batch in loader:
            batch = [tensor.cuda() for tensor in batch]
            masks = None
            # Batches arrive in two layouts: with or without instance masks.
            if len(batch) == 6:
                imgs, objs, boxes, triples, obj_to_img, triple_to_img = batch
            elif len(batch) == 7:
                imgs, objs, boxes, masks, triples, obj_to_img, triple_to_img = batch
            # Run the model as it has been run during training
            model_masks = masks
            model_out = model(objs, triples, obj_to_img, boxes_gt=boxes, masks_gt=model_masks)
            imgs_pred, boxes_pred, masks_pred, predicate_scores = model_out
            skip_pixel_loss = False
            # NOTE(review): 'predicates' is undefined in this scope, and this
            # call does not match the current calculate_model_losses()
            # signature -- it would raise if the dead code were re-enabled.
            total_loss, losses = calculate_model_losses(
                args, skip_pixel_loss, model, imgs, imgs_pred,
                boxes, boxes_pred, masks, masks_pred,
                predicates, predicate_scores)
            total_iou += jaccard(boxes_pred, boxes)
            total_boxes += boxes_pred.size(0)
            for loss_name, loss_val in losses.items():
                all_losses[loss_name].append(loss_val)
            num_samples += imgs.size(0)
            if num_samples >= args.num_val_samples:
                break
        # Generate qualitative samples from the last batch under three
        # supervision regimes (GT boxes+masks, GT boxes, fully predicted).
        samples = {}
        samples['gt_img'] = imgs
        model_out = model(objs, triples, obj_to_img, boxes_gt=boxes, masks_gt=masks)
        samples['gt_box_gt_mask'] = model_out[0]
        model_out = model(objs, triples, obj_to_img, boxes_gt=boxes)
        samples['gt_box_pred_mask'] = model_out[0]
        model_out = model(objs, triples, obj_to_img)
        samples['pred_box_pred_mask'] = model_out[0]
        for k, v in samples.items():
            samples[k] = imagenet_deprocess_batch(v)
        mean_losses = {k: np.mean(v) for k, v in all_losses.items()}
        avg_iou = total_iou / total_boxes
        # Move tensors worth keeping to CPU so the caller can store them.
        masks_to_store = masks
        if masks_to_store is not None:
            masks_to_store = masks_to_store.data.cpu().clone()
        masks_pred_to_store = masks_pred
        if masks_pred_to_store is not None:
            masks_pred_to_store = masks_pred_to_store.data.cpu().clone()
    batch_data = {
        'objs': objs.detach().cpu().clone(),
        'boxes_gt': boxes.detach().cpu().clone(),
        'masks_gt': masks_to_store,
        'triples': triples.detach().cpu().clone(),
        'obj_to_img': obj_to_img.detach().cpu().clone(),
        'triple_to_img': triple_to_img.detach().cpu().clone(),
        'boxes_pred': boxes_pred.detach().cpu().clone(),
        'masks_pred': masks_pred_to_store
    }
    out = [mean_losses, samples, batch_data, avg_iou]
    return tuple(out)
def calculate_model_losses(args, model, img, img_pred,
                           bbox, bbox_pred, logit_boxes, generated_boxes, original_combined):
    """Compute the generator's supervised losses.

    Layout tensors are laid out as [x, y, w, h, one-hot class] along the last
    axis, so the class count is the last-dim size minus 4.

    Returns:
        (total_loss, losses): weighted scalar total and a dict of per-loss
        float values (populated by add_loss). 'model' is unused but kept for
        signature compatibility with callers.
    """
    total_loss = torch.zeros(1).to(img)
    losses = {}
    # L1 reconstruction loss on the generated image.
    l1_pixel_weight = args.l1_pixel_loss_weight
    l1_pixel_loss = F.l1_loss(img_pred, img)
    total_loss = add_loss(total_loss, l1_pixel_loss, losses, 'L1_pixel_loss',
                          l1_pixel_weight)
    # Regression on the model-predicted bounding boxes.
    loss_bbox = F.mse_loss(bbox_pred, bbox)
    total_loss = add_loss(total_loss, loss_bbox, losses, 'bbox_pred',
                          args.bbox_pred_loss_weight)
    # GENERALIZED: derive the class count from the tensor instead of the
    # hard-coded 184; behavior is unchanged for 184-class layouts.
    num_classes = logit_boxes.size(2) - 4
    orig_labels = torch.argmax(original_combined[:, :, 4:], dim=2).view(-1)
    # reshape (not view): the [:, :, 4:] slice is non-contiguous.
    loss_classify = nn.CrossEntropyLoss()(
        logit_boxes[:, :, 4:].reshape(-1, num_classes), orig_labels)
    total_loss = add_loss(total_loss, loss_classify, losses, 'classification_loss')
    # Coordinate regression for the generated layout boxes.
    mse_loss = F.mse_loss(generated_boxes[:, :, :4], original_combined[:, :, :4])
    total_loss = add_loss(total_loss, mse_loss, losses, 'mse_loss',
                          args.bbox_pred_loss_weight)
    return total_loss, losses
def main(args):
print(args)
check_args(args)
float_dtype = torch.cuda.FloatTensor
long_dtype = torch.cuda.LongTensor
vocab, train_loader, val_loader = build_loaders(args)
model, model_kwargs = build_model(args, vocab)
model.type(float_dtype)
model=model.cuda()
layoutgen = LayoutGenerator(args.batch_size,args.max_objects_per_image+1,184).cuda()
optimizer_params = list(model.parameters()) + list(layoutgen.parameters())
optimizer = torch.optim.Adam(params=optimizer_params, lr=args.learning_rate)
obj_discriminator, d_obj_kwargs = build_obj_discriminator(args, vocab)
img_discriminator, d_img_kwargs = build_img_discriminator(args, vocab)
obj_discriminator= obj_discriminator.cuda()
img_discriminator=img_discriminator.cuda()
layout_discriminator = LayoutDiscriminator(args.batch_size,args.max_objects_per_image+1,184,64,64).cuda()
gan_g_loss, gan_d_loss = get_gan_losses(args.gan_loss_type)
obj_discriminator.type(float_dtype)
obj_discriminator.train()
optimizer_d_obj = torch.optim.Adam(obj_discriminator.parameters(),lr=args.learning_rate)
img_discriminator.type(float_dtype)
img_discriminator.train()
optimizer_d_img = torch.optim.Adam(img_discriminator.parameters(),lr=args.learning_rate)
optimizer_d_layout=torch.optim.Adam(params=layout_discriminator.parameters(), lr = args.learning_rate)
model_path='stats/epoch_2_batch_399_with_model.pt'
checkpoint = torch.load(model_path)
model.load_state_dict(checkpoint['model_state'])
layoutgen.load_state_dict(checkpoint['layout_gen'])
obj_discriminator.load_state_dict(checkpoint['d_obj_state'])
img_discriminator.load_state_dict(checkpoint['d_img_state'])
layout_discriminator.load_state_dict(checkpoint['d_layout_state'])
optimizer_d_obj.load_state_dict(checkpoint['d_obj_optim_state'])
optimizer_d_img.load_state_dict(checkpoint['d_img_optim_state'])
optimizer_d_layout.load_state_dict(checkpoint['d_layout_optim_state'])
optimizer.load_state_dict(checkpoint['optim_state'])
# checkpoint = torch.load('sg2im-models/coco64.pt')
# model.load_state_dict(checkpoint['model_state'])
# 0/0
# 'model_state':model.state_dict(),
# 'layout_gen':layoutgen.state_dict(),
# 'd_obj_state': obj_discriminator.state_dict(),
# 'd_img_state': img_discriminator.state_dict(),
# 'd_layout_state':layout_discriminator.state_dict(),
# 'd_obj_optim_state': optimizer_d_obj.state_dict(),
# 'd_img_optim_state': optimizer_d_img.state_dict(),
# 'd_layout_optim_state':optimizer_d_layout.state_dict(),
# 'optim_state': optimizer.state_dict(),
# restore_path = None
# if args.restore_from_checkpoint:
# restore_path = 'stats/%s_with_model.pt' % args.checkpoint_name
# restore_path = os.path.join(args.output_dir, restore_path)
# if restore_path is not None and os.path.isfile(restore_path):
# print('Restoring from checkpoint:')
# print(restore_path)
# checkpoint = torch.load(restore_path)
# model.load_state_dict(checkpoint['model_state'])
# optimizer.load_state_dict(checkpoint['optim_state'])
# obj_discriminator.load_state_dict(checkpoint['d_obj_state'])
# optimizer_d_obj.load_state_dict(checkpoint['d_obj_optim_state'])
# img_discriminator.load_state_dict(checkpoint['d_img_state'])
# optimizer_d_img.load_state_dict(checkpoint['d_img_optim_state'])
# t = checkpoint['counters']['t']
# if 0 <= args.eval_mode_after <= t:
# model.eval()
# else:
# model.train()
# epoch = checkpoint['counters']['epoch']
# else:
# t, epoch = 0, 0
# checkpoint = {
# 'args': args.__dict__,
# 'vocab': vocab,
# 'model_kwargs': model_kwargs,
# 'd_obj_kwargs': d_obj_kwargs,
# 'd_img_kwargs': d_img_kwargs,
# 'losses_ts': [],
# 'losses': defaultdict(list),
# 'd_losses': defaultdict(list),
# 'checkpoint_ts': [],
# 'train_batch_data': [],
# 'train_samples': [],
# 'train_iou': [],
| |
# <reponame>lujiwei/impala
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This test suite validates the scanners by running queries against ALL file formats and
# their permutations (e.g. compression codec/compression type). This works by exhaustively
# generating the table format test vectors for this specific test suite. This way, other
# tests can run with the normal exploration strategy and the overall test runtime doesn't
# explode.
import os
import pytest
import random
import re
import tempfile
from copy import deepcopy
from parquet.ttypes import ConvertedType
from subprocess import check_call
from testdata.common import widetable
from tests.common.impala_test_suite import ImpalaTestSuite, LOG
from tests.common.skip import (
SkipIf,
SkipIfS3,
SkipIfGCS,
SkipIfABFS,
SkipIfADLS,
SkipIfEC,
SkipIfHive2,
SkipIfHive3,
SkipIfIsilon,
SkipIfLocal,
SkipIfNotHdfsMinicluster)
from tests.common.test_dimensions import (
create_single_exec_option_dimension,
create_exec_option_dimension,
create_uncompressed_text_dimension)
from tests.common.file_utils import (
create_table_from_parquet,
create_table_and_copy_files)
from tests.common.test_result_verifier import (
QueryTestResult,
parse_result_rows)
from tests.common.test_vector import ImpalaTestDimension
from tests.util.filesystem_utils import IS_HDFS, WAREHOUSE, get_fs_path
from tests.util.hdfs_util import NAMENODE
from tests.util.get_parquet_metadata import get_parquet_metadata
from tests.util.parse_util import get_bytes_summary_stats_counter
from tests.util.test_file_parser import QueryTestSectionReader
# Test scanners with denial of reservations at varying frequency. This will affect the
# number of scanner threads that can be spun up.
DEBUG_ACTION_DIMS = [None,
  '-1:OPEN:SET_DENY_RESERVATION_PROBABILITY@0.5',
  '-1:OPEN:SET_DENY_RESERVATION_PROBABILITY@1.0']
# Trigger injected soft limit failures when scanner threads check memory limit.
DEBUG_ACTION_DIMS.append('HDFS_SCANNER_THREAD_CHECK_SOFT_MEM_LIMIT:FAIL@0.5')
# Degrees of intra-node parallelism (the mt_dop query option) to exercise.
MT_DOP_VALUES = [0, 1, 4]
class TestScannersAllTableFormats(ImpalaTestSuite):
  """Base scanner coverage: runs scanner queries against all table formats."""
  BATCH_SIZES = [0, 1, 16]

  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestScannersAllTableFormats, cls).add_test_dimensions()
    if cls.exploration_strategy() == 'core':
      # The purpose of this test is to get some base coverage of all the file formats.
      # Even in 'core', we'll test each format by using the pairwise strategy.
      cls.ImpalaTestMatrix.add_dimension(cls.create_table_info_dimension('pairwise'))
    cls.ImpalaTestMatrix.add_dimension(
        ImpalaTestDimension('batch_size', *TestScannersAllTableFormats.BATCH_SIZES))
    cls.ImpalaTestMatrix.add_dimension(
        ImpalaTestDimension('debug_action', *DEBUG_ACTION_DIMS))

  def _vector_with_options(self, vector):
    """Return a copy of 'vector' with batch_size/debug_action copied into exec options."""
    new_vector = deepcopy(vector)
    opts = new_vector.get_value('exec_option')
    opts['batch_size'] = vector.get_value('batch_size')
    opts['debug_action'] = vector.get_value('debug_action')
    return new_vector

  def test_scanners(self, vector):
    self.run_test_case('QueryTest/scanners', self._vector_with_options(vector))

  def test_many_nulls(self, vector):
    if vector.get_value('table_format').file_format == 'hbase':
      # manynulls table not loaded for HBase
      pytest.skip()
    self.run_test_case('QueryTest/scanners-many-nulls', self._vector_with_options(vector))

  def test_hdfs_scanner_profile(self, vector):
    if vector.get_value('table_format').file_format in ('kudu', 'hbase') or \
       vector.get_value('exec_option')['num_nodes'] != 0:
      pytest.skip()
    self.run_test_case('QueryTest/hdfs_scanner_profile', vector)

  def test_string_escaping(self, vector):
    """Test handling of string escape sequences."""
    if vector.get_value('table_format').file_format == 'rc':
      # IMPALA-7778: RCFile scanner incorrectly ignores escapes for now.
      self.run_test_case('QueryTest/string-escaping-rcfile-bug', vector)
    else:
      self.run_test_case('QueryTest/string-escaping', vector)
# Test all the scanners with a simple limit clause. The limit clause triggers
# cancellation in the scanner code paths.
class TestScannersAllTableFormatsWithLimit(ImpalaTestSuite):
  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestScannersAllTableFormatsWithLimit, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('mt_dop', *MT_DOP_VALUES))

  def test_limit(self, vector):
    opts = vector.get_value('exec_option')
    opts['abort_on_error'] = 1
    self._test_limit(vector)
    # IMPALA-3337: when continuing on error, the error log should not show errors
    # (e.g. "Cancelled").
    opts['abort_on_error'] = 0
    self._test_limit(vector)

  def _test_limit(self, vector):
    # Use a small batch size so changing the limit affects the timing of cancellation
    vector.get_value('exec_option')['batch_size'] = 100
    query_template = "select * from alltypes limit %s"
    for i in range(1, 50):
      # Vary the limit to vary the timing of cancellation
      limit = (i * 100) % 1001 + 1
      result = self.execute_query(query_template % limit,
          vector.get_value('exec_option'),
          table_format=vector.get_value('table_format'))
      assert len(result.data) == limit
      # IMPALA-3337: The error log should be empty.
      assert not result.log
class TestScannersMixedTableFormats(ImpalaTestSuite):
  """Runs scanner queries against a table whose partitions mix file formats."""
  BATCH_SIZES = [0, 1, 16]

  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestScannersMixedTableFormats, cls).add_test_dimensions()
    # Only run with a single dimension format, since the table includes mixed formats.
    cls.ImpalaTestMatrix.add_dimension(
        create_uncompressed_text_dimension(cls.get_workload()))
    # CONSISTENCY FIX: previously this read TestScannersAllTableFormats.BATCH_SIZES,
    # silently ignoring the BATCH_SIZES constant defined on this class.
    cls.ImpalaTestMatrix.add_dimension(
        ImpalaTestDimension('batch_size', *cls.BATCH_SIZES))
    cls.ImpalaTestMatrix.add_dimension(
        ImpalaTestDimension('debug_action', *DEBUG_ACTION_DIMS))
    cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('mt_dop', *MT_DOP_VALUES))

  def test_mixed_format(self, vector):
    # Copy batch_size/debug_action dimensions into the matching query options.
    new_vector = deepcopy(vector)
    new_vector.get_value('exec_option')['batch_size'] = vector.get_value('batch_size')
    new_vector.get_value('exec_option')['debug_action'] = vector.get_value('debug_action')
    self.run_test_case('QueryTest/mixed-format', new_vector)
# Test case to verify the scanners work properly when the table metadata (specifically the
# number of columns in the table) does not match the number of columns in the data file.
class TestUnmatchedSchema(ImpalaTestSuite):
  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestUnmatchedSchema, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
    # Avro has a more advanced schema evolution process which is covered in more depth
    # in the test_avro_schema_evolution test suite.
    cls.ImpalaTestMatrix.add_constraint(
        lambda v: v.get_value('table_format').file_format != 'avro')

  def _create_test_table(self, vector, unique_database):
    """
    Creates the test table

    Cannot be done in a setup method because we need access to the current test vector
    """
    file_format = vector.get_value('table_format').file_format
    if file_format == 'orc':
      # TODO: Enable this test on non-HDFS filesystems once IMPALA-9365 is resolved.
      if not IS_HDFS: pytest.skip()
      # ORC tables are created and populated through Hive here.
      self.run_stmt_in_hive(
          "create table {0}.jointbl_test like functional.jointbl "
          "stored as orc".format(unique_database))
      self.run_stmt_in_hive(
          'insert into {0}.jointbl_test '
          'select * from functional_orc_def.jointbl'.format(unique_database))
      # Hive-side DDL is invisible to Impala until metadata is invalidated.
      self.execute_query_using_client(self.client,
          'invalidate metadata {0}.jointbl_test'.format(unique_database),
          vector)
    else:
      self.execute_query_using_client(self.client,
          "create external table {0}.jointbl_test like jointbl".format(
              unique_database), vector)
      # Update the location of the new table to point the same location as the old table
      location = self._get_table_location('jointbl', vector)
      self.execute_query_using_client(self.client,
          "alter table {0}.jointbl_test set location '{1}'".format(
              unique_database, location), vector)

  def test_unmatched_schema(self, vector, unique_database):
    if vector.get_value('table_format').file_format == 'kudu':
      pytest.xfail("IMPALA-2890: Missing Kudu DDL support")
    table_format = vector.get_value('table_format')
    # jointbl has no columns with unique values. When loaded in hbase, the table looks
    # different, as hbase collapses duplicates.
    if table_format.file_format == 'hbase':
      pytest.skip()
    self._create_test_table(vector, unique_database)
    self.run_test_case('QueryTest/test-unmatched-schema', vector, use_db=unique_database)
# Tests that scanners can read a single-column, single-row, 10MB table
class TestWideRow(ImpalaTestSuite):
  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestWideRow, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.add_dimension(
        create_exec_option_dimension(debug_action_options=DEBUG_ACTION_DIMS))
    # I can't figure out how to load a huge row into hbase
    cls.ImpalaTestMatrix.add_constraint(
        lambda v: v.get_value('table_format').file_format != 'hbase')

  def test_wide_row(self, vector):
    if vector.get_value('table_format').file_format == 'kudu':
      pytest.xfail("KUDU-666: Kudu support for large values")
    new_vector = deepcopy(vector)
    opts = new_vector.get_value('exec_option')
    mb = 1024 * 1024
    # Use a 5MB scan range, so we will have to perform 5MB of sync reads
    opts['max_scan_range_length'] = 5 * mb
    # We need > 10 MB of memory because we're creating extra buffers:
    # - 10 MB table / 5 MB scan range = 2 scan ranges, each of which may allocate ~20MB
    # - Sync reads will allocate ~5MB of space
    # - Result spooling require 32 MB initial reservation (2 page of 16 MB each) to fit
    #   10 MB row.
    # The 132MB value used here was determined empirically by raising the limit until the
    # query succeeded for all file formats -- I don't know exactly why we need this much.
    # TODO: figure out exact breakdown of memory usage (IMPALA-681)
    opts['mem_limit'] = 132 * mb
    # Specify that the query should able to handle 10 MB MAX_ROW_SIZE.
    opts['max_row_size'] = 10 * mb
    self.run_test_case('QueryTest/wide-row', new_vector)
class TestWideTable(ImpalaTestSuite):
  """Verifies scanners handle very wide tables (hundreds of columns)."""
  # TODO: expand this to more rows when we have the capability
  NUM_COLS = [250, 500, 1000]

  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestWideTable, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.add_dimension(
        create_exec_option_dimension(debug_action_options=DEBUG_ACTION_DIMS))
    cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension("num_cols", *cls.NUM_COLS))
    # To cut down on test execution time, only run in exhaustive.
    if cls.exploration_strategy() != 'exhaustive':
      cls.ImpalaTestMatrix.add_constraint(lambda v: False)

  def test_wide_table(self, vector):
    if vector.get_value('table_format').file_format == 'kudu':
      pytest.xfail("IMPALA-3718: Extend Kudu functional test support")
    num_cols = vector.get_value('num_cols')
    table_format = vector.get_value('table_format')
    is_hbase = table_format.file_format == 'hbase'
    # Due to the way HBase handles duplicate row keys, we have different number of
    # rows in HBase tables compared to HDFS tables.
    num_rows = 2 if is_hbase else 10
    db_name = QueryTestSectionReader.get_db_name(table_format)
    table_name = "%s.widetable_%s_cols" % (db_name, num_cols)

    result = self.client.execute("select count(*) from %s " % table_name)
    assert result.data == [str(num_rows)]

    expected_result = widetable.get_data(num_cols, num_rows, quote_strings=True)
    result = self.client.execute("select * from %s" % table_name)

    if is_hbase:
      assert len(result.data) == num_rows
      return

    types = result.column_types
    labels = result.column_labels
    expected = QueryTestResult(expected_result, types, labels, order_matters=False)
    actual = QueryTestResult(parse_result_rows(result), types, labels,
        order_matters=False)
    assert expected == actual
class TestHudiParquet(ImpalaTestSuite):
  # Exercises scans over Hudi-managed Parquet data.
  @classmethod
  def get_workload(cls):
    return 'functional-query'

  @classmethod
  def add_test_dimensions(cls):
    super(TestHudiParquet, cls).add_test_dimensions()
    cls.ImpalaTestMatrix.add_dimension(
        create_exec_option_dimension(debug_action_options=DEBUG_ACTION_DIMS))
    # Hudi tables here are only stored as Parquet, so restrict the matrix.
    cls.ImpalaTestMatrix.add_constraint(
        lambda v: v.get_value('table_format').file_format == 'parquet')

  def test_hudiparquet(self, vector):
    self.run_test_case('QueryTest/hudi-parquet', vector)
class TestIceberg(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestIceberg, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_exec_option_dimension(debug_action_options=DEBUG_ACTION_DIMS))
| |
set the property to.
"""
pass
@staticmethod
def SetFontWeight(element,value):
    """
    SetFontWeight(element: DependencyObject,value: FontWeight)

    Sets the value of the System.Windows.Controls.TextBlock.FontWeight attached property on a
    specified dependency object.

    element: The dependency object on which to set the value of the
        System.Windows.Controls.TextBlock.FontWeight property.
    value: The new value to set the property to.
    """
    # IronPython stub: the real implementation is supplied by the CLR at runtime.
    pass
@staticmethod
def SetForeground(element,value):
    """
    SetForeground(element: DependencyObject,value: Brush)

    Sets the value of the System.Windows.Controls.TextBlock.Foreground attached property on a
    specified dependency object.

    element: The dependency object on which to set the value of the
        System.Windows.Controls.TextBlock.Foreground property.
    value: The new value to set the property to.
    """
    # IronPython stub: the real implementation is supplied by the CLR at runtime.
    pass
@staticmethod
def SetLineHeight(element,value):
    """
    SetLineHeight(element: DependencyObject,value: float)

    Sets the value of the System.Windows.Controls.TextBlock.LineHeight attached property on a
    specified dependency object.

    element: The dependency object on which to set the value of the
        System.Windows.Controls.TextBlock.LineHeight property.
    value: The new value to set the property to.
    """
    # IronPython stub: the real implementation is supplied by the CLR at runtime.
    pass
@staticmethod
def SetLineStackingStrategy(element,value):
    """
    SetLineStackingStrategy(element: DependencyObject,value: LineStackingStrategy)

    Sets the value of the System.Windows.Controls.TextBlock.LineStackingStrategy attached property
    on a specified dependency object.

    element: The dependency object on which to set the value of the
        System.Windows.Controls.TextBlock.LineStackingStrategy property.
    value: The new value to set the property to.
    """
    # IronPython stub: the real implementation is supplied by the CLR at runtime.
    pass
@staticmethod
def SetTextAlignment(element,value):
    """
    SetTextAlignment(element: DependencyObject,value: TextAlignment)

    Sets the value of the System.Windows.Controls.TextBlock.TextAlignment attached property on a
    specified dependency object.

    element: The dependency object on which to set the value of the
        System.Windows.Controls.TextBlock.TextAlignment property.
    value: The new value to set the property to.
    """
    # IronPython stub: the real implementation is supplied by the CLR at runtime.
    pass
def ShouldSerializeBaselineOffset(self):
    """
    ShouldSerializeBaselineOffset(self: TextBlock) -> bool

    Returns a value that indicates whether the effective value of the
    System.Windows.Controls.TextBlock.BaselineOffset property should be serialized during
    serialization of a System.Windows.Controls.TextBlock object.

    Returns: true if the System.Windows.Controls.TextBlock.BaselineOffset property should be
        serialized; otherwise, false.
    """
    # IronPython stub: the real implementation is supplied by the CLR at runtime.
    pass
def ShouldSerializeInlines(self,manager):
    """
    ShouldSerializeInlines(self: TextBlock,manager: XamlDesignerSerializationManager) -> bool

    Returns a value that indicates whether the effective value of the
    System.Windows.Controls.TextBlock.Inlines property should be serialized during serialization of
    a System.Windows.Controls.TextBlock object.

    manager: A serialization service manager object for this object.
    Returns: true if the System.Windows.Controls.TextBlock.Inlines property should be serialized;
        otherwise, false.
    """
    # IronPython stub: the real implementation is supplied by the CLR at runtime.
    pass
def ShouldSerializeProperty(self,*args):
    """
    ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool

    Returns a value that indicates whether serialization processes should serialize the value for
    the provided dependency property.

    dp: The identifier for the dependency property that should be serialized.
    Returns: true if the dependency property that is supplied should be value-serialized;
        otherwise, false.
    """
    # IronPython stub inherited from DependencyObject; *args matches the CLR overload.
    pass
def ShouldSerializeText(self):
    """
    ShouldSerializeText(self: TextBlock) -> bool

    Returns a value that indicates whether the effective value of the
    System.Windows.Controls.TextBlock.Text property should be serialized during serialization of a
    System.Windows.Controls.TextBlock object.

    Returns: true if the System.Windows.Controls.TextBlock.Text property should be serialized;
        otherwise, false.
    """
    # IronPython stub: the real implementation is supplied by the CLR at runtime.
    pass
def __init__(self,*args):
    """x.__init__(...) initializes x; see x.__class__.__doc__ for signature."""
    # IronPython stub: actual initialization is performed by the CLR constructor.
    pass
@staticmethod
def __new__(self,inline=None):
    """
    __new__(cls: type)
    __new__(cls: type,inline: Inline)
    """
    # IronPython stub mirroring the two CLR constructor overloads; 'inline'
    # presumably provides the TextBlock's initial Inline content -- see the
    # .NET TextBlock(Inline) constructor for the authoritative contract.
    pass
# --- Stub property descriptors for the WPF TextBlock surface. ---
# The real getter/setter logic lives in the .NET runtime; each placeholder
# descriptor only records that the member is readable/writable for tooling.
Background = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Brush used to fill the background of the content area (get/set) -> Brush.
BaselineOffset = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Amount by which each line of text is offset from the baseline (get/set) -> float.
BreakAfter = property(lambda self: object(), lambda self, v: None, lambda self: None)
# LineBreakCondition: how content should break after this element (get) -> LineBreakCondition.
BreakBefore = property(lambda self: object(), lambda self, v: None, lambda self: None)
# LineBreakCondition: how content should break before this element (get) -> LineBreakCondition.
ContentEnd = property(lambda self: object(), lambda self, v: None, lambda self: None)
# TextPointer to the end of the TextBlock's content (get) -> TextPointer.
ContentStart = property(lambda self: object(), lambda self, v: None, lambda self: None)
# TextPointer to the beginning of the TextBlock's content (get) -> TextPointer.
DefaultStyleKey = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Key referencing this control's style when theme styles are used (get/set).
FontFamily = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Preferred top-level font family (get/set) -> FontFamily.
FontSize = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Top-level font size (get/set) -> float.
FontStretch = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Top-level font-stretching characteristics (get/set) -> FontStretch.
FontStyle = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Top-level font style (get/set) -> FontStyle.
FontWeight = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Top-level font weight (get/set) -> FontWeight.
Foreground = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Brush applied to the text contents (get/set) -> Brush.
HasEffectiveKeyboardFocus = property(lambda self: object(), lambda self, v: None, lambda self: None)
HostedElementsCore = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Enumerator over the elements hosted by this TextBlock (get).
InheritanceBehavior = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Scope limits for property-value inheritance and resource/RelativeSource lookup (get/set).
Inlines = property(lambda self: object(), lambda self, v: None, lambda self: None)
# InlineCollection of the top-level Inline elements comprising the content (get) -> InlineCollection.
IsEnabledCore = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Value that becomes UIElement.IsEnabled in derived classes (get).
IsHyphenationEnabled = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Whether automatic word hyphenation is enabled (get/set) -> bool.
LineHeight = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Height of each line of content (get/set) -> float.
LineStackingStrategy = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Mechanism determining the line box for each line of text (get/set) -> LineStackingStrategy.
LogicalChildren = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Enumerator over the TextBlock's logical children (get).
Padding = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Thickness of padding between the content-area boundaries and the content (get/set) -> Thickness.
StylusPlugIns = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Collection of stylus plug-in (customization) objects for this element (get).
Text = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Text contents of the TextBlock (get/set) -> str.
TextAlignment = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Horizontal alignment of text content (get/set) -> TextAlignment.
TextDecorations = property(lambda self: object(), lambda self, v: None, lambda self: None)
# TextDecorationCollection of effects applied to the text (get/set) -> TextDecorationCollection.
TextEffects = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Effects applied to the text content of this element (get/set) -> TextEffectCollection.
TextTrimming = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Trimming behavior when content overflows the content area (get/set) -> TextTrimming.
TextWrapping = property(lambda self: object(), lambda self, v: None, lambda self: None)
# How the TextBlock wraps text (get/set) -> TextWrapping.
Typography = property(lambda self: object(), lambda self, v: None, lambda self: None)
# Currently effective typography variations for this element's contents (get) -> Typography.
VisualBitmapEffect = property(lambda self: object(), lambda self, v: None, lambda self: None)
# BitmapEffect value for the Visual (get/set).
VisualBitmapEffectInput = property(lambda self: object(), lambda self, v: None, lambda self: None)
# BitmapEffectInput value for the Visual (get/set).
VisualBitmapScalingMode=property(lambda self: object(),lambda self,v: None,lambda | |
# Repository: albertometelli/remps
"""
Relative entropy policy model search
Reference: https://pdfs.semanticscholar.org/ff47/526838ce85d77a50197a0c5f6ee5095156aa.pdf
Idea: use REPS to find the distribution p(s,a,s') containing both policy and transition model.
Then matches the distributions minimizing the KL between the p and the induced distribution from
\pi and p_\omega
Follows the rllab implementation
"""
from copy import copy
import numpy as np
import scipy.optimize
import tensorflow as tf
from tensorflow.contrib.opt import ScipyOptimizerInterface
import baselines
import baselines.common.tf_util as U
from baselines import logger
class REPMS:
"""
Relative Entropy Policy Search (REPS)
References
----------
[1] J. Peters, K. Muelling, and Y. Altuen, "Relative Entropy Policy Search," in Proc. AAAI Conference on Artificial Intelligence, pp. 1607-1612, 2010.
"""
def __init__(
    self,
    epsilon=1e-3,
    L2_reg_dual=0.0,
    L2_reg_loss=0.0,
    L2_reg_projection=0,
    max_opt_itr=1000,
    optimizer=scipy.optimize.fmin_l_bfgs_b,
    tf_optimizer=ScipyOptimizerInterface,
    model=None,
    policy=None,
    env=None,
    projection_type="joint",
    **kwargs
):
    """Store REPMS hyper-parameters and collaborators.

    :param epsilon: max KL divergence between the new and old state kernel.
    :param L2_reg_dual: L2 regularization weight on the dual variables.
    :param L2_reg_loss: L2 regularization weight on the projection loss.
    :param L2_reg_projection: weight penalizing distance from the previous
        (frozen) parameters during projection.
    :param max_opt_itr: maximum number of batch optimization iterations.
    :param optimizer: dual optimizer; must support the same interface as
        scipy.optimize.fmin_l_bfgs_b.
    :param tf_optimizer: TF optimizer class used for the projection step.
    :param model: transition-model object (callable on observations).
    :param policy: policy object (callable on observations).
    :param env: environment providing observation/action space sizes.
    :param projection_type: "joint" (state-kernel projection) or "disjoint".
    """
    super().__init__(**kwargs)
    # Hyper-parameters.
    self.epsilon = epsilon
    self.L2_reg_dual = L2_reg_dual
    self.L2_reg_loss = L2_reg_loss
    self.L2_reg_projection = L2_reg_projection
    self.max_opt_itr = max_opt_itr
    self.projection_type = projection_type
    # Split regularization weights: the model side is unregularized.
    self.model_L2_reg_loss = 0
    self.policy_L2_reg_loss = L2_reg_loss
    # Optimizers.
    self.optimizer = optimizer
    self.tf_optimizer = tf_optimizer
    # Collaborators and graph bookkeeping (populated by initialize()).
    self.opt_info = None
    self.model = model
    self.policy = policy
    self.env = env
    self.dtype = tf.float64
    # Small constant guarding log() against zero arguments.
    self.epsilon_small = 1e-20
def initialize(self, session, summary_writer, theta=5):
    """Build the REPMS TensorFlow graph: placeholders, projection losses,
    the dual function and its gradient, plus summaries and eval helpers.

    :param session: TF session used for all subsequent evaluations.
    :param summary_writer: summary writer receiving the graph and scalars.
    :param theta: initial model parameter — NOTE(review): this argument is
        immediately overwritten by a random draw below; confirm intent.
    """
    self.sess = session
    # Init dual param values
    self.param_eta = 1.0
    # Adjust for linear feature vector.
    self.param_v = np.random.rand(self.env.observation_space_size * 2 + 1)
    self.summary_writer = summary_writer
    self.global_step = 0
    # Tf vars
    self.observations_ph = tf.placeholder(
        dtype=self.dtype, name="obs", shape=(None, self.env.observation_space_size)
    )
    # one hot tensor
    self.actions_one_hot_ph = tf.placeholder(
        dtype=self.dtype,
        name="action_one_hot",
        shape=(None, self.env.action_space_size),
    )
    # -1, 0, +1 tensor
    # or -1 +1 tensor
    # actual action taken or
    # all actions possible
    # e.g. [-1, 1; -1, 1 ...]
    self.actions_ph = tf.placeholder(
        dtype=self.dtype,
        name="action",
        shape=(None, self.env.n_actions if self.projection_type == "joint" else 1),
    )
    self.rewards_ph = tf.placeholder(
        dtype=self.dtype, name="rewards", shape=(None, 1)
    )
    self.returns_ph = tf.placeholder(
        dtype=self.dtype, name="returns", shape=(None,)
    )
    self.timesteps_ph = tf.placeholder(
        dtype=self.dtype, name="timestep", shape=(None,)
    )
    # next state centered on the previous one
    self.next_states_ph = tf.placeholder(
        dtype=self.dtype,
        name="next_states",
        shape=(None, self.env.observation_space_size),
    )
    # Feature difference variable representing the difference in feature
    # value of the next observation and the current observation \phi(s') -
    # \phi(s).
    self.feat_diff_ph = tf.placeholder(
        dtype=self.dtype,
        name="feat_diff",
        shape=(None, self.env.observation_space_size * 2 + 1),
    )
    # NOTE(review): the ``theta`` argument is discarded here.
    theta = np.random.rand()
    policy_tf, _ = self.policy(self.observations_ph, theta)
    # Frozen copy of the policy used as the projection reference point.
    other_policy = copy(self.policy)
    other_policy.name = "OtherPolicy"
    other_policy_tf, _ = other_policy(self.observations_ph, theta)
    self.policy_tf = policy_tf
    self.param_v_ph = tf.placeholder(
        name="param_v",
        shape=(self.env.observation_space_size * 2 + 1, 1),
        dtype=self.dtype,
    )
    self.param_eta_ph = tf.placeholder(name="eta", shape=(), dtype=self.dtype)
    # Symbolic sample Bellman error
    delta_v = self.rewards_ph + tf.matmul(self.feat_diff_ph, self.param_v_ph)
    print("Delta v: ", delta_v.get_shape())
    print("Policy: ", policy_tf.get_shape())
    # Policy and model loss loss (KL divergence, to be minimized)
    # NOTE(review): ``model_prob_tf`` and ``other_model_prob_tf`` are not
    # defined anywhere in this method or module — the model-construction
    # code appears to be missing; as written this raises NameError.
    state_kernel_before_sum = tf.multiply(model_prob_tf, policy_tf)
    other_state_kernel_before_sum = tf.multiply(
        other_model_prob_tf, other_policy_tf
    )
    state_kernel = tf.reduce_sum(state_kernel_before_sum, axis=1, keepdims=True)
    other_state_kernel = tf.reduce_sum(
        other_state_kernel_before_sum, axis=1, keepdims=True
    )
    # Importance weights, max-shifted for numerical stability.
    weights = tf.exp(
        delta_v * self.param_eta_ph - tf.reduce_max(delta_v * self.param_eta_ph)
    )
    weights_exp = delta_v * self.param_eta_ph - tf.reduce_max(
        delta_v * self.param_eta_ph
    )
    print("State kernel shape: ", state_kernel.get_shape())
    # For regularization add L2 reg term
    # use sum
    model_policy_loss = -tf.reduce_mean(
        tf.exp(
            (
                delta_v * self.param_eta_ph
                - tf.reduce_max(delta_v * self.param_eta_ph)
            )
        )
        * tf.log(state_kernel + self.epsilon_small)
    )
    # add l2 regularization
    # var_loss = # Loss function using L2 Regularization
    regularizers = [tf.reduce_sum(tf.square(x)) for x in self.policy.trainable_vars]
    total_loss = tf.add_n(regularizers)
    print("Reg loss shape:", total_loss.get_shape())
    model_policy_loss += self.L2_reg_loss * (total_loss)
    print("model policy loss", model_policy_loss.get_shape())
    # Penalize distance from the previous (frozen) policy parameters.
    model_policy_loss += self.L2_reg_projection * tf.add_n(
        [
            tf.reduce_sum(tf.square(x - y))
            for x, y in zip(self.policy.trainable_vars, other_policy.trainable_vars)
        ]
    )
    self.model_policy_tf_optimizer = self.tf_optimizer(
        model_policy_loss,
        var_list=self.model.trainable_vars + self.policy.trainable_vars,
    )
    # log of the policy dist
    logli = tf.log(
        tf.reduce_sum(
            tf.multiply(policy_tf, self.actions_one_hot_ph), axis=1, keepdims=True
        )
    )
    print("Policy: ", logli.get_shape())
    # Policy loss (KL divergence, to be minimized)
    policy_loss = -tf.reduce_mean(
        logli
        * tf.exp(
            delta_v * self.param_eta_ph - tf.reduce_max(delta_v * self.param_eta_ph)
        )
    )
    policy_regularizers = [
        tf.reduce_sum(tf.square(x)) for x in self.policy.trainable_vars
    ]
    policy_reg_loss = tf.add_n(policy_regularizers)
    policy_loss += self.policy_L2_reg_loss * (policy_reg_loss)
    print("Policy loss shape: ", policy_loss.get_shape())
    print("Policy vars", self.policy.trainable_vars)
    self.policy_tf_optimizer = self.tf_optimizer(
        policy_loss, var_list=self.policy.trainable_vars
    )
    # Dual-related symbolics
    # Symbolic dual
    # debug purposes
    # NOTE(review): here the max-shift is ``tf.reduce_max(delta_v) * eta``
    # whereas everywhere else it is ``tf.reduce_max(delta_v * eta)``; the two
    # differ when eta < 0 — confirm which form is intended.
    inside_log = tf.reduce_mean(
        tf.exp(
            delta_v * self.param_eta_ph - tf.reduce_max(delta_v) * self.param_eta_ph
        )
    )
    inside_log_f = U.function(
        inputs=[self.rewards_ph, self.feat_diff_ph]
        + [self.param_eta_ph, self.param_v_ph],
        outputs=inside_log,
    )
    # (1/self.param_eta_ph) * self.epsilon +
    dual = (
        (1 / self.param_eta_ph) * self.epsilon
        + (1 / self.param_eta_ph) * tf.log(inside_log)  # + self.epsilon_small
        + tf.reduce_max(delta_v)
    )
    # Add L2 regularization.
    dual += self.L2_reg_dual * (
        tf.square(self.param_eta_ph) + tf.square(1 / self.param_eta_ph)
    )  # + tf.reduce_sum(tf.square(self.param_v_ph)))
    # Symbolic dual gradient
    dual_grad = U.flatgrad(dual, [self.param_eta_ph, self.param_v_ph])
    # Eval functions.
    f_dual = U.function(
        inputs=[self.rewards_ph, self.feat_diff_ph]
        + [self.param_eta_ph, self.param_v_ph],
        outputs=dual,
    )
    f_dual_grad = U.function(
        inputs=[self.rewards_ph, self.feat_diff_ph]
        + [self.param_eta_ph, self.param_v_ph],
        outputs=dual_grad,
    )
    # Diagnostics: empirical KL estimates between weighted and uniform dists.
    max_delta_v_eta = tf.reduce_max(delta_v * self.param_eta_ph)
    exp_delta_v_eta_minus_max = tf.exp(
        delta_v * self.param_eta_ph - max_delta_v_eta
    )
    mean_delta_v_eta_minus_max = tf.reduce_mean(exp_delta_v_eta_minus_max)
    exp_delta_v_eta = tf.exp(delta_v * self.param_eta_ph)
    mean_delta_v_eta = tf.reduce_mean(exp_delta_v_eta)
    # KL(p||q) = (1/E[deltav]) * E(delta_v*(eta^-1))
    d_kl_pq = (
        tf.reduce_mean((delta_v * self.param_eta_ph) * exp_delta_v_eta_minus_max)
        / mean_delta_v_eta_minus_max
        - tf.log(mean_delta_v_eta_minus_max)
        - max_delta_v_eta
    )
    d_kl_pq_2 = tf.reduce_mean(
        delta_v * self.param_eta_ph * exp_delta_v_eta
    ) / mean_delta_v_eta - tf.log(mean_delta_v_eta)
    d_kl_p_hat_q = tf.reduce_mean(
        tf.log(state_kernel + self.epsilon_small)
        - tf.log(other_state_kernel + self.epsilon_small)
    )
    # NOTE(review): ``model_loss`` is referenced below but never defined above.
    self.opt_info = dict(
        f_dual=f_dual,
        f_dual_grad=f_dual_grad,
        model_policy_loss=model_policy_loss,
        policy_loss=policy_loss,
        model_loss=model_loss,
        inside_log=inside_log_f,
        state_kernel=state_kernel,
        delta_v=delta_v,
        d_kl_pq=d_kl_pq,
        d_kl_pq_2=d_kl_pq_2,
        d_kl_p_hat_q=d_kl_p_hat_q
        # model_grad = model_f_loss_grad
    )
    self.policy_tf = policy_tf
    # NOTE(review): ``model_logli`` is undefined in this method.
    self.model_logli = model_logli
    self.model_policy_loss = model_policy_loss
    self.weights = weights
    self.weights_exp = weights_exp
    # plot purpose
    mean_ret = tf.reduce_mean(self.returns_ph)
    mean_ts = tf.reduce_mean(self.timesteps_ph)
    tf.summary.scalar("Reward", mean_ret)
    tf.summary.scalar("Timesteps", mean_ts)
    tf.summary.scalar("Theta", tf.reduce_sum(self.model.get_theta()))
    tf.summary.scalar("KL", d_kl_pq)
    tf.summary.scalar("KL2", d_kl_pq_2)
    self.summary_writer.add_graph(self.sess.graph)
    self.summarize = tf.summary.merge_all()
    # NOTE(review): ``other_model`` and ``other_model_log_prob_tf`` are
    # undefined here — presumably built by the missing model-construction code.
    self.setModelParam = U.SetFromFlat(other_model.trainable_vars, dtype=tf.float64)
    self.setPolicyParam = U.SetFromFlat(
        other_policy.trainable_vars, dtype=tf.float64
    )
    self.other_policy_tf = other_policy_tf
    self.other_model_tf = other_model_log_prob_tf
    self.other_model = other_model
    self.other_policy = other_policy
def _features(self, path):
    """Feature map phi(s) used by the dual: [s, s**2, pos*vel] per timestep.

    :param path: sequence of 2-D observations (position, velocity).
    :return: array of shape (len(path), 5).
    """
    obs = np.asarray(path)
    position = obs[:, 0:1]
    velocity = obs[:, 1:2]
    return np.hstack((obs, obs ** 2, position * velocity))
# convert to the usage of my framework
# samples data contains:
# - rewards
# - observations : visited states
# - paths : list of observations, used to build next states and states
# (add to observation all but the last states)
# - actions: taken actions
# - actions_one_hot: one hot vector of taken actions
def train(self, samples_data, normalize_rewards=False):
# Init vars
rewards = samples_data["rewards"]
reward_list = samples_data["reward_list"]
actions = samples_data["actions"]
timesteps = samples_data["timesteps"]
actions_one_hot = samples_data["actions_one_hot"]
# unused
observations = samples_data["observations"]
# agent_infos = samples_data["agent_infos"]
# Compute sample Bellman error.
feat_diff = []
next_states = []
states = []
for (i, path) in enumerate(samples_data["paths"]):
feats = self._features(path)
obs = np.array(path)
# all but the first
# centered
next_states.append(obs[1:, :])
# all but the last
states.append(obs[:-1, :])
# feats = np.vstack([feats, np.zeros(feats.shape[1])])
feat_diff.append(feats[1:] - feats[:-1])
feat_diff = np.vstack(feat_diff)
states = np.vstack(states)
if self.projection_type == "joint":
actions = np.zeros((states.shape[0], 1))
actions = np.hstack((actions - 1, actions + 1))
next_states = np.vstack(next_states)
if normalize_rewards:
rewards = (rewards - np.min(rewards)) / (np.min(rewards) - np.max(rewards))
assert next_states.shape == states.shape
print("actions Shape", actions.shape)
#################
# Optimize dual #
#################
# Here we need to optimize dual through BFGS in order to obtain \eta
# value. Initialize dual function g(\theta, v). \eta > 0
# First eval delta_v
f_dual = self.opt_info["f_dual"]
f_dual_grad = self.opt_info["f_dual_grad"]
inside_log_f = self.opt_info["inside_log"]
# Set BFGS eval function
def eval_dual(input):
param_eta = input[0]
param_v = np.matrix(input[1:]).transpose()
if param_eta == 0.0:
return +np.inf
# print("Parameters: ", param_eta, param_v)
# inside_log_val = inside_log_f(*([rewards, feat_diff] + [param_eta, param_v]))
# print("Inside log", inside_log_val)
val = f_dual(*([rewards, feat_diff] + [param_eta, param_v]))
# print("Function value", val)
return val.astype(np.float64)
# Set BFGS gradient eval function
def eval_dual_grad(input):
param_eta = input[0]
param_v = np.matrix(input[1:]).transpose()
grad = f_dual_grad(*([rewards, feat_diff] + [param_eta, param_v]))
# eta_grad = np.matrix(np.float(grad[0]))
# v_grad = np.transpose(grad[1])
# grad = np.hstack([eta_grad, v_grad])
# print("Gradient", np.expand_dims(grad,axis=0).transpose())
return np.expand_dims(grad, axis=0).transpose()
# Initial BFGS parameter | |
from typing import Dict, Iterator, List, NamedTuple, Optional, Sequence, Set, Tuple
from eth_typing import BLSPubkey, Hash32
from eth_utils import ValidationError, encode_hex
from eth2._utils.bls import bls
from eth2._utils.hash import hash_eth2
from eth2.beacon.committee_helpers import compute_shuffled_index
from eth2.beacon.constants import (
BASE_REWARDS_PER_EPOCH,
DEPOSIT_CONTRACT_TREE_DEPTH,
FAR_FUTURE_EPOCH,
GENESIS_EPOCH,
)
from eth2.beacon.deposit_helpers import validate_deposit_proof
from eth2.beacon.epoch_processing_helpers import (
compute_activation_exit_epoch,
decrease_balance,
increase_balance,
)
from eth2.beacon.helpers import (
compute_domain,
compute_epoch_at_slot,
compute_signing_root,
compute_start_slot_at_epoch,
get_block_root,
get_block_root_at_slot,
get_domain,
get_randao_mix,
get_seed,
signature_domain_to_domain_type,
)
from eth2.beacon.signature_domain import SignatureDomain
from eth2.beacon.state_machines.forks.serenity.block_validation import (
_validate_checkpoint,
_validate_eligible_exit_epoch,
_validate_eligible_target_epoch,
_validate_slot_matches_target_epoch,
_validate_validator_has_not_exited,
_validate_validator_is_active,
_validate_validator_minimum_lifespan,
_validate_voluntary_exit_signature,
validate_attestation_slot,
validate_block_header_signature,
validate_block_is_new,
validate_block_parent_root,
validate_block_slot,
validate_is_slashable_attestation_data,
validate_proposer_slashing_headers,
validate_proposer_slashing_slot,
validate_randao_reveal,
)
from eth2.beacon.state_machines.forks.serenity.slot_processing import _process_slot
from eth2.beacon.types.attestation_data import AttestationData
from eth2.beacon.types.attestations import Attestation, IndexedAttestation
from eth2.beacon.types.attester_slashings import AttesterSlashing
from eth2.beacon.types.block_headers import BeaconBlockHeader
from eth2.beacon.types.blocks import BeaconBlock, BeaconBlockBody
from eth2.beacon.types.checkpoints import Checkpoint
from eth2.beacon.types.deposit_data import DepositMessage
from eth2.beacon.types.deposits import Deposit
from eth2.beacon.types.historical_batch import HistoricalBatch
from eth2.beacon.types.pending_attestations import PendingAttestation
from eth2.beacon.types.proposer_slashings import ProposerSlashing
from eth2.beacon.types.states import BeaconState
from eth2.beacon.types.validators import Validator
from eth2.beacon.types.voluntary_exits import SignedVoluntaryExit
from eth2.beacon.typing import (
Bitfield,
CommitteeIndex,
Epoch,
Gwei,
Slot,
ValidatorIndex,
)
from eth2.configs import Eth2Config
# Byte order for every integer <-> bytes conversion in this module.
ENDIANNESS = "little"
def integer_squareroot(n: int) -> int:
    """Return the largest integer ``x`` such that ``x**2 <= n``.

    Uses Newton's method on integers; assumes ``n >= 0``.
    """
    guess = n
    candidate = (guess + 1) // 2
    while candidate < guess:
        guess = candidate
        candidate = (guess + n // guess) // 2
    return guess
def xor(bytes_1: bytes, bytes_2: bytes) -> bytes:
    """Return the byte-wise exclusive-or of two (32-byte) strings.

    Truncates to the shorter input, matching ``zip`` semantics.
    """
    out = bytearray()
    for left, right in zip(bytes_1, bytes_2):
        out.append(left ^ right)
    return bytes(out)
# ShuffleList shuffles a list, using the given seed for randomness. Mutates the input list.
def shuffle_list(input: List[ValidatorIndex], seed: Hash32, config: Eth2Config) -> None:
    """Shuffle ``input`` in place via the swap-or-not network seeded by ``seed``."""
    _inner_shuffle_list(input, seed, True, config)
# UnshuffleList undoes a list shuffling using the seed of the shuffling. Mutates the input list.
def unshuffle_list(
    input: List[ValidatorIndex], seed: Hash32, config: Eth2Config
) -> None:
    """Invert ``shuffle_list`` in place; must be called with the same seed/config."""
    _inner_shuffle_list(input, seed, False, config)
# Hash-input buffer layout for _inner_shuffle_list:
# [32-byte seed | 1-byte round number | 4-byte position window].
_SHUFFLE_H_SEED_SIZE = 32
_SHUFFLE_H_ROUND_SIZE = 1
_SHUFFLE_H_POSITION_WINDOW_SIZE = 4
# Prefix hashed to derive the per-round pivot (seed + round only).
_SHUFFLE_H_PIVOT_VIEW_SIZE = _SHUFFLE_H_SEED_SIZE + _SHUFFLE_H_ROUND_SIZE
_SHUFFLE_H_TOTAL_SIZE = (
    _SHUFFLE_H_SEED_SIZE + _SHUFFLE_H_ROUND_SIZE + _SHUFFLE_H_POSITION_WINDOW_SIZE
)
# Shuffles or unshuffles, depending on the `dir` (true for shuffling, false for unshuffling
def _inner_shuffle_list(
    input: List[ValidatorIndex], seed: Hash32, dir: bool, config: Eth2Config
) -> None:
    """Apply the eth2 "swap-or-not" shuffle (``dir=True``) or its inverse
    (``dir=False``) to ``input`` in place.

    Batched-round port of the spec's per-index ``compute_shuffled_index``:
    each round derives a pivot from ``seed`` and the round number, then swaps
    mirrored index pairs whenever a seeded hash bit is set. Running the rounds
    in reverse order undoes the shuffle exactly.
    """
    if len(input) <= 1:
        # nothing to (un)shuffle
        return
    listSize = len(input)
    buf = bytearray([0] * _SHUFFLE_H_TOTAL_SIZE)
    r = 0
    if not dir:
        # Start at last round.
        # Iterating through the rounds in reverse, un-swaps everything, effectively un-shuffling
        # the list.
        r = config.SHUFFLE_ROUND_COUNT - 1
    # Seed is always the first 32 bytes of the hash input, we never have to change this part of the
    # buffer.
    buf[:_SHUFFLE_H_SEED_SIZE] = seed[:]
    while True:
        # spec: pivot = bytes_to_int(hash_eth2(seed + int_to_bytes1(round))[0:8]) % list_size
        # This is the "int_to_bytes1(round)", appended to the seed.
        buf[_SHUFFLE_H_SEED_SIZE] = r
        # Seed is already in place, now just hash the correct part of the buffer, and take a int
        # from it,
        # and modulo it to get a pivot within range.
        h = hash_eth2(buf[:_SHUFFLE_H_PIVOT_VIEW_SIZE])
        pivot = int.from_bytes(h[:8], byteorder=ENDIANNESS) % listSize
        # Split up the for-loop in two:
        # 1. Handle the part from 0 (incl) to pivot (incl). This is mirrored around (pivot / 2)
        # 2. Handle the part from pivot (excl) to N (excl). This is mirrored around
        #    ((pivot / 2) + (size/2))
        # The pivot defines a split in the array, with each of the splits mirroring their data
        # within the split.
        # Note that the mirror is strict enough to not consider swapping the index @mirror with
        # itself.
        mirror = (pivot + 1) >> 1
        # Since we are iterating through the "positions" in order, we can just repeat the hash
        # every 256th position.
        # No need to pre-compute every possible hash for efficiency like in the example code.
        # We only need it consecutively (we are going through each in reverse order however, but
        # same thing)
        #
        # spec: source = hash_eth2(seed + int_to_bytes1(round) + int_to_bytes4(position // 256))
        # - seed is still in 0:32 (excl., 32 bytes)
        # - round number is still in 32
        # - mix in the position for randomness, except the last byte of it,
        #   which will be used later to select a bit from the resulting hash.
        # We start from the pivot position, and work back to the mirror position (of the part left
        # to the pivot).
        # This makes us process each pair exactly once (instead of unnecessarily twice, like in the
        # spec)
        buf[_SHUFFLE_H_PIVOT_VIEW_SIZE:] = ((pivot >> 8) & 0xFFFF_FFFF).to_bytes(
            length=4, byteorder=ENDIANNESS
        )
        source = hash_eth2(buf)
        byteV = source[(pivot & 0xFF) >> 3]
        i, j = 0, pivot
        while i < mirror:
            # The pair is i,j. With j being the bigger of the two, hence the "position" identifier
            # of the pair.
            # Every 256th bit (aligned to j).
            if j & 0xFF == 0xFF:
                # just overwrite the last part of the buffer, reuse the start (seed, round)
                buf[_SHUFFLE_H_PIVOT_VIEW_SIZE:] = ((j >> 8) & 0xFFFF_FFFF).to_bytes(
                    length=4, byteorder=ENDIANNESS
                )
                source = hash_eth2(buf)
            # Same trick with byte retrieval. Only every 8th.
            if j & 0x7 == 0x7:
                byteV = source[(j & 0xFF) >> 3]
            bitV = (byteV >> (j & 0x7)) & 0x1
            if bitV == 1:
                # swap the pair items
                input[i], input[j] = input[j], input[i]
            i, j = i + 1, j - 1
        # Now repeat, but for the part after the pivot.
        mirror = (pivot + listSize + 1) >> 1
        end = listSize - 1
        # Again, seed and round input is in place, just update the position.
        # We start at the end, and work back to the mirror point.
        # This makes us process each pair exactly once (instead of unnecessarily twice, like in
        # the spec)
        buf[_SHUFFLE_H_PIVOT_VIEW_SIZE:] = ((end >> 8) & 0xFFFF_FFFF).to_bytes(
            length=4, byteorder=ENDIANNESS
        )
        source = hash_eth2(buf)
        byteV = source[(end & 0xFF) >> 3]
        i, j = pivot + 1, end
        while i < mirror:
            # Exact same thing (copy of above loop body)
            # --------------------------------------------
            # The pair is i,j. With j being the bigger of the two, hence the "position"
            # identifier of the pair.
            # Every 256th bit (aligned to j).
            if j & 0xFF == 0xFF:
                # just overwrite the last part of the buffer, reuse the start (seed, round)
                buf[_SHUFFLE_H_PIVOT_VIEW_SIZE:] = ((j >> 8) & 0xFFFF_FFFF).to_bytes(
                    length=4, byteorder=ENDIANNESS
                )
                source = hash_eth2(buf)
            # Same trick with byte retrieval. Only every 8th.
            if j & 0x7 == 0x7:
                byteV = source[(j & 0xFF) >> 3]
            bitV = (byteV >> (j & 0x7)) & 0x1
            if bitV == 1:
                # swap the pair items
                input[i], input[j] = input[j], input[i]
            i, j = i + 1, j - 1
        # --------------------------------------------
        # Advance (shuffle) or rewind (un-shuffle) the round counter.
        if dir:
            # -> shuffle
            r += 1
            if r == config.SHUFFLE_ROUND_COUNT:
                break
        else:
            if r == 0:
                break
            # -> un-shuffle
            r -= 1
def compute_committee_count(active_validators_count: int, config: Eth2Config) -> int:
    """Number of committees per slot for the given active-validator count.

    The raw count ``active // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE`` is
    clamped into ``[1, MAX_COMMITTEES_PER_SLOT]``.
    """
    per_slot = active_validators_count // config.SLOTS_PER_EPOCH
    raw_count = per_slot // config.TARGET_COMMITTEE_SIZE
    return max(1, min(config.MAX_COMMITTEES_PER_SLOT, raw_count))
# Type aliases describing the nesting of an epoch's committee assignment.
# as with indexed attestation order (index of validator within committee)
Committee = Sequence[ValidatorIndex]
SlotCommittees = Sequence[
    Committee
]  # by index of committee (len <= MAX_COMMITTEES_PER_SLOT)
EpochCommittees = Sequence[SlotCommittees]  # (len == SLOTS_PER_EPOCH)
# With a high amount of shards, or low amount of validators,
# some shards may not have a committee this epoch.
class ShufflingEpoch(object):
    """Committee shuffling data computed once for a given epoch."""

    epoch: Epoch
    active_indices: Sequence[ValidatorIndex]  # non-shuffled active validator indices
    # the active validator indices, shuffled into their committee
    shuffling: Sequence[ValidatorIndex]
    committees: EpochCommittees  # list of lists of slices of Shuffling
    # indices_bounded: (index, activation_epoch, exit_epoch) per validator.
    def __init__(
        self,
        state: BeaconState,
        indices_bounded: Sequence[Tuple[ValidatorIndex, Epoch, Epoch]],
        epoch: Epoch,
        config: Eth2Config,
    ):
        """Compute the active validator set for ``epoch`` and shuffle it with
        the epoch's attester-domain seed.

        NOTE(review): the class attributes ``shuffling`` and ``committees``
        are not assigned in the code visible here — confirm the committee
        slicing happens in a continuation of this constructor.
        """
        self.epoch = epoch
        self.config = config
        # Attester-domain seed for this epoch drives the shuffle.
        seed = get_seed(
            state,
            epoch,
            signature_domain_to_domain_type(SignatureDomain.DOMAIN_BEACON_ATTESTER),
            config,
        )
        # Validators active during ``epoch``: activated at or before it and not yet exited.
        self.active_indices = [
            index
            for (index, activation_epoch, exit_epoch) in indices_bounded
            if activation_epoch <= epoch < exit_epoch
        ]
        shuffling = list(self.active_indices)  # copy
        unshuffle_list(shuffling, seed, config)
| |
# gh_stars: 0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Models declaration for application ``django_mailbox``.
"""
from email.encoders import encode_base64
from email.message import Message as EmailMessage
from email.utils import formatdate, parseaddr, parsedate_tz, parsedate_to_datetime
from quopri import encode as encode_quopri
import base64
import email
import logging
import mimetypes
import os.path
import sys
import uuid
import six
from urllib import parse
import django
from django.conf import settings as django_settings
from django.core.files.base import ContentFile
from django.core.mail.message import make_msgid
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from model_utils.managers import InheritanceManager
from communication.utils import comm_utils
from communication.transports.harvest_transports import HarvestImapTransport, HarvestPop3Transport, HarvestGmailTransport, \
HarvestImapExchangeTransport
from communication.transports import transport_exceptions
from cryptographic_fields.fields import EncryptedCharField
from phonenumber_field.modelfields import PhoneNumberField
from oauth2client.contrib.django_util.models import CredentialsField
import assets
import crm
from crm.models import Person
from cedar_settings.models import GeneralSetting
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# For DRF Serializing See:
# http://www.django-rest-framework.org/api-guide/relations/#rest-framework-generic-relations
class ActiveObjectManager(models.Manager):
    """
    Manager that hides inactive rows.

    The model using this manager must define a boolean ``active`` field;
    only rows with ``active=True`` are ever returned.
    """
    def get_queryset(self):
        base_qs = super(ActiveObjectManager, self).get_queryset()
        return base_qs.filter(active=True)
class Communication(models.Model):
    """
    Sort-of parent object for Communication Type (Phone, Fax, Message, etc.) objects.

    Note: a post_delete signal is attached that will delete the comm_type instance when
    a Communication instance is deleted. Since CommunicationRelation objects will cascade
    we don't have to worry about those.
    """
    subject = models.TextField(max_length=1000)
    date = models.DateTimeField(verbose_name='Date & time of communication.')
    from_contacts = models.ManyToManyField(crm.models.Person, related_name='from_contact')
    to_contacts = models.ManyToManyField(crm.models.Person, related_name='to_contact')
    # Generic foreign keys to communication types (Fax, PhoneCall, Message, etc.)
    comm_type_ct = models.ForeignKey(ContentType)
    comm_type_oid = models.PositiveIntegerField()
    comm_type = GenericForeignKey('comm_type_ct', 'comm_type_oid')

    class Meta:
        ordering = ['-date']

    def __str__(self):
        return '{}: {}'.format(self.date, self.subject)

    @classmethod
    def create_communication(cls, subject, date, from_contacts, to_contacts, comm_type_obj):
        """
        Takes a communication type object, creates a communication instance and creates a relation between the two.
        :param subject: subject line for the communication
        :param date: datetime the communication took place
        :param from_contacts: iterable of crm Person instances (senders), may be falsy
        :param to_contacts: iterable of crm Person instances (recipients), may be falsy
        :param comm_type_obj: PhoneCall, Message, Fax, etc.
        :return: the saved Communication instance
        """
        comm = cls(
            subject=subject,
            date=date,
        )
        comm.comm_type = comm_type_obj
        # Save first: the instance needs a primary key before its
        # many-to-many fields can be assigned.
        comm.save()
        # NOTE(review): direct assignment to m2m managers only works on
        # Django < 1.10 (consistent with the django.core.urlresolvers import
        # at the top of this file); newer Django requires .set().
        if from_contacts:
            comm.from_contacts = from_contacts
        if to_contacts:
            comm.to_contacts = to_contacts
        comm.save()
        return comm

    @classmethod
    def create_related_communication(cls, subject, date, from_contacts, to_contacts, comm_type_obj, related_obj):
        """
        Same as create_communication(), but additionally links the new
        Communication to *related_obj* via a CommunicationRelation.
        :param subject:
        :param date:
        :param from_contacts:
        :param to_contacts:
        :param comm_type_obj: PhoneCall, Message, Fax, etc.
        :param related_obj: eg HER Prj, DEV Prj, etc.
        :return: the saved Communication instance

        Bug fix: the previous implementation assigned the many-to-many
        contact fields *before* the instance was first saved; Django raises
        ValueError when assigning m2m relations on an unsaved instance, so
        any call that supplied contacts failed. Delegating to
        create_communication() saves first and removes the duplicated logic.
        """
        comm = cls.create_communication(subject, date, from_contacts, to_contacts, comm_type_obj)
        CommunicationRelation(
            comm=comm,
            related_object=related_obj
        ).save()
        return comm

    @classmethod
    def get_communications_related_to(cls, related_object):
        """
        Takes some object (eg development project instance) and returns communication objects
        related to it.
        :param related_object: any model instance previously linked via CommunicationRelation
        :return: communication queryset
        """
        return cls.objects.filter(
            related_communication__related_object_oid=related_object.id,
            related_communication__related_object_ct=ContentType.objects.get_for_model(related_object)
        )

    def get_absolute_url(self):
        """
        :return: the url to the comm_type object not the parent communication object itself.
        """
        return self.comm_type.get_absolute_url()
class CommunicationRelation(models.Model):
    """
    Links a Communication to an arbitrary model instance via a generic
    foreign key. The expectation for now is that the related_object will be
    a development project or heritage project.
    """
    comm = models.ForeignKey(Communication, related_name='related_communication')
    related_object_ct = models.ForeignKey(ContentType)
    related_object_oid = models.PositiveIntegerField()
    related_object = GenericForeignKey('related_object_ct', 'related_object_oid')

    def __str__(self):
        return f"{self.comm.date}: {self.comm.comm_type_ct}: {self.related_object}"
class CommunicationAsset(assets.models.SecureAsset):
    """
    This is a deprecated model - created prior to generic link assets.
    Kept so existing rows remain readable; CommunicationFileRelation is the
    generic replacement.
    """
    @property
    def storage_string(self):
        # Storage prefix used by the SecureAsset machinery for these files.
        return "communication_assets"
    objects = InheritanceManager()
class CommunicationFileRelation(models.Model):
    """
    Provides a method for communication type instances to have a x-many
    relationship with any (asset) model instance(s). The presumption here is that
    the "asset" points to an implementation of the assets.SecureAsset class.
    """
    # Generic foreign key to the attached asset (expected: a SecureAsset subclass).
    asset_ct = models.ForeignKey(ContentType, related_name='communicationfilerelation_ct')
    asset_oid = models.PositiveIntegerField()
    asset = GenericForeignKey('asset_ct', 'asset_oid')
    # Generic foreign keys to communication types (Fax, PhoneCall, Message, etc.)
    comm_type_ct = models.ForeignKey(ContentType)
    comm_type_oid = models.PositiveIntegerField()
    comm_type = GenericForeignKey('comm_type_ct', 'comm_type_oid')
class CommunicationTypeAbstract(models.Model):
    """
    Abstract base for communication type models (Fax, PhoneCall, Message, ...):
    exposes the reverse generic relation back to the wrapping Communication.
    """
    communication = GenericRelation(Communication, content_type_field='comm_type_ct', object_id_field='comm_type_oid')
    class Meta:
        abstract = True
class MailAccount(models.Model):
    """
    Credentials and connection details for one mail server account.
    A MailAccount owns one Mailbox per server folder; harvesting iterates
    over its active mailboxes.
    """
    protocol_choices = (
        ('pop3', 'pop3'),
        ('imap', 'imap'),
        ('imap-exchange', 'imap-exchange'),
        ('gmail', 'imap-gmail')
    )
    email_address = models.EmailField(help_text="Email address for this account. May differ from username.")
    username = models.CharField(
        max_length=100,
        help_text="Username required for login to the mail server.")
    # Stored encrypted at rest; may be blank (e.g. gmail OAuth accounts).
    password = EncryptedCharField(max_length=50, blank=True, null=True)
    server_address = models.CharField(
        max_length=300,
        verbose_name="Address of the server",
        help_text="Address of the mail server. Eg: www.example.com, 192.168.5.1, etc.")
    protocol = models.CharField(
        max_length=20,
        choices=protocol_choices,
        default='imap',
        help_text="If you use gmail SSL must be enabled."
    )
    ssl = models.BooleanField(default=False)
    def get_folders(self):
        """
        Queries server via a temp mailbox for folder names
        :return: list of foldernames on the server.
        """
        # use a temporary (unsaved) mailbox for its connection:
        m = Mailbox(mail_account=self)
        return m.get_mail_folders()
    def update_folders(self):
        """
        Creates mailboxes for each folder returned from the server.
        :return: list of newly created Mailbox instances (not folder names).
        """
        new = []
        for folder in self.get_folders():
            mbx, created = Mailbox.objects.get_or_create(folder_name=folder, mail_account=self)
            if created:
                new.append(mbx)
        return new
    def harvest_mail(self):
        # Harvests only mailboxes flagged active for this account.
        comm_utils.harvest_mailboxes(self.mailbox_set.filter(active=True))
    def __str__(self):
        return '{} - {}'.format(self.username, self.server_address)
    class Meta:
        permissions = (
            ("harvest_mail_account", "Can run mailharvest on mail account"),
        )
class Mailbox(models.Model):
    """
    One folder on a MailAccount's server. All connection details derive from
    the `uri` property, which is parsed back apart by the other properties.
    """
    folder_name = models.CharField(max_length=300,
                                   default='INBOX',
                                   help_text='This is the un-url-quoted folder name')
    active = models.BooleanField(
        _(u'Active'),
        help_text=(_(
            "Check this e-mail inbox for new e-mail messages during polling "
            "cycles. This checkbox does not have an effect upon whether "
            "mail is collected here when this mailbox receives mail from a "
            "pipe, and does not affect whether e-mail messages can be "
            "dispatched from this mailbox. "
        )),
        blank=True,
        default=False,
    )
    mail_account = models.ForeignKey(MailAccount)
    incoming = models.BooleanField(
        default=True,
        verbose_name="Is Incoming",
        help_text="False if this is an outgoing mailbox (e.g. 'Sent Mail'), True if otherwise.")
    # hierarchy_delimiter = models.CharField(
    #     max_length=1,
    #     blank=True,
    #     null=True,
    #     verbose_name='IMAP folder hierarchy delimiter. Set automatically by the mailaccount when folders (mailboxes) are created.')
    objects = models.Manager()
    @property
    def uri_template(self):
        # Skeleton for `uri` / `uri_sani_pretty`; placeholders are filled with
        # (optionally URL-quoted) account credentials and the folder name.
        return '{protocol}://{user}:{password}@{server_address}?folder={folder}'
@property
def uri(self):
"""
Most important property of mailbox. Everything derives from this.
:return:
"""
if self.mail_account.ssl:
protocol = self.mail_account.protocol + "+ssl"
else:
protocol = self.mail_account.protocol
password = None
if self.mail_account.password:
password = parse.quote(self.mail_account.password)
return self.uri_template.format(
protocol=protocol,
user=parse.quote(self.mail_account.username),
password=password,
server_address=self.mail_account.server_address,
folder=parse.quote(self.folder_name)
)
@property
def uri_sani_pretty(self):
"""
Same as uri property but with user/pass excluded and things unquoted.
:return:
"""
return self.uri_template.format(
protocol=self.mail_account.protocol,
user="username",
password="password",
server_address=self.mail_account.server_address,
folder=self.folder_name
)
    @property
    def _protocol_info(self):
        # Parsed form of `uri`; every derived property reads from this.
        return parse.urlparse(self.uri)
    @property
    def _query_string(self):
        # Query parameters of `uri` as {name: [values]} (parse_qs unquotes).
        return parse.parse_qs(self._protocol_info.query)
    @property
    def _domain(self):
        # Hostname part of the URI, or None when absent.
        return self._protocol_info.hostname
@property
def folder(self):
"""Returns the folder to fetch mail from."""
# return parse.quote(self.folder_name)
folder = self._query_string.get('folder', None)[0]
# see BUG: https://bugs.python.org/issue13940
# if there are special characters we should quote them ourselves:
# folder = '"{}"'.format(folder)
return folder
@property
def folder_pretty(self):
# Todo: implement field to store imap folder hierachy delimiter. For now, assume it's a "."
f = self.folder
return f.split('.')[-1]
@property
def name(self):
return '{}__{}'.format(self.mail_account.username, self.folder)
    @property
    def port(self):
        """Returns the port to use for fetching messages."""
        # None unless a port is explicitly present in the URI.
        return self._protocol_info.port
    @property
    def username(self):
        """Returns the username to use for fetching messages."""
        return parse.unquote(self._protocol_info.username)
    @property
    def password(self):
        """Returns the password to use for fetching messages."""
        # NOTE(review): when the account has no password, `uri` renders the
        # literal string "None", which is what this returns -- confirm the
        # transports treat that as "no password".
        return parse.unquote(self._protocol_info.password)
    @property
    def from_email(self):
        # Sender address comes from the owning account, not from the URI.
        return self.mail_account.email_address
@property
def location(self):
"""Returns the location (domain and path) of messages."""
return self._domain if self._domain else '' + self._protocol_info.path
@property
def type(self):
"""Returns the 'transport' name for this mailbox."""
scheme = self._protocol_info.scheme.lower()
if '+' in scheme:
return scheme.split('+')[0]
return scheme
    @property
    def use_ssl(self):
        """Returns whether or not this mailbox's connection uses SSL."""
        # `uri` appends '+ssl' to the scheme when the account has ssl=True.
        return '+ssl' in self._protocol_info.scheme.lower()
    @property
    def use_tls(self):
        """Returns whether or not this mailbox's connection uses STARTTLS."""
        # NOTE(review): nothing in this file ever emits a '+tls' scheme, so
        # this is always False for URIs built by `uri` -- confirm intent.
        return '+tls' in self._protocol_info.scheme.lower()
@property
def archive(self):
"""Returns (if specified) the folder to archive messages to."""
archive_folder = self._query_string.get('archive', None)
if not archive_folder:
return None
return archive_folder[0]
def get_connection(self):
"""
Decides on the transport required and initiates the connection.
:return:
"""
# Define method-level variable that connect_to_transport() can reference outside of its own scope.
# I have doubts that this will work when connect_to_transport() is executed in its own process.
transport = None
if not self.uri:
transport = None
elif self.type == 'imap':
transport = HarvestImapTransport(
self.location,
port=self.port if self.port else None,
ssl=self.use_ssl,
tls=self.use_tls,
archive=self.archive,
folder=self.folder
)
elif self.type == 'imap-exchange':
transport = HarvestImapExchangeTransport(
self.location,
port=self.port if self.port else None,
ssl=self.use_ssl,
tls=self.use_tls,
archive=self.archive,
folder=self.folder
)
elif self.type == 'gmail':
mail_account = self.mail_account
credentials = mail_account.gmailcredential.credential
transport = HarvestGmailTransport(
self.location,
port=self.port if self.port else None,
ssl=True,
archive=self.archive,
credentials=credentials,
folder=self.folder
)
elif self.type == 'pop3':
transport = HarvestPop3Transport(
self.location,
port=self.port if self.port else None,
ssl=self.use_ssl
| |
import time
import h5py
import hdbscan
import numpy as np
import torch
from sklearn.cluster import MeanShift
from pytorch3dunet.datasets.hdf5 import SliceBuilder
from pytorch3dunet.unet3d.utils import get_logger
from pytorch3dunet.unet3d.utils import unpad
logger = get_logger('UNet3DPredictor')
class _AbstractPredictor:
def __init__(self, model, loader, output_file, config, **kwargs):
self.model = model
self.loader = loader
self.output_file = output_file
self.config = config
self.predictor_config = kwargs
@staticmethod
def _volume_shape(dataset):
# TODO: support multiple internal datasets
raw = dataset.raws[0]
if raw.ndim == 3:
return raw.shape
else:
return raw.shape[1:]
@staticmethod
def _get_output_dataset_names(number_of_datasets, prefix='predictions'):
if number_of_datasets == 1:
return [prefix]
else:
return [f'{prefix}{i}' for i in range(number_of_datasets)]
def predict(self):
raise NotImplementedError
class StandardPredictor(_AbstractPredictor):
    """
    Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
    Predictions from the network are kept in memory. If the results from the network don't fit into RAM
    use `LazyPredictor` instead.
    The output dataset names inside the H5 is given by `des_dataset_name` config argument. If the argument is
    not present in the config 'predictions{n}' is used as a default dataset name, where `n` denotes the number
    of the output head from the network.
    Args:
        model (Unet3D): trained 3D UNet model used for prediction
        data_loader (torch.utils.data.DataLoader): input data loader
        output_file (str): path to the output H5 file
        config (dict): global config dict
    """
    def __init__(self, model, loader, output_file, config, **kwargs):
        super().__init__(model, loader, output_file, config, **kwargs)
    def predict(self):
        """Run the model over the whole loader and write per-head probability
        maps to the output H5 file.

        Overlapping patch predictions are accumulated in-memory and divided
        by a per-voxel visit count in `_save_results`, i.e. probabilities
        are averaged over overlaps.
        """
        out_channels = self.config['model'].get('out_channels')
        if out_channels is None:
            # fall back to the distance-transform head's channel count
            out_channels = self.config['model']['dt_out_channels']
        prediction_channel = self.config.get('prediction_channel', None)
        if prediction_channel is not None:
            logger.info(f"Using only channel '{prediction_channel}' from the network output")
        device = self.config['device']
        output_heads = self.config['model'].get('output_heads', 1)
        logger.info(f'Running prediction on {len(self.loader)} batches...')
        # dimensionality of the output predictions
        volume_shape = self._volume_shape(self.loader.dataset)
        if prediction_channel is None:
            prediction_maps_shape = (out_channels,) + volume_shape
        else:
            # single channel prediction map
            prediction_maps_shape = (1,) + volume_shape
        logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')
        avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True)
        logger.info(f'Avoid block artifacts: {avoid_block_artifacts}')
        # create destination H5 file
        h5_output_file = h5py.File(self.output_file, 'w')
        # allocate prediction and normalization arrays
        logger.info('Allocating prediction and normalization arrays...')
        prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape,
                                                                              output_heads, h5_output_file)
        # Sets the module in evaluation mode explicitly (necessary for batchnorm/dropout layers if present)
        self.model.eval()
        # Set the `testing=true` flag otherwise the final Softmax/Sigmoid won't be applied!
        self.model.testing = True
        # Run predictions on the entire input dataset
        with torch.no_grad():
            for batch, indices in self.loader:
                # send batch to device
                batch = batch.to(device)
                # forward pass
                predictions = self.model(batch)
                # wrap predictions into a list if there is only one output head from the network
                if output_heads == 1:
                    predictions = [predictions]
                # for each output head
                for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,
                                                                          normalization_masks):
                    # convert to numpy array
                    prediction = prediction.cpu().numpy()
                    # for each batch sample
                    for pred, index in zip(prediction, indices):
                        # save patch index: (C,D,H,W)
                        if prediction_channel is None:
                            channel_slice = slice(0, out_channels)
                        else:
                            channel_slice = slice(0, 1)
                        index = (channel_slice,) + index
                        if prediction_channel is not None:
                            # use only the 'prediction_channel'
                            logger.info(f"Using channel '{prediction_channel}'...")
                            pred = np.expand_dims(pred[prediction_channel], axis=0)
                        logger.info(f'Saving predictions for slice:{index}...')
                        if avoid_block_artifacts:
                            # unpad in order to avoid block artifacts in the output probability maps
                            u_prediction, u_index = unpad(pred, index, volume_shape)
                            # accumulate probabilities into the output prediction array
                            prediction_map[u_index] += u_prediction
                            # count voxel visits for normalization
                            normalization_mask[u_index] += 1
                        else:
                            # accumulate probabilities into the output prediction array
                            prediction_map[index] += pred
                            # count voxel visits for normalization
                            normalization_mask[index] += 1
        # save results to the H5 file (also normalizes the accumulated maps)
        self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file, self.loader.dataset)
        # close the output H5 file
        h5_output_file.close()
    def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
        """Allocate one in-memory prediction array and one visit-count mask
        per output head. `output_file` is unused here but kept for signature
        parity with `LazyPredictor`."""
        # initialize the output prediction arrays
        prediction_maps = [np.zeros(output_shape, dtype='float32') for _ in range(output_heads)]
        # initialize normalization mask in order to average out probabilities of overlapping patches
        # NOTE(review): uint8 overflows if a voxel is visited > 255 times --
        # confirm patch overlap stays below that.
        normalization_masks = [np.zeros(output_shape, dtype='uint8') for _ in range(output_heads)]
        return prediction_maps, normalization_masks
    def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
        """Average the accumulated probabilities per voxel and write one
        gzip-compressed dataset per output head into `output_file`."""
        # save probability maps
        prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
        for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks,
                                                                          prediction_datasets):
            prediction_map = prediction_map / normalization_mask
            if dataset.mirror_padding:
                pad_width = dataset.pad_width
                logger.info(f'Dataset loaded with mirror padding, pad_width: {pad_width}. Cropping before saving...')
                prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width]
            logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
            output_file.create_dataset(prediction_dataset, data=prediction_map, compression="gzip")
class LazyPredictor(StandardPredictor):
    """
    Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
    Predicted patches are directly saved into the H5 and they won't be stored in memory. Since this predictor
    is slower than the `StandardPredictor` it should only be used when the predicted volume does not fit into RAM.
    The output dataset names inside the H5 is given by `des_dataset_name` config argument. If the argument is
    not present in the config 'predictions{n}' is used as a default dataset name, where `n` denotes the number
    of the output head from the network.
    Args:
        model (Unet3D): trained 3D UNet model used for prediction
        data_loader (torch.utils.data.DataLoader): input data loader
        output_file (str): path to the output H5 file
        config (dict): global config dict
    """
    def __init__(self, model, loader, output_file, config, **kwargs):
        super().__init__(model, loader, output_file, config, **kwargs)
    def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
        """Allocate chunked, gzip-compressed H5 datasets (instead of
        in-memory arrays) for the probability maps and normalization masks."""
        # allocate datasets for probability maps
        prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
        prediction_maps = [
            output_file.create_dataset(dataset_name, shape=output_shape, dtype='float32', chunks=True,
                                       compression='gzip')
            for dataset_name in prediction_datasets]
        # allocate datasets for normalization masks
        normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')
        normalization_masks = [
            output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True,
                                       compression='gzip')
            for dataset_name in normalization_datasets]
        return prediction_maps, normalization_masks
    def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
        """Normalize the H5-backed prediction maps patch-by-patch (to keep
        memory bounded) and delete the temporary normalization datasets.

        Fix: `logger.warn` is a deprecated alias of `logger.warning`; use the
        supported name.
        """
        if dataset.mirror_padding:
            logger.warning(
                f'Mirror padding unsupported in LazyPredictor. Output predictions will be padded with pad_width: {dataset.pad_width}')
        prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
        normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')
        # normalize the prediction_maps inside the H5
        for prediction_map, normalization_mask, prediction_dataset, normalization_dataset in zip(prediction_maps,
                                                                                                 normalization_masks,
                                                                                                 prediction_datasets,
                                                                                                 normalization_datasets):
            # split the volume into 27 patches and load each into memory separately
            logger.info(f'Normalizing {prediction_dataset}...')
            z, y, x = prediction_map.shape[1:]
            # take slices which are 1/27 of the original volume
            patch_shape = (z // 3, y // 3, x // 3)
            for index in SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape):
                logger.info(f'Normalizing slice: {index}')
                prediction_map[index] /= normalization_mask[index]
                # make sure to reset the slice that has been visited already in order to avoid 'double' normalization
                # when the patches overlap with each other
                normalization_mask[index] = 1
            logger.info(f'Deleting {normalization_dataset}...')
            del output_file[normalization_dataset]
class EmbeddingsPredictor(_AbstractPredictor):
    """
    Applies the embedding model on the given dataset and saves the result in the `output_file` in the H5 format.
    The resulting volume is the segmentation itself (not the embedding vectors) obtained by clustering embeddings
    with HDBSCAN or MeanShift algorithm patch by patch and then stitching the patches together.
    """
    def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, noise_label=-1, **kwargs):
        """`clustering` selects the algorithm ('hdbscan' or 'meanshift');
        `iou_threshold` and `noise_label` control the patch stitching."""
        super().__init__(model, loader, output_file, config, **kwargs)
        self.iou_threshold = iou_threshold
        self.noise_label = noise_label
        # NOTE(review): this string assignment is immediately overwritten by
        # the _get_clustering(...) result below; kept as-is in case
        # _get_clustering reads self.clustering -- confirm and simplify.
        self.clustering = clustering
        assert clustering in ['hdbscan', 'meanshift'], 'Only HDBSCAN and MeanShift are supported'
        logger.info(f'IoU threshold: {iou_threshold}')
        self.clustering_name = clustering
        self.clustering = self._get_clustering(clustering, kwargs)
def predict(self):
device = self.config['device']
output_heads = self.config['model'].get('output_heads', 1)
logger.info(f'Running prediction on {len(self.loader)} patches...')
# dimensionality of the the output segmentation
volume_shape = self._volume_shape(self.loader.dataset)
logger.info(f'The shape of the output segmentation (DHW): {volume_shape}')
logger.info('Allocating segmentation array...')
# initialize the output prediction arrays
output_segmentations = [np.zeros(volume_shape, dtype='int32') for _ in range(output_heads)]
# initialize visited_voxels arrays
visited_voxels_arrays = [np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)]
# Sets the module in evaluation mode explicitly
self.model.eval()
self.model.testing = True
# Run predictions on the entire input dataset
with torch.no_grad():
for batch, indices in self.loader:
# logger.info(f'Predicting embeddings for slice:{index}')
# send batch to device
batch = batch.to(device)
# forward pass
embeddings = self.model(batch)
# wrap predictions into a list if there is only one output head from the network
if output_heads == 1:
embeddings = [embeddings]
for prediction, output_segmentation, visited_voxels_array in zip(embeddings, output_segmentations,
visited_voxels_arrays):
# convert to numpy array
prediction = prediction.cpu().numpy()
# iterate sequentially because of the current simple stitching that we're using
for pred, index in zip(prediction, indices):
# convert embeddings to segmentation with hdbscan clustering
segmentation = self._embeddings_to_segmentation(pred)
# stitch patches
self._merge_segmentation(segmentation, index, output_segmentation, visited_voxels_array)
# save results
with h5py.File(self.output_file, 'w') as output_file:
prediction_datasets = self._get_output_dataset_names(output_heads,
prefix=f'segmentation/{self.clustering_name}')
for output_segmentation, | |
#!/usr/bin/env python3
""" HIASCDI Entities Module.
This module provides the functionality to create, retrieve
and update HIASCDI entities.
MIT License
Copyright (c) 2021 Asociación de Investigacion en Inteligencia Artificial
Para la Leucemia <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files(the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Contributors:
- <NAME>
"""
import json
import jsonpickle
import os
import sys
from mgoquery import Parser
from modules.subscriptions import subscriptions
class entities():
    """ HIASCDI Entities Module.
    This module provides the functionality to create, retrieve
    and update HIASCDI entities.
    """
    def __init__(self, helpers, mongodb, broker):
        """ Initializes the class.

        :param helpers: shared helpers object (provides logger and confs).
        :param mongodb: MongoDB access layer used for entity queries.
        :param broker: broker used for casting values and building responses.
        """
        self.helpers = helpers
        self.program = "HIASCDI Entities Module"
        self.mongodb = mongodb
        self.broker = broker
        # The subscriptions handler shares the same helpers/db/broker.
        self.subscriptions = subscriptions(
            self.helpers, self.mongodb, self.broker)
        self.helpers.logger.info(
            self.program + " initialization complete.")
def get_entities(self, arguments, accepted=[]):
""" Gets entity data from the MongoDB.
You can access this endpoint by naviating your
browser to https://YourServer/hiascdi/v1/entities If
you are not logged in to the HIAS network you will
be shown an authentication pop up where you should
provide your HIAS network user and password.
References:
FIWARE-NGSI v2 Specification
https://fiware.github.io/specifications/ngsiv2/stable/
Reference
- Entities
- List entities
"""
params = []
cparams = []
sort = []
query = {}
headers = {}
keyValues_opt = False
count_opt = False
values_opt = False
unique_opt = False
options = arguments.get('options')
# Processes the options parameter
options = options if options is not None else None
if options is not None:
options = options.split(",")
for option in options:
keyValues_opt = True if option == "keyValues" else keyValues_opt
values_opt = True if option == "values" else values_opt
unique_opt = True if option == "unique" else unique_opt
count_opt = True if option == "count" else count_opt
# Removes the MongoDB ID
fields = {
'_id': False
}
if arguments.get('type') is not None:
# Sets a type query
eor = []
types = arguments.get('type').split(",")
if len(types) == 1:
query.update({"type":
{'$in': [types[0]]}
})
else:
for eid in types:
eor.append({"type":
{'$in': [eid]}
})
params.append({"$or": eor})
elif arguments.get('typePattern') is not None:
query.update({"type":
{'$regex': arguments.get('typePattern')}
})
if arguments.get('id') is not None:
# Sets a id query
eor = []
ids = arguments.get('id').split(",")
if len(ids) == 1:
query.update({"id":
{'$in': [ids[0]]}
})
else:
for eid in ids:
eor.append({"id":
{'$in': [eid]}
})
params.append({"$or": eor})
elif arguments.get('idPattern') is not None:
query.update({"id":
{'$regex': arguments.get('idPattern')}
})
if arguments.get('category') is not None:
# Sets a category query
eor = []
categories = arguments.get('category').split(",")
if len(categories) == 1:
query.update({"category.value":
{'$in': [categories[0]]}
})
else:
for category in categories:
eor.append({"category.value":
{'$in': [category]}
})
params.append({"$or": eor})
attribs = []
if arguments.get('attrs') is not None:
# Sets a attrs query
attribs = arguments.get('attrs').split(",")
if '*' in attribs:
# Removes builtin attributes
if 'dateCreated' not in attribs:
fields.update({'dateCreated': False})
if 'dateModified' not in attribs:
fields.update({'dateModified': False})
if 'dateExpired' not in attribs:
fields.update({'dateExpired': False})
else:
for attr in attribs:
fields.update({attr: True})
mattribs = []
if arguments.get('metadata') is not None:
# Sets a metadata query
mattribs = arguments.get('metadata').split(",")
if '*' in mattribs:
# Removes builtin attributes
if 'dateCreated' not in mattribs:
fields.update({'dateCreated': False})
if 'dateModified' not in mattribs:
fields.update({'dateModified': False})
if 'dateExpired' not in mattribs:
fields.update({'dateExpired': False})
else:
for attr in mattribs:
fields.update({attr: True})
if arguments.get('q') is not None:
# Sets a q query
qs = arguments.get('q').split(";")
for q in qs:
if "||" in q:
qor = q.split("||")
ors = {}
for qori in qor:
if "==" in qori:
qp = qori.split("==")
ors.update({qp[0]:
{'$in': [self.broker.cast(qp[1])]}
})
elif ":" in qori:
qp = qori.split(":")
ors.update({qp[0]:
{'$in': [self.broker.cast(qp[1])]}
})
elif "!=" in qori:
qp = qori.split("!=")
ors.update({qp[0]:
{'$ne': self.broker.cast(qp[1])}
})
elif ">=" in qori:
qp = qori.split(">=")
ors.update({qp[0]:
{'$gte': self.broker.cast(qp[1])}
})
elif "<=" in qori:
qp = qori.split("<=")
ors.update({qp[0]:
{'$lte': self.broker.cast(qp[1])}
})
elif "<" in qori:
qp = qori.split("<")
ors.update({qp[0]:
{'$lt': self.broker.cast(qp[1])}
})
elif ">" in qori:
qp = qori.split(">")
ors.update({qp[0]:
{'$gt': self.broker.cast(qp[1])}
})
query.update({'$or': ors })
elif "==" in q:
qp = q.split("==")
query.update({qp[0]:
{'$in': [self.broker.cast(qp[1])]}
})
elif ":" in q:
qp = q.split(":")
query.update({qp[0]:
{'$in': [self.broker.cast(qp[1])]}
})
elif "!=" in q:
qp = q.split("!=")
query.update({qp[0]:
{'$ne': self.broker.cast(qp[1])}
})
elif ">=" in q:
qp = q.split(">=")
query.update({qp[0]:
{'$gte': self.broker.cast(qp[1])}
})
elif "<=" in q:
qp = q.split("<=")
query.update({qp[0]:
{'$lte': self.broker.cast(qp[1])}
})
elif "<" in q:
qp = q.split("<")
query.update({qp[0]:
{'$lt': self.broker.cast(qp[1])}
})
elif ">" in q:
qp = q.split(">")
query.update({qp[0]:
{'$gt': self.broker.cast(qp[1])}
})
elif arguments.get('mq') is not None:
# Sets an mq query
qs = arguments.get('mq').split(";")
for q in qs:
if "==" in q:
qp = q.split("==")
query.update({qp[0]:
{'$in': [self.broker.cast(qp[1])]}
})
elif ":" in q:
qp = q.split(":")
query.update({qp[0]:
{'$in': [self.broker.cast(qp[1])]}
})
elif "!=" in q:
qp = q.split("!=")
query.update({qp[0]:
{'$ne': self.broker.cast(qp[1])}
})
elif ">=" in q:
qp = q.split(">=")
query.update({qp[0]:
{'$gte': self.broker.cast(qp[1])}
})
elif "<=" in q:
qp = q.split("<=")
query.update({qp[0]:
{'$lte': self.broker.cast(qp[1])}
})
elif "<" in q:
qp = q.split("<")
query.update({qp[0]:
{'$lt': self.broker.cast(qp[1])}
})
elif ">" in q:
qp = q.split(">")
query.update({qp[0]:
{'$gt': self.broker.cast(qp[1])}
})
# Sets a geospatial query
if arguments.get('georel') is not None and \
+ arguments.get('geometry') is not None and \
+ arguments.get('coords') is not None:
georels = arguments.get('georel').split(";")
georelslen = len(georels)
coords = arguments.get('coords').split(";")
coordslen = len(coords)
geometry = arguments.get('geometry').capitalize()
geotype = georels[0]
if geotype == 'near':
# Near geospatial query
if geometry != "Point":
return self.broker.respond(
400, self.helpers.confs["errorMessages"]["400b"],
{}, False, accepted)
if georelslen < 2:
return self.broker.respond(
400, self.helpers.confs["errorMessages"]["400b"],
{}, False, accepted)
if coordslen > 1:
return self.broker.respond(
400, self.helpers.confs["errorMessages"]["400b"],
{}, False, accepted)
data = {"location.value": {
"$near": {
"$geometry": {
"type": geometry,
"coordinates": [
float(p) for p in coords[0].split(",")]
}
}
}}
modifiers = georels[1:]
for modifier in modifiers:
msplit = modifier.split(":")
data["location.value"]["$near"].update(
{"$"+msplit[0]: int(msplit[1])})
query.update(data)
elif geotype == 'intersects':
# Intersects geospatial query
if geometry != "Polygone":
return self.broker.respond(
400, self.helpers.confs["errorMessages"]["400b"],
{}, False, accepted)
if coordslen > 4:
return self.broker.respond(
400, self.helpers.confs["errorMessages"]["400b"],
{}, False, accepted)
polygone = []
for poly in coords:
polygone.append([
float(p) for p in poly.split(",")])
query.update({"location.value": {
"$geoIntersects": {
"$geometry": {
"type": geometry,
"coordinates": polygone
}
}
}})
elif geotype == 'coveredBy':
# coveredBy geospatial query
if geometry != "Polygone":
return self.broker.respond(
400, self.helpers.confs["errorMessages"]["400b"],
{}, False, accepted)
if coordslen > 4:
return self.broker.respond(
400, self.helpers.confs["errorMessages"]["400b"],
{}, False, accepted)
polygone = []
for poly in coords:
polygone.append([
float(p) for p in poly.split(",")])
query.update({"location.value": {
"$geoWithin": {
"$geometry": {
"type": geometry,
"coordinates": polygone
}
}
}})
elif geotype == 'equals':
# Equals geospatial query
eor = []
coords = arguments.get('coords').split(";")
for coord in coords:
coord = coord.split(",")
eor.append({"location.value.coordinates": [
float(coord[0]),
float(coord[1])]})
params.append({"$or": eor})
elif geotype == 'disjoint':
# Disjoint geospatial query
return self.broker.respond(
501, self.helpers.confs["errorMessages"][str(501)],
{}, False, accepted)
else:
# Non-supported geospatial query
return self.broker.respond(
400, self.helpers.confs["errorMessages"]["400b"],
{}, False, accepted)
# TO REMOVE
if arguments.get('values') is not None:
valuesArr = arguments.get('values').split(",")
for value in valuesArr:
pair = value.split("|")
query.update(
{pair[0]: int(pair[1]) if pair[1].isdigit() else pair[1]})
if len(params):
query.update({"$and": params})
# Sets the query ordering
if arguments.get('orderBy') is not None:
orders = arguments.get('orderBy').split(",")
for order in orders:
if order[0] is "!":
orderBy = -1
order = order[1:]
else:
orderBy = 1
sort.append((order, orderBy))
# Prepares the offset
if arguments.get('offset') is None:
offset = False
else:
offset = int(arguments.get('offset'))
# Prepares the query limit
| |
#!/usr/bin/env python
###############################################################################
# AVR -> 6502 reassembler driver (Python 2 era script).
# Compiles a C++ test program with avr-g++ and produces a disassembly listing;
# the listing is presumably re-parsed by the lexer/parser below — confirm.
###############################################################################
import os, sys
from copy import deepcopy
#toz - syntax error r19 - missing opcode sbc
# cant add opcode due to ambiguous parse (sbc/sbci)
###############################################################################
# Build: assembly output (.avr), debug ELF (test.o), mixed source+disasm (.lst)
os.system("avr-g++ -O3 test.cpp -S -o test.avr")
os.system("avr-g++ -g -O3 test.cpp -o test.o")
os.system("avr-objdump -t -S -d test.o > test.lst")
###############################################################################
# LEXER
###############################################################################
import ply.lex as lex
# Token set for the AVR assembly-listing lexer (PLY requires this tuple name).
tokens = (
    'IDENTIFIER',
    'DIRECTIVE',
    'HEXNUMBER',
    'NUMBER',
    "COMMENT",
    "OPCODE",
    'STRING',
    'NEWLINE'
)
# Single-character tokens passed through to the parser verbatim.
literals = "+-*/()=:;,.@"
t_IDENTIFIER = r'[_.a-zA-Z][_.a-zA-Z0-9]*'
# NOTE(review): 'stabn' appears twice in this alternation (harmless duplicate).
t_DIRECTIVE = r'\.(stabn|stabs|stabd|stabn|file|text|global|type|section|startup|data|size|word|ident)'
# C-style /* ... */ block comments.
t_COMMENT = r'/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/'
t_STRING = r'\"(.+?)?\"'
# Recognized AVR mnemonics. 'sbc' is deliberately absent: per the header note,
# adding it makes the sbc/sbci parse ambiguous.
t_OPCODE = r'(ldi|lds|st|rjmp|lpm|add|adc|adiw|cpi|cpc|brne|sbci|subi|ret)'
t_HEXNUMBER = r'0[xX][\da-f]+'
t_NUMBER = r'[\d]+'
# Define a rule so we can track line numbers
def t_NEWLINE(t):
    r'\n+'
    # The docstring above is the PLY token regex — it must not be edited.
    t.lexer.lineno += len(t.value)
    t.type = "NEWLINE"
    # NOTE(review): no `return t`, so PLY discards the token and the t.type
    # assignment has no effect — confirm the grammar never needs NEWLINE.
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
# Error handling rule
def t_error(t):
    # Report the offending character, then resynchronize one char further on.
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
lexer = lex.lex() # Build the lexer
###############################################################################
# reassembler contextual data
###############################################################################
# AVR r26..r31 double as the X/Y/Z pointer pairs; emit their conventional
# names instead of the raw register numbers.
_regmap = {
    "r26": "XL",
    "r27": "XH",
    "r28": "YL",
    "r29": "YH",
    "r30": "ZL",
    "r31": "ZH"
}
class context:
    """Shared reassembler state: symbol tables and the two program counters."""
    def __init__(self):
        self._identifiers = {}  # symbol name -> value (presumably set by ASSIGN handling — confirm)
        self._labels = {}       # label name -> AVR address
        self._avr_pc = 0        # current input (AVR) program counter
        self._mos_pc = 4        # current output (6502) program counter; starts at 4 — reason not visible here
        self._PCMAP = {}        # AVR pc -> 6502 pc translation table
    def mapitem(self,item,rev=False,comment=True):
        # Translate one operand token: pointer registers get symbolic names,
        # labels/identifiers resolve through _PCMAP, "." is the current pc.
        # rev=True puts the symbolic form first; comment=False yields a bare
        # decimal string.
        if item in _regmap:
            item = _regmap[item]
        elif item in self._labels:
            mapped = self._labels[item]
            if mapped in main_ctx._PCMAP:
                mapped = main_ctx._PCMAP[mapped]
            if comment:
                if rev:
                    item = "%s /* $%04x */" % (item,mapped)
                else:
                    # NOTE(review): this branch formats the UNMAPPED
                    # self._labels[item] while every sibling branch uses the
                    # PCMAP-translated `mapped` — confirm this is intended.
                    item = "$%04x /* %s */" % (self._labels[item],item)
            else:
                item = "%d" % (mapped)
        elif item in self._identifiers:
            mapped = self._identifiers[item]
            if mapped in main_ctx._PCMAP:
                mapped = main_ctx._PCMAP[mapped]
            if comment:
                if rev:
                    item = "%s /* %s */" % (item,mapped)
                else:
                    item = "%s /* %s */" % (mapped,item)
            else:
                item = "%d" % (mapped)
        elif item == ".":
            # "." denotes the current AVR location counter.
            if comment:
                if rev:
                    item = ". /* $%04x */" % (self._avr_pc)
                else:
                    item = "$%04x /* . */" % (self._avr_pc)
            else:
                item = "%d" % (self._avr_pc)
        return item
main_ctx = context()  # module-wide singleton; every emitter below mutates it
_output_items = []    # accumulated output records (consumers not visible in this chunk)
###############################################################################
def genabsaddr(addr):
    # Resolve an AVR operand (pointer-register name, 'rN' register, '$hex'
    # literal, identifier/label, plain int, or expr_node) to an absolute
    # integer address. Raises AssertionError on unresolvable operands.
    addrtype = type(addr)
    if addrtype == str:
        # BUG FIX: was `addrtype == type(str)`, i.e. a comparison against the
        # metaclass `type`, so the string branch could never be taken.
        if addr=='X':
            addr = 26   # X pointer pair base (r26/r27)
        elif addr=='Y':
            addr = 28
        elif addr=='Z':
            addr = 30
        elif addr[0]=='r':
            addr = int(addr[1:])        # plain register rN
        elif addr[0]=='$':
            # BUG FIX: was int("0x"+addr[1:]) which raises ValueError —
            # int() needs an explicit base for hex text.
            addr = int(addr[1:], 16)
        elif addr in main_ctx._identifiers:
            addr = int(main_ctx._identifiers[addr])
            if addr in main_ctx._PCMAP:
                addr = main_ctx._PCMAP[addr]   # remap AVR pc -> 6502 pc
        elif addr in main_ctx._labels:
            addr = int(main_ctx._labels[addr])
            if addr in main_ctx._PCMAP:
                addr = main_ctx._PCMAP[addr]
        else:
            addr = int(addr)            # decimal literal in string form
    elif addrtype == int:
        # BUG FIX: was `addrtype == type(int)` (never true, see above).
        if addr<0:
            addr = addr&0xff            # wrap negatives to a byte
    elif isinstance(addr,expr_node):
        addr = addr.eval()
    else:
        # Debug dump; parenthesized so it is valid Python 2 and 3
        # (was a bare py2 `print addr, addrtype` statement).
        print("%s %s" % (addr, addrtype))
        assert(False)
    return addr
###############################################################################
class expr_node:
    """Tiny binary-expression AST node; operands resolve via main_ctx lazily."""
    def __init__(self,a,op,b):
        self._a = a
        self._op = op
        self._b = b
    def __str__(self):
        # With an operator: render the raw operands infix-style.
        if not self._op:
            return "expr_node( %s )" % (main_ctx.mapitem(self._a))
        return "%s %s %s" % (self._a, self._op, self._b)
    def eval(self):
        # Resolve both operands to bare numeric strings, then apply the
        # operator. Only "+" is implemented; anything else yields None.
        left = main_ctx.mapitem(self._a, comment=False)
        right = main_ctx.mapitem(self._b, comment=False)
        if self._op == "+":
            return int(left) + int(right)
        return None
###############################################################################
# PARSER
###############################################################################
# Standard arithmetic precedence for the expression grammar below.
precedence = (
    ('left', '+', '-'),
    ('left', '*', '/'),
    #('right', 'UMINUS'),
)
#######################################
# Side-channel accumulator: p_directiveitem appends every directive operand here.
diritems = []
def p_directiveitem(p):
    '''directiveitem : STRING
                     | IDENTIFIER
                     | DIRECTIVE
                     | ","
                     | "@"
                     | "-"
                     | NUMBER
                     | HEXNUMBER'''
    # The docstring above is the PLY grammar rule — do not edit it.
    # Operands are collected in the module-level `diritems` list rather than
    # being threaded through p[0].
    diritems.append(p[1])
def p_directiveitems(p):
    '''directiveitems : directiveitem directiveitems
       directiveitems : directiveitem'''
#######################################
# Side-channel accumulator: p_opcodeitem appends every opcode operand here.
opcitems = []
def p_opcodeitem(p):
    '''opcodeitem : expression
                  | ","'''
    # Grammar docstring (PLY) — do not edit. Commas are appended too; the
    # emitters below index around them (operands at [0] and [2]).
    opcitems.append(p[1])
def p_opcodeitems(p):
    '''opcodeitems : opcodeitem opcodeitems
       opcodeitems : opcodeitem'''
###############################################################################
def p_expression_binop(p):
    '''expression : expression '+' expression
                  | expression '-' expression
                  | expression '*' expression
                  | expression '/' expression'''
    # p[2] is exactly the operator literal for whichever alternative matched,
    # so one constructor call covers all four cases (the original dispatched
    # through an if/elif chain that rebuilt the same node per operator).
    p[0] = expr_node(p[1], p[2], p[3])
###################
def p_expression_uminus(p):
    "expression : '-' expression"
    # NOTE(review): assumes p[2] is a plain token string; an expr_node operand
    # here would raise TypeError on concatenation — confirm inputs.
    p[0] = "-"+p[2]
###################
def p_expression_group(p):
    "expression : '(' expression ')'"
    # Parentheses only group; propagate the inner expression unchanged.
    p[0] = p[2]
###################
def p_expression_number(p):
    '''expression : NUMBER
                  | HEXNUMBER'''
    # Numeric literals pass through as their lexeme strings.
    p[0] = p[1]
###################
def p_expression_name(p):
    "expression : IDENTIFIER"
    p[0] = p[1]
###########################################################
# opcode
###########################################################
def gen_6502_opcode_LDI(item):
    # LDI Rd, K: load immediate into the zero-page cell mirroring register Rd.
    ctx = item["ctx"]
    opcitems = item["opcitems"]
    pc = item["PC"]
    rval = ""
    dest = opcitems[0]
    if len(opcitems)==4: # reg , mod immv
        mod = opcitems[2]
        immv = int(opcitems[3])
        regno = int(dest[1:])
        if mod=="lo8":
            # lo8(expr): low byte of a 16-bit value (#< in 6502 syntax).
            rval += "lda #<$%04x\n" % (immv)
            rval += "sta $%02x\n" % (regno)
            main_ctx._mos_pc += 4
    elif len(opcitems)==3: # reg , immv
        imm = int(opcitems[2])
        rval += "lda #$%02x\n" % (imm)
        regno = int(dest[1:])
        rval += "sta $%02x\n" % (regno)
        main_ctx._mos_pc += 4
    # NOTE(review): other operand shapes / modifiers fall through and return
    # "" without advancing _mos_pc — confirm this is intended.
    return rval
#############################
def gen_6502_opcode_LDS(item):
    # LDS Rd, addr: load from data memory into the register cell for Rd.
    ctx = item["ctx"]
    opcitems = item["opcitems"]
    addr = opcitems[2] #ctx.mapitem(opcitems[2],comment=False)
    #addr = genabsaddr(addr)
    # NOTE(review): addr is the raw operand (the genabsaddr call is commented
    # out), so `addr<256` may compare a string with an int; both branches also
    # emit the same instruction and differ only in the pc increment — confirm.
    if addr<256:
        rval = "lda %s\n" % (addr)
        main_ctx._mos_pc += 2
    else:
        rval = "lda %s\n" % (addr)
        main_ctx._mos_pc += 3
    reg = genabsaddr(opcitems[0])
    rval += "sta $%02x" % (reg)
    main_ctx._mos_pc += 2
    return rval;
#############################
def gen_6502_opcode_ST(item):
    # ST ptr, Rr: store a register cell through a pointer register (indirect).
    ctx = item["ctx"]
    operands = item["opcitems"]
    pointer = genabsaddr(operands[0])
    value_cell = genabsaddr(operands[2])
    pieces = [
        "lda $%02x\n" % (value_cell),
        "ldy #$00\n",
        "sta ($%02x),y" % (pointer),
    ]
    main_ctx._mos_pc += 4
    return "".join(pieces)
#############################
def gen_6502_opcode_ADD(item):
    # ADD Rd, Rr: clear carry, then add the two register cells into Rd.
    ctx = item["ctx"]
    operands = item["opcitems"]
    rd = genabsaddr(operands[0])
    rr = genabsaddr(operands[2])
    pieces = [
        "clc\n",
        "lda $%02x\n" % (rr),
        "adc $%02x\n" % (rd),
        "sta $%02x" % (rd),
    ]
    main_ctx._mos_pc += 7
    return "".join(pieces)
#############################
def gen_6502_opcode_ADC(item):
    # ADC Rd, Rr: add with carry. No clc here — the carry chains from the
    # preceding ADD/ADC, mirroring AVR multi-byte addition.
    ctx = item["ctx"]
    opcitems = item["opcitems"]
    dest = genabsaddr(opcitems[0])
    src = genabsaddr(opcitems[2])
    rval = "lda $%02x\n" % (src)
    rval += "adc $%02x\n" % (dest)
    rval += "sta $%02x" % (dest)
    main_ctx._mos_pc += 6
    return rval
#############################
def gen_6502_opcode_SBC(item):
    # Intentionally disabled: 'sbc' is missing from t_OPCODE because it makes
    # the sbc/sbci parse ambiguous (see header note), so reaching this
    # emitter is a hard error. Everything below the assert is unreachable.
    assert(False)
    ctx = item["ctx"]
    opcitems = item["opcitems"]
    dest = genabsaddr(opcitems[0])
    src = genabsaddr(opcitems[2])
    rval = "lda $%02x\n" % (src)
    rval += "sbc $%02x\n" % (dest)
    rval += "sta $%02x" % (dest)
    main_ctx._mos_pc += 6
    return rval
#############################
def gen_6502_opcode_SBCI(item):
    # SBCI Rd, K: subtract immediate with borrow.
    ctx = item["ctx"]
    opcitems = item["opcitems"]
    reg = genabsaddr(opcitems[0])
    imm = opcitems[2]
    if imm == "lo8":
        # lo8(expr) form: the actual value sits one operand further.
        imm = int(opcitems[3])
        imm = imm&0xff;
    else:
        imm = int(imm)
        if imm<0:
            imm = imm&0xff;  # two's-complement wrap to a byte
    # No 'sec' here: SBCI honours the incoming borrow, unlike SUBI below.
    rval = "lda $%02x\n" % (reg)
    rval += "sbc #$%02x\n" % (imm)
    rval += "sta $%02x" % (reg)
    main_ctx._mos_pc += 6
    return rval
#############################
def gen_6502_opcode_SUBI(item):
    # SUBI Rd, K: subtract immediate -> lda / sec / sbc # / sta on the
    # zero-page cell mirroring Rd ('sec' because SUBI ignores incoming borrow).
    ctx = item["ctx"]
    opcitems = item["opcitems"]
    reg = genabsaddr(opcitems[0])
    imm = opcitems[2]
    if imm == "lo8":
        # lo8(expr) form: the actual value sits one operand further.
        imm = int(opcitems[3])
        imm = imm&0xff;
    else:
        imm = int(imm)
        if imm<0:
            imm = imm&0xff;  # two's-complement wrap to a byte
    # Debug trace. FIX: was a bare Python-2 `print "..." % (...)` statement
    # (a syntax error under Python 3); the parenthesized form prints the same
    # text on both interpreters and matches the print() calls used elsewhere
    # in this file.
    print("reg<%s> imm<%s>\n" % (reg,imm))
    rval = "lda $%02x\n" % (reg)
    rval += "sec\n"
    rval += "sbc #$%02x\n" % (imm)
    rval += "sta $%02x" % (reg)
    main_ctx._mos_pc += 7
    return rval
#############################
def gen_6502_opcode_ADIW(item):
    # ADIW Rd, K: 16-bit add of an immediate to a register pair —
    # low byte first, then high byte with carry into the next cell.
    ctx = item["ctx"]
    opcitems = item["opcitems"]
    dest = genabsaddr(opcitems[0])
    src = genabsaddr(opcitems[2])
    rval = "clc\n"
    rval += "lda #<$%04x\n" % (src)
    rval += "adc $%02x\n" % (dest)
    rval += "sta $%02x\n" % (dest)
    rval += "lda #>$%04x\n" % (src)
    rval += "adc $%02x\n" % (dest+1)
    rval += "sta $%02x\n" % (dest+1)
    main_ctx._mos_pc += 13
    return rval
#############################
def gen_6502_opcode_CPI(item):
    # CPI Rd, K: compare register against immediate.
    ctx = item["ctx"]
    opcitems = item["opcitems"]
    #reg = genabsaddr(opcitems[0])
    rval = ""
    dest = opcitems[0]
    if len(opcitems)==4: # reg , mod immv
        mod = opcitems[2]
        immv = int(opcitems[3])
        regno = int(dest[1:])
        if mod=="lo8":
            rval += "lda #<$%04x\n" % (immv)
            #rval += "sta $%02x\n" % (regno)
            # NOTE(review): `cmp #` treats the register NUMBER as an
            # immediate; a zero-page `cmp $%02x` against the register cell
            # looks more plausible — confirm intent.
            rval += "cmp #$%02x\n" % (regno)
    elif len(opcitems)==3: # reg , immv
        imm = int(opcitems[2])
        rval += "lda #$%02x\n" % (imm)
        regno = int(dest[1:])
        #rval += "sta $%02x\n" % (regno)
        # NOTE(review): same immediate-vs-zero-page concern as above.
        rval += "cmp #$%02x\n" % (regno)
    #imm = genabsaddr(opcitems[2])
    #rval = "lda $%02x\n" % (reg)
    #rval += "cmp #$%02x\n" % (imm)
    main_ctx._mos_pc += 4
    return rval
#############################
def gen_6502_opcode_CPC(item):
    # CPC Rd, Rr: compare with carry — subtract without storing the result.
    ctx = item["ctx"]
    operands = item["opcitems"]
    rd = genabsaddr(operands[0])
    rr = genabsaddr(operands[2])
    emitted = "lda $%02x\n" % (rd)
    emitted = emitted + "sbc $%02x\n" % (rr)
    main_ctx._mos_pc += 4
    return emitted
#############################
def gen_6502_opcode_BRNE(item):
    # BRNE target: branch if not equal. The target AVR address is remapped to
    # the 6502 pc space via _PCMAP when a mapping exists.
    ctx = item["ctx"]
    opcitems = item["opcitems"]
    absaddr = genabsaddr(opcitems[0])
    mapped = absaddr
    if mapped in main_ctx._PCMAP:
        mapped = main_ctx._PCMAP[absaddr]
    curpc = main_ctx._mos_pc
    delta = mapped-curpc
    # NOTE(review): 6502 bne takes a relative offset, but an absolute address
    # is emitted here (delta only appears in the comment) — presumably a later
    # assembler pass resolves it; confirm.
    rval = "bne $%04x ; delta=%d\n" % (mapped,delta)
    main_ctx._mos_pc += 2
    return rval
#############################
def gen_6502_opcode_RET(item):
    # RET maps directly to a one-byte rts.
    main_ctx._mos_pc += 1
    return "rts ; RET"
#############################
def gen_6502_opcode_WORD(item):
    # .word directive: emit a 16-bit literal, annotated with the caller's
    # dump() rendition of the item.
    ctx = item["ctx"]
    dump = item["dump"]
    comment = dump(item)
    opcitems = item["opcitems"][0]
    # NOTE(review): indexing [0]/[1] into the first operand group — element
    # shape (string vs list) is not visible here; confirm.
    wordval = int(opcitems[1])
    if opcitems[0]=='-':
        wordval = 65536-wordval  # two's-complement encode a negative word
    rval = ".WORD $%04x; %s\n" % (wordval,comment)
    main_ctx._mos_pc += 2
    return rval
#############################
def gen_6502_opcode_LABEL(item):
    # Emit a label definition line at the current output position.
    ctx = item["ctx"]  # read kept for parity with the other emitters
    return "%s: ; LABEL" % (item["name"])
#############################
def gen_6502_opcode_ASSIGN(item):
ctx = item["ctx"]
name = item["name"]
| |
= {}
assert pmnc.versioned.foo(1, 2, biz = "baz", __module_properties = module_props) == \
((1, 2), { "biz": "baz" })
assert module_props == { "version": 1 }
module_props["version"] = "should be a copy"
write_module("versioned.py",
"__all__ = ['foo']\n"
"def foo(*args, **kwargs):\n"
" return args, kwargs\n"
"# EOF")
assert pmnc.versioned.foo(1, 2, biz = "baz", __module_properties = module_props) == \
((1, 2), { "biz": "baz" })
assert module_props == { "version": 2 }
write_module("versioned.py",
"*broken*\n"
"# EOF")
assert pmnc.versioned.foo(__module_properties = module_props) == ((), {})
assert module_props == { "version": 2 }
print("ok")
###################################
print("attribute lookup and typecheck: ", end = "")
fake_request(30.0)
# static lookup
write_module(os_path.join("..", ".shared", "foo.py"),
"__all__ = ['foo', 'not_there']\n"
"from typecheck import either\n"
"def foo(arg: either(str, int)) -> int:\n"
" return arg\n"
"def bar(arg: either(str, int)) -> str:\n"
" return arg\n"
"# EOF")
assert pmnc.foo.foo(1) == 1
with expected(InputParameterError):
pmnc.foo.foo(1.0)
with expected(ReturnValueError):
pmnc.foo.foo("foo")
with expected(InvalidMethodAccessError("attribute bar is not "
"declared in __all__ list of module foo")):
pmnc.foo.bar
with expected(AttributeError, ".*not_there.*"):
pmnc.foo.not_there
# dynamic lookup
write_module(os_path.join("..", ".shared", "foo.py"),
"__all__ = ['__get_module_attr__', 'foo']\n"
"from typecheck import either\n"
"def foo(arg: either(str, int), *, __source_module_name) -> int:\n"
" assert __source_module_name == '__main__'\n"
" return arg\n"
"def bar(arg: either(str, int), *, __source_module_name) -> str:\n"
" assert __source_module_name == '__main__'\n"
" return arg\n"
"def __get_module_attr__(name, *, __source_module_name):\n"
" assert __source_module_name == '__main__'\n"
" if name == 'bar':\n"
" return bar\n"
" raise AttributeError(name)\n"
"# EOF")
assert pmnc.foo.foo(1) == 1
with expected(InputParameterError):
pmnc.foo.foo(1.0)
with expected(ReturnValueError):
pmnc.foo.foo("foo")
assert pmnc.foo.bar("bar") == "bar"
with expected(InputParameterError):
pmnc.foo.bar(1.0)
with expected(ReturnValueError):
pmnc.foo.bar(1)
with expected(AttributeError("not_there")):
pmnc.foo.not_there
# complex dynamic lookup
write_module(os_path.join("..", ".shared", "foo.py"),
"__all__ = ['__get_module_attr__']\n"
"def wrap(name, *, __source_module_name):\n"
" def wrapped():\n"
" return name, __source_module_name\n"
" return wrapped\n"
"def __get_module_attr__(name, *, __source_module_name):\n"
" assert __source_module_name == 'bar'\n"
" if name == 'wrap': return wrap\n"
"# EOF")
write_module(os_path.join("..", ".shared", "bar.py"),
"__all__ = ['__get_module_attr__']\n"
"def __get_module_attr__(name, *, __source_module_name):\n"
" assert __source_module_name == '__main__'\n"
" return pmnc.foo.wrap(name)\n"
"# EOF")
assert pmnc.bar.foo() == ("foo", "bar")
# dynamic lookup vs. conventional lookup
write_module(os_path.join("..", ".shared", "biz.py"),
"__all__ = ['__get_module_attr__', 'have']\n"
"def __get_module_attr__(name, *, __source_module_name):\n"
" if name == 'provide':\n"
" return provide\n"
" else:\n"
" return lambda: 'this i dont have'\n"
"def have():\n"
" return 'to have'\n"
"def have_not():\n"
" return 'not to have'\n"
"def provide():\n"
" return 'this i will provide'\n"
"# EOF")
assert pmnc.biz.have() == "to have";
assert pmnc.biz.not_there() == "this i dont have";
assert pmnc.biz.have_not() == "this i dont have";
assert pmnc.biz.provide() == "this i will provide";
print("ok")
###################################
print("intermediate call attributes: ", end = "")
fake_request(30.0)
write_module(os_path.join("..", ".shared", "attrs.py"),
"__all__ = ['foo', 'baz']\n"
"def foo(*, __call_attributes):\n"
" return __call_attributes\n"
"def baz():\n"
" pass\n"
"# EOF")
assert pmnc.attrs.foo() == []
assert pmnc.attrs.foo.bar() == [ "bar" ]
assert pmnc.attrs.foo.bar.biz() == [ "bar", "biz" ]
assert pmnc.attrs.baz() is None
with expected(InvalidMethodAccessError("method baz does not support intermediate call attributes")):
pmnc.attrs.baz.foo
print("ok")
###################################
print("international characters: ", end = "")
fake_request(30.0)
rus = "\u0410\u0411\u0412\u0413\u0414\u0415\u0401\u0416\u0417\u0418\u0419" \
"\u041a\u041b\u041c\u041d\u041e\u041f\u0420\u0421\u0422\u0423\u0424" \
"\u0425\u0426\u0427\u0428\u0429\u042c\u042b\u042a\u042d\u042e\u042f" \
"\u0430\u0431\u0432\u0433\u0434\u0435\u0451\u0436\u0437\u0438\u0439" \
"\u043a\u043b\u043c\u043d\u043e\u043f\u0440\u0441\u0442\u0443\u0444" \
"\u0445\u0446\u0447\u0448\u0449\u044c\u044b\u044a\u044d\u044e\u044f"
write_module(os_path.join("..", ".shared", "rus.py"),
"#!/usr/bin/env python\n"
"#-*- coding: cp866 -*-\n"
"__all__ = ['get_rus']\n"
"rus = '" + rus + "'\n"
"def get_rus():\n"
" return rus\n"
"# EOF", "cp866")
assert pmnc.rus.get_rus() == rus
print("ok")
###################################
print("compiled module: ", end = "")
fake_request(30.0)
py_name = os_path.join(cage_dir, "pyc.py")
pyc_name = os_path.join(cage_dir, "pyc.pyc")
write_module(py_name,
"__all__ = ['get_name']\n"
"def get_name():\n"
" return __name__\n"
"# (NO LONGER NEEDED) EOF")
Popen([ python, "-c", "import pyc" ], cwd = cage_dir).wait()
try:
from imp import get_tag
except ImportError:
pass
else:
pycache_name = os_path.join(cage_dir, "__pycache__", "pyc.{0:s}.pyc".format(get_tag()))
assert os_path.isfile(pycache_name)
rename(pycache_name, pyc_name)
assert not os_path.isfile(pycache_name)
remove(py_name)
assert not os_path.isfile(py_name)
assert os_path.isfile(pyc_name)
assert pmnc.pyc.get_name() == "pyc"
print("ok")
###################################
print("module reload timeout: ", end = "")
fake_request(0.1)
sleep(0.5)
write_module(os_path.join("..", ".shared", "reload_timeout.py"),
"__all__ = ['foo']\n"
"def foo():\n"
" return 1\n"
"# EOF")
with expected(ModuleReloadTimedOutError("request deadline waiting for exclusive access to module reload_timeout")):
pmnc.reload_timeout.foo()
fake_request(3.0)
assert pmnc.reload_timeout.foo() == 1
print("ok")
###################################
print("__all__ declaration: ", end = "")
fake_request(30.0)
write_module(os_path.join("..", ".shared", "all_test.py"),
"def inaccessible(): pass\n"
"class Inaccessible(): pass\n"
"# EOF")
with expected(InvalidMethodAccessError("attribute inaccessible is not declared "
"in __all__ list of module all_test")):
pmnc.all_test.inaccessible()
with expected(InvalidMethodAccessError("attribute Inaccessible is not declared "
"in __all__ list of module all_test")):
pmnc.all_test.Inaccessible()
sleep(1.5)
write_module(os_path.join("..", ".shared", "all_test.py"),
"__all__ = ['foo']\n"
"def inaccessible2(): pass\n"
"class Inaccessible2(): pass\n"
"# EOF")
with expected(InvalidMethodAccessError("attribute inaccessible2 is not declared "
"in __all__ list of module all_test")):
pmnc.all_test.inaccessible2()
with expected(InvalidMethodAccessError("attribute Inaccessible2 is not declared "
"in __all__ list of module all_test")):
pmnc.all_test.Inaccessible2()
sleep(1.5)
write_module(os_path.join("..", ".shared", "all_test.py"),
"__all__ = ['accessible', 'Accessible']\n"
"def accessible(): pass\n"
"class Accessible(): pass\n"
"# EOF")
pmnc.all_test.accessible()
pmnc.all_test.Accessible()
sleep(1.5)
write_module(os_path.join("..", ".shared", "all_test.py"),
"__all__ = [1]\n"
"# EOF")
with expected(AssertionError("__all__ attribute must be a list of strings")):
pmnc.all_test
print("ok")
###################################
print("one module loads another and vice versa: ", end = "")
fake_request(30.0)
write_module(os_path.join("..", ".shared", "re_reload_1.py"),
"__all__ = ['f', 'g']\n"
"result = None\n"
"def f():\n"
" global result\n"
" result = 'ok'\n"
" return pmnc.re_reload_2.h()\n"
"def g():\n"
" return result\n"
"# EOF")
write_module(os_path.join("..", ".shared", "re_reload_2.py"),
"__all__ = ['h']\n"
"def h():\n"
" return pmnc.re_reload_1.g()\n"
"# EOF")
assert pmnc.re_reload_1.f() == "ok"
print("ok")
###################################
print("remote cage calls: ", end = "")
fake_request(30.0)
write_module("remote_call.py",
"__all__ = ['execute_sync', 'execute_async', 'test_sync',\n"
" 'test_async_1', 'test_async_2', 'test_async_3', 'test_async_4']\n"
"def execute_sync(cage, module, method, args, kwargs, **options):\n"
" return 'sync', cage, module, method, args, kwargs, options\n"
"def execute_async(cage, module, method, args, kwargs, **options):\n"
" return 'async', cage, module, method, args, kwargs, options\n"
"def test_sync(*args, **kwargs):\n"
" return pmnc('sync_cage', opt_1 = 'aaa').foo.bar(*args, **kwargs)\n"
"def test_async_1(*args, **kwargs):\n"
" return pmnc('async_cage_1:retry', opt_2 = 'bbb').biz.baz(*args, **kwargs)\n"
"def test_async_2(*args, **kwargs):\n"
" return pmnc('async_cage_2', queue = 'queue', opt_3 = 'ccc').tic.tac(*args, **kwargs)\n"
"def test_async_3(*args, **kwargs):\n"
" return pmnc(':retry', opt_4 = 'ddd').zip.zap(*args, **kwargs)\n"
"def test_async_4(*args, **kwargs):\n"
" return pmnc(queue = 'queue', opt_5 = 'eee').abc.cba(*args, **kwargs)\n"
"# EOF")
assert pmnc.remote_call.test_sync(1, "2", foo = "bar") == \
("sync", "sync_cage", "foo", "bar", (1, "2"), {"foo": "bar"}, {"opt_1": "aaa"})
assert pmnc.remote_call.test_async_1(3, "4", biz = "baz") == \
("async", "async_cage_1", "biz", "baz", (3, "4"), {"biz": "baz"}, {"opt_2": "bbb"})
assert pmnc.remote_call.test_async_2(5, "6", ppp = "vvv") == \
("async", "async_cage_2", "tic", "tac", (5, "6"), {"ppp": "vvv"}, {"queue": "queue", "opt_3": "ccc"})
assert pmnc.remote_call.test_async_3(7, "8", sss = "ttt") == \
("async", "cage", "zip", "zap", (7, "8"), {"sss": "ttt"}, {"opt_4": "ddd"})
assert pmnc.remote_call.test_async_4(9, "10", ggg = "hhh") == \
("async", "cage", "abc", "cba", (9, "10"), {"ggg": "hhh"}, {"queue": "queue", "opt_5": "eee"})
write_module("reverse_call.py",
"__all__ = ['execute_reverse', 'test_reverse']\n"
"def execute_reverse(cage, module, method, args, kwargs, **options):\n"
" return 'reverse', cage, module, method, args, kwargs, options\n"
"def test_reverse(*args, **kwargs):\n"
" return pmnc('reverse_cage:reverse', opt_6 = 'fff').ping.pong(*args, **kwargs)\n"
"# EOF")
assert pmnc.reverse_call.test_reverse(11, "12", qqq = "rrr") == \
("reverse", "reverse_cage", "ping", "pong", (11, "12"), {"qqq": "rrr"}, {"opt_6": "fff"})
print("ok")
###################################
print("sys modules can't be reloaded: ", end = "")
fake_request(30.0)
write_module("time.py",
"# EOF")
with expected(ModuleAlreadyImportedError):
pmnc.time
print("ok")
###################################
print("modules can be marked as not reloadable: ", end = "")
fake_request(30.0)
write_module("stateful.py",
"__reloadable__ = False\n"
"__all__ = ['get_version']\n"
"def get_version():\n"
" return 1\n"
"# EOF")
assert pmnc.stateful.get_version() == 1
write_module("stateful.py",
"__all__ = ['get_version']\n"
"def get_version():\n"
" return 2\n"
"# EOF")
assert pmnc.stateful.get_version() == 1
print("ok")
###################################
print("class instance lifetime: ", end = "")
fake_request(30.0)
write_module("instance.py",
"__all__ = ['SomeClass', 'get_version']\n"
"class SomeClass:\n"
" def __init__(self, *args, **kwargs):\n"
" self._args, self._kwargs = args, kwargs\n"
" def get_init_args(self):\n"
" return self._args, self._kwargs\n"
" def get_class_version(self):\n"
" return 'A'\n"
" def get_static_module_version(self):\n"
" return get_version()\n"
" def get_dynamic_module_version(self):\n"
" return pmnc.instance.get_version()\n"
"def get_version():\n"
" return 1\n"
"# EOF")
sc = pmnc.instance.SomeClass("foo", "bar", biz = "baz")
assert pmnc.instance.get_version() == 1
assert sc.get_init_args() == (("foo", "bar"), {"biz": "baz"})
assert sc.get_static_module_version() == 1
assert sc.get_dynamic_module_version() == 1
assert sc.get_class_version() == 'A'
write_module("instance.py",
"__all__ = ['SomeClass', 'get_version']\n"
"class SomeClass:\n"
" def __init__(self, *args, **kwargs):\n"
" self._args, self._kwargs = args, kwargs\n"
" def get_init_args(self):\n"
" return self._args, self._kwargs\n"
" def get_class_version(self):\n"
" return 'B'\n"
" def get_static_module_version(self):\n"
" return get_version()\n"
" def get_dynamic_module_version(self):\n"
" return pmnc.instance.get_version()\n"
"def get_version():\n"
" return 2\n"
"# EOF")
sc2 = pmnc.instance.SomeClass("foo2", "bar2", biz2 = "baz2")
| |
height = int(height)
# Get the previous surface if the width/height is the same
if width == self._widgets_surface_last[0] and \
height == self._widgets_surface_last[1]:
self._widgets_surface = self._widgets_surface_last[2]
else:
self._widgets_surface = make_surface(width, height)
self._widgets_surface_last = (width, height, self._widgets_surface)
# Set position
self._scrollarea.set_world(self._widgets_surface)
self._scrollarea.set_position(*self.get_position())
# Check if the scrollbars changed
sx, sy = self._get_scrollbar_thickness()
if (sx, sy) != self._last_scroll_thickness[0] and \
self._last_scroll_thickness[1] == 0:
self._last_scroll_thickness[0] = (sx, sy)
self._last_scroll_thickness[1] += 1
self._widgets_surface_need_update = True
self._render()
else:
self._last_scroll_thickness[1] = 0
# Update times
dt = time.time() - t0
self._stats.total_building_time += dt
self._stats.last_build_surface_time = dt
def _check_id_duplicated(self, widget_id: str) -> None:
"""
Check if widget ID is duplicated. Throws ``IndexError`` if the index is
duplicated.
:param widget_id: New widget ID
:return: None
"""
assert isinstance(widget_id, str)
for widget in self._widgets:
if widget.get_id() == widget_id:
raise IndexError(
'widget id "{0}" already exists on the current menu ({1})'
''.format(widget_id, widget.get_class_id())
)
    def _close(self) -> bool:
        """
        Execute close callbacks and disable the Menu, only if ``onclose`` is not
        None (or :py:mod:`pygame_menu.events.NONE`).

        :return: ``True`` if the Menu has executed the ``onclose`` callback
        """
        onclose = self._onclose
        # Apply action
        if onclose is None or onclose == _events.NONE:
            return False
        else:
            # Closing disables the Menu
            self.disable()
            # If action is an event
            if _events.is_event(onclose):
                # Sort through events
                if onclose == _events.BACK:
                    self.reset(1)
                elif onclose == _events.CLOSE:
                    # CLOSE only disables (done above); nothing more to do.
                    pass
                elif onclose == _events.EXIT:
                    self._exit()
                elif onclose == _events.RESET:
                    self.full_reset()
            # If action is callable (function)
            elif is_callable(onclose):
                try:
                    onclose(self)
                except TypeError:
                    # Callback may be declared without arguments; retry bare.
                    onclose()
        return True
    def close(self) -> bool:
        """
        Closes the **current** Menu firing ``onclose`` callback. If ``callback=None``
        this method does nothing.

        .. warning::

            This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
            for example, ``menu.get_current().reset(...)``.

        :return: ``True`` if the Menu has executed the ``onclose`` callback
        """
        if not self.is_enabled():
            # NOTE(review): throw() policy (warn vs raise) is defined elsewhere;
            # when it does not raise, _close() is still attempted below.
            self._current._runtime_errors.throw(
                self._current._runtime_errors.close, 'menu already closed'
            )
        return self._current._close()
def _get_depth(self) -> int:
"""
Return the Menu depth.
:return: Menu depth
"""
if self._top is None:
return 0
prev = self._top._prev
depth = 0
if prev is not None:
while True:
if prev is not None:
prev = prev[0]
depth += 1
else:
break
return depth
    def disable(self) -> 'Menu':
        """
        Disables the Menu *(doesn't check events and draw on the surface)*.

        .. note::

            This method does not fires ``onclose`` callback. Use ``Menu.close()``
            instead.

        :return: Self reference
        """
        # Force-clear hover state so no widget stays "moused over" while hidden.
        check_widget_mouseleave(force=True)
        self._top._enabled = False
        return self
    def set_relative_position(self, position_x: NumberType, position_y: NumberType) -> 'Menu':
        """
        Set the Menu position relative to the window.

        .. note::

            - Menu left position (x) must be between ``0`` and ``100``, if ``0``
              the margin is at the left of the window, if ``100`` the Menu is at
              the right of the window.
            - Menu top position (y) must be between ``0`` and ``100``, if ``0``
              the margin is at the top of the window, if ``100`` the margin is at
              the bottom of the window.

        .. note::

            This is applied only to the base Menu (not the currently displayed,
            stored in ``_current`` pointer); for such behaviour apply to
            :py:meth:`pygame_menu.menu.Menu.get_current` object.

        :param position_x: Left position of the window
        :param position_y: Top position of the window
        :return: Self reference
        """
        assert isinstance(position_x, NumberInstance)
        assert isinstance(position_y, NumberInstance)
        assert 0 <= position_x <= 100
        assert 0 <= position_y <= 100
        # Normalize percentages to [0, 1] factors of the free space.
        position_x = float(position_x) / 100
        position_y = float(position_y) / 100
        window_width, window_height = self._window_size
        self._position = (int((window_width - self._width) * position_x),
                          int((window_height - self._height) * position_y))
        self._widgets_surface = None  # This forces an update of the widgets
        return self
    def center_content(self) -> 'Menu':
        """
        Centers the content of the Menu vertically. This action rewrites ``widget_offset``.

        .. note::

            If the height of the widgets is greater than the height of the Menu,
            the drawing region will cover all Menu inner surface.

        .. note::

            This is applied only to the base Menu (not the currently displayed,
            stored in ``_current`` pointer); for such behaviour apply to
            :py:meth:`pygame_menu.menu.Menu.get_current` object.

        :return: Self reference
        """
        self._stats.center_content += 1
        if len(self._widgets) == 0:  # If this happen, get_widget_max returns an immense value
            self._widget_offset[1] = 0
            return self
        if self._widgets_surface is None:
            self._update_widget_position()  # For position (max/min)
        available = self.get_height(inner=True)
        widget_height = self.get_height(widget=True)
        if widget_height >= available:  # There's nothing to center
            if self._widget_offset[1] != 0:
                self._widgets_surface = None
                self._widget_offset[1] = 0
            return self
        new_offset = int(max(float(available - widget_height) / 2, 0))
        # Only rebuild if the offset moved by more than one pixel —
        # presumably to avoid re-render churn from rounding; confirm.
        if abs(new_offset - self._widget_offset[1]) > 1:
            self._widget_offset[1] = new_offset
            self._widgets_surface = None  # Rebuild on the next draw
        return self
    def _get_scrollbar_thickness(self) -> Tuple2IntType:
        """
        Return the scrollbar thickness from x-axis and y-axis (horizontal and vertical).

        :return: Scrollbar thickness in px
        """
        # (horizontal, vertical): px subtracted from inner height and width.
        return self._scrollarea.get_scrollbar_thickness(ORIENTATION_HORIZONTAL), \
               self._scrollarea.get_scrollbar_thickness(ORIENTATION_VERTICAL)
def get_width(self, inner: bool = False, widget: bool = False) -> int:
"""
Get the Menu width.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param inner: If ``True`` returns the available width (menu width minus scroll if visible)
:param widget: If ``True`` returns the total width used by the widgets
:return: Width in px
"""
if widget:
return int(self._widget_max_position[0] - self._widget_min_position[0])
if not inner:
return int(self._width)
return int(self._width - self._get_scrollbar_thickness()[1])
def get_height(self, inner: bool = False, widget: bool = False) -> int:
"""
Get the Menu height.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param inner: If ``True`` returns the available height (menu height minus scroll and menubar)
:param widget: If ``True`` returns the total height used by the widgets
:return: Height in px
"""
if widget:
return int(self._widget_max_position[1] - self._widget_min_position[1])
if not inner:
return int(self._height)
return int(self._height - self._menubar.get_height() - self._get_scrollbar_thickness()[0])
def get_size(self, inner: bool = False, widget: bool = False) -> Vector2IntType:
"""
Return the Menu size as a tuple of (width, height) in px.
.. note::
This is applied only to the base Menu (not the currently displayed,
stored in ``_current`` pointer); for such behaviour apply to
:py:meth:`pygame_menu.menu.Menu.get_current` object.
:param inner: If ``True`` returns the available (width, height) (menu height minus scroll and menubar)
:param widget: If ``True`` returns the total (width, height) used by the widgets
:return: Tuple of (width, height) in px
"""
return self.get_width(inner=inner, widget=widget), self.get_height(inner=inner, widget=widget)
    def render(self) -> 'Menu':
        """
        Force the **current** Menu to render. Useful to force widget update.

        .. note::

            This method should not be called if the Menu is being drawn as this
            method is called by :py:meth:`pygame_menu.menu.Menu.draw`

        .. warning::

            This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
            for example, ``menu.get_current().render(...)``

        :return: Self reference **(current)**
        """
        # Dropping the surface forces _render() to rebuild it from scratch.
        self._current._widgets_surface = None
        self._current._render()
        self._current._stats.render_public += 1
        return self
def _render(self) -> bool:
"""
Menu rendering.
:return: ``True`` if the surface has changed (if it was ``None``)
"""
t0 = time.time()
changed = False
if self._widgets_surface_need_update:
self._widgets_surface = None
if self._widgets_surface is None:
self._widgets_surface_need_update = False
if self._auto_centering:
self.center_content()
self._build_widget_surface()
self._stats.render_private += 1
changed = True
self._stats.total_rendering_time += time.time() - t0
return changed
def draw(self, surface: 'pygame.Surface', clear_surface: bool = False) -> 'Menu':
"""
Draw the **current** Menu into the given surface.
.. warning::
This method should not be used along :py:meth:`pygame_menu.menu.Menu.get_current`,
for example, ``menu.get_current().draw(...)``
:param surface: Pygame surface to draw the Menu
:param clear_surface: Clear surface using theme default color
:return: Self reference **(current)**
"""
assert isinstance(surface, pygame.Surface)
assert isinstance(clear_surface, bool)
if not self.is_enabled():
self._current._runtime_errors.throw(self._current._runtime_errors.draw, 'menu is not enabled')
return self._current
if self._current._disable_draw:
return self._current
# Render menu; if True, the surface widget has changed, thus cache should
# change if enabled
render = self._current._render()
# Updates title
if self._current._theme.title_updates_pygame_display and \
pygame.display.get_caption()[0] != self._current.get_title():
pygame.display.set_caption(self._current.get_title())
# Clear surface
if clear_surface:
surface.fill(self._current._theme.surface_clear_color)
# Call background function (set from mainloop)
if self._top._background_function[1] is not None:
if self._top._background_function[0]:
self._top._background_function[1](self._current)
else:
self._top._background_function[1]()
# Draw the prev decorator
self._current._decorator.draw_prev(surface)
# Draw widgets, update cache if enabled
if not self._current._widget_surface_cache_enabled or | |
LA5_1 = self.input.LA(2)
if (LA5_1 == 93) :
alt5 = 2
elif ((0 <= LA5_1 <= 92) or (94 <= LA5_1 <= 65535)) :
alt5 = 1
elif ((0 <= LA5_0 <= 92) or (94 <= LA5_0 <= 65535)) :
alt5 = 1
if alt5 == 1:
# src/SavedFSM/Monitor.g:166:46: .
pass
self.matchAny()
else:
break #loop5
self.match("]]")
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "ANNOTATION"
# $ANTLR start "ML_COMMENT"
def mML_COMMENT(self, ):
    """Lex a '/*' ... '*/' multi-line comment and route it to the HIDDEN channel.

    Machine-generated by ANTLR from Monitor.g -- do not hand-edit the logic.
    """
    try:
        _type = ML_COMMENT
        _channel = DEFAULT_CHANNEL

        # src/SavedFSM/Monitor.g:169:5: ( '/*' ( options {greedy=false; } : . )* '*/' )
        # src/SavedFSM/Monitor.g:169:9: '/*' ( options {greedy=false; } : . )* '*/'
        pass
        self.match("/*")

        # Non-greedy scan: consume any character until the closing '*/'
        # ('*' is 42, '/' is 47 in the lookahead tests below).
        # src/SavedFSM/Monitor.g:169:14: ( options {greedy=false; } : . )*
        while True: #loop6
            alt6 = 2
            LA6_0 = self.input.LA(1)

            if (LA6_0 == 42) :
                LA6_1 = self.input.LA(2)

                if (LA6_1 == 47) :
                    alt6 = 2
                elif ((0 <= LA6_1 <= 46) or (48 <= LA6_1 <= 65535)) :
                    alt6 = 1

            elif ((0 <= LA6_0 <= 41) or (43 <= LA6_0 <= 65535)) :
                alt6 = 1

            if alt6 == 1:
                # src/SavedFSM/Monitor.g:169:41: .
                pass
                self.matchAny()

            else:
                break #loop6

        self.match("*/")
        #action start
        _channel=HIDDEN;
        #action end

        self._state.type = _type
        self._state.channel = _channel
    finally:
        pass
# $ANTLR end "ML_COMMENT"
# $ANTLR start "LINE_COMMENT"
def mLINE_COMMENT(self, ):
    """Lex a '//' single-line comment (terminated by LF) onto the HIDDEN channel.

    Machine-generated by ANTLR from Monitor.g -- do not hand-edit the logic.
    """
    try:
        _type = LINE_COMMENT
        _channel = DEFAULT_CHANNEL

        # src/SavedFSM/Monitor.g:172:14: ( '//' ( options {greedy=false; } : . )* '\\n' )
        # src/SavedFSM/Monitor.g:172:16: '//' ( options {greedy=false; } : . )* '\\n'
        pass
        self.match("//")

        # Non-greedy scan: consume every character up to (not including) LF (10).
        # src/SavedFSM/Monitor.g:172:21: ( options {greedy=false; } : . )*
        while True: #loop7
            alt7 = 2
            LA7_0 = self.input.LA(1)

            if (LA7_0 == 10) :
                alt7 = 2
            elif ((0 <= LA7_0 <= 9) or (11 <= LA7_0 <= 65535)) :
                alt7 = 1

            if alt7 == 1:
                # src/SavedFSM/Monitor.g:172:48: .
                pass
                self.matchAny()

            else:
                break #loop7

        self.match(10)
        #action start
        _channel=HIDDEN;
        #action end

        self._state.type = _type
        self._state.channel = _channel
    finally:
        pass
# $ANTLR end "LINE_COMMENT"
# $ANTLR start "StringLiteral"
def mStringLiteral(self, ):
    """Lex a double-quoted string literal; backslashes and quotes are not
    permitted inside (there is no escape syntax in this grammar).

    Machine-generated by ANTLR from Monitor.g -- do not hand-edit the logic.
    """
    try:
        _type = StringLiteral
        _channel = DEFAULT_CHANNEL

        # src/SavedFSM/Monitor.g:174:14: ( '\"' (~ ( '\\\\' | '\"' ) )* '\"' )
        # src/SavedFSM/Monitor.g:174:16: '\"' (~ ( '\\\\' | '\"' ) )* '\"'
        pass
        self.match(34)

        # Any char except backslash (92) or double quote (34) continues the literal.
        # src/SavedFSM/Monitor.g:174:20: (~ ( '\\\\' | '\"' ) )*
        while True: #loop8
            alt8 = 2
            LA8_0 = self.input.LA(1)

            if ((0 <= LA8_0 <= 33) or (35 <= LA8_0 <= 91) or (93 <= LA8_0 <= 65535)) :
                alt8 = 1

            if alt8 == 1:
                # src/SavedFSM/Monitor.g:174:22: ~ ( '\\\\' | '\"' )
                pass
                if (0 <= self.input.LA(1) <= 33) or (35 <= self.input.LA(1) <= 91) or (93 <= self.input.LA(1) <= 65535):
                    self.input.consume()
                else:
                    # Lookahead said "continue" but the char is not in the set:
                    # report a mismatched-set error and attempt recovery.
                    mse = MismatchedSetException(None, self.input)
                    self.recover(mse)
                    raise mse

            else:
                break #loop8

        self.match(34)

        self._state.type = _type
        self._state.channel = _channel
    finally:
        pass
# $ANTLR end "StringLiteral"
def mTokens(self):
    """Predict the next token with DFA #9 and dispatch to its m<RULE>() method.

    Replaces the generated 57-branch ``elif`` chain with an ordered rule-name
    table: alternative ``k`` (1-based) maps onto ``_RULES[k - 1]`` and invokes
    ``self.m<name>()``. Out-of-range predictions fall through silently, exactly
    like the original chain.
    """
    # Alternative numbers 1..57, in grammar order (see Monitor.g tokens rule).
    _RULES = (
        "INTERACTION", "INT", "STRING", "PLUS", "MINUS", "MULT", "DIV",
        "FULLSTOP", "RESV", "SEND", "TYPE", "VALUE", "BRANCH", "UNORDERED",
        "RECLABEL", "PARALLEL", "PROTOCOL", "ASSERT", "GLOBAL_ESCAPE",
        "EMPTY", "ROLES",
        "T__34", "T__35", "T__36", "T__37", "T__38", "T__39", "T__40",
        "T__41", "T__42", "T__43", "T__44", "T__45", "T__46", "T__47",
        "T__48", "T__49", "T__50", "T__51", "T__52", "T__53", "T__54",
        "T__55", "T__56", "T__57", "T__58", "T__59", "T__60", "T__61",
        "ID", "NUMBER", "WHITESPACE", "ASSERTION", "ANNOTATION",
        "ML_COMMENT", "LINE_COMMENT", "StringLiteral",
    )
    alt9 = self.dfa9.predict(self.input)
    if 1 <= alt9 <= len(_RULES):
        getattr(self, "m" + _RULES[alt9 - 1])()
# lookup tables for DFA #9
# Serialized ANTLR DFA decision tables (machine-generated -- never edit the
# packed strings by hand; regenerate from Monitor.g instead). Each table is a
# run-length-encoded unicode string decoded by DFA.unpack: eot/eof give
# end-of-token states, min/max the per-state lookahead char bounds, accept the
# accepting alternative per state, special the special-state markers.
DFA9_eot = DFA.unpack(
    u"\1\uffff\2\44\3\uffff\1\57\1\uffff\13\44\2\uffff\2\44\4\uffff\1"
    u"\44\1\uffff\7\44\6\uffff\3\44\3\uffff\17\44\1\140\1\141\4\44\1"
    u"\147\1\44\1\151\1\44\1\153\1\154\1\44\1\160\23\44\2\uffff\1\u0084"
    u"\2\44\1\u0087\1\u0088\1\uffff\1\44\1\uffff\1\u008a\2\uffff\3\44"
    u"\1\uffff\3\44\1\u0091\2\44\1\u0094\1\u0095\12\44\1\u00a0\1\uffff"
    u"\1\u00a1\1\44\2\uffff\1\44\1\uffff\6\44\1\uffff\1\44\1\u00ac\2"
    u"\uffff\1\u00ad\6\44\1\u00b4\2\44\2\uffff\6\44\1\u00bd\1\u00be\1"
    u"\u00bf\1\44\2\uffff\1\u00c1\3\44\1\u00c5\1\44\1\uffff\2\44\1\u00c9"
    u"\1\u00ca\4\44\3\uffff\1\44\1\uffff\3\44\1\uffff\3\44\2\uffff\4"
    u"\44\1\u00da\1\44\1\u00dc\1\u00dd\1\44\1\u00df\1\u00e0\2\44\1\u00e3"
    u"\1\44\1\uffff\1\u00e5\2\uffff\1\44\2\uffff\1\u00e7\1\44\1\uffff"
    u"\1\u00e9\1\uffff\1\44\1\uffff\1\u00eb\1\uffff\1\44\1\uffff\1\44"
    u"\1\u00ee\1\uffff"
    )

DFA9_eof = DFA.unpack(
    u"\u00ef\uffff"
    )

DFA9_min = DFA.unpack(
    u"\1\11\1\155\1\164\3\uffff\1\52\1\uffff\2\105\1\131\1\101\1\122"
    u"\1\116\1\101\1\123\1\114\1\115\1\141\2\uffff\1\162\1\156\4\uffff"
    u"\1\145\1\uffff\1\157\1\150\1\162\1\156\1\157\1\171\1\156\6\uffff"
    u"\1\154\1\160\1\162\3\uffff\1\103\1\114\1\116\1\120\1\114\1\101"
    u"\1\117\1\122\1\117\1\123\1\117\1\120\1\157\1\162\1\157\2\60\1\144"
    u"\1\154\1\143\1\156\1\60\1\157\1\60\1\144\2\60\1\157\1\60\1\151"
    u"\1\157\1\151\1\126\1\114\1\105\1\104\1\105\1\125\1\116\1\122\1"
    u"\101\1\124\1\105\1\102\1\124\1\164\1\141\1\155\2\uffff\1\60\2\145"
    u"\2\60\1\uffff\1\151\1\uffff\1\60\2\uffff\2\162\1\157\1\uffff\1"
    u"\156\1\162\1\156\1\60\1\101\1\123\2\60\1\105\1\103\1\104\1\114"
    u"\1\117\1\122\1\101\1\131\1\157\1\154\1\60\1\uffff\1\60\1\141\2"
    u"\uffff\1\143\1\uffff\1\144\1\141\1\144\1\145\1\164\1\147\1\uffff"
    u"\1\102\1\60\2\uffff\1\60\1\110\1\105\1\114\1\103\1\124\1\114\1"
    u"\60\1\143\1\154\2\uffff\1\164\2\145\1\143\2\165\3\60\1\105\2\uffff"
    u"\1\60\1\122\1\105\1\117\1\60\1\137\1\uffff\1\157\1\145\2\60\1\162"
    u"\1\164\1\160\1\143\3\uffff\1\114\1\uffff\1\105\2\114\1\uffff\1"
    u"\105\2\154\2\uffff\1\145\1\151\1\164\1\145\1\60\1\104\2\60\1\123"
    u"\2\60\1\144\1\157\1\60\1\163\1\uffff\1\60\2\uffff\1\103\2\uffff"
    u"\1\60\1\156\1\uffff\1\60\1\uffff\1\101\1\uffff\1\60\1\uffff\1\120"
    u"\1\uffff\1\105\1\60\1\uffff"
    )

DFA9_max = DFA.unpack(
    u"\1\175\1\156\1\164\3\uffff\1\57\1\uffff\1\117\1\105\1\131\1\101"
    u"\1\122\1\116\1\122\1\123\1\114\1\115\1\162\2\uffff\1\162\1\164"
    u"\4\uffff\1\165\1\uffff\1\157\1\150\1\162\1\156\1\157\1\171\1\156"
    u"\6\uffff\1\164\1\160\1\162\3\uffff\1\123\1\114\1\116\1\120\1\114"
    u"\1\101\1\117\1\122\1\117\1\123\1\117\1\120\1\157\1\162\1\157\2"
    u"\172\1\144\1\154\1\160\1\156\1\172\1\157\1\172\1\144\2\172\1\157"
    u"\1\172\1\151\1\157\1\151\1\126\1\114\1\105\1\104\1\105\1\125\1"
    u"\116\1\122\1\101\1\124\1\105\1\102\1\124\1\164\1\141\1\155\2\uffff"
    u"\1\172\2\145\2\172\1\uffff\1\151\1\uffff\1\172\2\uffff\2\162\1"
    u"\157\1\uffff\1\156\1\162\1\156\1\172\1\101\1\123\2\172\1\105\1"
    u"\103\1\104\1\114\1\117\1\122\1\101\1\131\1\157\1\154\1\172\1\uffff"
    u"\1\172\1\141\2\uffff\1\143\1\uffff\1\144\1\162\1\144\1\145\1\164"
    u"\1\147\1\uffff\1\102\1\172\2\uffff\1\172\1\110\1\105\1\114\1\103"
    u"\1\124\1\114\1\172\1\143\1\154\2\uffff\1\164\2\145\1\143\2\165"
    u"\3\172\1\105\2\uffff\1\172\1\122\1\105\1\117\1\172\1\137\1\uffff"
    u"\1\157\1\145\2\172\1\162\1\164\1\160\1\143\3\uffff\1\114\1\uffff"
    u"\1\105\2\114\1\uffff\1\105\2\154\2\uffff\1\145\1\151\1\164\1\145"
    u"\1\172\1\104\2\172\1\123\2\172\1\144\1\157\1\172\1\163\1\uffff"
    u"\1\172\2\uffff\1\103\2\uffff\1\172\1\156\1\uffff\1\172\1\uffff"
    u"\1\101\1\uffff\1\172\1\uffff\1\120\1\uffff\1\105\1\172\1\uffff"
    )

DFA9_accept = DFA.unpack(
    u"\3\uffff\1\4\1\5\1\6\1\uffff\1\10\13\uffff\1\30\1\31\2\uffff\1"
    u"\35\1\36\1\37\1\40\1\uffff\1\43\7\uffff\1\62\1\63\1\64\1\65\1\66"
    u"\1\71\3\uffff\1\67\1\70\1\7\60\uffff\1\33\1\34\5\uffff\1\44\1\uffff"
    u"\1\46\1\uffff\1\56\1\60\3\uffff\1\2\23\uffff\1\55\2\uffff\1\50"
    u"\1\52\1\uffff\1\51\6\uffff\1\11\2\uffff\1\12\1\13\12\uffff\1\32"
    u"\1\41\12\uffff\1\25\1\14\6\uffff\1\24\10\uffff\1\53\1\26\1\3\1"
    u"\uffff\1\15\3\uffff\1\22\3\uffff\1\47\1\45\17\uffff\1\17\1\uffff"
    u"\1\20\1\21\1\uffff\1\27\1\54\2\uffff\1\57\1\uffff\1\16\1\uffff"
    u"\1\61\1\uffff\1\42\1\uffff\1\1\2\uffff\1\23"
    )

DFA9_special = DFA.unpack(
    u"\u00ef\uffff"
    )
DFA9_transition = [
DFA.unpack(u"\2\46\1\uffff\2\46\22\uffff\1\46\1\uffff\1\51\5\uffff"
u"\1\31\1\32\1\5\1\3\1\23\1\4\1\7\1\6\12\45\1\34\1\24\4\uffff\1\47"
u"\1\17\1\14\2\44\1\21\1\44\1\20\10\44\1\16\1\44\1\10\1\11\1\12\1"
u"\15\1\13\4\44\1\50\3\uffff\1\44\1\uffff\1\26\1\42\1\36\1\41\1\40"
u"\1\25\2\44\1\1\5\44\1\37\1\22\1\44\1\33\1\2\1\35\1\43\5\44\1\27"
u"\1\uffff\1\30"),
DFA.unpack(u"\1\53\1\52"),
DFA.unpack(u"\1\54"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\55\4\uffff\1\56"),
DFA.unpack(u""),
DFA.unpack(u"\1\60\11\uffff\1\61"),
DFA.unpack(u"\1\62"),
DFA.unpack(u"\1\63"),
DFA.unpack(u"\1\64"),
DFA.unpack(u"\1\65"),
DFA.unpack(u"\1\66"),
DFA.unpack(u"\1\67\20\uffff\1\70"),
DFA.unpack(u"\1\71"),
DFA.unpack(u"\1\72"),
DFA.unpack(u"\1\73"),
DFA.unpack(u"\1\75\20\uffff\1\74"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\76"),
DFA.unpack(u"\1\101\4\uffff\1\77\1\100"),
| |
# -*- coding: utf-8 -*-
#インポート
import discord
from discord.ext import commands
from discord.ext.commands.errors import BotMissingPermissions
from contextlib import redirect_stdout
from discord.ext.commands import Bot
import random
import asyncio
import aiohttp
import os
import subprocess
import datetime
import time
import ast
import re
import zlib
import io
import requests
import traceback
import json
import textwrap
import platform
import uptime
import string
import star_sky_module as s
# --- Initial definitions -------------------------------------------------
prefix = commands.when_mentioned_or("s!")
bot = commands.Bot(command_prefix=prefix, intents=discord.Intents.all())

# --- Shared constants / state --------------------------------------------
no = '👎'      # rejection reaction
ok = '👍'      # approval reaction
left = '⏪'
right = '⏩'
yl = "⬅"
yr = "➡"
counts = 0
col = random.randint(0, 0xFFFFFF)   # random accent colour
bcol = 0x03a9fc                     # default embed colour
dicenum = random.randint(0, 6)
ver = "4.0a"
release = "0.0.0"
updateinfos = "・Heroku稼働"
act = f"s!help | discord.py {discord.__version__} | Python {platform.python_version()} | Build {ver}"

# --- Permission lists -----------------------------------------------------
admin = [663155028793491477]
subowner = [645068195115565088, 345342072045174795]
# BUG FIX: the first two IDs were fused into one giant number by a missing
# comma (663155028793491477645068195115565088), so neither id ever matched.
all_admin = [663155028793491477, 645068195115565088, 345342072045174795, 584008752005513216]

# --- Settings -------------------------------------------------------------
# Drop the built-in help command (a custom one is presumably registered
# elsewhere in this file -- TODO confirm).
bot.remove_command('help')
#最初の処理
@bot.event
async def on_ready():
    # Startup hook: runs when the gateway connection becomes ready.
    # Loads config.json and the jishaku extension, pins the owner id,
    # then publishes the normal presence.
    print("ログインに成功しました")
    # Temporary "booting" presence while initialisation runs.
    await bot.change_presence(activity = discord.Game(name="起動しています… | Starry Sky Project"),status = discord.Status.idle)
    print(bot.user.name)
    print(bot.user.id)
    print("起動時の情報を送信しています… / Owner")
    # Bot-wide JSON configuration, kept on the bot instance.
    with open("config.json", encoding="utf-8") as f:
        bot.json_config = json.load(f)
    print("[Starry Sky System] config.jsonを読み込みました。")
    # jishaku: owner-only debugging/utility extension.
    bot.load_extension("jishaku")
    print("[Starry Sky System] jishakuを読み込みました。")
    # Hard-coded bot owner id.
    bot.owner_ids = [584008752005513216]
    print("[Starry Sky System] BOTオーナーのIDを、584008752005513216にしました。")
    print("起動時の情報を送信しています… / User")
    print("最終処理を実行しています…")
    # Switch to the normal presence text once setup is complete.
    await bot.change_presence(activity = discord.Game(name=act),status=discord.Status.idle)
    print("[Starry Sky System] アクティビティを設定しました。")
    print("Debug Console.")
    # Dump every guild the bot is currently a member of.
    for allguild in bot.guilds:
        print(allguild)
    print("[Starry Sky System] All Done. 全ての初回起動動作は正常に終了しました。")
#関数群
def cleanup_code(content):
    """Strip Markdown code fencing from *content*.

    A triple-backtick fenced block loses its first and last lines (the
    fences); any other input just has stray backticks / whitespace trimmed
    from both ends.
    """
    fenced = content.startswith('```') and content.endswith('```')
    if not fenced:
        return content.strip('` \n')
    lines = content.split('\n')
    return '\n'.join(lines[1:-1])
#デバッグ系コード
@bot.command(name="eval",description="Pythonのソースを評価するよ。\n`BOT運営全員`が使用できるね。")
async def eval_(ctx, *, code: str):
    """Execute arbitrary Python sent by bot staff and report stdout / result.

    SECURITY: this is deliberate remote code execution for the hard-coded
    admin/subowner/owner ids only -- never widen the permission check.
    """
    # BUG FIX: the owner-id literal had been redacted to "<PASSWORD>";
    # restored to the owner id configured in on_ready().
    if ctx.author.id in admin or ctx.author.id in subowner or ctx.author.id == 584008752005513216:
        # Namespace exposed to the evaluated snippet.
        env = {
            "client": bot,
            "discord": discord,
            "commands": commands,
            "ctx": ctx,
            "import": __import__,
            "bot": bot,
            "time": time,
            "platform": platform,
            "os": os,
            "subprocess": subprocess,
            "_message": ctx.message,
            "_guild": ctx.guild,
            "_author": ctx.author,
            "_channel": ctx.channel,
            "_msg": ctx.message,
            "_mes": ctx.message,
            "_send": ctx.send,
        }
        env.update(globals())
        code = cleanup_code(code)
        stdout = io.StringIO()
        # Wrap the snippet in an async function so `await` works inside it.
        to_compile = f'async def func():\n{textwrap.indent(code, " ")}'
        try:
            exec(to_compile, env)
        except Exception as e:
            await ctx.message.add_reaction("❌")
            return await ctx.send(f'```py\n{e.__class__.__name__}: {e}\n```')
        func = env['func']
        try:
            with redirect_stdout(stdout):
                ret = await func()
        except Exception as e:
            # Snippet raised: show captured stdout plus the traceback.
            value = stdout.getvalue()
            await ctx.message.add_reaction("❌")
            await ctx.send(f'```py\n{value}{traceback.format_exc()}\n```')
        else:
            value = stdout.getvalue()
            try:
                await ctx.message.add_reaction("✅")
            except:
                pass
            if ret is None:
                if value:
                    await ctx.send(f'```py\n{value}\n```')
            else:
                _last_result = ret
                await ctx.send(f'```py\n{value}{ret}\n```')
    else:
        await s.no_admin(ctx,"admin",0xff0000)
    print(f"[Eval Log]{ctx.author}さんがevalを使用しました。")
@bot.command(aliases=["cmd"],description="コマンドの`有効`と`無効`を切り替えるよ。\nBOT製作者しか使えないよ。")
@commands.is_owner()
async def command(ctx, mode, command_name):
    """Enable/disable a registered command at runtime (owner only).

    :param mode: ``"on"`` to enable, ``"off"`` to disable
    :param command_name: name of the target command
    """
    # Commands that must never be switched off.
    protected = ("command", "eval", "jishaku", "reboot", "down", "shell")
    cmd = bot.get_command(command_name)
    if mode == "off":
        # BUG FIX: the original condition `command_name in "command" or
        # "eval" or ...` was always truthy (non-empty string literals), so
        # protected commands were not actually protected and the
        # "command not found" branch was unreachable.
        if command_name in protected:
            msg = await ctx.send("コマンド無効化中・・・")
            await msg.edit(content=f"❌ | そのコマンドは無効にできないよ。")
        elif cmd:
            msg = await ctx.reply("コマンド無効化中・・・", mention_author=False)
            cmd.enabled = False
            await msg.edit(content=f"⭕ | `{cmd}`を`無効`にしたよ。")
        else:
            msg = await ctx.reply("コマンド無効化中・・・", mention_author=False)
            await msg.edit(content=f"❌ | `{command_name}`っていう名前のコマンドが存在しないよ。")
    elif mode == "on":
        if cmd:
            msg = await ctx.reply("コマンド有効化中・・・", mention_author=False)
            cmd.enabled = True
            await msg.edit(content=f"⭕ | `{cmd}`を`有効`にしたよ。")
        else:
            msg = await ctx.send("コマンド有効化中・・・")
            await msg.edit(content=f"❌ | `{command_name}`っていう名前のコマンドが存在しないか、無効化されていないよ。")
    else:
        await ctx.reply("❌ | 存在しないモードだよ。", mention_author=False)
@bot.command(description="指定したコマンドを削除するよ。\nBOT製作者しか使えないよ。\n注意:再起動するまで削除状態になるよ。")
@commands.is_owner()
async def remove(ctx, command_name):
    """Unregister *command_name* until the next restart (owner only)."""
    cmd = bot.get_command(command_name)
    if cmd:
        msg = await ctx.send("コマンド削除中・・・")
        # BUG FIX: Bot.remove_command() expects the command *name* (str);
        # passing the Command object made the lookup miss and the removal
        # silently did nothing.
        bot.remove_command(cmd.name)
        await msg.edit(content=f"⭕ | `{cmd}`を削除したよ。")
    else:
        msg = await ctx.send("コマンド削除中・・・")
        await msg.edit(content=f"❌ | `{command_name}`っていう名前のコマンドが存在しないよ。")
@bot.command(aliases=["sh","system","sys"],description="コマンドプロンプトのコマンドを実行するよ。\nBOT製作者しか使えないよ。")
@commands.is_owner()
async def shell(ctx, *, command):
    # Run a host shell command after an interactive ok/x confirmation
    # (owner only). The command is executed with shell=False (argv list).
    try:
        e=discord.Embed(title="コマンド実行 - 確認", description="実行する場合は`ok`、しない場合は`x`を送信してください。",color=0x03a9fc,timestamp=datetime.datetime.utcnow())
        e.add_field(name="入力コマンド:",value=f"```fix\n{command}\n```",inline=False)
        msg = await ctx.send(embed=e)
        # Only accept the confirmation message from the invoking author.
        def c(b):
            return b.author.id == ctx.author.id
        try:
            guess = await bot.wait_for("message", timeout=30, check=c)
        except asyncio.TimeoutError:
            # No answer within 30 s: auto-reject.
            e=discord.Embed(description="制限時間が過ぎたため、自動で操作を拒否したよ。")
            await msg.edit(embed=e)
            return
        if guess.content == "ok":
            print("操作開始")
            e=discord.Embed(title="コマンド実行", description="実行中・・・",color=0x03a9fc,timestamp=datetime.datetime.utcnow())
            e.add_field(name="入力コマンド:",value=f"```fix\n{command}\n```",inline=False)
            await msg.edit(embed=e)
            # Blocks until the child exits; raises CalledProcessError on a
            # non-zero exit code (handled by the outer except).
            result = subprocess.check_call(command.split())
            await ctx.message.add_reaction("✅")
            e=discord.Embed(title="コマンド実行", description="完了",color=0x03a9fc,timestamp=datetime.datetime.utcnow())
            e.add_field(name="入力コマンド:",value=f"```fix\n{command}\n```",inline=False)
            e.add_field(name="終了コード:",value=f"```c\n{result}\n```",inline=False)
            e.add_field(name="結果:",value=f"```diff\n+ 操作は正常に終了しました。\n```",inline=False)
            await msg.edit(embed=e)
            print("操作終了")
            return
        elif guess.content == "x":
            e=discord.Embed(description="操作を拒否したよ。",color=0xff0000)
            await msg.edit(embed=e)
            return
        else:
            embed2 = discord.Embed(description="`ok`か`x`で実行してね。", color=0xff0000)
            await msg.edit(embed=embed2)
            return
    except Exception as e:
        await ctx.message.add_reaction("❌")
        # NOTE(review): if the initial ctx.send() itself raised, `msg` is
        # unbound here and this handler raises NameError -- confirm/guard.
        e=discord.Embed(title="コマンド実行", description="失敗",color=0xff0000,timestamp=datetime.datetime.utcnow())
        e.add_field(name="入力コマンド:",value=f"```fix\n{command}\n```",inline=False)
        e.add_field(name="エラー内容:",value=f"```py\n{traceback.format_exc()}\n```",inline=False)
        e.add_field(name="結果:",value="```diff\n- エラーが発生したため、操作はできませんでした。\n```",inline=False)
        await msg.edit(embed=e)
        return
@bot.command(aliases=["leaveg","lg"],description="指定したサーバーから退出するよ。\n`gid`にサーバーIDを入れてね。")
@commands.is_owner()
async def leaveguild(ctx, gid: int):
    """Leave the guild whose id is *gid* (owner only)."""
    try:
        await bot.get_guild(gid).leave()
        e = discord.Embed(title="サーバー退出", description=f"{gid}から退出したよ。", color=bcol)
        await ctx.send(embed=e)
    # Narrowed from a bare `except:` so Ctrl-C / cancellation still propagate.
    except Exception:
        # BUG FIX: the Embed keyword was misspelled `tile=` -> `title=`,
        # so the error embed itself raised TypeError.
        await ctx.send(embed=discord.Embed(title="サーバー退出", description="エラーが発生したから、サーバーから退出できなかったよ。", color=0xff0000))
@bot.command(aliases=["end","shutdown","close"],description="BOTをシャットダウンするよ。\nBOT製作者しか使えないよ。")
@commands.is_owner()
async def down(ctx):
    # Announce shutdown, switch presence to DND, then close the bot
    # (owner only).
    await ctx.send(embed=discord.Embed(title="シャットダウン", description="BOTをシャットダウンするよ。", color=ctx.author.color,timestamp=datetime.datetime.utcnow()))
    await bot.change_presence(activity = discord.Game(name="Closing Bot..."),status=discord.Status.dnd)
    # Grace period so the announcement/presence propagate before closing.
    await asyncio.sleep(5)
    await bot.close()
@bot.command(aliases=["restart","run","reload"],description="BOTを再起動するよ。\nBOT製作者しか使えないよ。")
@commands.is_owner()
async def reboot(ctx):
    # Announce the restart, spawn a fresh interpreter running this script,
    # then close the current process (owner only).
    e = discord.Embed(title="再起動", description="BOTを再起動するよ。", color=ctx.author.color,timestamp=datetime.datetime.utcnow())
    await ctx.send(embed=e)
    await bot.change_presence(activity = discord.Game(name="Rebooting Bot..."),status=discord.Status.idle)
    # Grace period so the message/presence propagate first.
    await asyncio.sleep(5)
    # NOTE(review): assumes the working directory contains StarSky.py and
    # that `python` resolves to the right interpreter -- confirm on the host.
    cmd = "python StarSky.py"
    subprocess.Popen(cmd.split())
    await bot.close()
@bot.command(aliases=["activity"],description="BOTのアクティビティを変更するよ。\n`BOTサブオーナー`しか使えないね。")
async def setactivity(ctx, *, status):
    """Set the bot's activity text (sub-owners / owner only)."""
    # BUG FIX: the owner-id literal was a redacted "5<PASSWORD>" placeholder;
    # restored to the owner id configured in on_ready().
    if ctx.author.id in subowner or ctx.author.id == 584008752005513216:
        await bot.change_presence(activity = discord.Game(name=f"{status}"),status = discord.Status.idle)
        e = discord.Embed(title="操作成功", description=f"アクティビティを変更したよ。\n現在のアクテビティ:{status}", color=ctx.author.color,timestamp=datetime.datetime.utcnow())
        await ctx.send(embed=e)
    else:
        await s.no_admin(ctx,"subowner",0xff0000)
@bot.command(aliases=["resetact","ract"],description="アクティビティをリセットするよ。\n`BOTサブオーナー`しか使えないね。")
async def resetactivity(ctx):
    """Reset the bot's activity to the default text (sub-owners / owner only)."""
    # BUG FIX: the owner-id literal had been redacted to "<PASSWORD>";
    # restored to the owner id configured in on_ready().
    if ctx.author.id in subowner or ctx.author.id == 584008752005513216:
        await bot.change_presence(activity = discord.Game(f"{act}"),status =discord.Status.idle)
        e = discord.Embed(title="操作成功", description="アクティビティをデフォルトに戻したよ。", color=ctx.author.color,timestamp=datetime.datetime.utcnow())
        await ctx.send(embed=e)
    else:
        await s.no_admin(ctx,"subowner",0xff0000)
@bot.command(aliases=["online"],description="BOTのステータスをオンラインにするよ。\n`BOTサブオーナーとBOT運営`しか使えないね。")
async def setonline(ctx):
    """Switch the bot's status to online (staff / sub-owners / owner only)."""
    # BUG FIX: the owner-id literal had been redacted to "<PASSWORD>";
    # restored to the owner id configured in on_ready().
    if ctx.author.id in admin or ctx.author.id in subowner or ctx.author.id == 584008752005513216:
        await bot.change_presence(activity = discord.Game(f"{act}"),status =discord.Status.online)
        e = discord.Embed(title="操作成功", description="ステータスをオンラインにしたよ。", color=ctx.author.color,timestamp=datetime.datetime.utcnow())
        await ctx.send(embed=e)
    else:
        await s.no_admin(ctx,"subowner",0xff0000)
@bot.command(aliases=["idle"],description="BOTのステータスを退席中にするよ。\n`BOTサブオーナーとBOT運営`しか使えないね。")
async def setidle(ctx):
    """Switch the bot's status to idle (staff / sub-owners / owner only)."""
    # BUG FIX: the owner-id literal had been redacted to "<PASSWORD>";
    # restored to the owner id configured in on_ready().
    if ctx.author.id in admin or ctx.author.id in subowner or ctx.author.id == 584008752005513216:
        await bot.change_presence(activity = discord.Game(f"{act}"),status =discord.Status.idle)
        e = discord.Embed(title="操作成功", description="ステータスを退席中にしたよ。", color=ctx.author.color,timestamp=datetime.datetime.utcnow())
        await ctx.send(embed=e)
    else:
        await s.no_admin(ctx,"subowner",0xff0000)
@bot.command(aliases=["dnd"],description="BOTのステータスを取り込み中にするよ。\n`BOTサブオーナーとBOT運営`しか使えないね。")
async def setdnd(ctx):
    """Switch the bot's status to do-not-disturb (staff / sub-owners / owner only)."""
    # BUG FIX: the owner-id literal had been redacted to "<PASSWORD>";
    # restored to the owner id configured in on_ready().
    if ctx.author.id in admin or ctx.author.id in subowner or ctx.author.id == 584008752005513216:
        await bot.change_presence(activity = discord.Game(f"{act}"),status =discord.Status.dnd)
        e = discord.Embed(title="操作成功", description="ステータスを取り込み中にしたよ。", color=ctx.author.color,timestamp=datetime.datetime.utcnow())
        await ctx.send(embed=e)
    else:
        await s.no_admin(ctx,"subowner",0xff0000)
#BAN&KICK
@bot.command(description="指定したユーザーをKICKするよ。\nユーザーをKICK出来る人のみ。")
async def kick(ctx, user: discord.User=None,reason=None):
    """Kick *user* after a 👍/👎 reaction confirmation.

    BUG FIX: the user-facing description wrongly said "BAN" -- this is the
    kick command, so it now says "KICK".
    """
    no = '👎'
    ok = '👍'
    if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.kick_members) or ctx.guild.owner == ctx.author:
        if user is None:
            e = discord.Embed(title="実行エラー",description="名前を指定してね。",color=0xff0000)
            await ctx.send(embed=e)
        else:
            embeds = discord.Embed(
                title=f"**「@{user.name}」KICKしちゃう?**",color=0xC41415)
            msg = await ctx.send(embed=embeds)
            await msg.add_reaction(no)
            await msg.add_reaction(ok)
            try:
                # Only accept the invoking author's 👍/👎 on this message.
                def predicate1(message,author):
                    def check(reaction,users):
                        if reaction.message.id != message.id or users == ctx.bot.user or author != users:
                            return False
                        if reaction.emoji == ok or reaction.emoji == no:
                            return True
                        return False
                    return check
                react = await ctx.bot.wait_for('reaction_add',timeout=20,check=predicate1(msg,ctx.message.author))
                if react[0].emoji == ok:
                    await ctx.guild.kick(user, reason=reason)
                    print(f"[Kick Log]{user.name}が{ctx.message.author.name}によってKICKされたよ。")
                    embed = discord.Embed(title=f"{user.name}はKICKされたよ。",color=0xC41415,timestamp=datetime.datetime.utcnow())
                    embed.add_field(name="-------------------------", value=f"名前: **{user.name}**\nID: **{user.id}**\n理由:**{reason}**", inline=False)
                    return await ctx.send(embed=embed)
                elif react[0].emoji == no:
                    embeds = discord.Embed(
                        title=f"{user.name}はKICKされなかったよ。",color=0x10cfee)
                    return await ctx.send(embed=embeds)
            except asyncio.TimeoutError:
                # No reaction within 20 s: treat as a refusal.
                embeds = discord.Embed(
                    title=f"{user.name}はKICKされなかったよ。",color=0x10cfee)
                return await ctx.send(embed=embeds)
    else:
        await s.no_per(ctx,"kick",0xff0000)
@bot.command(description="指定したユーザーをBANするよ。\nユーザーをBAN出来る人のみ。")
async def ban(ctx, user: discord.User=None,reason=None):
    # Ban *user* after a 👍/👎 reaction confirmation from the invoker.
    no = '👎'
    ok = '👍'
    # NOTE(review): `ctx.guild.me.top_role < ctx.author.top_role` requires the
    # BOT's top role to be BELOW the author's -- looks inverted for a
    # hierarchy check; confirm the intended semantics.
    if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.ban_members) or ctx.guild.owner == ctx.author:
        if user is None:
            e = discord.Embed(title="実行エラー",description="名前を指定してね。",color=0xff0000)
            await ctx.send(embed=e)
        else:
            embeds = discord.Embed(
                title=f"**「@{user.name}」BANしちゃう?**",color=0xC41415)
            msg = await ctx.send(embed=embeds)
            await msg.add_reaction(no)
            await msg.add_reaction(ok)
            try:
                # Only accept the invoking author's 👍/👎 on this message.
                def predicate1(message,author):
                    def check(reaction,users):
                        if reaction.message.id != message.id or users == ctx.bot.user or author != users:
                            return False
                        if reaction.emoji == ok or reaction.emoji == no:
                            return True
                        return False
                    return check
                react = await ctx.bot.wait_for('reaction_add',timeout=20,check=predicate1(msg,ctx.message.author))
                if react[0].emoji == ok:
                    await ctx.guild.ban(user, reason=reason)
                    print(f"[Ban Log]{user.name}が{ctx.message.author.name}によってBANされたよ。")
                    embed = discord.Embed(title=f"{user.name}はBANされたよ。",color=0xC41415,timestamp=datetime.datetime.utcnow())
                    embed.add_field(name="-------------------------", value=f"名前: **{user.name}**\nID: **{user.id}**\n理由:**{reason}**", inline=False)
                    return await ctx.send(embed=embed)
                elif react[0].emoji == no:
                    embeds = discord.Embed(
                        title=f"{user.name}はBANされなかったよ。",color=0x10cfee)
                    return await ctx.send(embed=embeds)
            except asyncio.TimeoutError:
                # No reaction within 20 s: treat as a refusal.
                embeds = discord.Embed(
                    title=f"{user.name}はBANされなかったよ。",color=0x10cfee)
                return await ctx.send(embed=embeds)
    else:
        await s.no_per(ctx,"ban",0xff0000)
#役職系コード
@bot.command(aliases=["radd"],description="指定したユーザーに役職を付与するよ。\n役職を管理できる人のみ。")
async def roleadd(ctx, member: discord.Member, role: discord.Role):
    """Grant *role* to *member* (requires Manage Roles, or guild owner)."""
    if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_roles) or ctx.guild.owner == ctx.author:
        await member.add_roles(role)
        e = discord.Embed(title="操作成功", description=f'{member.mention}さんに{role.mention}を付与したよ。',color=ctx.author.color,timestamp=datetime.datetime.utcnow())
        await ctx.send(embed=e)
    else:
        # CONSISTENCY FIX: every sibling handler passes the string "role" to
        # s.no_per(); this one passed the Role object instead.
        await s.no_per(ctx,"role",0xff0000)
@bot.command(aliases=["rre"],description="指定したユーザーから役職を削除するよ。\n役職を管理できる人のみ。")
async def roleremove(ctx, member: discord.Member, role: discord.Role):
    """Remove *role* from *member* (requires Manage Roles, or guild owner)."""
    permitted = ctx.guild.owner == ctx.author or (
        ctx.guild.me.top_role < ctx.author.top_role
        and ctx.author.guild_permissions.manage_roles
    )
    if not permitted:
        await s.no_per(ctx, "role", 0xff0000)
        return
    await member.remove_roles(role)
    embed = discord.Embed(
        title="操作成功",
        description=f'{member.mention}さんから{role.mention}を剥奪したよ。',
        color=ctx.author.color,
        timestamp=datetime.datetime.utcnow(),
    )
    await ctx.send(embed=embed)
@bot.command(aliases=["rdel"],description="役職を削除するよ。\n役職を管理できる人のみ。")
async def roledelete(ctx, role: discord.Role):
    """Delete *role* from the guild (requires Manage Roles, or guild owner)."""
    permitted = ctx.guild.owner == ctx.author or (
        ctx.guild.me.top_role < ctx.author.top_role
        and ctx.author.guild_permissions.manage_roles
    )
    if not permitted:
        await s.no_per(ctx, "role", 0xff0000)
        return
    await role.delete()
    embed = discord.Embed(
        title="操作成功",
        description=f'{role.name}を削除したよ。',
        color=ctx.author.color,
        timestamp=datetime.datetime.utcnow(),
    )
    await ctx.send(embed=embed)
@bot.command(aliases=["rcr"],description="役職を作成するよ。\n役職を管理できる人のみ。")
async def rolecreate(ctx, rolename):
    """Create a new role named *rolename* (requires Manage Roles, or guild owner)."""
    permitted = ctx.guild.owner == ctx.author or (
        ctx.guild.me.top_role < ctx.author.top_role
        and ctx.author.guild_permissions.manage_roles
    )
    if not permitted:
        await s.no_per(ctx, "role", 0xff0000)
        return
    new_role = await ctx.guild.create_role(name=rolename)
    embed = discord.Embed(
        title="操作成功",
        description=f'{new_role.mention}を作成したよ。',
        color=ctx.author.color,
        timestamp=datetime.datetime.utcnow(),
    )
    await ctx.send(embed=embed)
@bot.command(aliases=["rusers","ru"],description="役職を持つメンバー一覧を表示するよ。")
async def roleusers(ctx, *, role: discord.Role):
    """List every member holding *role*; reports when the list is too long to embed."""
    try:
        names = "\n".join(member.name for member in role.members)
        if names:
            body = f"```\n{names}\n```"
        else:
            body = "```diff\n- なし\n```"
        embed = discord.Embed(title=f"{role}を持つメンバー一覧", description=body, color=role.color)
        await ctx.send(embed=embed)
    except discord.HTTPException:
        # Embed description exceeded Discord's length limit.
        await ctx.send("❌** | 表示数が2000文字を超えているため、表示できないよ。。**")
@bot.command(aliases=["re"],description="役職を変更するよ。\n`[role]`には編集したい役職名\n`[name]`には名前\n`[permissions]`には権限の値\n`[r] [g] [b]`にはrgbの色コード\nを入れてね。")
async def roleedit(ctx,role:discord.Role,name:str,permissions:int,r:int,g:int,b:int):
    # Edit a role's name, raw permission bitfield and RGB colour in one call
    # (requires Manage Roles, or guild owner).
    if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_roles) or ctx.guild.owner == ctx.author:
        await role.edit(name=name,permissions=discord.Permissions(permissions=permissions),color=discord.Colour.from_rgb(r,g,b))
        e = discord.Embed(title="操作成功", description=f'{role.mention}を変更したよ。\n名前:{name}\n権限:{permissions}\n色(R G B):{r} {g} {b}',color=ctx.author.color,timestamp=datetime.datetime.utcnow())
        await ctx.send(embed=e)
    else:
        await s.no_per(ctx,"role",0xff0000)
@bot.command(aliases=["roleallmemadd","rama"],description="指定した役職を全メンバーに付与するよ。\n役職を管理できる人のみ。\n※BOT含む")
async def roleallmembersadd(ctx, role:discord.Role):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_roles) or ctx.guild.owner == ctx.author:
msg = await ctx.send(embed=discord.Embed(title="操作開始", description=f"全員に{role}を付与するよ。", color=ctx.author.color,timestamp=datetime.datetime.utcnow()))
[await member.add_roles(role) for member in ctx.guild.members]
await msg.edit(embed=discord.Embed(title="操作成功",description=f"{role}を全員に付与したよ。", color=ctx.author.color,timestamp=datetime.datetime.utcnow()))
else:
await s.no_per(ctx,"role",0xff0000)
@bot.command(aliases=["roleallmemremove","roleallmemr","ramr"],description="指定した役職を全メンバーから削除するよ。\n役職を管理できる人のみ。\n※BOT含む")
async def roleallmembersremove(ctx, role:discord.Role):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_roles) or ctx.guild.owner == ctx.author:
msg = await ctx.send(embed=discord.Embed(title="操作開始", description=f"全員から{role}を剥奪するよ。", color=ctx.author.color,timestamp=datetime.datetime.utcnow()))
[await member.remove_roles(role) for member in ctx.guild.members]
await msg.edit(embed=discord.Embed(title="操作成功",description=f"{role}を全員から剥奪したよ。", color=ctx.author.color,timestamp=datetime.datetime.utcnow()))
else:
await s.no_per(ctx,"role",0xff0000)
# Channel & category commands
@bot.command(aliases=["textchannelcr","textchcr","tchc"],description="指定した名前のテキストチャンネルを作成するよ。\nチャンネルを管理できる人のみ。")
async def textchannelcreate(ctx,channel):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_channels) or ctx.guild.owner == ctx.author:
channel = await ctx.channel.category.create_text_channel(name=channel)
e = discord.Embed(title="操作成功", description=f'テキストチャンネル:{channel.mention}を作成したよ。',color=ctx.author.color,timestamp=datetime.datetime.utcnow())
await ctx.send(embed=e)
else:
await s.no_per(ctx,"channel",0xff0000)
@bot.command(aliases=["textchanneldel","textchdel","tchd"],description="指定した名前のチャンネルを削除するよ。\nチャンネルを管理できる人のみ。")
async def textchanneldelete(ctx,channel:discord.TextChannel):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_channels) or ctx.guild.owner == ctx.author:
await channel.delete()
e = discord.Embed(title="操作成功", description=f'テキストチャンネル:{channel.name}を削除したよ。',color=ctx.author.color,timestamp=datetime.datetime.utcnow())
await ctx.send(embed=e)
else:
await s.no_per(ctx,"channel",0xff0000)
@bot.command(aliases=["voicechannelcr","voicechcr","vchc"],description="指定した名前のボイスチャンネルを作成するよ。\nチャンネルを管理できる人のみ。")
async def voicechannelcreate(ctx,channel):
if (ctx.guild.me.top_role < ctx.author.top_role and ctx.author.guild_permissions.manage_channels) or ctx.guild.owner == ctx.author:
channel = await ctx.channel.category.create_voice_channel(name=channel)
e = discord.Embed(title="操作成功", description=f'ボイスチャンネル:{channel.name}を作成したよ。',color=ctx.author.color,timestamp=datetime.datetime.utcnow())
await ctx.send(embed=e)
else:
await s.no_per(ctx,"channel",0xff0000)
@bot.command(aliases=["voicechanneldel","voicechdel","vchd"],description="指定した名前のボイスチャンネルを作成するよ。\nチャンネルを管理できる人のみ。")
async def voicechanneldelete(ctx,channel:discord.VoiceChannel):
| |
"""
Deletes an integration with the given ID
Args:
client: Demisto client instance
instance_id: The instance ID to Delete
Returns:
True if integration was deleted else False
"""
self.build_context.logging_module.debug(f'Deleting {self} instance')
instance_id = instance_id or self.integration_configuration_from_server.get('id')
if not instance_id:
self.build_context.logging_module.debug(f'no instance ID for integration {self} was supplied')
return True
try:
res = demisto_client.generic_request_func(self=client, method='DELETE',
path='/settings/integration/' + urllib.parse.quote(
instance_id))
except ApiException:
self.build_context.logging_module.exception(
'Failed to delete integration instance, error trying to communicate with demisto.')
return False
if int(res[1]) != 200:
self.build_context.logging_module.error(f'delete integration instance failed\nStatus code {res[1]}')
self.build_context.logging_module.error(pformat(res))
return False
if self.module_instance:
self.module_instance = {}
return True
def test_integration_instance(self, client: DefaultApi) -> bool:
"""Runs test module on the integration instance
Args:
client: The demisto_client instance to use
Returns:
The integration configuration as it exists on the server after it was configured
"""
if not self.configuration.should_validate_test_module: # type: ignore
self.build_context.logging_module.debug(
f'Skipping test-module on {self} because the "validate_test" flag is set to False')
return True
connection_retries = 3
response_code = 0
integration_of_instance = self.integration_configuration_from_server.get('brand', '')
instance_name = self.integration_configuration_from_server.get('name', '')
self.build_context.logging_module.info(
f'Running "test-module" for instance "{instance_name}" of integration "{integration_of_instance}".')
for i in range(connection_retries):
try:
response_data, response_code, _ = demisto_client.generic_request_func(self=client, method='POST',
path='/settings/integration/test',
body=self.module_instance,
_request_timeout=120)
break
except ApiException:
self.build_context.logging_module.exception(
f'Failed to test integration {self} instance, error trying to communicate with demisto server')
return False
except urllib3.exceptions.ReadTimeoutError:
self.build_context.logging_module.warning(f"Could not connect. Trying to connect for the {i + 1} time")
if int(response_code) != 200:
self.build_context.logging_module.error(
f'Integration-instance test-module failed. Bad status code: {response_code}')
return False
result_object = ast.literal_eval(response_data)
success, failure_message = bool(result_object.get('success')), result_object.get('message')
if not success:
server_url = client.api_client.configuration.host
test_failed_msg = f'Test integration failed - server: {server_url}.\n' \
f'Failure message: {failure_message}' if failure_message else ' No failure message.'
self.build_context.logging_module.error(test_failed_msg)
return success
def disable_integration_instance(self, client) -> None:
"""Disables the integration
Args:
client: The demisto_client instance to use
Returns:
The integration configuration as it exists on the server after it was configured
"""
# tested with POSTMAN, this is the minimum required fields for the request.
module_instance = {
key: self.integration_configuration_from_server[key] for key in ['id', 'brand', 'name', 'data', 'isIntegrationScript', ]
}
module_instance['enable'] = "false"
module_instance['version'] = -1
self.build_context.logging_module.debug(f'Disabling integration instance "{module_instance.get("name")}"')
try:
res = demisto_client.generic_request_func(self=client, method='PUT',
path='/settings/integration',
body=module_instance)
except ApiException:
self.build_context.logging_module.exception('Failed to disable integration instance')
return
if res[1] != 200:
self.build_context.logging_module.error(f'disable instance failed, Error: {pformat(res)}')
    def get_docker_images(self) -> List[str]:
        """
        Gets the docker image name from the configured integration instance's body if such body exists
        Returns:
            The docker image names extracted from the server-side integration configuration.
        Raises:
            Exception: If the integration instance was not created on the server yet.
        """
        if self.integration_configuration_from_server:
            return Docker.get_integration_image(self.integration_configuration_from_server)
        else:
            raise Exception('Cannot get docker image - integration instance was not created yet')
def __str__(self):
return f'"{self.name}"'
def __repr__(self):
return str(self)
class TestContext:
def __init__(self,
build_context: BuildContext,
playbook: TestPlaybook,
client: DefaultApi,
server_context: 'ServerContext'):
"""
Initializes the TestContext class
Args:
build_context: The context of the current build
playbook: The TestPlaybook instance to run in the current test execution
client: A demisto client instance to use for communication with the server
server_context (ServerContext): The ServerContext instance in which the TestContext instance is created in
"""
self.build_context = build_context
self.server_context = server_context
self.playbook = playbook
self.incident_id: Optional[str] = None
self.test_docker_images: Set[str] = set()
self.client: DefaultApi = client
self.tunnel_command = \
f'ssh -i ~/.ssh/oregon-ci.pem -4 -o StrictHostKeyChecking=no -f -N "{SSH_USER}@{LOAD_BALANCER_DNS}" ' \
f'-L "{self.server_context.tunnel_port}:{self.server_context.server_ip}:443"'
def _get_investigation_playbook_state(self) -> str:
"""
Queried the server for the current status of the test's investigation
Returns:
A string representing the status of the playbook
"""
try:
investigation_playbook_raw = demisto_client.generic_request_func(
self=self.client,
method='GET',
path=f'/inv-playbook/{self.incident_id}')
investigation_playbook = ast.literal_eval(investigation_playbook_raw[0])
except ApiException:
self.build_context.logging_module.exception(
'Failed to get investigation playbook state, error trying to communicate with demisto server'
)
return PB_Status.FAILED
try:
state = investigation_playbook['state']
return state
except Exception: # noqa: E722
return PB_Status.NOT_SUPPORTED_VERSION
def _collect_docker_images(self) -> None:
"""
Collects docker images of the playbook's integration.
This method can be called only after the integrations were configured in the server.
"""
for integration in self.playbook.integrations:
docker_images = integration.get_docker_images()
if docker_images:
self.test_docker_images.update(docker_images)
def _print_investigation_error(self):
try:
res = demisto_client.generic_request_func(
self=self.client,
method='POST',
path='/investigation/' + urllib.parse.quote(self.incident_id), # type: ignore
body={"pageSize": 1000})
if res and int(res[1]) == 200:
resp_json = ast.literal_eval(res[0])
entries = resp_json['entries']
self.build_context.logging_module.error(f'Playbook {self.playbook} has failed:')
for entry in entries:
if entry['type'] == ENTRY_TYPE_ERROR and entry['parentContent']:
self.build_context.logging_module.error(f'- Task ID: {entry["taskId"]}')
# Checks for passwords and replaces them with "******"
parent_content = re.sub(
r' (P|p)assword="[^";]*"', ' password=******', entry['parentContent'])
self.build_context.logging_module.error(f' Command: {parent_content}')
self.build_context.logging_module.error(f' Body:\n{entry["contents"]}')
else:
self.build_context.logging_module.error(
f'Failed getting entries for investigation: {self.incident_id}. Res: {res}')
except ApiException:
self.build_context.logging_module.exception(
'Failed to print investigation error, error trying to communicate with demisto server')
def _poll_for_playbook_state(self) -> str:
"""
Polls for the playbook execution in the incident and return it's state.
Returns:
A string representing the status of the playbook
"""
timeout = time.time() + self.playbook.configuration.timeout
number_of_attempts = 1
# wait for playbook to finish run
while True:
# give playbook time to run
time.sleep(5)
try:
# fetch status
playbook_state = self._get_investigation_playbook_state()
except demisto_client.demisto_api.rest.ApiException:
playbook_state = 'Pending'
self.build_context.logging_module.exception('Error when trying to get investigation playbook state')
if playbook_state in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION):
break
if playbook_state == PB_Status.FAILED:
self.build_context.logging_module.error(f'{self.playbook} failed with error/s')
self._print_investigation_error()
break
if time.time() > timeout:
self.build_context.logging_module.error(f'{self.playbook} failed on timeout')
break
if number_of_attempts % DEFAULT_INTERVAL == 0:
self.build_context.logging_module.info(
f'loop no. {number_of_attempts / DEFAULT_INTERVAL}, playbook state is {playbook_state}')
number_of_attempts = number_of_attempts + 1
return playbook_state
    def _run_incident_test(self) -> str:
        """
        Creates an incident in demisto server and return it's status
        Returns:
            The final playbook state; PB_Status.FAILED when setup or an unexpected
            exception fails the run, or an empty string when the incident or its
            investigation id could not be created.
        """
        try:
            server_url = self.client.api_client.configuration.host
            # Configure all integration instances required by the playbook first.
            if not self.playbook.configure_integrations(self.client):
                return PB_Status.FAILED
            # Run test-module on each instance before opening the incident.
            test_module_result = self.playbook.run_test_module_on_integrations(self.client)
            if not test_module_result:
                self.playbook.disable_integrations(self.client)
                return PB_Status.FAILED
            incident = self.playbook.create_incident(self.client)
            if not incident:
                return ''
            self.incident_id = incident.investigation_id
            investigation_id = self.incident_id
            if investigation_id is None:
                self.build_context.logging_module.error(f'Failed to get investigation id of incident: {incident}')
                return ''
            self.build_context.logging_module.info(f'Investigation URL: {server_url}/#/WorkPlan/{investigation_id}')
            self.build_context.logging_module.info(
                f'ssh tunnel command: {self.tunnel_command}')
            # Wait for a terminal playbook state, then tidy up (integrations are
            # disabled regardless of outcome; the incident only on success).
            playbook_state = self._poll_for_playbook_state()
            self.playbook.disable_integrations(self.client)
            self._clean_incident_if_successful(playbook_state)
            return playbook_state
        except Exception:
            self.build_context.logging_module.exception(f'Failed to run incident test for {self.playbook}')
            return PB_Status.FAILED
def _clean_incident_if_successful(self, playbook_state: str) -> None:
"""
Deletes the integration instances and the incident if the test was successful or failed on docker rate limit
Args:
playbook_state: The state of the playbook with which we can check if the test was successful
"""
test_passed = playbook_state in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
if self.incident_id and test_passed:
self.playbook.delete_incident(self.client, self.incident_id)
self.playbook.delete_integration_instances(self.client)
def _run_docker_threshold_test(self):
self._collect_docker_images()
if self.test_docker_images:
error_message = Docker.check_resource_usage(
server_url=self.server_context.server_ip,
docker_images=self.test_docker_images,
def_memory_threshold=self.playbook.configuration.memory_threshold,
def_pid_threshold=self.playbook.configuration.pid_threshold,
docker_thresholds=self.build_context.conf.docker_thresholds,
logging_module=self.build_context.logging_module)
if error_message:
self.build_context.logging_module.error(error_message)
return False
return True
def _send_slack_message(self, channel, text, user_name, as_user):
self.build_context.slack_client.api_call(
"chat.postMessage",
json={
'channel': channel,
'username': user_name,
'as_user': as_user,
'text': text,
'mrkdwn': 'true'
}
)
def _notify_failed_test(self):
text = f'{self.build_context.build_name} - {self.playbook} Failed\n' \
f'for more details run: `{self.tunnel_command}` and browse into the following link\n' \
f'{self.client.api_client.configuration.host}'
text += f'/#/WorkPlan/{self.incident_id}' if self.incident_id else ''
if self.build_context.slack_user_id:
self.build_context.slack_client.api_call(
"chat.postMessage",
json={
'channel': self.build_context.slack_user_id,
'username': 'Content CircleCI',
'as_user': 'False',
'text': text
}
)
def _add_to_succeeded_playbooks(self) -> None:
"""
Adds the playbook to the succeeded playbooks list
"""
self.build_context.tests_data_keeper.succeeded_playbooks.append(self.playbook.configuration.playbook_id)
    def _add_to_failed_playbooks(self, is_second_playback_run: bool = False) -> None:
        """
        Adds the playbook to the failed playbooks list
        Args:
            is_second_playback_run: Is The playbook run on a second playback after a freshly created record
        """
        playbook_name_to_add = self.playbook.configuration.playbook_id
        # Annotate the recorded name so the failure report shows run conditions.
        if not self.playbook.is_mockable:
            playbook_name_to_add += " (Mock Disabled)"
        if is_second_playback_run:
            self.build_context.logging_module.error(
                'Playback on newly created record has failed, see the following confluence page for help:\n'
                'https://confluence.paloaltonetworks.com/display/DemistoContent/Debug+Proxy-Related+Test+Failures')
            playbook_name_to_add += ' (Second Playback)'
        self.build_context.logging_module.error(f'Test failed: {self}')
        self.build_context.tests_data_keeper.failed_playbooks.add(playbook_name_to_add)
@staticmethod
def _get_circle_memory_data() -> Tuple[str, str]:
"""
Checks how many bytes are currently in use in the circle build instance
Returns:
The number of bytes in use
"""
process = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.usage_in_bytes'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout.decode(), stderr.decode()
@staticmethod
def _get_circle_processes_data() -> Tuple[str, str]:
"""
Returns some data about the processes currently running in the circle build instance
"""
process = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout.decode(), stderr.decode()
def _send_circle_memory_and_pid_stats_on_slack(self):
"""
Sends a slack messages with the number of bytes currently in use and the number of processes currently in use
"""
if self.build_context.is_nightly and self.build_context.memCheck and not self.build_context.is_local_run:
stdout, stderr = self._get_circle_memory_data()
text = 'Memory Usage: {}'.format(stdout) if not stderr else stderr
self._send_slack_message(SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
stdout, stderr = self._get_circle_processes_data()
text = stdout if not stderr | |
# -*- coding: utf-8 -*-
"""
This file contains implementations of the functions used to train a CNN model:
train_cnn - Function used to facilitate the training of the Convolutinal Neural Network model.
test_cnn - Function used to facilitate the testing of the Convolutinal Neural Network model.
"""
# Built-in/Generic Imports
import os
import time
# Library Imports
import torch
import numpy as np
import pandas as pd
from torch.cuda import amp
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.optim import SGD, LBFGS, lr_scheduler
from torch.utils.tensorboard import SummaryWriter
# Own Modules
from utils import log
from model import Classifier
from dataset import get_datasets
__author__ = ["<NAME>"]
__copyright__ = "Copyright 2020, Selective Dermatology"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "3.0.0"
__maintainer = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def train_cnn(arguments, device):
"""
Function for training of the Convolutional neural network.
:param arguments: ArgumentParser Namespace object with arguments used for training.
:param device: PyTorch device that will be used for training.
:return: Lists of training and validation losses and an integer for the best performing epoch.
"""
# Loads a TensorBoard Summary Writer.
if arguments.tensorboard_dir != "":
writer = SummaryWriter(os.path.join(arguments.tensorboard_dir, arguments.task, arguments.experiment))
# Loads the training and validation data.
train_data, val_data, _ = get_datasets(arguments)
# Creates the training data loader using the dataset objects.
training_data_loader = DataLoader(train_data, batch_size=arguments.batch_size,
shuffle=True, num_workers=arguments.data_workers,
pin_memory=False, drop_last=False)
# Creates the validation data loader using the dataset objects.
validation_data_loader = DataLoader(val_data, batch_size=arguments.batch_size,
shuffle=False, num_workers=arguments.data_workers,
pin_memory=False, drop_last=False)
log(arguments, "Loaded Datasets\n")
# Initialises the classifier model.
classifier = Classifier(arguments.efficient_net)
# Sets the classifier to training mode.
classifier.train()
# Moves the classifier to the selected device.
classifier.to(device)
# Initialises the optimiser used to optimise the parameters of the model.
optimiser = SGD(params=classifier.parameters(), lr=arguments.starting_lr)
# Initialises the learning rate scheduler to adjust the learning rate during training.
scheduler = lr_scheduler.CyclicLR(optimiser, base_lr=arguments.starting_lr, max_lr=arguments.maximum_lr)
# Initialises the gradient scaler used for 16 but precision.
if arguments.precision == 16 and device != torch.device("cpu"):
scaler = amp.GradScaler()
log(arguments, "Models Initialised")
# Declares the main logging variables for the training.
start_time = time.time()
losses, validation_losses, temperatures = [], [], []
best_loss, best_epoch, total_batches = 1e10, 0, 0
log(arguments, "Training Timer Started\n")
# The beginning of the main training loop.
for epoch in range(1, arguments.max_epochs + 1):
# Declares the logging variables for the epoch.
epoch_acc, epoch_loss, epoch_risk, epoch_coverage,num_batches = 0, 0, 0, 0, 0
# Loops through the training data batches.
for images, labels in training_data_loader:
# Moves the images and labels to the selected device.
images = images.to(device)
labels = labels.to(device)
# Resets the gradients in the model.
optimiser.zero_grad()
# Perform training with 16 bit precision.
if arguments.precision == 16 and device != torch.device("cpu"):
with amp.autocast():
# Performs forward propagation with the model.
logits = classifier(images, dropout=True)
# Calculates the loss.
loss = F.cross_entropy(logits, labels)
# Using the gradient scaler performs backward propagation.
scaler.scale(loss).backward()
# Update the weights of the model using the optimiser.
scaler.step(optimiser)
# Updates the scale factor of the gradient scaler.
scaler.update()
# Performs training with 32 bit precision.
else:
# Performs forward propagation with the model.
logits = classifier(images, dropout=True)
# Calculates the loss.
loss = F.cross_entropy(logits, labels)
# Performs backward propagation.
loss.backward()
# Update the weights of the model using the optimiser.
optimiser.step()
# Updates the learning rate scheduler.
scheduler.step()
# Calculates the accuracy of the batch.
batch_accuracy = (logits.max(dim=1)[1] == labels).sum().double() / labels.shape[0]
# Calculates the selection scores for the validation predictions.
selections = torch.max(F.softmax(logits), 1)[0]
# Calculates the coverage for the batch.
batch_coverage = selections.mean()
# Calculates the log probability for the predictions and selections.
log_prob = -1. * F.log_softmax(logits, 1) * selections.view([labels.shape[0], 1])
# Calculates the selective risk for the batch using the selections and predictions.
batch_risk = log_prob.gather(1, labels.unsqueeze(1)).mean() / batch_coverage
# Adds the number of batches, loss and accuracy to epoch sum.
num_batches += 1
epoch_loss += loss.item()
epoch_acc += batch_accuracy
epoch_coverage += batch_coverage
epoch_risk += batch_risk
# Writes the batch loss and accuracy to TensorBoard logger.
if arguments.tensorboard_dir != "":
writer.add_scalar("Loss/batch", loss.item(), num_batches + total_batches)
writer.add_scalar("Accuracy/batch", batch_accuracy, num_batches + total_batches)
# Logs the details of the epoch progress.
if num_batches % arguments.log_interval == 0:
log(arguments, "Time: {}s\tTrain Epoch: {} [{}/{}] ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {:.6f}".format(
str(int(time.time() - start_time)).rjust(6, '0'), str(epoch).rjust(2, '0'),
str(num_batches * arguments.batch_size).rjust(len(str(len(train_data))), '0'),
len(train_data), 100. * num_batches / (len(train_data) / arguments.batch_size),
epoch_loss / num_batches, epoch_acc / num_batches))
# If the number of batches have been reached end epoch.
if num_batches == arguments.batches_per_epoch:
break
# Updates the total number of batches (used for logging).
total_batches += num_batches
# Writes epoch loss and accuracy to TensorBoard.
if arguments.tensorboard_dir != "":
writer.add_scalar("Loss/train", epoch_loss / num_batches, epoch)
writer.add_scalar("Accuracy/train", epoch_acc / num_batches, epoch)
writer.add_scalar("Coverage/train", epoch_coverage / num_batches, epoch)
writer.add_scalar("Selective Risk/train", epoch_risk / num_batches, epoch)
# Declares the logging variables for validation.
validation_acc, validation_loss, validation_risk, validation_coverage, validation_batches = 0, 0, 0, 0, 0
logit_list, label_list = [], []
temperature = torch.nn.Parameter(torch.ones(1, device=device))
temp_optimiser = LBFGS([temperature], lr=0.01, max_iter=1000, line_search_fn="strong_wolfe")
# Performs the validation epoch with no gradient calculations.
with torch.no_grad():
# Loops through the training data batches.
for images, labels in validation_data_loader:
# Moves the images and labels to the selected device.
images = images.to(device)
labels = labels.to(device)
# Performs forward propagation using 16 bit precision.
if arguments.precision == 16 and device != torch.device("cpu"):
with amp.autocast():
# Performs forward propagation with the model.
logits = classifier(images, dropout=False)
# Calculates the loss.
loss = F.cross_entropy(logits, labels)
# Performs forward propagation using 32 bit precision.
else:
# Performs forward propagation with the model.
logits = classifier(images, dropout=True)
# Calculates the loss.
loss = F.cross_entropy(logits, labels)
logit_list.append(logits)
label_list.append(labels)
# Calculates the accuracy of the batch.
batch_accuracy = (logits.max(dim=1)[1] == labels).sum().double() / labels.shape[0]
# Calculates the selection scores for the validation predictions.
selections = torch.max(F.softmax(logits), 1)[0]
# Calculates the coverage for the batch.
batch_coverage = selections.mean()
# Calculates the log probability for the predictions and selections.
log_prob = -1. * F.log_softmax(logits, 1) * selections.view([labels.shape[0], 1])
# Calculates the selective risk for the batch using the selections and predictions.
batch_risk = log_prob.gather(1, labels.unsqueeze(1)).mean() / batch_coverage
# Adds the number of batches, loss and accuracy to validation sum.
validation_batches += 1
validation_loss += loss.item()
validation_acc += batch_accuracy
validation_coverage += batch_coverage
validation_risk += batch_risk
# If the number of batches have been reached end validation.
if validation_batches == arguments.batches_per_epoch:
break
logit_list = torch.cat(logit_list).to(device)
label_list = torch.cat(label_list).to(device)
def _eval():
temp_loss = F.cross_entropy(torch.div(logit_list, temperature), label_list)
temp_loss.backward()
return temp_loss
temp_optimiser.step(_eval)
temperatures.append(temperature.item())
# Writes validation loss and accuracy to TensorBoard.
if arguments.tensorboard_dir != "":
writer.add_scalar("Loss/validation", validation_loss / validation_batches, epoch)
writer.add_scalar("Accuracy/validation", validation_acc / validation_batches, epoch)
writer.add_scalar("Coverage/validation", validation_coverage / validation_batches, epoch)
writer.add_scalar("Selective Risk/validation", validation_risk / validation_batches, epoch)
# Adds the training and validation losses to their respective lists.
losses.append(epoch_loss / num_batches)
validation_losses.append(validation_loss / validation_batches)
# Logs the details of the training epoch.
log(arguments, "\nEpoch: {}\Training Loss: {:.6f}\tTraining Accuracy: {:.6f}\t"
"Training Coverage: {:.6f}\tTraining Selective Risk: {:.6f}\n"
"Validation Loss: {:.6f}\tValidation Accuracy: {:.6f}\t"
"Validation Coverage: {:.6f}\tValidation Selective Risk: {:.6f}\n".
format(epoch, losses[-1], epoch_acc / num_batches, epoch_coverage / num_batches, epoch_risk / num_batches,
validation_losses[-1], validation_acc / validation_batches,
validation_coverage / validation_batches, validation_risk / validation_batches))
# If the current epoch has the best validation loss then save the model with the prefix best.
if validation_losses[-1] < best_loss:
best_loss = validation_losses[-1]
best_epoch = epoch
classifier.save_model(arguments.model_dir, arguments.experiment)
# Saves the model with the current epoch as the prefix.
classifier.save_model(arguments.model_dir, arguments.experiment, str(epoch))
# Checks if the training has performed the minimum number of epochs.
if epoch >= arguments.min_epochs:
# Calculates the generalised validation loss.
g_loss = 100 *((validation_losses[-1] / min(validation_losses[:-1])) - 1)
# Calculates the training progress using a window over the training losses.
t_progress = 1000 * ((sum(losses[-(arguments.window + 1): - 1]) /
(arguments.window * min(losses[-(arguments.window + 1): - 1]))) - 1)
# Compares the generalised loss and training progress against a selected target value.
if g_loss / t_progress > arguments.stop_target:
break
# Logs the final training information.
log(arguments, f"\nTraining finished after {epoch} epochs in {int(time.time() - start_time)}s")
log(arguments, f"Best Epoch {best_epoch} with a | |
h0=0, x0=0.25):
# The plunge maneuver is defined by the following equation:
# V(t) = -Vmax*sin^2(pi*t/T), where T is the maneuver duration and
# Vmax is the peak plunge velocity, reached in the middle of the
# maneuver, for t/T = 0.5. If we integrate V, we obtain the motion of
# the airfoil in the vertical direction. The pitching is constant
# in the plunge maneuver. Then, the whole motions is:
# Plunging: h(t) = h0 - Vmax * t/2 + Vmax*T/(4*pi)*sin(2*pi*t/T)
# Pitching: alpha(t) = alpha_m
# Horizontal flight: x(t) = x0 - Uinf*t
#
# Inputs: G=Vmax/Uinf (velocity ratio)
# T (maneuver duration)
# alpha_m (pitching for the simulation)
# x0, h0 (initial position of the pivot point)
#
# If the time of simulation is higher than the plunge maneuver duration
# (tf > T), once completed the maneuver, the airfoil continues
# in horizontal flight.
pi = np.pi
Uinf = self.Uinf
nt = self.nt
chord = self.chord
# Definition of motion kinematics
# alpha_m = alpha_m # Mean Pitch [degrees]
alpha_m = alpha_m * pi / 180
Vmax = G*Uinf
T = T * chord/Uinf
h0 = h0 # initial position of 'pivot' point
x0 = x0 # initial position of 'pivot' point
self.G = G
self.T = T
# Initialize arrays for the motion
alpha, alpha_dot, alpha_e = np.zeros(nt), np.zeros(nt), np.zeros(nt)
h , h_dot = np.zeros(nt), np.zeros(nt)
x , x_dot = np.zeros(nt), np.zeros(nt)
# Defining motion of the pivot point
for i in range(nt):
ti = self.t[i]
if ti <= T:# plunge maneuver until T (duration of maneuver)
alpha[i] = alpha_m
alpha_dot[i] = 0
h[i] = h0 - Vmax * ti/2 + Vmax * T/(4*pi) * np.sin(2*pi*ti/T)
h_dot[i] = - Vmax*np.sin(pi*ti/T)**2
x[i] = x0 - Uinf*ti
x_dot[i] = - Uinf
else: # from T to t_final -> horizontal flight (after plunge maneuver)
alpha[i] = alpha_m
alpha_dot[i] = 0
h[i] = h[i-1]
h_dot[i] = 0
x[i] = x0 - Uinf*ti
x_dot[i] = - Uinf
xpiv, hpiv = x, h
alpha_e = alpha - np.arctan2(h_dot/Uinf) # effective angle of attack
# Get motion of the entire airfoil as a function of time
path_airfoil = np.zeros([self.nt, 2, self.Npoints]) # t,xy, Npoints
for i in range(nt):
ti = self.t[i]
# First we compute the Leading Edge motion
path_airfoil[i,0,0] = xpiv[i] - self.piv*np.cos(-alpha[i]) #xLE new
path_airfoil[i,1,0] = hpiv[i] + self.piv*np.sin(-alpha[i]) #yLE new
# The position of a new generic point Q results from rotating the
# vector LEQ a clockwise (-) angle alpha, such that:
# xQ_new = xLE_new + xQ*cos(-alpha) - yQ*sin(-alpha)
# yQ_new = yLE_new + xQ*sin(-alpha) + yQ*cos(-alpha)
path_airfoil[i,0,1:] = path_airfoil[i,0,0] + np.cos(-alpha[i]) * self.airfoil['x'][1:] \
- np.sin(-alpha[i]) * self.airfoil['eta'][1:]
path_airfoil[i,1,1:] = path_airfoil[i,1,0] + np.sin(-alpha[i]) * self.airfoil['x'][1:] \
+ np.cos(-alpha[i]) * self.airfoil['eta'][1:]
# Gamma points are located at xgamma of each panel
path_airfoil_gamma_points = path_airfoil[:,:,:-1] + \
self.xgamma*(path_airfoil[:,:,1:]-path_airfoil[:,:,:-1])
self.alpha, self.alpha_dot = alpha, alpha_dot
self.hpiv , self.h_dot = hpiv , h_dot
self.xpiv , self.x_dot = xpiv , x_dot
self.path = {'airfoil': path_airfoil, \
'airfoil_gamma_points':path_airfoil_gamma_points}
return None
def induced_velocity(self, circulation, xw, zw, xp, zp, viscous = True):
    """Velocity induced at points (xp, zp) by vortices located at (xw, zw).

    Parameters
    ----------
    circulation : scalar or array_like
        Circulation strength of each vortex; broadcasts against the
        per-vortex kernel columns.
    xw, zw : array_like
        Vortex positions (global coordinates).
    xp, zp : array_like
        Evaluation-point positions (global coordinates).
    viscous : bool, optional
        If True, use Vatistas' desingularized vortex model with core
        radius ``self.v_core`` (= 1.3*dt_star*chord); if False, use
        ideal point vortices (zero core radius).

    Returns
    -------
    u, w : ndarray
        Horizontal and vertical induced-velocity components at each
        evaluation point.
    """
    # Pairwise separations: rows = evaluation points, columns = vortices.
    # Broadcasting replaces the original per-point Python loop (same values).
    x_dist = np.asarray(xp, dtype=float).reshape(-1, 1) - np.asarray(xw)
    z_dist = np.asarray(zp, dtype=float).reshape(-1, 1) - np.asarray(zw)
    v_core = self.v_core if viscous else 0
    # Desingularized kernel; reduces to the ideal point vortex when v_core = 0.
    denom = 2 * np.pi * np.sqrt((x_dist**2 + z_dist**2)**2 + v_core**4)
    Ku = z_dist / denom
    Kw = x_dist / denom
    # Sum contributions of all vortices at each evaluation point.
    u = np.sum(circulation * Ku, axis=1)
    w = np.sum(-circulation * Kw, axis=1)
    return u, w
def airfoil_downwash(self, circulation, xw, zw, i):
    """Induced velocity normal to the airfoil surface, W(x, t).

    If ``xw, zw`` are the wake vortices' coordinates, this computes the
    wake downwash over the airfoil.

    Parameters
    ----------
    circulation : array_like
        Circulation of the inducing vortices.
    xw, zw : array_like
        Global coordinates of the inducing vortices.
    i : int
        Time-step index.

    Returns
    -------
    W : ndarray
        Normal velocity at the airfoil's gamma (vortex) points.
    """
    # Kinematic state at time step i.
    alpha = self.alpha[i]
    alpha_dot = self.alpha_dot[i]
    h_dot = self.h_dot[i]
    # Evaluation points: the airfoil's gamma points at this time step.
    xp, zp = self.path['airfoil_gamma_points'][i,0,:], self.path['airfoil_gamma_points'][i,1,:]
    u1, w1 = self.induced_velocity(circulation, xw, zw, xp, zp)
    # u1, w1 are in global coordinates, we need to rotate them to local
    u = u1*np.cos(alpha) - w1*np.sin(alpha) # tangential to chord
    w = u1*np.sin(alpha) + w1*np.cos(alpha) # normal to chord
    # Surface boundary condition: camber slope (detadx_panel) times the local
    # tangential flow, minus the normal components of the free stream, pitch
    # rate (about the pivot), plunge rate, and the induced velocity.
    W = self.airfoil['detadx_panel']*(self.Uinf*np.cos(alpha) + h_dot*np.sin(alpha) + u \
        - alpha_dot*self.airfoil['eta_panel']) \
        - self.Uinf*np.sin(alpha) - alpha_dot*(self.airfoil['x_panel'] - self.piv) \
        + h_dot*np.cos(alpha) - w
    return W
def time_loop(self, print_dt = 50, BCcheck = False):
pi = np.pi
Uinf = self.Uinf
theta = self.airfoil['theta']
theta_panel = self.airfoil['theta_panel']
LESPcrit = self.LESPcrit
epsilon = self.epsilon
chord = self.chord
rho = self.rho
dt = self.dt
# Initializing vortices coordinates and circulation
nvort = self.nt-1
n_freevort = self.n_freevort
# initializing paths of shed vortices
# 1st index: time; 2nd index: x,y; 3rd index: Number of vortex
self.path['TEV'] = np.zeros([self.nt, 2, nvort]) # At each dt, a TEV is shed
self.path['LEV'] = np.zeros([self.nt, 2, nvort]) # There will be nt LEV shed as maximum
self.path['FREE'] = np.zeros([self.nt, 2, n_freevort]) # Free vortices
self.path['FREE'][0,:,:] = self.xy_freevort # Placing free vortices at their initial positions
# initializing circulations
self.circulation = {'TEV': np.zeros([nvort])} #initializing dictionary
self.circulation['LEV'] = np.zeros([nvort])
self.circulation['FREE'] = self.circulation_freevort # Filling free vortices with their initial circulation
self.circulation['bound'] = np.zeros([nvort])
self.circulation['airfoil'] = np.zeros([nvort, self.Npoints-1]) # dGamma(x,t) = gamma(x,t)*dx
self.BC = np.zeros([nvort, self.Npoints]) # Boundary condition computation (normal velocity to airfoil)
self.circulation['gamma_airfoil'] = np.zeros([nvort, self.Npoints-1]) # gamma(x,t) = Fourier series
self.circulation['Gamma_airfoil'] = np.zeros([nvort, self.Npoints-1]) # Gamma(x,t) = int_0^x dGamma(x,t)
# initializing loads and pressure distribution
self.dp = np.zeros([self.nt, self.Npoints-1])
self.Fn = np.zeros([self.nt])
self.Fs = np.zeros([self.nt])
self.L = np.zeros([self.nt])
self.D = np.zeros([self.nt])
self.T = np.zeros([self.nt])
self.M = np.zeros([self.nt])
# Initializing fourier coefficients vector and LESP vector
self.fourier = np.zeros([self.nt, 2, self.Ncoeffs]) # axis = 1 -> 0 coeffs, 1 derivatives
self.LESP = np.zeros(self.nt)
self.LESP_prev = np.zeros(self.nt)
# Initial condition (distribution of a flat plate at a fixed angle of attack alpha_m)
# One can also load the A0, A1 initial coeffs for an specific airfoil at specific angle of attack (from another simulation)
A0, A1 = np.sin(self.alpha_m), 0
circulation_bound = Uinf*chord*pi*(A0 + A1/2)
self.fourier[0,0,:2] = A0, A1
# Initial Gamma to be accounted for in Kelvin's condition (this would be zero without free vortices and initial bound circulation)
self.circulation['IC'] = np.sum(self.circulation['FREE']) + circulation_bound
itev = 0 #tev counter
ilev = 0 #lev counter
LEV_shed = -1*np.ones(self.nt) # stores the information of intermittent LEV shedding per dt
'''----------------------------------------------------------'''
'''----------------------- Time loop ------------------------'''
'''----------------------------------------------------------'''
for i in range(1,self.nt): #starting from 2nd step
if (i == 1 or i == self.nt-1 or i/print_dt == int(i/print_dt)) and self.verbose == True:
print('Step {} out of {}. Elapsed time {}'.format(i, self.nt-1, timeit.default_timer() - self.start_time))
# Rewrite coordinates of the rest of vortices in the structure (not including vortices at time step i)
self.path['TEV'] [i,:,:itev] = self.path ['TEV'][i-1,:,:itev] # [:i] does not include i
self.path['LEV'] [i,:,:ilev] = self.path ['LEV'][i-1,:,:ilev]
self.path['FREE'][i,:,:] = self.path['FREE'][i-1,:,:]
'''--------------------------------------------------------------'''
'''---------------------- TEV computation -----------------------'''
'''--------------------------------------------------------------'''
# Compute the position of the shed TEV
if itev == 0:
# First TEV is located horizontally downstream at a distance 0.5*Uinf*dt from the trailing edge
self.path['TEV'][i,:,itev] = self.path['airfoil'][0,:,-1] + [0.5*Uinf*dt,0]
else:
# Shedding of the Trailing Edge Vortex (TEV)
# (X,Z)_tev_i = (X,Z)_TE + 1/3[(X,Z)_tev_i-1 - (X,Z)_TE]
# At 1/3 of the distance between the shedding edge and the
# previously shed vortex (in this dt).
self.path['TEV'][i,:,itev] = self.path['airfoil'][i,:,-1] + \
1/3*(self.path['TEV'][i,:,itev-1] - self.path['airfoil'][i,:,-1])
if self.method == 'Ramesh': # iterating with Newton method
f = 1 #initializing
niter = 1
shed_vortex_gamma = -1 # guess for Newton-Raphson
while abs(f) > self.maxerror and niter < self.maxiter:
self.circulation['TEV'][itev] = shed_vortex_gamma
circulation = np.append(np.append(self.circulation['TEV'][:itev+1], self.circulation['LEV'][:ilev+1]), self.circulation['FREE'])
xw = np.append(np.append(self.path['TEV'][i,0,:itev+1], | |
"""
This file contains several abstract classes:
* TorchForecastingModel is the super-class of all torch (deep learning) darts forecasting models.
* PastCovariatesTorchModel(TorchForecastingModel) for torch models consuming only past-observed covariates.
* FutureCovariatesTorchModel(TorchForecastingModel) for torch models consuming only future values of
future covariates.
* DualCovariatesTorchModel(TorchForecastingModel) for torch models consuming past and future values of some single
future covariates.
* MixedCovariatesTorchModel(TorchForecastingModel) for torch models consuming both past-observed
as well as past and future values of some future covariates.
* SplitCovariatesTorchModel(TorchForecastingModel) for torch models consuming past-observed as well as future
values of some future covariates.
* TorchParametricProbabilisticForecastingModel(TorchForecastingModel) is the super-class of all probabilistic torch
forecasting models.
"""
import numpy as np
import os
import re
from glob import glob
import shutil
from joblib import Parallel, delayed
from typing import Optional, Dict, Tuple, Union, Sequence, List
from abc import ABC, abstractmethod
import torch
from torch import Tensor
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import datetime
from darts.timeseries import TimeSeries
from darts.utils import _build_tqdm_iterator
from darts.utils.torch import random_method
from darts.utils.data.training_dataset import (TrainingDataset,
PastCovariatesTrainingDataset,
FutureCovariatesTrainingDataset,
DualCovariatesTrainingDataset,
MixedCovariatesTrainingDataset,
SplitCovariatesTrainingDataset)
from darts.utils.data.inference_dataset import (InferenceDataset,
PastCovariatesInferenceDataset,
FutureCovariatesInferenceDataset,
DualCovariatesInferenceDataset,
MixedCovariatesInferenceDataset,
SplitCovariatesInferenceDataset)
from darts.utils.data.sequential_dataset import (PastCovariatesSequentialDataset,
FutureCovariatesSequentialDataset,
DualCovariatesSequentialDataset,
MixedCovariatesSequentialDataset,
SplitCovariatesSequentialDataset)
from darts.utils.data.encoders import SequentialEncoder
from darts.utils.likelihood_models import Likelihood
from darts.logging import raise_if_not, get_logger, raise_log, raise_if
from darts.models.forecasting.forecasting_model import GlobalForecastingModel
# Folder layout for model artifacts: everything is stored under a common
# working directory (``.darts`` by default), with per-model subfolders for
# training checkpoints and Tensorboard run logs.
DEFAULT_DARTS_FOLDER = '.darts'
CHECKPOINTS_FOLDER = 'checkpoints'
RUNS_FOLDER = 'runs'
# Module-level logger, named after this module.
logger = get_logger(__name__)
def _get_checkpoint_folder(work_dir, model_name):
    """Return the directory holding checkpoints for ``model_name`` under ``work_dir``."""
    path_parts = (work_dir, CHECKPOINTS_FOLDER, model_name)
    return os.path.join(*path_parts)
def _get_runs_folder(work_dir, model_name):
    """Return the directory holding Tensorboard runs for ``model_name`` under ``work_dir``."""
    path_parts = (work_dir, RUNS_FOLDER, model_name)
    return os.path.join(*path_parts)
class TorchForecastingModel(GlobalForecastingModel, ABC):
# TODO: add is_stochastic & reset methods
def __init__(self,
             input_chunk_length: int,
             output_chunk_length: int,
             batch_size: int = 32,
             n_epochs: int = 100,
             optimizer_cls: torch.optim.Optimizer = torch.optim.Adam,
             optimizer_kwargs: Optional[Dict] = None,
             lr_scheduler_cls: torch.optim.lr_scheduler._LRScheduler = None,
             lr_scheduler_kwargs: Optional[Dict] = None,
             loss_fn: Optional[nn.modules.loss._Loss] = None,
             model_name: str = None,
             work_dir: str = os.path.join(os.getcwd(), DEFAULT_DARTS_FOLDER),
             log_tensorboard: bool = False,
             nr_epochs_val_period: int = 10,
             torch_device_str: Optional[str] = None,
             force_reset: bool = False,
             save_checkpoints: bool = False,
             add_encoders: Optional[Dict] = None):
    """ Pytorch-based Forecasting Model.
    This class is meant to be inherited to create a new pytorch-based forecasting module.
    When subclassing this class, please make sure to set the self.model attribute
    in the __init__ function and then call super().__init__ while passing the kwargs.
    Parameters
    ----------
    input_chunk_length
        Number of past time steps that are fed to the internal forecasting module.
    output_chunk_length
        Number of time steps to be output by the internal forecasting module.
    batch_size
        Number of time series (input and output sequences) used in each training pass.
    n_epochs
        Number of epochs over which to train the model.
    optimizer_cls
        The PyTorch optimizer class to be used (default: `torch.optim.Adam`).
    optimizer_kwargs
        Optionally, some keyword arguments for the PyTorch optimizer (e.g., ``{'lr': 1e-3}``
        for specifying a learning rate). Otherwise the default values of the selected `optimizer_cls`
        will be used.
    lr_scheduler_cls
        Optionally, the PyTorch learning rate scheduler class to be used. Specifying `None` corresponds
        to using a constant learning rate.
    lr_scheduler_kwargs
        Optionally, some keyword arguments for the PyTorch optimizer.
    loss_fn
        PyTorch loss function used for training.
        This parameter will be ignored for probabilistic models if the `likelihood` parameter is specified.
        Default: ``torch.nn.MSELoss()``.
    model_name
        Name of the model. Used for creating checkpoints and saving tensorboard data. If not specified,
        defaults to the following string ``"YYYY-mm-dd_HH:MM:SS_torch_model_run_PID"``, where the initial part of the
        name is formatted with the local date and time, while PID is the processed ID (preventing models spawned at
        the same time by different processes to share the same model_name). E.g.,
        ``"2021-06-14_09:53:32_torch_model_run_44607"``.
    work_dir
        Path of the working directory, where to save checkpoints and Tensorboard summaries.
        (default: current working directory).
    log_tensorboard
        If set, use Tensorboard to log the different parameters. The logs will be located in:
        `[work_dir]/.darts/runs/`.
    nr_epochs_val_period
        Number of epochs to wait before evaluating the validation loss (if a validation
        ``TimeSeries`` is passed to the :func:`fit()` method).
    torch_device_str
        Optionally, a string indicating the torch device to use. (default: "cuda:0" if a GPU
        is available, otherwise "cpu")
    force_reset
        If set to `True`, any previously-existing model with the same name will be reset (all checkpoints will
        be discarded).
    save_checkpoints
        Whether or not to automatically save the untrained model and checkpoints from training.
        If set to `False`, the model can still be manually saved using :func:`save_model()`
        and loaded using :func:`load_model()`.
    add_encoders
        Optionally, a dict of encoder settings.
    """
    super().__init__()
    if torch_device_str is None:
        self.device = self._get_best_torch_device()
    else:
        self.device = torch.device(torch_device_str)
    # We will fill these dynamically, upon first call of fit_from_dataset():
    self.model = None
    self.train_sample = None
    self.output_dim = None
    self.input_chunk_length = input_chunk_length
    self.output_chunk_length = output_chunk_length
    self.log_tensorboard = log_tensorboard
    self.nr_epochs_val_period = nr_epochs_val_period
    if model_name is None:
        # Default name embeds timestamp and PID so that models spawned at the
        # same time by different processes do not collide on disk.
        current_time = datetime.datetime.now().strftime("%Y-%m-%d_%H.%M.%S.%f")
        model_name = current_time + "_torch_model_run_" + str(os.getpid())
    self.model_name = model_name
    self.work_dir = work_dir
    self.n_epochs = n_epochs
    self.total_epochs = 0  # 0 means it wasn't trained yet.
    self.batch_size = batch_size
    # Define the loss function. The default is instantiated here rather than in
    # the signature: a mutable default argument (``loss_fn=nn.MSELoss()``) would
    # create one shared loss-module instance for every model built from this class.
    self.criterion = nn.MSELoss() if loss_fn is None else loss_fn
    # The tensorboard writer
    self.tb_writer = None
    # Persist optimiser and LR scheduler parameters
    self.optimizer_cls = optimizer_cls
    self.optimizer_kwargs = dict() if optimizer_kwargs is None else optimizer_kwargs
    self.lr_scheduler_cls = lr_scheduler_cls
    self.lr_scheduler_kwargs = dict() if lr_scheduler_kwargs is None else lr_scheduler_kwargs
    # by default models are deterministic (i.e. not probabilistic)
    self.likelihood = None
    # by default models do not use encoders
    # NOTE(review): add_encoders is accepted but not consumed in this
    # constructor — presumably handled by subclasses; confirm.
    self.encoders = None
    self.force_reset = force_reset
    self.save_checkpoints = save_checkpoints
    checkpoints_folder = _get_checkpoint_folder(self.work_dir, self.model_name)
    self.checkpoint_exists = \
        os.path.exists(checkpoints_folder) and len(glob(os.path.join(checkpoints_folder, "checkpoint_*"))) > 0
    if self.checkpoint_exists and self.save_checkpoints:
        if self.force_reset:
            self.reset_model()
        else:
            raise AttributeError("You already have model data for the '{}' name. Either load model to continue"
                                 " training or use `force_reset=True` to initialize anyway to start"
                                 " training from scratch and remove all the model data".format(self.model_name)
                                 )
@property
def min_train_series_length(self) -> int:
    """Minimum required length of a training series.

    Overrides ForecastingModel's default of 3: one training sample needs a
    full input chunk followed by a full output chunk.
    """
    return self.output_chunk_length + self.input_chunk_length
def _batch_collate_fn(self, batch: List[Tuple]) -> Tuple:
    """Collate a list of samples into a single batch tuple.

    The first sample acts as a prototype for each tuple position:
    numpy arrays are stacked into one torch tensor, ``None`` entries stay
    ``None``, and ``TimeSeries`` objects are gathered into a list.
    """
    collated = []
    for pos, prototype in enumerate(batch[0]):
        if isinstance(prototype, np.ndarray):
            stacked = np.stack([sample[pos] for sample in batch], axis=0)
            collated.append(torch.from_numpy(stacked))
        elif prototype is None:
            collated.append(None)
        elif isinstance(prototype, TimeSeries):
            collated.append([sample[pos] for sample in batch])
    return tuple(collated)
def reset_model(self):
    """ Resets the model object and removes all the stored data - model, checkpoints and training history.
    """
    # Wipe the on-disk artifacts (checkpoints and Tensorboard runs) ...
    for folder in (_get_checkpoint_folder(self.work_dir, self.model_name),
                   _get_runs_folder(self.work_dir, self.model_name)):
        shutil.rmtree(folder, ignore_errors=True)
    # ... and clear the in-memory training state.
    self.checkpoint_exists = False
    self.total_epochs = 0
    self.model = None
    self.train_sample = None
def _init_model(self) -> None:
    """
    Init self.model - the torch module of this class, based on examples of input/output tensors (to get the
    sizes right).
    """
    # the tensors have shape (chunk_length, nr_dimensions)
    self.model = self._create_model(self.train_sample)
    # Match the model's floating-point precision to the training data, so that
    # parameters and input tensors agree at training time.
    if np.issubdtype(self.train_sample[0].dtype, np.float32):
        logger.info('Time series values are 32-bits; casting model to float32.')
        self.model = self.model.float()
    elif np.issubdtype(self.train_sample[0].dtype, np.float64):
        logger.info('Time series values are 64-bits; casting model to float64.')
        self.model = self.model.double()
    # Cast first, then move to the target device.
    self.model = self.model.to(self.device)
    # A utility function to create optimizer and lr scheduler from desired classes
    def _create_from_cls_and_kwargs(cls, kws):
        try:
            return cls(**kws)
        except (TypeError, ValueError) as e:
            # Surface misconfigured optimizer/scheduler classes with context.
            raise_log(ValueError('Error when building the optimizer or learning rate scheduler;'
                                 'please check the provided class and arguments'
                                 '\nclass: {}'
                                 '\narguments (kwargs): {}'
                                 '\nerror:\n{}'.format(cls, kws, e)),
                      logger)
    # Create the optimizer and (optionally) the learning rate scheduler
    # we have to create copies because we cannot save model.parameters into object state (not serializable)
    optimizer_kws = {k: v for k, v in self.optimizer_kwargs.items()}
    optimizer_kws['params'] = self.model.parameters()
    self.optimizer = _create_from_cls_and_kwargs(self.optimizer_cls, optimizer_kws)
    if self.lr_scheduler_cls is not None:
        lr_sched_kws = {k: v for k, v in self.lr_scheduler_kwargs.items()}
        lr_sched_kws['optimizer'] = self.optimizer
        self.lr_scheduler = _create_from_cls_and_kwargs(self.lr_scheduler_cls, lr_sched_kws)
    else:
        self.lr_scheduler = None  # We won't use a LR scheduler
@abstractmethod
def _create_model(self, train_sample: Tuple[Tensor]) -> torch.nn.Module:
    """
    This method has to be implemented by all children. It is in charge of instantiating the actual torch model,
    based on examples input/output tensors (i.e. implement a model with the right input/output sizes).
    The tensors in ``train_sample`` have shape (chunk_length, nr_dimensions).
    """
    pass
@abstractmethod
def _build_train_dataset(self,
                         target: Sequence[TimeSeries],
                         past_covariates: Optional[Sequence[TimeSeries]],
                         future_covariates: Optional[Sequence[TimeSeries]],
                         max_samples_per_ts: Optional[int]) -> TrainingDataset:
    """
    Each model must specify the default training dataset to use.
    """
    pass
@abstractmethod
def _build_inference_dataset(self,
                             target: Sequence[TimeSeries],
                             n: int,
                             past_covariates: Optional[Sequence[TimeSeries]],
                             future_covariates: Optional[Sequence[TimeSeries]]) -> InferenceDataset:
    """
    Each model must specify the default inference dataset to use.
    """
    pass
@abstractmethod
def _verify_train_dataset_type(self, train_dataset: | |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
from scipy.constants import epsilon_0
from ipywidgets import IntSlider, FloatSlider, FloatText, ToggleButtons
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib import rcParams
from SimPEG import Mesh, Maps, Utils, SolverLU
from ..base import widgetify
# Global plot styling for every figure produced by this module.
rcParams["font.size"] = 16
# Mesh parameters
npad = 20  # number of padding cells on each padded side
cs = 0.5  # core cell size
# x: padded on both sides with a 1.3 expansion factor; y: padded below only
# (the free surface is at the top).
hx = [(cs, npad, -1.3), (cs, 200), (cs, npad, 1.3)]
hy = [(cs, npad, -1.3), (cs, 100)]
mesh = Mesh.TensorMesh([hx, hy], "CN")  # "CN": centered in x, top at y=0 — TODO confirm SimPEG convention
# bounds on electrical resistivity
rhomin = 1e2
rhomax = 1e3
eps = 1e-9  # to stabilize division
infinity = 100  # what is "far enough": truncation order of the image-charge series
def r(xyz, src_loc):
    """
    Distance from source to points on an xyz grid
    """
    dx = xyz[:, 0] - src_loc[0]
    dy = xyz[:, 1] - src_loc[1]
    dz = xyz[:, 2] - src_loc[2]
    # eps keeps the distance strictly positive so later divisions are stable
    return np.sqrt(dx ** 2 + dy ** 2 + dz ** 2) + eps
def sum_term(rho1, rho2, h, r):
    """Image-charge correction series for a 2-layer Earth.

    Truncated sum over ``infinity`` image terms; used by layer_potentials.
    Assumes ``Utils.mkvc(r, 2)`` yields a column vector so that ``k ** m.T``
    broadcasts into an (n_points, n_terms) array — TODO confirm SimPEG semantics.
    """
    # m: image indices 1..infinity (series truncation order)
    m = Utils.mkvc(np.arange(1, infinity + 1))
    # k: reflection coefficient between the two layers
    k = (rho2 - rho1) / (rho2 + rho1)
    return np.sum(
        ((k ** m.T) * np.ones_like(Utils.mkvc(r, 2)))
        / np.sqrt(1.0 + (2.0 * h * m.T / Utils.mkvc(r, 2)) ** 2),
        1,
    )
def sum_term_deriv(rho1, rho2, h, r):
    """Radial-derivative counterpart of ``sum_term``.

    Used by layer_E to differentiate the layered-Earth potential with
    respect to r. Same broadcasting assumption as ``sum_term``
    (``Utils.mkvc(r, 2)`` as a column vector) — TODO confirm.
    """
    # m: image indices 1..infinity; k: reflection coefficient
    m = Utils.mkvc(np.arange(1, infinity + 1))
    k = (rho2 - rho1) / (rho2 + rho1)
    return np.sum(
        ((k ** m.T) * np.ones_like(Utils.mkvc(r, 2)))
        / (1.0 + (2.0 * h * m.T / Utils.mkvc(r, 2)) ** 2) ** (3.0 / 2.0)
        * ((2.0 * h * m.T) ** 2 / Utils.mkvc(r, 2) ** 3),
        1,
    )
def layer_potentials(rho1, rho2, h, A, B, xyz):
    """
    Compute analytic solution of surface potential for 2-layered Earth
    (Ref Telford 1990, section 8.3.4)
    """
    def single_pole(I, src_loc):
        # Half-space point-source potential times the image-charge correction.
        dist = r(xyz, src_loc)
        return (I * rho1 / (2.0 * np.pi * dist)) * (
            1 + 2 * sum_term(rho1, rho2, h, dist)
        )
    # Superpose the +I source at A and the -I sink at B.
    return single_pole(1.0, A) + single_pole(-1.0, B)
def layer_E(rho1, rho2, h, A, B, xyz):
    """Electric field (ex, ey, ez) of the 2-layer analytic solution.

    E = -grad V, assembled via the chain rule: dV/dx = (dV/dr)(dr/dx), etc.
    """
    # Partial derivatives of the source-receiver distance r w.r.t. x, y, z.
    def dr_dx(src_loc):
        return (xyz[:, 0] - src_loc[0]) / r(xyz, src_loc)
    def dr_dy(src_loc):
        return (xyz[:, 1] - src_loc[1]) / r(xyz, src_loc)
    def dr_dz(src_loc):
        return (xyz[:, 2] - src_loc[2]) / r(xyz, src_loc)
    # Two pieces of d/dr applied to V = (I rho1 / 2 pi r)(1 + 2*sum_term):
    # deriv_1 differentiates the 1/r factor, deriv_2 the image series.
    def deriv_1(r):
        return (-1.0 / r) * (1.0 + 2.0 * sum_term(rho1, rho2, h, r))
    def deriv_2(r):
        return 2.0 * sum_term_deriv(rho1, rho2, h, r)
    # Radial field component Er = -dV/dr.
    def Er(I, r):
        return -(I * rho1 / (2.0 * np.pi * r)) * (deriv_1(r) + deriv_2(r))
    # Cartesian components via the chain rule.
    def Ex(I, src_loc):
        return Er(I, r(xyz, src_loc)) * dr_dx(src_loc)
    def Ey(I, src_loc):
        return Er(I, r(xyz, src_loc)) * dr_dy(src_loc)
    def Ez(I, src_loc):
        return Er(I, r(xyz, src_loc)) * dr_dz(src_loc)
    # Superpose the +I source at A and the -I sink at B.
    ex = Ex(1.0, A) + Ex(-1.0, B)
    ey = Ey(1.0, A) + Ey(-1.0, B)
    ez = Ez(1.0, A) + Ez(-1.0, B)
    return ex, ey, ez
def layer_J(rho1, rho2, h, A, B, xyz):
    """Current density J = sigma * E for the 2-layer analytic solution."""
    ex, ey, ez = layer_E(rho1, rho2, h, A, B, xyz)
    conductivity = np.ones_like(xyz[:, 0]) / rho2
    conductivity[xyz[:, 1] >= -h] = 1.0 / rho1  # top layer (the model is 2D)
    return conductivity * ex, conductivity * ey, conductivity * ez
def G(A, B, M, N):
    """
    Geometric factor
    """
    # Reciprocal electrode separations; eps guards against division by zero.
    am = 1.0 / (np.abs(A - M) + eps)
    mb = 1.0 / (np.abs(M - B) + eps)
    na = 1.0 / (np.abs(N - A) + eps)
    nb = 1.0 / (np.abs(N - B) + eps)
    return 1.0 / (am - mb - na + nb)
def rho_a(VM, VN, A, B, M, N):
    """
    Apparent Resistivity
    """
    # Measured potential difference scaled by the array's geometric factor.
    dV = VM - VN
    return dV * 2.0 * np.pi * G(A, B, M, N)
def solve_2D_potentials(rho1, rho2, h, A, B):
    """
    Here we solve the 2D DC problem for potentials (using SimPEG Mesh Class)

    Parameters: rho1/rho2 are top/bottom layer resistivities, h the layer
    thickness, A and B the (+/-) current electrode locations. Returns the
    cell-centered potential vector V.
    """
    sigma = 1.0 / rho2 * np.ones(mesh.nC)
    sigma[mesh.gridCC[:, 1] >= -h] = 1.0 / rho1  # since the model is 2D
    # Unit dipole source: +1/-1 scaled by cell volume at the cells closest
    # to the electrode locations.
    q = np.zeros(mesh.nC)
    a = Utils.closestPoints(mesh, A[:2])
    b = Utils.closestPoints(mesh, B[:2])
    q[a] = 1.0 / mesh.vol[a]
    q[b] = -1.0 / mesh.vol[b]
    # System matrix (weak-form div(sigma grad)). Renamed from `A`: the
    # original shadowed/overwrote the electrode-location parameter.
    A_sys = (
        mesh.cellGrad.T
        * Utils.sdiag(1.0 / (mesh.dim * mesh.aveF2CC.T * (1.0 / sigma)))
        * mesh.cellGrad
    )
    Ainv = SolverLU(A_sys)
    V = Ainv * q
    return V
def solve_2D_E(rho1, rho2, h, A, B):
    """
    solve the 2D DC resistivity problem for electric fields
    """
    V = solve_2D_potentials(rho1, rho2, h, A, B)
    # E = -grad V lives on faces; average to cell centers (x then z stacked).
    E_faces = -mesh.cellGrad * V
    E_cc = mesh.aveF2CCV * E_faces
    nC = mesh.nC
    return E_cc[:nC], E_cc[nC:], V
def solve_2D_J(rho1, rho2, h, A, B):
    """Cell-centered current density J = sigma * E for the 2D DC problem."""
    ex, ez, V = solve_2D_E(rho1, rho2, h, A, B)
    sigma = np.ones(mesh.nC) / rho2
    sigma[mesh.gridCC[:, 1] >= -h] = 1.0 / rho1  # top layer (the model is 2D)
    scale = Utils.sdiag(sigma)
    return scale * ex, scale * ez, V
def plot_layer_potentials(rho1, rho2, h, A, B, M, N, imgplt="Model"):
markersize = 8.0
fontsize = 16.0
ylim = np.r_[-1.0, 1.0] * rhomax / (5 * 2 * np.pi) * 1.5
fig, ax = plt.subplots(2, 1, figsize=(9, 7))
fig.subplots_adjust(right=0.8)
x = np.linspace(-40.0, 40.0, 200)
z = np.linspace(x.min(), 0, 100)
pltgrid = Utils.ndgrid(x, z)
xplt = pltgrid[:, 0].reshape(x.size, z.size, order="F")
zplt = pltgrid[:, 1].reshape(x.size, z.size, order="F")
V = layer_potentials(
rho1,
rho2,
h,
np.r_[A, 0.0, 0.0],
np.r_[B, 0.0, 0.0],
Utils.ndgrid(x, np.r_[0.0], np.r_[0.0]),
)
VM = layer_potentials(
rho1,
rho2,
h,
np.r_[A, 0.0, 0.0],
np.r_[B, 0.0, 0.0],
Utils.mkvc(np.r_[M, 0.0, 0], 2).T,
)
VN = layer_potentials(
rho1,
rho2,
h,
np.r_[A, 0.0, 0.0],
np.r_[B, 0.0, 0.0],
Utils.mkvc(np.r_[N, 0.0, 0], 2).T,
)
ax[0].plot(x, V, color=[0.1, 0.5, 0.1], linewidth=2)
ax[0].grid(
which="both", linestyle="-", linewidth=0.5, color=[0.2, 0.2, 0.2], alpha=0.5
)
ax[0].plot(A, 0, "+", markersize=12, markeredgewidth=3, color=[1.0, 0.0, 0])
ax[0].plot(B, 0, "_", markersize=12, markeredgewidth=3, color=[0.0, 0.0, 1.0])
ax[0].set_ylabel("Potential, (V)")
ax[0].set_xlabel("x (m)")
ax[0].set_xlim([x.min(), x.max()])
ax[0].set_ylim(ylim)
ax[0].plot(M, VM, "o", color="k")
ax[0].plot(N, VN, "o", color="k")
props = dict(boxstyle="round", facecolor="grey", alpha=0.3)
txtsp = 1
xytextM = (M + 0.5, np.max([np.min([VM, ylim.max()]), ylim.min()]) + 0.5)
xytextN = (N + 0.5, np.max([np.min([VN, ylim.max()]), ylim.min()]) + 0.5)
props = dict(boxstyle="round", facecolor="grey", alpha=0.4)
ax[0].annotate("%2.1e" % (VM), xy=xytextM, xytext=xytextM)
ax[0].annotate("%2.1e" % (VN), xy=xytextN, xytext=xytextN)
# ax[0].plot(np.r_[M, N], np.ones(2)*VN, color='k')
# ax[0].plot(np.r_[M, M], np.r_[VM, VN], color='k')
# ax[0].annotate('%2.1e'%(VM-VN) , xy=(M, (VM+VN)/2), xytext=(M-9, (VM+VN)/2.))
props = dict(boxstyle="round", facecolor="grey", alpha=0.4)
ax[0].text(
x.max() + 1,
ylim.max() - 0.1 * ylim.max(),
"$\\rho_a$ = %2.2f" % (rho_a(VM, VN, A, B, M, N)),
verticalalignment="bottom",
bbox=props,
)
if imgplt == "Model":
model = rho2 * np.ones(pltgrid.shape[0])
model[pltgrid[:, 1] >= -h] = rho1
model = model.reshape(x.size, z.size, order="F")
cb = ax[1].pcolor(xplt, zplt, model, norm=LogNorm())
ax[1].plot(
[xplt.min(), xplt.max()],
-h * np.r_[1.0, 1],
color=[0.5, 0.5, 0.5],
linewidth=1.5,
)
clim = [rhomin, rhomax]
clabel = "Resistivity ($\Omega$m)"
# elif imgplt == 'potential':
# Vplt = layer_potentials(rho1, rho2, h, np.r_[A, 0., 0.], np.r_[B, 0., 0.], np.c_[pltgrid, np.zeros_like(pltgrid[:, 0])])
# Vplt = Vplt.reshape(x.size, z.size, order='F')
# cb = ax[1].pcolor(xplt, zplt, Vplt)
# ax[1].contour(xplt, zplt, np.abs(Vplt), np.logspace(-2., 1., 10), colors='k', alpha=0.5)
# ax[1].set_ylabel('z (m)', fontsize=16)
# clim = ylim
# clabel = 'Potential (V)'
elif imgplt == "Potential":
Pc = mesh.getInterpolationMat(pltgrid, "CC")
V = solve_2D_potentials(rho1, rho2, h, np.r_[A, 0.0, 0.0], np.r_[B, 0.0, 0.0])
Vplt = Pc * V
Vplt = Vplt.reshape(x.size, z.size, order="F")
# since we are using a strictly 2D code, the potnetials at the surface
# do not match the analytic, so we scale the potentials to match the
# analytic 2.5D result at the surface.
fudgeFactor = (
layer_potentials(
rho1,
rho2,
h,
np.r_[A, 0.0, 0.0],
np.r_[B, 0.0, 0.0],
np.c_[x.min(), 0.0, 0.0],
)
/ Vplt[0, 0]
)
cb = ax[1].pcolor(xplt, zplt, Vplt * fudgeFactor, cmap="viridis")
ax[1].plot(
[xplt.min(), xplt.max()],
-h * np.r_[1.0, 1],
color=[0.5, 0.5, 0.5],
linewidth=1.5,
)
ax[1].contour(xplt, zplt, np.abs(Vplt), colors="k", alpha=0.5)
ax[1].set_ylabel("z (m)", fontsize=16)
clim = np.r_[-15.0, 15.0]
clabel = "Potential (V)"
elif imgplt == "E":
Pc = mesh.getInterpolationMat(pltgrid, "CC")
ex, ez, V = solve_2D_E(rho1, rho2, h, np.r_[A, 0.0, 0.0], np.r_[B, 0.0, 0.0])
ex, ez = Pc * ex, Pc * ez
Vplt = (Pc * V).reshape(x.size, z.size, order="F")
fudgeFactor = (
layer_potentials(
rho1,
rho2,
h,
np.r_[A, 0.0, 0.0],
np.r_[B, 0.0, 0.0],
np.c_[x.min(), 0.0, 0.0],
)
/ Vplt[0, 0]
)
# ex, ez, _ = layer_E(rho1, rho2, h, np.r_[A, 0., 0.], np.r_[B, 0., 0.], np.c_[pltgrid, np.zeros_like(pltgrid[:, 0])])
ex = fudgeFactor * ex.reshape(x.size, z.size, order="F")
ez = fudgeFactor * ez.reshape(x.size, | |
# Source: Bpowers4/turicreate — src/python/turicreate/toolkits/object_detector/object_detector.py
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
Class definition and utilities for the object detection toolkit.
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import time as _time
from datetime import datetime as _datetime
import six as _six
import turicreate as _tc
from turicreate.toolkits._model import Model as _Model
import turicreate.toolkits._internal_utils as _tkutl
from turicreate.toolkits import _coreml_utils
from turicreate.toolkits._internal_utils import (
_raise_error_if_not_sframe,
_numeric_param_check_range,
_raise_error_if_not_iterable,
)
from turicreate.toolkits._main import ToolkitError as _ToolkitError
from .. import _pre_trained_models
from .._mps_utils import (
MpsGraphAPI as _MpsGraphAPI,
MpsGraphNetworkType as _MpsGraphNetworkType,
)
def _get_mps_od_net(
    input_image_shape, batch_size, output_size, anchors, config, weights=None
):
    """
    Initializes an MpsGraphAPI for object detection.

    Parameters: `input_image_shape` is (channels, height, width);
    `output_size` is the number of output channels; `weights` optionally
    seeds the network parameters. `anchors` is accepted for interface
    compatibility but is not consumed here.
    """
    # Avoid a mutable default argument: a `weights={}` default would share
    # one dict across every call to this function.
    if weights is None:
        weights = {}
    network = _MpsGraphAPI(network_id=_MpsGraphNetworkType.kODGraphNet)
    c_in, h_in, w_in = input_image_shape
    c_out = output_size
    # Output grid is the input spatial size divided by 32.
    h_out = h_in // 32
    w_out = w_in // 32
    network.init(
        batch_size,
        c_in,
        h_in,
        w_in,
        c_out,
        h_out,
        w_out,
        weights=weights,
        config=config,
    )
    return network
# Standard lib functions would be great here, but the formatting options of
# timedelta are not great
def _seconds_as_string(seconds):
"""
Returns seconds as a human-friendly string, e.g. '1d 4h 47m 41s'
"""
TIME_UNITS = [("s", 60), ("m", 60), ("h", 24), ("d", None)]
unit_strings = []
cur = max(int(seconds), 1)
for suffix, size in TIME_UNITS:
if size is not None:
cur, rest = divmod(cur, size)
else:
rest = cur
if rest > 0:
unit_strings.insert(0, "%d%s" % (rest, suffix))
return " ".join(unit_strings)
def _raise_error_if_not_detection_sframe(
    dataset, feature, annotations, require_annotations
):
    """
    Validate that `dataset` is an SFrame usable for object detection.

    Checks that `feature` is an image column and, when
    `require_annotations` is True, that `annotations` exists and holds
    list- or dict-typed values. Raises ToolkitError on any violation.
    """
    # Fixed typo in the reported argument name: was "datset".
    _raise_error_if_not_sframe(dataset, "dataset")
    if feature not in dataset.column_names():
        raise _ToolkitError("Feature column '%s' does not exist" % feature)
    if dataset[feature].dtype != _tc.Image:
        raise _ToolkitError("Feature column must contain images")
    if require_annotations:
        if annotations not in dataset.column_names():
            raise _ToolkitError("Annotations column '%s' does not exist" % annotations)
        if dataset[annotations].dtype not in [list, dict]:
            raise _ToolkitError("Annotations column must be of type dict or list")
def create(
    dataset,
    annotations=None,
    feature=None,
    model="darknet-yolo",
    classes=None,
    batch_size=0,
    max_iterations=0,
    verbose=True,
    grid_shape=None,
    **kwargs
):
    """
    Create a :class:`ObjectDetector` model.

    Parameters
    ----------
    dataset : SFrame
        Input data. The columns named by the ``feature`` and ``annotations``
        parameters will be extracted for training the detector.

    annotations : string
        Name of the column containing the object detection annotations. This
        column should be a list of dictionaries (or a single dictionary), with
        each dictionary representing a bounding box of an object instance. Here
        is an example of the annotations for a single image with two object
        instances::

            [{'label': 'dog',
              'type': 'rectangle',
              'coordinates': {'x': 223, 'y': 198,
                              'width': 130, 'height': 230}},
             {'label': 'cat',
              'type': 'rectangle',
              'coordinates': {'x': 40, 'y': 73,
                              'width': 80, 'height': 123}}]

        The value for `x` is the horizontal center of the box paired with
        `width` and `y` is the vertical center of the box paired with `height`.
        'None' (the default) indicates the only list column in `dataset` should
        be used for the annotations.

    feature : string
        Name of the column containing the input images. 'None' (the default)
        indicates the only image column in `dataset` should be used as the
        feature.

    model : string optional
        Object detection model to use:

           - "darknet-yolo" : Fast and medium-sized model

    grid_shape : array optional
        Shape of the grid used for object detection. Higher values increase
        precision for small objects, but at a higher computational cost.
        'None' (the default) selects [13, 13], the default grid value for a
        fast and medium-sized model.

    classes : list optional
        List of strings containing the names of the classes of objects.
        Inferred from the data if not provided.

    batch_size: int
        The number of images per training iteration. If 0, then it will be
        automatically determined based on resource availability.

    max_iterations : int
        The number of training iterations. If 0, then it will be automatically
        be determined based on the amount of data you provide.

    verbose : bool, optional
        If True, print progress updates and model details.

    Returns
    -------
    out : ObjectDetector
        A trained :class:`ObjectDetector` model.

    See Also
    --------
    ObjectDetector

    Examples
    --------
    .. sourcecode:: python

        # Train an object detector model
        >>> model = turicreate.object_detector.create(data)

        # Make predictions on the training set and as column to the SFrame
        >>> data['predictions'] = model.predict(data)

        # Visualize predictions by generating a new column of marked up images
        >>> data['image_pred'] = turicreate.object_detector.util.draw_bounding_boxes(data['image'], data['predictions'])
    """
    _raise_error_if_not_sframe(dataset, "dataset")
    if len(dataset) == 0:
        raise _ToolkitError("Unable to train on empty dataset")
    _numeric_param_check_range("max_iterations", max_iterations, 0, _six.MAXSIZE)

    # BUG FIX: `grid_shape=[13, 13]` was a shared mutable default argument;
    # resolve the default inside the function instead.
    if grid_shape is None:
        grid_shape = [13, 13]

    supported_detectors = ["darknet-yolo"]

    if feature is None:
        feature = _tkutl._find_only_image_column(dataset)
        if verbose:
            print("Using '%s' as feature column" % feature)
    if annotations is None:
        annotations = _tkutl._find_only_column_of_type(
            dataset, target_type=[list, dict], type_name="list", col_name="annotations"
        )
        if verbose:
            print("Using '%s' as annotations column" % annotations)

    _raise_error_if_not_detection_sframe(
        dataset, feature, annotations, require_annotations=True
    )
    _tkutl._handle_missing_values(dataset, feature, "dataset")
    _tkutl._check_categorical_option_type("model", model, supported_detectors)

    # Validate user-tunable options up front, before any model download.
    # BUG FIX: grid_shape was previously converted to ints only AFTER being
    # captured into `params`, so downstream config could see unconverted
    # (e.g. float) values. Convert and validate first.
    if classes is None:
        classes = []
    _raise_error_if_not_iterable(classes)
    _raise_error_if_not_iterable(grid_shape)
    grid_shape = [int(x) for x in grid_shape]
    if len(grid_shape) != 2:
        # Raise a toolkit error instead of an `assert` (asserts are stripped
        # under python -O and give an unhelpful message).
        raise _ToolkitError("grid_shape must have exactly two elements, e.g. [13, 13]")

    # Locate (downloading if necessary) the pretrained darknet mlmodel used
    # to seed training. (An unused handle to the darknet base model that was
    # previously constructed here has been removed.)
    pretrained_model = _pre_trained_models.OBJECT_DETECTION_BASE_MODELS[
        "darknet_mlmodel"
    ]()
    pretrained_model_path = pretrained_model.get_model_path()

    # Training hyper-parameters. NOTE(review): only `grid_shape` and
    # `mlmodel_path` are consumed by the TensorFlow path below; the remaining
    # entries document the legacy defaults — confirm before pruning.
    params = {
        "anchors": [
            (1.0, 2.0),
            (1.0, 1.0),
            (2.0, 1.0),
            (2.0, 4.0),
            (2.0, 2.0),
            (4.0, 2.0),
            (4.0, 8.0),
            (4.0, 4.0),
            (8.0, 4.0),
            (8.0, 16.0),
            (8.0, 8.0),
            (16.0, 8.0),
            (16.0, 32.0),
            (16.0, 16.0),
            (32.0, 16.0),
        ],
        "grid_shape": grid_shape,
        "aug_resize": 0,
        "aug_rand_crop": 0.9,
        "aug_rand_pad": 0.9,
        "aug_rand_gray": 0.0,
        "aug_aspect_ratio": 1.25,
        "aug_hue": 0.05,
        "aug_brightness": 0.05,
        "aug_saturation": 0.05,
        "aug_contrast": 0.05,
        "aug_horizontal_flip": True,
        "aug_min_object_covered": 0,
        "aug_min_eject_coverage": 0.5,
        "aug_area_range": (0.15, 2),
        "aug_pca_noise": 0.0,
        "aug_max_attempts": 20,
        "aug_inter_method": 2,
        "lmb_coord_xy": 10.0,
        "lmb_coord_wh": 10.0,
        "lmb_obj": 100.0,
        "lmb_noobj": 5.0,
        "lmb_class": 2.0,
        "non_maximum_suppression_threshold": 0.45,
        "rescore": True,
        "clip_gradients": 0.025,
        "weight_decay": 0.0005,
        "sgd_momentum": 0.9,
        "learning_rate": 1.0e-3,
        "shuffle": True,
        "mps_loss_mult": 8,
        # This large buffer size (8 batches) is an attempt to mitigate against
        # the SFrame shuffle operation that can occur after each epoch.
        "io_thread_buffer_size": 8,
        "mlmodel_path": pretrained_model_path,
    }

    # create tensorflow model here
    import turicreate.toolkits.libtctensorflow

    tf_config = {
        "grid_height": params["grid_shape"][0],
        "grid_width": params["grid_shape"][1],
        "mlmodel_path": params["mlmodel_path"],
        "classes": classes,
        "compute_final_metrics": False,
        "verbose": verbose,
        "model": "darknet-yolo",
    }
    # If batch_size or max_iterations = 0, they will be automatically
    # generated in C++.
    if batch_size > 0:
        tf_config["batch_size"] = batch_size
    if max_iterations > 0:
        tf_config["max_iterations"] = max_iterations

    od_model = _tc.extensions.object_detector()
    od_model.train(
        data=dataset,
        annotations_column_name=annotations,
        image_column_name=feature,
        options=tf_config,
    )
    return ObjectDetector(model_proxy=od_model, name="object_detector")
class ObjectDetector(_Model):
"""
A trained model using C++ implementation that is ready to use for classification
or export to CoreML.
This model should not be constructed directly.
"""
_CPP_OBJECT_DETECTOR_VERSION = 1
def __init__(self, model_proxy=None, name=None):
self.__proxy__ = model_proxy
self.__name__ = name
@classmethod
def _native_name(cls):
return "object_detector"
def __str__(self):
"""
Return a string description of the model to the ``print`` method.
Returns
-------
out : string
A description of the ObjectDetector.
"""
return self.__repr__()
def __repr__(self):
"""
Print a string description of the model when the model name is entered
in the terminal.
"""
width = 40
sections, section_titles = self._get_summary_struct()
out = _tkutl._toolkit_repr_print(self, sections, section_titles, width=width)
return out
    def _get_version(self):
        # Serialization/schema version of this C++-backed model class,
        # used when saving and loading.
        return self._CPP_OBJECT_DETECTOR_VERSION
def export_coreml(
self,
filename,
include_non_maximum_suppression=True,
iou_threshold=None,
confidence_threshold=None,
):
"""
Save the model in Core ML format. The Core ML model takes an image of
fixed size as input and produces two output arrays: `confidence` and
`coordinates`.
The first one, `confidence` is an `N`-by-`C` array, where `N` is the
number of instances predicted and `C` is the number of classes. The
number `N` is fixed and will include many low-confidence predictions.
The instances are not sorted by confidence, so the first one will
generally not have the highest confidence (unlike in `predict`). Also
unlike the `predict` function, the instances have not undergone
what is called `non-maximum suppression`, which means there could be
several instances close in location and size that have all discovered
the same object instance. Confidences do not need to sum to 1 over the
classes; any remaining probability is implied as confidence there is no
object instance present at all at the given coordinates. The classes
appear in the array alphabetically sorted.
The second array `coordinates` is of size `N`-by-4, where the first
dimension `N` again represents instances and corresponds to the
`confidence` array. The second dimension represents `x`, | |
# sim/lib/dynamics_old.py (from repository matcolgit/simulator_sib)
import time
import bisect
import numpy as np
import pandas as pd
import networkx as nx
import scipy
import scipy.optimize
import scipy as sp
import os, math
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
from sympy import Symbol, integrate, lambdify, exp, Max, Min, Piecewise, log
import pprint
from lib.priorityqueue import PriorityQueue
from lib.measures import *
TO_HOURS = 24.0
class DiseaseModel(object):
"""
Simulate continuous-time SEIR epidemics with exponentially distributed inter-event times.
All units in the simulator are in hours for numerical stability, though disease parameters are
assumed to be in units of days as usual in epidemiology
"""
    def __init__(self, mob, distributions, inference_algo):
        """
        Init simulation object with parameters

        Arguments:
        ---------
        mob:
            object of class MobilitySimulator providing mobility data
        distributions:
            disease-progression parameters; must expose `lambda_0`, `gamma`,
            `fatality_rates_by_age`, `p_hospital_by_age` and `delta`
        inference_algo:
            risk-inference / contact-tracing algorithm; must provide an
            ``init(n_people, n_days)`` method (called from ``__init_run``)
        """
        # cache settings
        self.mob = mob
        self.d = distributions
        # mobility and disease model must agree on the contamination window
        # `delta` (tolerance 1e-3, units of hours)
        assert(np.allclose(np.array(self.d.delta), np.array(self.mob.delta), atol=1e-3))
        # inference algorithm
        self.inference_algo = inference_algo
        # parse distributions object
        self.lambda_0 = self.d.lambda_0
        self.gamma = self.d.gamma
        self.fatality_rates_by_age = self.d.fatality_rates_by_age
        self.p_hospital_by_age = self.d.p_hospital_by_age
        self.delta = self.d.delta
        print('Using delta:', self.delta)
        # parse mobility object
        self.n_people = mob.num_people
        self.n_sites = mob.num_sites
        self.max_time = mob.max_time  # in hours (see TO_HOURS conversion below)
        # special state variables from mob object
        self.people_age = mob.people_age
        self.num_age_groups = mob.num_age_groups
        self.site_type = mob.site_type
        self.site_dict = mob.site_dict
        self.num_site_types = mob.num_site_types
        self.people_household = mob.people_household # j-th entry is household index of individual j
        self.households = mob.households # {household index: [individuals in household]}
        # the age-indexed rate tables must cover every age group
        assert(self.num_age_groups == self.fatality_rates_by_age.shape[0])
        assert(self.num_age_groups == self.p_hospital_by_age.shape[0])
        # throttled progress-printing state (see __print)
        self.last_print = time.time()
        self._PRINT_INTERVAL = 0.1
        self._PRINT_MSG = (
            't: {t:.2f} '
            '| '
            '{maxt:.2f} hrs '
            '({maxd:.0f} d)'
        )
def __print(self, t, force=False):
if ((time.time() - self.last_print > self._PRINT_INTERVAL) or force) and self.verbose:
print('\r', self._PRINT_MSG.format(t=t, maxt=self.max_time, maxd=self.max_time / 24),
sep='', end='', flush=True)
self.last_print = time.time()
def __init_run(self):
"""
Initialize the run of the epidemic
"""
self.queue = PriorityQueue()
self.testing_queue = PriorityQueue()
'''
State and queue codes (transition event into this state)
'susc': susceptible
'expo': exposed
'ipre': infectious pre-symptomatic
'isym': infectious symptomatic
'iasy': infectious asymptomatic
'posi': tested positive
'nega': tested negative
'resi': resistant
'dead': dead
'hosp': hospitalized
'test': event of i getting a test (transitions to posi if not susc)
'execute_tests': generic event indicating that testing queue should be processed
'''
self.legal_states = ['susc', 'expo', 'ipre', 'isym', 'iasy', 'posi', 'nega', 'resi', 'dead', 'hosp']
self.legal_preceeding_state = {
'expo' : ['susc',],
'ipre' : ['expo',],
'isym' : ['ipre',],
'iasy' : ['expo',],
'posi' : ['isym', 'ipre', 'iasy', 'expo'],
'nega' : ['susc', 'resi'],
'resi' : ['isym', 'iasy'],
'dead' : ['isym',],
'hosp' : ['isym',],
}
self.state = {
'susc': np.ones(self.n_people, dtype='bool'),
'expo': np.zeros(self.n_people, dtype='bool'),
'ipre': np.zeros(self.n_people, dtype='bool'),
'isym': np.zeros(self.n_people, dtype='bool'),
'iasy': np.zeros(self.n_people, dtype='bool'),
'posi': np.zeros(self.n_people, dtype='bool'),
'nega': np.zeros(self.n_people, dtype='bool'),
'resi': np.zeros(self.n_people, dtype='bool'),
'dead': np.zeros(self.n_people, dtype='bool'),
'hosp': np.zeros(self.n_people, dtype='bool'),
}
self.state_started_at = {
'susc': - np.inf * np.ones(self.n_people, dtype='float'),
'expo': np.inf * np.ones(self.n_people, dtype='float'),
'ipre': np.inf * np.ones(self.n_people, dtype='float'),
'isym': np.inf * np.ones(self.n_people, dtype='float'),
'iasy': np.inf * np.ones(self.n_people, dtype='float'),
'posi': np.inf * np.ones(self.n_people, dtype='float'),
'nega': np.inf * np.ones(self.n_people, dtype='float'),
'resi': np.inf * np.ones(self.n_people, dtype='float'),
'dead': np.inf * np.ones(self.n_people, dtype='float'),
'hosp': np.inf * np.ones(self.n_people, dtype='float'),
}
self.state_ended_at = {
'susc': np.inf * np.ones(self.n_people, dtype='float'),
'expo': np.inf * np.ones(self.n_people, dtype='float'),
'ipre': np.inf * np.ones(self.n_people, dtype='float'),
'isym': np.inf * np.ones(self.n_people, dtype='float'),
'iasy': np.inf * np.ones(self.n_people, dtype='float'),
'posi': np.inf * np.ones(self.n_people, dtype='float'),
'nega': np.inf * np.ones(self.n_people, dtype='float'),
'resi': np.inf * np.ones(self.n_people, dtype='float'),
'dead': np.inf * np.ones(self.n_people, dtype='float'),
'hosp': np.inf * np.ones(self.n_people, dtype='float'),
}
self.outcome_of_test = np.zeros(self.n_people, dtype='bool')
# infector of i
self.parent = -1 * np.ones(self.n_people, dtype='int')
# no. people i infected (given i was in a certain state)
self.children_count_iasy = np.zeros(self.n_people, dtype='int')
self.children_count_ipre = np.zeros(self.n_people, dtype='int')
self.children_count_isym = np.zeros(self.n_people, dtype='int')
# contact tracing
# records which contact caused the exposure of `i`
self.contact_caused_expo = [None for i in range(self.n_people)]
# list of tuples (i, contacts) where `contacts` were valid when `i` got tested positive
self.valid_contacts_for_tracing = []
# evaluates an integral of the exposure rate
self.exposure_integral = self.make_exposure_int_eval()
self.exposure_rate = self.make_exposure_rate_eval() # for sanity check
# record all test results
self.all_obs = {}
# count indirect infections
self.tot_inf_num = 0
self.inf_num = 0
self.indir_inf_num = 0
self.full_indir_inf_num = 0
# initialize inference algorithm
print('Initializing inference algorithm')
self.inference_algo.init(self.n_people, int((self.max_time // TO_HOURS) + 1))
# DEBUG
self.risk_got_exposed = np.zeros(11)
self.risk_got_not_exposed = np.zeros(11)
    def initialize_states_for_seeds(self):
        """
        Sets state variables according to invariants as given by `self.initial_seeds`

        `self.initial_seeds` maps a seed state name to the list of individuals
        seeded in that state. Transitions that logically happened before the
        simulation started are timestamped -1.0 (i.e. before t=0).

        NOTE: by the seeding heuristic using the reproductive rate
        we assume that exposures already took place
        """
        assert(isinstance(self.initial_seeds, dict))
        for state, seeds_ in self.initial_seeds.items():
            for i in seeds_:
                # each individual may be seeded at most once
                # (self.was_initial_seed and self.bernoulli_is_iasy are assumed
                # to be initialized before this is called — TODO confirm)
                assert(self.was_initial_seed[i] == False)
                self.was_initial_seed[i] = True

                # initial exposed
                if state == 'expo':
                    self.__process_exposure_event(t=0.0, i=i, parent=None, contact=None)

                # initial presymptomatic: mark the expo -> ipre chain as already done
                elif state == 'ipre':
                    self.state['susc'][i] = False
                    self.state['expo'][i] = True
                    self.state_ended_at['susc'][i] = -1.0
                    self.state_started_at['expo'][i] = -1.0
                    self.bernoulli_is_iasy[i] = 0
                    # no exposures added due to heuristic `expo` seeds using reproductive rate
                    self.__process_presymptomatic_event(0.0, i, add_exposures=False)

                # initial asymptomatic
                elif state == 'iasy':
                    self.state['susc'][i] = False
                    self.state['expo'][i] = True
                    self.state_ended_at['susc'][i] = -1.0
                    self.state_started_at['expo'][i] = -1.0
                    self.bernoulli_is_iasy[i] = 1
                    # no exposures added due to heuristic `expo` seeds using reproductive rate
                    self.__process_asymptomatic_event(0.0, i, add_exposures=False)

                # initial symptomatic (not tested positive)
                elif state == 'isym' or state == 'isym_notposi':
                    self.state['susc'][i] = False
                    self.state['ipre'][i] = True
                    self.state_ended_at['susc'][i] = -1.0
                    self.state_started_at['expo'][i] = -1.0
                    self.state_ended_at['expo'][i] = -1.0
                    self.state_started_at['ipre'][i] = -1.0
                    self.bernoulli_is_iasy[i] = 0
                    self.__process_symptomatic_event(0.0, i)

                # initial symptomatic and positive (already tested, so no new test)
                elif state == 'isym_posi':
                    self.state['susc'][i] = False
                    self.state['ipre'][i] = True
                    self.state['posi'][i] = True
                    self.state_ended_at['susc'][i] = -1.0
                    self.state_started_at['expo'][i] = -1.0
                    self.state_ended_at['expo'][i] = -1.0
                    self.state_started_at['ipre'][i] = -1.0
                    self.state_started_at['posi'][i] = -1.0
                    self.bernoulli_is_iasy[i] = 0
                    self.__process_symptomatic_event(0.0, i, apply_for_test=False)

                # initial resistant and positive
                elif state == 'resi_posi':
                    self.state['susc'][i] = False
                    self.state['isym'][i] = True
                    self.state['posi'][i] = True
                    self.state_ended_at['susc'][i] = -1.0
                    self.state_started_at['expo'][i] = -1.0
                    self.state_ended_at['expo'][i] = -1.0
                    self.state_started_at['ipre'][i] = -1.0
                    self.state_ended_at['ipre'][i] = -1.0
                    self.state_started_at['isym'][i] = -1.0
                    self.state_started_at['posi'][i] = -1.0
                    self.bernoulli_is_iasy[i] = 0
                    self.__process_resistant_event(0.0, i)

                # initial resistant and not positive
                elif state == 'resi_notposi':
                    self.state['susc'][i] = False
                    self.state['isym'][i] = True
                    self.state_ended_at['susc'][i] = -1.0
                    self.state_started_at['expo'][i] = -1.0
                    self.state_ended_at['expo'][i] = -1.0
                    self.state_started_at['ipre'][i] = -1.0
                    self.state_ended_at['ipre'][i] = -1.0
                    self.state_started_at['isym'][i] = -1.0
                    self.bernoulli_is_iasy[i] = 0
                    self.__process_resistant_event(0.0, i)

                else:
                    raise ValueError('Invalid initial seed state.')
def make_exposure_int_eval(self):
'''
Returns evaluatable numpy function that computes an integral
of the exposure rate. The function returned takes the following arguments
`j_from`: visit start of j
`j_to`: visit end of j
`inf_from`: visit start of infector
`inf_to`: visit end of infector
`beta_site`: transmission rate at site
'''
# define symbols in exposure rate
beta_sp = Symbol('beta')
base_rate_sp = Symbol('base_rate')
lower_sp = Symbol('lower')
upper_sp = Symbol('upper')
a_sp = Symbol('a')
b_sp = Symbol('b')
u_sp = Symbol('u')
t_sp = Symbol('t')
# symbolically integrate term of the exposure rate over [lower_sp, upper_sp]
expo_int_symb = Max(integrate(
beta_sp *
integrate(
base_rate_sp *
self.gamma *
Piecewise((1.0, (a_sp <= u_sp) & (u_sp <= b_sp)), (0.0, True)) *
exp(- self.gamma * (t_sp - u_sp)),
(u_sp, t_sp - self.delta, t_sp)),
(t_sp, lower_sp, upper_sp)
).simplify(), 0.0)
f_sp = lambdify((lower_sp, upper_sp, a_sp, b_sp, beta_sp, base_rate_sp), expo_int_symb, 'numpy')
# define function with named arguments
def f(*, j_from, j_to, inf_from, inf_to, beta_site, base_rate):
'''Shifts to 0.0 for numerical stability'''
return f_sp(0.0, j_to - j_from, inf_from - j_from, inf_to - j_from, beta_site, base_rate)
return f
def make_exposure_rate_eval(self):
'''
Returns evaluatable numpy function that computes an integral
of the exposure rate. The function returned takes the following arguments
`inf_from`: visit start of infector
`inf_to`: visit end of infector
`beta_site`: transmission rate at site
'''
# define symbols in exposure rate
a_sp = Symbol('a')
b_sp = Symbol('b')
u_sp = Symbol('u')
t_sp = Symbol('t')
# symbolically integrate term of the exposure rate over [lower_sp, upper_sp]
expo_rate_symb = Max(
integrate(
self.gamma * \
Piecewise((1.0, (a_sp <= u_sp) & (u_sp <= b_sp)), (0.0, True)) \
* exp(- self.gamma * (t_sp - u_sp)),
(u_sp, t_sp - self.delta, t_sp)).simplify(),
0.0)
f_sp = lambdify((t_sp, a_sp, b_sp), expo_rate_symb, 'numpy')
# define function with named arguments
def f(*, t, inf_from, inf_to):
return f_sp(t, inf_from, inf_to)
return f
def launch_epidemic(self, params, initial_counts, testing_params, measure_list, thresholds_roc=[], verbose=True):
"""
Run the epidemic, starting | |
import FWCore.ParameterSet.Config as cms
# Particle-flow relative isolation for electrons, computed from the matched
# GsfElectron: (charged-hadron sumPt + max(0, neutral-hadron Et + photon Et
# - 0.5 * PU sumPt)) / pT, with tight (<0.1) and loose (<0.15) working points.
EletightIsoCut = "(gsfElectronRef.pfIsolationVariables.sumChargedHadronPt + max(0., gsfElectronRef.pfIsolationVariables.sumNeutralHadronEt + gsfElectronRef.pfIsolationVariables.sumPhotonEt - 0.5 * gsfElectronRef.pfIsolationVariables.sumPUPt) ) / gsfElectronRef.pt < 0.1"
ElelooseIsoCut = "(gsfElectronRef.pfIsolationVariables.sumChargedHadronPt + max(0., gsfElectronRef.pfIsolationVariables.sumNeutralHadronEt + gsfElectronRef.pfIsolationVariables.sumPhotonEt - 0.5 * gsfElectronRef.pfIsolationVariables.sumPUPt) ) / gsfElectronRef.pt < 0.15"
# DQM module monitoring the single-top t-channel lepton+jets selection.
# Histograms are booked under Physics/Top/SingleTopDQM and filled after each
# step of the `selection` sequence; `preselection` steps are applied silently.
singleTopTChannelLeptonDQM = cms.EDAnalyzer("SingleTopTChannelLeptonDQM",
    ## ------------------------------------------------------
    ## SETUP
    ##
    ## configuration of the MonitoringEnsemble(s)
    ## [mandatory] : optional PSets may be omitted
    ##
    setup = cms.PSet(
        ## sub-directory to write the monitor histograms to
        ## [mandatory] : should not be changed w/o explicit
        ## communication to TopCom!
        directory = cms.string("Physics/Top/SingleTopDQM/"),
        ## [mandatory]
        sources = cms.PSet(
            muons = cms.InputTag("pfIsolatedMuonsEI"),
            elecs = cms.InputTag("pfIsolatedElectronsEI"),
            jets = cms.InputTag("ak4PFJetsCHS"),
            mets = cms.VInputTag("met", "tcMet", "pfMetEI"),
            pvs = cms.InputTag("offlinePrimaryVertices")
        ),
        ## [optional] : when omitted the verbosity level is set to STANDARD
        monitoring = cms.PSet(
            verbosity = cms.string("DEBUG")
        ),
        ## [optional] : when omitted all monitoring plots for primary vertices
        ## will be filled w/o extras
        # pvExtras = cms.PSet(
        ## when omitted electron plots will be filled w/o additional pre-
        ## selection of the primary vertex candidates
        # select = cms.string("abs(x)<1. & abs(y)<1. & abs(z)<20. & tracksSize>3 & !isFake")
        # ),
        ## [optional] : when omitted all monitoring plots for electrons
        ## will be filled w/o extras
        elecExtras = cms.PSet(
            ## when omitted electron plots will be filled w/o cut on electronId
            ##electronId = cms.PSet( src = cms.InputTag("mvaTrigV0"), cutValue = cms.double(0.5) ),
            ## when omitted electron plots will be filled w/o additional pre-
            ## selection of the electron candidates
            select = cms.string("pt>15 & abs(eta)<2.5 & abs(gsfElectronRef.gsfTrack.d0)<1 & abs(gsfElectronRef.gsfTrack.dz)<20"),
            ## when omitted isolated electron multiplicity plot will be equi-
            ## valent to inclusive electron multiplicity plot
            isolation = cms.string(ElelooseIsoCut),
        ),
        ## [optional] : when omitted all monitoring plots for muons
        ## will be filled w/o extras
        muonExtras = cms.PSet(
            ## when omitted muon plots will be filled w/o additional pre-
            ## selection of the muon candidates
            select = cms.string("pt>10 & abs(eta)<2.1 & isGlobalMuon & abs(globalTrack.d0)<1 & abs(globalTrack.dz)<20"),
            ## when omitted isolated muon multiplicity plot will be equi-
            ## valent to inclusive muon multiplicity plot
            # isolation = cms.string("(isolationR03.sumPt+isolationR03.emEt+isolationR03.hadEt)/pt<0.1"),
        ),
        ## [optional] : when omitted all monitoring plots for jets will
        ## be filled from uncorrected jets
        jetExtras = cms.PSet(
            ## when omitted monitor plots for pt will be filled from uncorrected
            ## jets
            jetCorrector = cms.string("ak4CaloL2L3"),
            ## when omitted monitor plots will be filled w/o additional cut on
            ## jetID
            # jetID = cms.PSet(
            # label = cms.InputTag("ak4JetID"),
            # select = cms.string("fHPD < 0.98 & n90Hits>1 & restrictedEMF<1")
            # ),
            ## when omitted no extra selection will be applied on jets before
            ## filling the monitor histograms; if jetCorrector is present the
            ## selection will be applied to corrected jets
            select = cms.string("pt>15 & abs(eta)<2.5 & emEnergyFraction>0.01"),
        ),
        ## [optional] : when omitted no mass window will be applied
        ## for the W mass before filling the event monitoring plots
        # massExtras = cms.PSet(
        # lowerEdge = cms.double( 70.),
        # upperEdge = cms.double(110.)
        # ),
        ## [optional] : when omitted the monitoring plots for triggering
        ## will be empty
        triggerExtras = cms.PSet(
            src = cms.InputTag("TriggerResults","","HLT"),
            paths = cms.vstring(['HLT_Mu3:HLT_QuadJet15U',
                                 'HLT_Mu5:HLT_QuadJet15U',
                                 'HLT_Mu7:HLT_QuadJet15U',
                                 'HLT_Mu9:HLT_QuadJet15U'])
        )
    ),
    ## ------------------------------------------------------
    ## PRESELECTION
    ##
    ## setup of the event preselection, which will not
    ## be monitored
    ## [mandatory] : but may be empty
    ##
    preselection = cms.PSet(
        ## [optional] : when omitted no preselection is applied
        # trigger = cms.PSet(
        # src = cms.InputTag("TriggerResults","","HLT"),
        # select = cms.vstring(['HLT_Mu11', 'HLT_Ele15_LW_L1R', 'HLT_QuadJet30'])
        # ),
        ## [optional] : when omitted no preselection is applied
        # vertex = cms.PSet(
        # src = cms.InputTag("offlinePrimaryVertices"),
        # select = cms.string('abs(x)<1. & abs(y)<1. & abs(z)<20. & tracksSize>3 & !isFake')
        # )
    ),
    ## ------------------------------------------------------
    ## SELECTION
    ##
    ## monitor histograms are filled after each selection
    ## step, the selection is applied in the order defined
    ## by this vector
    ## [mandatory] : may be empty or contain an arbitrary
    ## number of PSets
    ##
    selection = cms.VPSet(
        cms.PSet(
            label = cms.string("jets/calo:step0"),
            src = cms.InputTag("ak4CaloJets"),
            select = cms.string("pt>20 & abs(eta)<2.1 & 0.05<emEnergyFraction"),
            jetID = cms.PSet(
                label = cms.InputTag("ak4JetID"),
                select = cms.string("fHPD < 0.98 & n90Hits>1 & restrictedEMF<1")
            ),
            min = cms.int32(2),
        )
    )
)
singleTopMuonMediumDQM = cms.EDAnalyzer("SingleTopTChannelLeptonDQM",
## ------------------------------------------------------
## SETUP
##
## configuration of the MonitoringEnsemble(s)
## [mandatory] : optional PSets may be omitted
##
setup = cms.PSet(
## sub-directory to write the monitor histograms to
## [mandatory] : should not be changed w/o explicit
## communication to TopCom!
directory = cms.string("Physics/Top/SingleTopMuonMediumDQM/"),
## [mandatory]
sources = cms.PSet(
muons = cms.InputTag("pfIsolatedMuonsEI"),
elecs_gsf = cms.InputTag("gedGsfElectrons"),
elecs = cms.InputTag("pfIsolatedElectronsEI"),
jets = cms.InputTag("ak4PFJetsCHS"),
mets = cms.VInputTag("met", "tcMet", "pfMetEI"),
pvs = cms.InputTag("offlinePrimaryVertices")
),
## [optional] : when omitted the verbosity level is set to STANDARD
monitoring = cms.PSet(
verbosity = cms.string("DEBUG")
),
## [optional] : when omitted all monitoring plots for primary vertices
## will be filled w/o extras
# pvExtras = cms.PSet(
## when omitted electron plots will be filled w/o additional pre-
## selection of the primary vertex candidates
# select = cms.string("") #abs(x)<1. & abs(y)<1. & abs(z)<20. & tracksSize>3 & !isFake")
# ),
## [optional] : when omitted all monitoring plots for muons
## will be filled w/o extras
muonExtras = cms.PSet(
## when omitted muon plots will be filled w/o additional pre-
## selection of the muon candidates
select = cms.string("abs(muonRef.eta)<2.1")
## & isGlobalMuon & innerTrack.numberOfValidHits>10 & globalTrack.normalizedChi2>-1 & globalTrack.normalizedChi2<10
##& (isolationR03.sumPt+isolationR03.emEt+isolationR03.hadEt)/pt<0.1"),
## when omitted isolated muon multiplicity plot will be equi-
## valent to inclusive muon multiplicity plot
## isolation = cms.string("(muonRef.isolationR03.sumPt+muonRef.isolationR03.emEt+muonRef.isolationR03.hadEt)/muonRef.pt<10" )
## isolation = cms.string("(muonRef.isolationR03.sumPt+muonRef.isolationR03.emEt+muonRef.isolationR03.hadEt)/muonRef.pt<0.1")
),
## [optional] : when omitted all monitoring plots for jets
## will be filled w/o extras
jetExtras = cms.PSet(
## when omitted monitor plots for pt will be filled from uncorrected
## jets
jetCorrector = cms.string("topDQMak5PFCHSL2L3"),
## when omitted monitor plots will be filled w/o additional cut on
## jetID
# jetID = cms.PSet(
# label = cms.InputTag("ak4JetID"),
# select = cms.string(""), ##fHPD < 0.98 & n90Hits>1 & restrictedEMF<1")
# ),
## when omitted no extra selection will be applied on jets before
## filling the monitor histograms; if jetCorrector is present the
## selection will be applied to corrected jets
select = cms.string("pt>15 & abs(eta)<2.5"), # & neutralEmEnergyFraction >0.01 & chargedEmEnergyFraction>0.01"),
## when omitted monitor histograms for b-tagging will not be filled
jetBTaggers = cms.PSet(
trackCountingEff = cms.PSet(
label = cms.InputTag("pfTrackCountingHighEffBJetTags" ),
workingPoint = cms.double(1.25)
),
trackCountingPur = cms.PSet(
label = cms.InputTag("pfTrackCountingHighPurBJetTags" ),
workingPoint = cms.double(3.41)
),
secondaryVertex = cms.PSet(
label = cms.InputTag("pfSimpleSecondaryVertexHighEffBJetTags"),
workingPoint = cms.double(2.05)
),
combinedSecondaryVertex = cms.PSet(
label = cms.InputTag("pfCombinedInclusiveSecondaryVertexV2BJetTags"),
workingPoint = cms.double(0.970)
)
)
)
## [optional] : when omitted no mass window will be applied
## for the W mass before filling the event monitoring plots
# massExtras = cms.PSet(
# lowerEdge = cms.double( 70.),
# upperEdge = cms.double(110.)
# ),
## [optional] : when omitted the monitoring plots for triggering
## will be empty
# triggerExtras = cms.PSet(
# src = cms.InputTag("TriggerResults","","HLT"),
# paths = cms.vstring(['HLT_IsoMu17_eta2p1_CentralPFNoPUJet30_BTagIPIter_v1'])
# 'HLT_IsoMu24_eta2p1_v12',
# 'HLT_IsoMu20_eta2p1_CentralPFJet30_BTagIPIter_v2',
# 'HLT_IsoMu20_eta2p1_CentralPFJet30_BTagIPIter_v3'])
# )
),
## ------------------------------------------------------
## PRESELECTION
##
## setup of the event preselection, which will not
## be monitored
## [mandatory] : but may be empty
##
preselection = cms.PSet(
## [optional] : when omitted no preselection is applied
# trigger = cms.PSet(
# src = cms.InputTag("TriggerResults","","HLT"),
# select = cms.vstring(['HLT_IsoMu17_eta2p1_CentralPFNoPUJet30_BTagIPIter_v1'])
# ),
## [optional] : when omitted no preselection is applied
# vertex = cms.PSet(
# src = cms.InputTag("offlinePrimaryVertices"),
# select = cms.string('!isFake && ndof >= 4 && abs(z)<24. && position.Rho <= 2.0')
# )
),
## ------------------------------------------------------
## SELECTION
##
## monitor histrograms are filled after each selection
## step, the selection is applied in the order defined
## by this vector
## [mandatory] : may be empty or contain an arbitrary
## number of PSets
selection = cms.VPSet(
cms.PSet(
label = cms.string("presel"),
src = cms.InputTag("offlinePrimaryVertices"),
select = cms.string('!isFake && ndof >= 4 && abs(z)<24. && position.Rho <= 2.0 '),
),
cms.PSet(
label = cms.string("muons/pf:step0"),
src = cms.InputTag("pfIsolatedMuonsEI"),
| |
# pytest_docker_registry_fixtures/fixtures.py
#!/usr/bin/env python
# pylint: disable=redefined-outer-name,too-many-arguments,too-many-locals
"""The actual fixtures, you found them ;)."""
import logging
import itertools
from base64 import b64encode
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from ssl import create_default_context, SSLContext
from string import Template
from time import sleep, time
from typing import Dict, Generator, List, NamedTuple
import pytest
from docker import DockerClient, from_env
from lovely.pytest.docker.compose import Services
from _pytest.tmpdir import TempPathFactory
from .imagename import ImageName
from .utils import (
check_url_secure,
DOCKER_REGISTRY_SERVICE,
DOCKER_REGISTRY_SERVICE_PATTERN,
generate_cacerts,
generate_htpasswd,
generate_keypair,
get_docker_compose_user_defined,
get_embedded_file,
get_user_defined_file,
replicate_image,
start_service,
)
# Caching is needed, as singular-fixtures and list-fixtures will conflict at scale_factor=1
# This appears to only matter when attempting to start the docker secure registry service
# for the second time.
CACHE = {}
LOGGER = logging.getLogger(__name__)
class DockerRegistryCerts(NamedTuple):
    """Filesystem paths to the PKI material generated for a secure registry."""

    ca_certificate: Path  # certificate of the signing CA
    ca_private_key: Path  # private key of the signing CA
    certificate: Path  # TLS certificate presented by the registry
    private_key: Path  # private key matching ``certificate``
class DockerRegistryInsecure(NamedTuple):
    """Runtime handles for a started insecure docker registry service."""

    docker_client: DockerClient  # client connected to the local docker daemon
    docker_compose: Path  # docker-compose file defining the registry service
    endpoint: str  # address of the registry (presumably host:port — confirm)
    images: List[ImageName]  # images made available in the registry by the fixtures
    service_name: str  # name of the docker-compose service
# Note: NamedTuple does not support inheritance :(
class DockerRegistrySecure(NamedTuple):
    """Runtime handles for a started secure (TLS, authenticated) registry."""

    auth_header: Dict[str, str]  # HTTP auth header with pre-encoded credentials
    cacerts: Path  # CA bundle clients should trust for this registry
    certs: DockerRegistryCerts  # generated certificate / key material
    docker_client: DockerClient  # client connected to the local docker daemon
    docker_compose: Path  # docker-compose file defining the registry service
    endpoint: str  # address of the registry (presumably host:port — confirm)
    htpasswd: Path  # htpasswd file holding the registry credentials
    images: List[ImageName]  # images made available in the registry by the fixtures
    password: str  # password paired with ``username``
    service_name: str  # name of the docker-compose service
    ssl_context: SSLContext  # SSL context for connecting to the registry
    username: str  # user name paired with ``password``
@pytest.fixture(scope="session")
def docker_client() -> DockerClient:
    """Provides an insecure Docker API client."""
    client = from_env()
    return client
def _docker_compose_insecure(
    *,
    docker_compose_files: List[str],
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the locations of the docker-compose configuration files containing the
    insecure docker registry services, one entry per instance.
    """
    cache_key = _docker_compose_insecure.__name__
    compose_paths = CACHE.get(cache_key, [])
    # Only materialize entries that were not cached by an earlier invocation.
    for index in range(len(compose_paths), scale_factor):
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("insecure", index)
        candidates = itertools.chain(
            get_docker_compose_user_defined(docker_compose_files, service_name),
            # TODO: lovely-docker-compose uses the file for teardown ...
            get_embedded_file(
                tmp_path_factory, delete_after=False, name="docker-compose.yml"
            ),
        )
        # Take the first candidate, if any; fall back to a placeholder otherwise.
        missing = object()
        candidate = next(candidates, missing)
        if candidate is missing:
            LOGGER.warning("Unable to find docker compose for: %s", service_name)
            candidate = "-unknown-"
        compose_paths.append(candidate)
    CACHE[cache_key] = compose_paths
    yield compose_paths
@pytest.fixture(scope="session")
def docker_compose_insecure(
    docker_compose_files: List[str], tmp_path_factory: TempPathFactory
) -> Generator[Path, None, None]:
    """
    Provides the location of the docker-compose configuration file containing the
    insecure docker registry service.
    """
    # Delegate to the scalable helper at scale 1 and unwrap the single entry.
    yield from (
        paths[0]
        for paths in _docker_compose_insecure(
            docker_compose_files=docker_compose_files,
            scale_factor=1,
            tmp_path_factory=tmp_path_factory,
        )
    )
@pytest.fixture(scope="session")
def docker_compose_insecure_list(
    docker_compose_files: List[str],
    pdrf_scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the locations of the docker-compose configuration files containing the
    insecure docker registry services, one per instance.
    """
    generator = _docker_compose_insecure(
        docker_compose_files=docker_compose_files,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    )
    yield from generator
def _docker_compose_secure(
    *,
    docker_compose_files: List[str],
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the locations of the templated docker-compose configuration files
    containing the secure docker registry services, one entry per instance.
    """
    cache_key = _docker_compose_secure.__name__
    compose_paths = CACHE.get(cache_key, [])
    # Only materialize entries that were not cached by an earlier invocation.
    for index in range(len(compose_paths), scale_factor):
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("secure", index)
        candidates = itertools.chain(
            get_docker_compose_user_defined(docker_compose_files, service_name),
            get_embedded_file(
                tmp_path_factory, delete_after=False, name="docker-compose.yml"
            ),
        )
        # Take the first candidate, if any; fall back to a placeholder otherwise.
        missing = object()
        candidate = next(candidates, missing)
        if candidate is missing:
            LOGGER.warning("Unable to find docker compose for: %s", service_name)
            candidate = "-unknown-"
        compose_paths.append(candidate)
    CACHE[cache_key] = compose_paths
    yield compose_paths
@pytest.fixture(scope="session")
def docker_compose_secure(
    docker_compose_files: List[str], tmp_path_factory: TempPathFactory
) -> Generator[Path, None, None]:
    """
    Provides the location of the templated docker-compose configuration file
    containing the secure docker registry service.
    """
    # Delegate to the scalable helper at scale 1 and unwrap the single entry.
    yield from (
        paths[0]
        for paths in _docker_compose_secure(
            docker_compose_files=docker_compose_files,
            scale_factor=1,
            tmp_path_factory=tmp_path_factory,
        )
    )
@pytest.fixture(scope="session")
def docker_compose_secure_list(
    docker_compose_files: List[str],
    pdrf_scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the locations of the templated docker-compose configuration files
    containing the secure docker registry services, one per instance.
    """
    generator = _docker_compose_secure(
        docker_compose_files=docker_compose_files,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    )
    yield from generator
def _docker_registry_auth_header(
    *,
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    scale_factor: int,
) -> List[Dict[str, str]]:
    """Provides HTTP basic authentication headers for the secure docker registry services."""
    cache_key = _docker_registry_auth_header.__name__
    headers = CACHE.get(cache_key, [])
    # Only build headers that were not cached by an earlier invocation.
    for index in range(len(headers), scale_factor):
        credentials = (
            f"{docker_registry_username_list[index]}:"
            f"{docker_registry_password_list[index]}"
        )
        # RFC 7617: "Basic" followed by base64(username:password).
        token = b64encode(credentials.encode("utf-8")).decode("utf-8")
        headers.append({"Authorization": f"Basic {token}"})
    CACHE[cache_key] = headers
    return headers
@pytest.fixture(scope="session")
def docker_registry_auth_header(
    docker_registry_password: str, docker_registry_username: str
) -> Dict[str, str]:
    """Provides an HTTP basic authentication header for the secure docker registry service."""
    headers = _docker_registry_auth_header(
        docker_registry_password_list=[docker_registry_password],
        docker_registry_username_list=[docker_registry_username],
        scale_factor=1,
    )
    return headers[0]
@pytest.fixture(scope="session")
def docker_registry_auth_header_list(
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    pdrf_scale_factor: int,
) -> List[Dict[str, str]]:
    """Provides HTTP basic authentication headers for the secure docker registry services."""
    headers = _docker_registry_auth_header(
        docker_registry_password_list=docker_registry_password_list,
        docker_registry_username_list=docker_registry_username_list,
        scale_factor=pdrf_scale_factor,
    )
    return headers
def _docker_registry_cacerts(
    *,
    docker_registry_certs_list: List[DockerRegistryCerts],
    pytestconfig: "_pytest.config.Config",
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the locations of temporary CA certificate trust stores containing the
    certificates of the secure docker registry services, one entry per instance.
    """
    cache_key = _docker_registry_cacerts.__name__
    cacerts_paths = CACHE.get(cache_key, [])
    # Only materialize entries that were not cached by an earlier invocation.
    for index in range(len(cacerts_paths), scale_factor):
        candidates = itertools.chain(
            get_user_defined_file(pytestconfig, "cacerts"),
            generate_cacerts(
                tmp_path_factory,
                certificate=docker_registry_certs_list[index].ca_certificate,
            ),
        )
        # Prefer a user-defined trust store; otherwise use the generated one.
        missing = object()
        candidate = next(candidates, missing)
        if candidate is missing:
            LOGGER.warning("Unable to find or generate cacerts!")
            candidate = "-unknown-"
        cacerts_paths.append(candidate)
    CACHE[cache_key] = cacerts_paths
    yield cacerts_paths
@pytest.fixture(scope="session")
def docker_registry_cacerts(
    docker_registry_certs: DockerRegistryCerts,
    pytestconfig: "_pytest.config.Config",
    tmp_path_factory: TempPathFactory,
) -> Generator[Path, None, None]:
    """
    Provides the location of a temporary CA certificate trust store that contains
    the certificate of the secure docker registry service.
    """
    # Delegate to the scalable helper at scale 1 and unwrap the single entry.
    yield from (
        paths[0]
        for paths in _docker_registry_cacerts(
            docker_registry_certs_list=[docker_registry_certs],
            pytestconfig=pytestconfig,
            scale_factor=1,
            tmp_path_factory=tmp_path_factory,
        )
    )
@pytest.fixture(scope="session")
def docker_registry_cacerts_list(
    docker_registry_certs_list: List[DockerRegistryCerts],
    pdrf_scale_factor: int,
    pytestconfig: "_pytest.config.Config",
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the locations of temporary CA certificate trust stores containing the
    certificates of the secure docker registry services, one per instance.
    """
    generator = _docker_registry_cacerts(
        docker_registry_certs_list=docker_registry_certs_list,
        pytestconfig=pytestconfig,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    )
    yield from generator
def _docker_registry_certs(
    *, scale_factor: int, tmp_path_factory: TempPathFactory
) -> Generator[List[DockerRegistryCerts], None, None]:
    """
    Provides the locations of temporary certificate and private key files for the
    secure docker registry services; removes the files on teardown.
    """
    # TODO: Augment to allow for reading certificates from /test ...
    cache_key = _docker_registry_certs.__name__
    certs = CACHE.get(cache_key, [])
    # Only generate key material that was not cached by an earlier invocation.
    for index in range(len(certs), scale_factor):
        tmp_path = tmp_path_factory.mktemp(__name__)
        keypair = generate_keypair()
        entry = DockerRegistryCerts(
            ca_certificate=tmp_path.joinpath(
                f"{DOCKER_REGISTRY_SERVICE}-ca-{index}.crt"
            ),
            ca_private_key=tmp_path.joinpath(
                f"{DOCKER_REGISTRY_SERVICE}-ca-{index}.key"
            ),
            certificate=tmp_path.joinpath(f"{DOCKER_REGISTRY_SERVICE}-{index}.crt"),
            private_key=tmp_path.joinpath(f"{DOCKER_REGISTRY_SERVICE}-{index}.key"),
        )
        entry.ca_certificate.write_bytes(keypair.ca_certificate)
        entry.ca_private_key.write_bytes(keypair.ca_private_key)
        entry.certificate.write_bytes(keypair.certificate)
        entry.private_key.write_bytes(keypair.private_key)
        certs.append(entry)
    CACHE[cache_key] = certs
    yield certs
    # Teardown: a NamedTuple iterates over its fields, so each entry yields its
    # four paths (ca_certificate, ca_private_key, certificate, private_key).
    for entry in certs:
        for file_path in entry:
            file_path.unlink(missing_ok=True)
@pytest.fixture(scope="session")
def docker_registry_certs(
    tmp_path_factory: TempPathFactory,
) -> Generator[DockerRegistryCerts, None, None]:
    """Provides the location of temporary certificate and private key files for the secure docker registry service."""
    # Delegate to the scalable helper at scale 1 and unwrap the single entry.
    yield from (
        certs[0]
        for certs in _docker_registry_certs(
            scale_factor=1, tmp_path_factory=tmp_path_factory
        )
    )
@pytest.fixture(scope="session")
def docker_registry_certs_list(
    pdrf_scale_factor: int, tmp_path_factory: TempPathFactory
) -> Generator[List[DockerRegistryCerts], None, None]:
    """Provides the locations of temporary certificate and private key files for the secure docker registry services."""
    generator = _docker_registry_certs(
        scale_factor=pdrf_scale_factor, tmp_path_factory=tmp_path_factory
    )
    yield from generator
def _docker_registry_htpasswd(
    *,
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    pytestconfig: "_pytest.config.Config",
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """Provides the locations of the htpasswd files for the secure registry services."""
    cache_key = _docker_registry_htpasswd.__name__
    htpasswd_paths = CACHE.get(cache_key, [])
    # Only materialize entries that were not cached by an earlier invocation.
    for index in range(len(htpasswd_paths), scale_factor):
        candidates = itertools.chain(
            get_user_defined_file(pytestconfig, "htpasswd"),
            generate_htpasswd(
                tmp_path_factory,
                username=docker_registry_username_list[index],
                password=docker_registry_password_list[index],
            ),
        )
        # Prefer a user-defined htpasswd file; otherwise use the generated one.
        missing = object()
        candidate = next(candidates, missing)
        if candidate is missing:
            LOGGER.warning("Unable to find or generate htpasswd!")
            candidate = "-unknown-"
        htpasswd_paths.append(candidate)
    CACHE[cache_key] = htpasswd_paths
    yield htpasswd_paths
@pytest.fixture(scope="session")
def docker_registry_htpasswd(
    docker_registry_password: str,
    docker_registry_username: str,
    pytestconfig: "_pytest.config.Config",
    tmp_path_factory: TempPathFactory,
) -> Generator[Path, None, None]:
    """Provides the location of the htpasswd file for the secure registry service."""
    # Delegate to the scalable helper at scale 1 and unwrap the single entry.
    yield from (
        paths[0]
        for paths in _docker_registry_htpasswd(
            docker_registry_password_list=[docker_registry_password],
            docker_registry_username_list=[docker_registry_username],
            pytestconfig=pytestconfig,
            scale_factor=1,
            tmp_path_factory=tmp_path_factory,
        )
    )
@pytest.fixture(scope="session")
def docker_registry_htpasswd_list(
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    pdrf_scale_factor: int,
    pytestconfig: "_pytest.config.Config",
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """Provides the locations of the htpasswd files for the secure registry services."""
    generator = _docker_registry_htpasswd(
        docker_registry_password_list=docker_registry_password_list,
        docker_registry_username_list=docker_registry_username_list,
        pytestconfig=pytestconfig,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    )
    yield from generator
def _docker_registry_insecure(
    *,
    docker_client: DockerClient,
    docker_compose_insecure_list: List[Path],
    docker_services: Services,
    request,
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[DockerRegistryInsecure], None, None]:
    """Provides the endpoints of local, mutable, insecure, docker registries.

    Args:
        docker_client: Docker API client passed through to image replication.
        docker_compose_insecure_list: one compose template per instance.
        docker_services: lovely-pytest-docker services controller.
        request: pytest request object, forwarded to _replicate_images.
        scale_factor: number of registry instances to provide.
        tmp_path_factory: factory for the per-instance scratch directories.

    Yields:
        List of DockerRegistryInsecure handles, one per instance; cached in
        CACHE so singular and list fixtures share the same services.
    """
    cache_key = _docker_registry_insecure.__name__
    result = CACHE.get(cache_key, [])
    for i in range(scale_factor):
        # Entries below len(result) were already started by a previous call.
        if i < len(result):
            continue
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("insecure", i)
        tmp_path = tmp_path_factory.mktemp(__name__)
        # Create a secure registry service from the docker compose template ...
        path_docker_compose = tmp_path.joinpath(f"docker-compose-{i}.yml")
        template = Template(docker_compose_insecure_list[i].read_text("utf-8"))
        # The insecure variant maps all TLS/auth inputs to /dev/null.
        path_docker_compose.write_text(
            template.substitute(
                {
                    "CONTAINER_NAME": service_name,
                    # Note: Needed to correctly populate the embedded, consolidated, service template ...
                    "PATH_CERTIFICATE": "/dev/null",
                    "PATH_HTPASSWD": "/dev/null",
                    "PATH_KEY": "/dev/null",
                }
            ),
            "utf-8",
        )
        LOGGER.debug("Starting insecure docker registry service [%d] ...", i)
        LOGGER.debug(" docker-compose : %s", path_docker_compose)
        LOGGER.debug(" service name : %s", service_name)
        endpoint = start_service(
            docker_services,
            docker_compose=path_docker_compose,
            service_name=service_name,
        )
        LOGGER.debug("Insecure docker registry endpoint [%d]: %s", i, endpoint)
        images = []
        # Images are replicated into the first instance only; the handles of
        # later instances carry an empty image list.
        if i == 0:
            LOGGER.debug("Replicating images into %s [%d] ...", service_name, i)
            images = _replicate_images(docker_client, endpoint, request)
        result.append(
            DockerRegistryInsecure(
                docker_client=docker_client,
                docker_compose=path_docker_compose,
                endpoint=endpoint,
                images=images,
                service_name=service_name,
            )
        )
    CACHE[cache_key] = result
    yield result
@pytest.fixture(scope="session")
def docker_registry_insecure(
    docker_client: DockerClient,
    docker_compose_insecure: Path,
    docker_services: Services,
    request,
    tmp_path_factory: TempPathFactory,
) -> Generator[DockerRegistryInsecure, None, None]:
    """Provides the endpoint of a local, mutable, insecure, docker registry."""
    # Delegate to the scalable helper at scale 1 and unwrap the single entry.
    yield from (
        registries[0]
        for registries in _docker_registry_insecure(
            docker_client=docker_client,
            docker_compose_insecure_list=[docker_compose_insecure],
            docker_services=docker_services,
            request=request,
            scale_factor=1,
            tmp_path_factory=tmp_path_factory,
        )
    )
@pytest.fixture(scope="session")
def docker_registry_insecure_list(
docker_client: DockerClient,
docker_compose_insecure_list: List[Path],
docker_services: Services,
pdrf_scale_factor: int,
request,
tmp_path_factory: TempPathFactory,
) -> Generator[List[DockerRegistryInsecure], None, None]:
"""Provides the | |
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.108071,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.62866,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.250372,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.40384,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.203845,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.858057,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.286352,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.38331,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0105017,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0759405,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0776666,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0759405,
'Execution Unit/Register Files/Runtime Dynamic': 0.0881683,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.159985,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.480715,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.03501,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00205838,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00205838,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00183075,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000729443,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00111569,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00706321,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0183815,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0746629,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.7492,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.248094,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.253589,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.19821,
'Instruction Fetch Unit/Runtime Dynamic': 0.601791,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0565014,
'L2/Runtime Dynamic': 0.0157953,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.74778,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.22735,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0812257,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0812256,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.13134,
'Load Store Unit/Runtime Dynamic': 1.70915,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.200289,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.400577,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0710831,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0719288,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.295288,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0406796,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.573505,
'Memory Management Unit/Runtime Dynamic': 0.112608,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 19.9323,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0112961,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.130283,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
# <filename>VAE_functions.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 16 10:59:14 2018
@author: anazabal
"""
import csv
import numpy as np
import tensorflow as tf
import loglik_models_missing_normalize
def place_holder_types(types_file, batch_size):
    """Build the TF1 placeholders for one mini-batch of heterogeneous data.

    Args:
        types_file: path to a CSV file; each row is expected to describe one
            variable (at least a 'dim' entry is read here, 'type' is used by
            callers such as batch_normalization).
        batch_size: number of samples per mini-batch.

    Returns:
        (batch_data_list, batch_data_list_observed, miss_list, tau, tau2,
        types_list): per-variable data placeholders, per-variable observed-data
        placeholders, the missing-data indicator placeholder, two Gumbel-softmax
        temperature placeholders, and the parsed CSV rows.
    """
    # Read the types of the data from the files
    with open(types_file) as f:
        types_list = [{k: v for k, v in row.items()}
                      for row in csv.DictReader(f, skipinitialspace=True)]
    # Create placeholders for every data type, with appropriate dimensions.
    # NOTE(review): types_list[i]['dim'] is a str straight from csv.DictReader;
    # this relies on TF coercing it to an int — confirm.
    batch_data_list = []
    for i in range(len(types_list)):
        batch_data_list.append(
            tf.placeholder(tf.float32, shape=(batch_size, types_list[i]['dim'])))
    # NOTE(review): the result of this concat is discarded — it only adds an
    # unused op to the graph; looks like dead code, confirm before removing.
    tf.concat(batch_data_list, axis=1)
    # Create placeholders for every missing data type, with appropriate dimensions
    batch_data_list_observed = []
    for i in range(len(types_list)):
        batch_data_list_observed.append(
            tf.placeholder(tf.float32, shape=(batch_size, types_list[i]['dim'])))
    # NOTE(review): result discarded here as well — see note above.
    tf.concat(batch_data_list_observed, axis=1)
    # Create placeholders for the missing data indicator variable
    miss_list = tf.placeholder(tf.int32, shape=(batch_size, len(types_list)))
    # Placeholder for Gumbel-softmax parameter
    tau = tf.placeholder(tf.float32, shape=())
    tau2 = tf.placeholder(tf.float32, shape=())
    return batch_data_list, batch_data_list_observed, miss_list, tau, tau2, types_list
def batch_normalization(batch_data_list, types_list, miss_list):
    """Normalize each variable of the batch using only its observed entries.

    'real' variables are standardized to mean 0 / std 1; 'pos' variables are
    standardized in log(1 + x) space; 'count' variables are replaced by their
    log; all other types are passed through unchanged.

    Args:
        batch_data_list: list of per-variable tensors (one per types_list row).
        types_list: parsed CSV rows; only the 'type' entry is read here.
        miss_list: int tensor, 0 marks missing and 1 marks observed entries.

    Returns:
        (normalized_data, normalization_parameters): the normalized tensors and,
        per variable, the [mean, var] pair needed to undo the normalization
        ([0.0, 1.0] for the untouched types).
    """
    normalized_data = []
    normalization_parameters = []
    for i, d in enumerate(batch_data_list):
        # Partition the data in missing data (0) and observed data n(1)
        missing_data, observed_data = tf.dynamic_partition(d, miss_list[:, i], num_partitions=2)
        # Row indices of each partition, so dynamic_stitch can restore order.
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(d)[0]), miss_list[:, i], num_partitions=2)
        if types_list[i]['type'] == 'real':
            # We transform the data to a gaussian with mean 0 and std 1
            data_mean, data_var = tf.nn.moments(observed_data, 0)
            data_var = tf.clip_by_value(data_var, 1e-6, 1e20)  # Avoid zero values
            aux_X = tf.nn.batch_normalization(observed_data, data_mean, data_var, offset=0.0, scale=1.0,
                                              variance_epsilon=1e-6)
            # Stitch missing (left as-is) and normalized observed rows back together.
            normalized_data.append(tf.dynamic_stitch(condition_indices, [missing_data, aux_X]))
            normalization_parameters.append([data_mean, data_var])
        # When using log-normal
        elif types_list[i]['type'] == 'pos':
            # #We transform the log of the data to a gaussian with mean 0 and std 1
            observed_data_log = tf.log(1.0 + observed_data)
            data_mean_log, data_var_log = tf.nn.moments(observed_data_log, 0)
            data_var_log = tf.clip_by_value(data_var_log, 1e-6, 1e20)  # Avoid zero values
            aux_X = tf.nn.batch_normalization(observed_data_log, data_mean_log, data_var_log, offset=0.0, scale=1.0,
                                              variance_epsilon=1e-6)
            normalized_data.append(tf.dynamic_stitch(condition_indices, [missing_data, aux_X]))
            normalization_parameters.append([data_mean_log, data_var_log])
        elif types_list[i]['type'] == 'count':
            # Input log of the data
            # NOTE(review): log of the raw data with no clipping — assumes counts
            # are strictly positive; confirm zeros cannot occur here.
            aux_X = tf.log(observed_data)
            normalized_data.append(tf.dynamic_stitch(condition_indices, [missing_data, aux_X]))
            normalization_parameters.append([0.0, 1.0])
        else:
            # Don't normalize the categorical and ordinal variables
            normalized_data.append(d)
            normalization_parameters.append([0.0, 1.0])  # No normalization here
    return normalized_data, normalization_parameters
def s_proposal_multinomial(X, batch_size, s_dim, tau, reuse):
    """Categorical proposal over the s_dim mixture components, sampled with
    the Gumbel-softmax trick at temperature tau.

    Returns (samples_s, log_pi_aux): relaxed one-hot samples and the clipped
    log-probabilities they were drawn from.
    """
    # Unnormalized logits for the component posterior.
    logits = tf.layers.dense(inputs=X, units=s_dim, activation=None,
                             kernel_initializer=tf.random_normal_initializer(stddev=0.05), name='layer_1_' + 'enc_s',
                             reuse=reuse)
    # Clip the softmax output away from zero before taking the log.
    log_pi_aux = tf.log(tf.clip_by_value(tf.nn.softmax(logits), 1e-6, 1))
    # Standard Gumbel noise: -log(-log(U)) with U ~ Uniform(0, 1).
    gumbel_noise = -tf.log(-tf.log(tf.random_uniform([batch_size, s_dim])))
    samples_s = tf.nn.softmax((log_pi_aux + gumbel_noise) / tau)
    return samples_s, log_pi_aux
def z_proposal_GMM(X, samples_s, batch_size, z_dim, reuse):
    """Gaussian posterior q(z | X, s) conditioned on the component sample s."""
    encoder_input = tf.concat([X, samples_s], 1)
    mu = tf.layers.dense(inputs=encoder_input, units=z_dim, activation=None,
                         kernel_initializer=tf.random_normal_initializer(stddev=0.05),
                         name='layer_1_' + 'mean_enc_z', reuse=reuse)
    logvar = tf.layers.dense(inputs=encoder_input, units=z_dim, activation=None,
                             kernel_initializer=tf.random_normal_initializer(stddev=0.05),
                             name='layer_1_' + 'logvar_enc_z', reuse=reuse)
    # Keep the log-variance in a numerically safe range.
    logvar = tf.clip_by_value(logvar, -15.0, 15.0)
    # Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I).
    eps = tf.random_normal((batch_size, z_dim), 0, 1, dtype=tf.float32)
    z = mu + tf.multiply(tf.exp(logvar / 2), eps)
    return z, [mu, logvar]
def z_proposal_Normal(X, batch_size, z_dim, reuse):
    """Gaussian posterior q(z | X) with separate mean and log-variance heads."""
    mu = tf.layers.dense(inputs=X, units=z_dim, activation=None,
                         kernel_initializer=tf.random_normal_initializer(stddev=0.05),
                         name='layer_1_' + 'mean_enc_z', reuse=reuse)
    logvar = tf.layers.dense(inputs=X, units=z_dim, activation=None,
                             kernel_initializer=tf.random_normal_initializer(stddev=0.05),
                             name='layer_1_' + 'logvar_enc_z', reuse=reuse)
    # Keep the log-variance in a numerically safe range.
    logvar = tf.clip_by_value(logvar, -15.0, 15.0)
    # Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I).
    eps = tf.random_normal((batch_size, z_dim), 0, 1, dtype=tf.float32)
    z = mu + tf.multiply(tf.exp(logvar / 2), eps)
    return z, [mu, logvar]
def z_proposal_GMM_factorized(X, samples_s, miss_list, batch_size, z_dim, reuse):
    """Propose q(z | X, s) as a product of per-variable Gaussian factors.

    For each variable d, a Gaussian factor is fitted on the observed rows
    only; missing rows get a zero mean and a huge variance (log-var 15)
    so they do not influence the joint. A standard-normal prior factor is
    appended, and the factors are combined as a product of Gaussians.

    Parameters
    ----------
    X : list
        one tensor per variable (batch rows).
    samples_s : tensor
        relaxed one-hot component sample, one row per batch element.
    miss_list : tensor/array
        per (row, variable) missing mask: 0 = missing, 1 = observed.
    batch_size, z_dim : int
        batch size and latent dimension.
    reuse : bool
        reuse flag passed to the dense layers.

    Returns
    -------
    (samples_z, [mean_qz_joint, log_var_qz_joint])
    """
    mean_qz = []
    log_var_qz = []
    for i, d in enumerate(X):
        # Partition the rows into missing (mask 0) and observed (mask 1)
        missing_data, observed_data = tf.dynamic_partition(d, miss_list[:, i], num_partitions=2)
        missing_s, observed_s = tf.dynamic_partition(samples_s, miss_list[:, i], num_partitions=2)
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(d)[0]), miss_list[:, i], num_partitions=2)
        # Number of observed rows for this variable
        nObs = tf.shape(observed_data)[0]
        # Mean layer, fitted on observed rows only
        aux_m = tf.layers.dense(inputs=tf.concat([observed_data, observed_s], 1), units=z_dim, activation=None,
                                kernel_initializer=tf.random_normal_initializer(stddev=0.05),
                                name='layer_1_' + 'mean_enc_z' + str(i), reuse=reuse)
        # Re-interleave with zeros for the missing rows (so they do not
        # contribute to the joint mean)
        aux_mean_qz = tf.dynamic_stitch(condition_indices,
                                        [tf.zeros([batch_size - nObs, z_dim], dtype=tf.float32), aux_m])
        # Log-variance layer, fitted on observed rows only
        aux_lv = tf.layers.dense(inputs=tf.concat([observed_data, observed_s], 1), units=z_dim, activation=None,
                                 kernel_initializer=tf.random_normal_initializer(stddev=0.05),
                                 name='layer_1_' + 'logvar_enc_z' + str(i), reuse=reuse)
        # Missing rows get log-var 15 -> near-infinite variance, so the
        # factor is effectively uninformative there
        aux_log_var_qz = tf.dynamic_stitch(condition_indices, [tf.fill([batch_size - nObs, z_dim], 15.0), aux_lv])
        mean_qz.append(aux_mean_qz)
        log_var_qz.append(aux_log_var_qz)
    # Append the standard-normal prior factor (mean 0, log-var 0)
    log_var_qz.append(tf.zeros([batch_size, z_dim]))
    mean_qz.append(tf.zeros([batch_size, z_dim]))
    # Product of Gaussians: precision adds, mean is precision-weighted
    log_var_qz_joint = -tf.reduce_logsumexp(tf.negative(log_var_qz), 0)
    mean_qz_joint = tf.multiply(tf.exp(log_var_qz_joint),
                                tf.reduce_sum(tf.multiply(mean_qz, tf.exp(tf.negative(log_var_qz))), 0))
    # NOTE(review): this clip rebinds log_var_qz after the joint
    # statistics have been computed and the result is never read below,
    # so it appears to have no effect on the outputs — confirm intent.
    log_var_qz = tf.clip_by_value(log_var_qz, -15.0, 15.0)
    # Reparameterization trick on the joint Gaussian
    eps = tf.random_normal((batch_size, z_dim), 0, 1, dtype=tf.float32)
    samples_z = mean_qz_joint + tf.multiply(tf.exp(log_var_qz_joint / 2), eps)
    return samples_z, [mean_qz_joint, log_var_qz_joint]
def z_distribution_GMM(samples_s, z_dim, reuse):
    """Prior p(z | s): component-dependent mean, unit variance."""
    mean_pz = tf.layers.dense(inputs=samples_s, units=z_dim, activation=None,
                              kernel_initializer=tf.random_normal_initializer(stddev=0.05),
                              name='layer_1_' + 'mean_dec_z', reuse=reuse)
    # Unit variance: the log-variance is fixed at zero for every sample.
    log_var_pz = tf.zeros([tf.shape(samples_s)[0], z_dim])
    # Clip kept for symmetry with the other heads (a no-op on zeros).
    log_var_pz = tf.clip_by_value(log_var_pz, -15.0, 15.0)
    return mean_pz, log_var_pz
def y_partition(samples_y, types_list, y_dim_partition):
    """Split the joint y sample into per-variable column groups.

    Parameters
    ----------
    samples_y : array-like, shape (batch, sum(y_dim_partition))
        Joint samples of the intermediate representation y.
    types_list : list of dict
        One entry per variable in the data.
    y_dim_partition : list of int
        Number of y columns assigned to each variable; must have exactly
        one entry per variable in ``types_list``.

    Returns
    -------
    list
        One column slice of ``samples_y`` per variable, in order.

    Raises
    ------
    ValueError
        If the partition vector does not have one entry per variable.
        (The previous error message claimed the length had to be the
        number of variables "+ 1", contradicting the actual check.)
    """
    if len(y_dim_partition) != len(types_list):
        raise ValueError("The length of the partition vector must match "
                         "the number of variables in the data")
    # Cumulative column offsets with a leading 0, so variable i owns the
    # half-open column range [cumsum[i], cumsum[i + 1]).
    offsets = np.insert(np.cumsum(y_dim_partition), 0, 0)
    return [samples_y[:, offsets[i]:offsets[i + 1]]
            for i in range(len(types_list))]
def theta_estimation_from_z(samples_z, types_list, miss_list, batch_size, reuse):
    """Compute the likelihood parameters theta of p(x_d | z), one head
    per variable, dispatching on the variable's declared type."""
    heads = {
        'real': theta_real,
        'pos': theta_pos,
        'count': theta_count,
        'cat': theta_cat,
        'ordinal': theta_ordinal,
    }
    theta = []
    for i in range(len(types_list)):
        # Split the batch rows into missing (mask 0) / observed (mask 1).
        missing_y, observed_y = tf.dynamic_partition(samples_z, miss_list[:, i], num_partitions=2)
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(samples_z)[0]), miss_list[:, i],
                                                 num_partitions=2)
        nObs = tf.shape(observed_y)[0]
        # Pick the parameter head matching this variable's type.
        head = heads[types_list[i]['type']]
        theta.append(head(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse))
    return theta
def theta_estimation_from_y(samples_y, types_list, miss_list, batch_size, reuse):
    """Compute the likelihood parameters theta of p(x_d | y_d), one head
    per variable, dispatching on the variable's declared type."""
    heads = {
        'real': theta_real,
        'pos': theta_pos,
        'count': theta_count,
        'cat': theta_cat,
        'ordinal': theta_ordinal,
    }
    theta = []
    for i, y_d in enumerate(samples_y):
        # Split the batch rows into missing (mask 0) / observed (mask 1).
        missing_y, observed_y = tf.dynamic_partition(y_d, miss_list[:, i], num_partitions=2)
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(y_d)[0]), miss_list[:, i],
                                                 num_partitions=2)
        nObs = tf.shape(observed_y)[0]
        # Pick the parameter head matching this variable's type.
        head = heads[types_list[i]['type']]
        theta.append(head(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse))
    return theta
def theta_estimation_from_ys(samples_y, samples_s, types_list, miss_list, batch_size, reuse):
    """Compute the likelihood parameters theta of p(x_d | y_d, s), one
    s-conditioned head per variable, dispatching on the declared type."""
    heads = {
        'real': theta_real_s,
        'pos': theta_pos_s,
        'count': theta_count_s,
        'cat': theta_cat_s,
        'ordinal': theta_ordinal_s,
    }
    theta = []
    for i, y_d in enumerate(samples_y):
        # Split the batch rows into missing (mask 0) / observed (mask 1).
        missing_y, observed_y = tf.dynamic_partition(y_d, miss_list[:, i], num_partitions=2)
        missing_s, observed_s = tf.dynamic_partition(samples_s, miss_list[:, i], num_partitions=2)
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(y_d)[0]), miss_list[:, i],
                                                 num_partitions=2)
        nObs = tf.shape(observed_y)[0]
        # Pick the s-conditioned parameter head for this variable's type.
        head = heads[types_list[i]['type']]
        theta.append(head(observed_y, missing_y, observed_s, missing_s, condition_indices, types_list, nObs,
                          batch_size, i, reuse))
    return theta
def theta_real(observed_y, missing_y, condition_indices, types_list, nObs, batch_size, i, reuse):
# Mean layer
h2_mean = observed_data_layer(observed_y, missing_y, condition_indices, output_dim=types_list[i]['dim'],
name='layer_h2' + | |
# Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD 3 clause
"""
Base Algorithm for lattice Boltzmann methods
============================================
This module describes various methods involved during
the computation of a solution using lattice Boltzmann
methods.
The default kernels defined here are:
- transport kernel
- f2m kernel
- m2f_kernel
- equilibrium kernel
- relaxation kernel
- source terms kernel
- one_time_step kernel which makes the stream + source terms + relaxation
You can modify each function to define your own behavior. This is what is done
when we want to specialize our one_time_step kernel for Pull algorithm,
Push algorithm, ... All these kernels are defined using SymPy and thus must be
expressed as symbolic expressions. Then, we will generate the numerical code.
These kernels are defined in a class (the base class is BaseAlgorithm) where we
have all the needed information of our scheme.
Let's take an example: the f2m kernel allows one to compute the moments from the
distribution functions thanks to the matrix M defined in our scheme.
The formula is straightforward
m = M f
where m is the moments, f the distribution functions and M the matrix built with
the polynomials defining the moments of our scheme.
The SymPy expression can be written as
Eq(m, M*f)
where m and f are symbol matrices and M a SymPy matrix.
When we define a kernel, we do that in two steps. First we define locally
the expression (what we have to do for each point). Then we define the kernel
using this local kernel for the whole domain (we write the loop).
Therefore, for our example we have
def f2m_local(self, f, m):
return Eq(m, self.M*f)
def f2m(self):
space_index = self._get_space_idx_full()
f = self._get_indexed_1('f', space_index)
m = self._get_indexed_1('m', space_index)
return {'code': For(space_index, self.f2m_local(f, m))}
"""
import sympy as sp
from sympy import Eq
from ..generator import For, If
from ..symbolic import ix, iy, iz, nx, ny, nz, nv, indexed, space_idx, alltogether, recursive_sub
from ..symbolic import rel_ux, rel_uy, rel_uz
from .transform import parse_expr
from .ode import euler
from ..monitoring import monitor
class BaseAlgorithm:
    def __init__(self, scheme, sorder, generator, settings=None):
        """
        Store everything needed to build the kernels of the algorithm.

        Parameters
        ----------
        scheme :
            the lattice Boltzmann scheme (moment matrices M/invM,
            stencil, equilibrium EQ, relaxation parameters s, optional
            relative velocity, source terms, ...)
        sorder : list
            storage order of the arrays; entry 0 is the velocity axis,
            the remaining entries order the space axes
        generator :
            code generator used to produce the numerical kernels
        settings : dict, optional
            extra options for the algorithm (default: empty dict)
        """
        # Local coordinate symbols used inside the generated loops,
        # distinct from the scheme's global coordinate symbols.
        xx, yy, zz = sp.symbols('xx, yy, zz')
        self.symb_coord_local = [xx, yy, zz]
        self.symb_coord = scheme.symb_coord
        self.dim = scheme.dim
        # Total number of distribution functions (last entry of the
        # stencil's nv_ptr).
        self.ns = scheme.stencil.nv_ptr[-1]
        # Moment matrix and its inverse with the scheme parameters
        # replaced by their values.
        self.M = scheme.M.subs(scheme.param.items())
        self.invM = scheme.invM.subs(scheme.param.items())
        self.all_velocities = scheme.stencil.get_all_velocities()
        # Symbolic column vector of the moments.
        self.mv = sp.MatrixSymbol('m', self.ns, 1)
        if scheme.rel_vel is not None:
            # Relative-velocity scheme: keep the translation matrices
            # Tu/Tmu and the shifted moment matrices Mu/invMu.
            self.rel_vel_symb = [rel_ux, rel_uy, rel_uz][:self.dim]
            self.rel_vel = sp.Matrix(scheme.rel_vel)
            self.Tu = scheme.Tu.subs(scheme.param.items())
            self.Tmu = scheme.Tmu.subs(scheme.param.items())
            self.Mu = self.Tu * self.M
            self.invMu = self.invM * self.Tmu
            alltogether(self.Mu, nsimplify=True)
            alltogether(self.invMu, nsimplify=True)
        else:
            self.rel_vel_symb = None
        # Map conserved moment names (as strings) to their indices.
        self.consm = {}
        for k, v in scheme.consm.items():
            self.consm[str(k)] = v
        self.sorder = sorder
        self.generator = generator
        # Substitutions applied to the scheme's symbolic expressions:
        # global coordinates -> local ones, parameters -> values, and
        # conserved moments -> entries of the moment vector mv.
        subs_coords = list(zip(self.symb_coord, self.symb_coord_local))
        subs_moments = list(zip(scheme.consm.keys(), [self.mv[int(i), 0] for i in scheme.consm.values()]))
        to_subs = subs_coords + list(scheme.param.items())
        to_subs_full = to_subs + subs_moments
        self.eq = recursive_sub(scheme.EQ, to_subs_full)
        self.s = recursive_sub(scheme.s, to_subs_full)
        alltogether(self.eq, nsimplify=True)
        alltogether(self.s)
        if self.rel_vel_symb:
            self.rel_vel = recursive_sub(self.rel_vel, to_subs_full)
            alltogether(self.rel_vel)
        # Source terms as (moment, expression) pairs; constant
        # right-hand sides are kept as plain numbers.
        self.source_eq = []
        for source in scheme._source_terms:
            if source:
                for k, v in source.items():
                    lhs = recursive_sub(k, to_subs_full)
                    if isinstance(v, (float, int)):
                        rhs = v
                    else:
                        rhs = recursive_sub(v, to_subs)
                    self.source_eq.append((lhs, rhs))
        # Maximum velocity modulus per direction, padded with 0 up to 3d.
        self.vmax = [0]*3
        self.vmax[:scheme.dim] = scheme.stencil.vmax
        self.local_vars = self.symb_coord_local[:self.dim]
        self.settings = settings if settings else {}
def _get_space_idx_full(self):
"""
Return a list of SymPy Idx ordered with sorder
and with the dimensions.
ix -> [0, nx[
iy -> [0, ny[
iz -> [0, nz[
The length of the list is the dimension of the problem.
"""
return space_idx([(0, nx), (0, ny), (0, nz)], priority=self.sorder[1:])
def _get_space_idx_inner(self):
"""
Return a list of SymPy Idx ordered with sorder
and with the dimensions.
ix -> [vmax_x, nx-vmax_x[
iy -> [vmax_y, ny-vmax_y[
iz -> [vmax_z, nz-vmax_z[
where vmax_i is the maximum of the velocities modulus in direction i.
The length of the list is the dimension of the problem.
"""
return space_idx([(self.vmax[0], nx-self.vmax[0]),
(self.vmax[1], ny-self.vmax[1]),
(self.vmax[2], nz-self.vmax[2])],
priority=self.sorder[1:])
def _get_indexed_on_range(self, name, space_index):
"""
Return a SymPy matrix of indexed objects
(one component for each velocity index).
Parameters
----------
name : string
name of the SymPy symbol for the indexed object
space_index : list
list of SymPy Idx corresponding to space variables
Return
------
SymPy Matrix
indexed objects for each velocity
"""
return indexed(name, [self.ns, nx, ny, nz],
[nv] + space_index,
velocities_index=range(self.ns), priority=self.sorder)
def _get_indexed_on_velocities(self, name, space_index, velocities):
"""
Return a SymPy matrix of indexed objects (one component for each velocity).
Parameters
----------
name : string
name of the SymPy symbol for the indexed object
space_index : list
list of SymPy Idx corresponding to space variables
velocities : list
list of velocity components
Return
------
SymPy Matrix
indexed objects for each velocity
"""
return indexed(name, [self.ns, nx, ny, nz],
[nv] + space_index,
velocities=velocities, priority=self.sorder)
def relative_velocity(self, m):
rel_vel = sp.Matrix(self.rel_vel).subs(list(zip(self.mv, m)))
return [Eq(self.rel_vel_symb[i], rel_vel[i]) for i in range(self.dim)]
def restore_conserved_moments(self, m, f):
nconsm = len(self.consm)
if isinstance(m[nconsm:], list):
m_consm = sp.Matrix(m[:nconsm])
else:
m_consm = m[:nconsm]
return Eq(m_consm, sp.Matrix((self.Mu*f)[:nconsm]))
def coords(self):
coord = []
for x in self.symb_coord[:self.dim]:
coord.append(indexed(x.name, [self.ns, nx, ny, nz], priority=self.sorder[1:]))
return [Eq(xx, x) for xx, x in zip(self.symb_coord_local[:self.dim], coord)]
def transport_local(self, f, fnew):
"""
Return the symbolic expression of the lbm transport.
Parameters
----------
f : SymPy Matrix
indexed objects of rhs for the distributed functions
fnew : SymPy Matrix
indexed objects of lhs for the distributed functions
"""
return Eq(fnew, f)
def transport(self):
"""
Return the code expression of the lbm transport on the whole inner domain.
"""
space_index = self._get_space_idx_inner()
f = self._get_indexed_on_velocities('f', space_index, -self.all_velocities)
fnew = self._get_indexed_on_range('fnew', space_index)
return {'code': For(space_index, self.transport_local(f, fnew))}
    def f2m_local(self, f, m, with_rel_velocity=False):
        """
        Return symbolic expression which computes the moments from the
        distributed functions.

        Parameters
        ----------
        f : SymPy Matrix
            indexed objects for the distributed functions
        m : SymPy Matrix
            indexed objects for the moments
        with_rel_velocity : boolean
            check if the scheme uses relative velocity.
            If yes, the conserved moments must be computed first.
            (default is False)
        """
        if with_rel_velocity:
            nconsm = len(self.consm)
            # m may be a plain list of indexed objects or a SymPy
            # matrix; normalize the slices to SymPy matrices when it is
            # a list.
            if isinstance(m[:nconsm], list):
                m_consm = sp.Matrix(m[:nconsm])
                m_notconsm = sp.Matrix(m[nconsm:])
            else:
                m_consm = m[:nconsm]
                m_notconsm = m[nconsm:]
            # Three ordered steps:
            # 1) conserved moments from M*f,
            # 2) relative velocity symbols evaluated from those moments,
            # 3) remaining moments from the shifted matrix Mu*f
            #    (Mu is built from the translation matrix Tu).
            return [Eq(m_consm, sp.Matrix((self.M*f)[:nconsm])),
                    *self.relative_velocity(m),
                    Eq(m_notconsm, sp.Matrix((self.Mu*f)[nconsm:]))]
        else:
            return Eq(m, self.M*f)
def f2m(self):
"""
Return the code expression which computes the moments from the
distributed functions on the whole domain.
"""
space_index = self._get_space_idx_full()
f = self._get_indexed_on_range('f', space_index)
m = self._get_indexed_on_range('m', space_index)
return {'code': For(space_index, self.f2m_local(f, m))}
def m2f_local(self, m, f, with_rel_velocity=False):
"""
Return symbolic expression which computes the distributed functions
from the moments.
Parameters
----------
m : SymPy Matrix
indexed objects for the moments
f : SymPy Matrix
indexed objects for the distributed functions
with_rel_velocity : boolean
check if the scheme uses relative velocity.
If yes, the conserved moments must be computed first.
(default is False)
"""
if with_rel_velocity:
return Eq(f, self.invMu*m)
else:
return Eq(f, self.invM*m)
def m2f(self):
"""
Return the code expression which computes the distributed functions
from the moments on the whole domain.
"""
space_index = self._get_space_idx_full()
f = self._get_indexed_on_range('f', space_index)
m = self._get_indexed_on_range('m', space_index)
return {'code': For(space_index, self.m2f_local(m, f))}
def equilibrium_local(self, m):
"""
Return symbolic expression which computes the equilibrium.
Parameters
----------
m : SymPy Matrix
indexed objects for the moments
"""
eq = self.eq.subs(list(zip(self.mv, m)))
return Eq(m, eq)
def equilibrium(self):
"""
Return the code expression which computes the equilibrium
on the whole domain.
"""
space_index = self._get_space_idx_full()
m = self._get_indexed_on_range('m', space_index)
return {'code': For(space_index, self.equilibrium_local(m))}
def relaxation_local(self, m, with_rel_velocity=False):
"""
Return symbolic expression which computes the relaxation operator.
Parameters
----------
m : SymPy Matrix
indexed objects for the moments
with_rel_velocity : boolean
check if the scheme uses relative velocity.
(default is False)
"""
if with_rel_velocity:
eq = (self.Tu*self.eq).subs(list(zip(self.mv, m)))
else:
eq = self.eq.subs(list(zip(self.mv, m)))
relax = (1 - self.s)*m + self.s*eq
alltogether(relax)
return Eq(m, relax)
def relaxation(self):
"""
Return the code expression which computes the relaxation
on the whole domain.
"""
space_index = self._get_space_idx_full()
m = self._get_indexed_on_range('m', space_index)
return {'code': For(space_index, self.relaxation_local(m))}
def source_term_local(self, m):
"""
Return symbolic expression which computes the source term
using explicit Euler (should be more flexible in a near future).
Parameters
----------
m : SymPy Matrix
indexed objects for the | |
= "minecraft:warped_planks"
crimson_slab = "minecraft:crimson_slab"
warped_slab = "minecraft:warped_slab"
crimson_pressure_plate = "minecraft:crimson_pressure_plate"
warped_pressure_plate = "minecraft:warped_pressure_plate"
crimson_fence = "minecraft:crimson_fence"
warped_fence = "minecraft:warped_fence"
crimson_trapdoor = "minecraft:crimson_trapdoor"
warped_trapdoor = "minecraft:warped_trapdoor"
crimson_fence_gate = "minecraft:crimson_fence_gate"
warped_fence_gate = "minecraft:warped_fence_gate"
crimson_stairs = "minecraft:crimson_stairs"
warped_stairs = "minecraft:warped_stairs"
crimson_button = "minecraft:crimson_button"
warped_button = "minecraft:warped_button"
crimson_door = "minecraft:crimson_door"
warped_door = "minecraft:warped_door"
crimson_sign = "minecraft:crimson_sign"
warped_sign = "minecraft:warped_sign"
crimson_wall_sign = "minecraft:crimson_wall_sign"
warped_wall_sign = "minecraft:warped_wall_sign"
structure_block = "minecraft:structure_block"
jigsaw = "minecraft:jigsaw"
composter = "minecraft:composter"
target = "minecraft:target"
bee_nest = "minecraft:bee_nest"
beehive = "minecraft:beehive"
honey_block = "minecraft:honey_block"
honeycomb_block = "minecraft:honeycomb_block"
netherite_block = "minecraft:netherite_block"
ancient_debris = "minecraft:ancient_debris"
crying_obsidian = "minecraft:crying_obsidian"
respawn_anchor = "minecraft:respawn_anchor"
potted_crimson_fungus = "minecraft:potted_crimson_fungus"
potted_warped_fungus = "minecraft:potted_warped_fungus"
potted_crimson_roots = "minecraft:potted_crimson_roots"
potted_warped_roots = "minecraft:potted_warped_roots"
lodestone = "minecraft:lodestone"
blackstone = "minecraft:blackstone"
blackstone_stairs = "minecraft:blackstone_stairs"
blackstone_wall = "minecraft:blackstone_wall"
blackstone_slab = "minecraft:blackstone_slab"
polished_blackstone = "minecraft:polished_blackstone"
polished_blackstone_bricks = "minecraft:polished_blackstone_bricks"
cracked_polished_blackstone_bricks = "minecraft:cracked_polished_blackstone_bricks"
chiseled_polished_blackstone = "minecraft:chiseled_polished_blackstone"
polished_blackstone_brick_slab = "minecraft:polished_blackstone_brick_slab"
polished_blackstone_brick_stairs = "minecraft:polished_blackstone_brick_stairs"
polished_blackstone_brick_wall = "minecraft:polished_blackstone_brick_wall"
gilded_blackstone = "minecraft:gilded_blackstone"
polished_blackstone_stairs = "minecraft:polished_blackstone_stairs"
polished_blackstone_slab = "minecraft:polished_blackstone_slab"
polished_blackstone_pressure_plate = "minecraft:polished_blackstone_pressure_plate"
polished_blackstone_button = "minecraft:polished_blackstone_button"
polished_blackstone_wall = "minecraft:polished_blackstone_wall"
chiseled_nether_bricks = "minecraft:chiseled_nether_bricks"
cracked_nether_bricks = "minecraft:cracked_nether_bricks"
quartz_bricks = "minecraft:quartz_bricks"
class item(enum.Enum):
"""
* air
* stone
* granite
* polished_granite
* diorite
* polished_diorite
* andesite
* polished_andesite
* grass_block
* dirt
* coarse_dirt
* podzol
* crimson_nylium
* warped_nylium
* cobblestone
* oak_planks
* spruce_planks
* birch_planks
* jungle_planks
* acacia_planks
* dark_oak_planks
* crimson_planks
* warped_planks
* oak_sapling
* spruce_sapling
* birch_sapling
* jungle_sapling
* acacia_sapling
* dark_oak_sapling
* bedrock
* sand
* red_sand
* gravel
* gold_ore
* iron_ore
* coal_ore
* nether_gold_ore
* oak_log
* spruce_log
* birch_log
* jungle_log
* acacia_log
* dark_oak_log
* crimson_stem
* warped_stem
* stripped_oak_log
* stripped_spruce_log
* stripped_birch_log
* stripped_jungle_log
* stripped_acacia_log
* stripped_dark_oak_log
* stripped_crimson_stem
* stripped_warped_stem
* stripped_oak_wood
* stripped_spruce_wood
* stripped_birch_wood
* stripped_jungle_wood
* stripped_acacia_wood
* stripped_dark_oak_wood
* stripped_crimson_hyphae
* stripped_warped_hyphae
* oak_wood
* spruce_wood
* birch_wood
* jungle_wood
* acacia_wood
* dark_oak_wood
* crimson_hyphae
* warped_hyphae
* oak_leaves
* spruce_leaves
* birch_leaves
* jungle_leaves
* acacia_leaves
* dark_oak_leaves
* sponge
* wet_sponge
* glass
* lapis_ore
* lapis_block
* dispenser
* sandstone
* chiseled_sandstone
* cut_sandstone
* note_block
* powered_rail
* detector_rail
* sticky_piston
* cobweb
* grass
* fern
* dead_bush
* seagrass
* sea_pickle
* piston
* white_wool
* orange_wool
* magenta_wool
* light_blue_wool
* yellow_wool
* lime_wool
* pink_wool
* gray_wool
* light_gray_wool
* cyan_wool
* purple_wool
* blue_wool
* brown_wool
* green_wool
* red_wool
* black_wool
* dandelion
* poppy
* blue_orchid
* allium
* azure_bluet
* red_tulip
* orange_tulip
* white_tulip
* pink_tulip
* oxeye_daisy
* cornflower
* lily_of_the_valley
* wither_rose
* brown_mushroom
* red_mushroom
* crimson_fungus
* warped_fungus
* crimson_roots
* warped_roots
* nether_sprouts
* weeping_vines
* twisting_vines
* sugar_cane
* kelp
* bamboo
* gold_block
* iron_block
* oak_slab
* spruce_slab
* birch_slab
* jungle_slab
* acacia_slab
* dark_oak_slab
* crimson_slab
* warped_slab
* stone_slab
* smooth_stone_slab
* sandstone_slab
* cut_sandstone_slab
* petrified_oak_slab
* cobblestone_slab
* brick_slab
* stone_brick_slab
* nether_brick_slab
* quartz_slab
* red_sandstone_slab
* cut_red_sandstone_slab
* purpur_slab
* prismarine_slab
* prismarine_brick_slab
* dark_prismarine_slab
* smooth_quartz
* smooth_red_sandstone
* smooth_sandstone
* smooth_stone
* bricks
* tnt
* bookshelf
* mossy_cobblestone
* obsidian
* torch
* end_rod
* chorus_plant
* chorus_flower
* purpur_block
* purpur_pillar
* purpur_stairs
* spawner
* oak_stairs
* chest
* diamond_ore
* diamond_block
* crafting_table
* farmland
* furnace
* ladder
* rail
* cobblestone_stairs
* lever
* stone_pressure_plate
* oak_pressure_plate
* spruce_pressure_plate
* birch_pressure_plate
* jungle_pressure_plate
* acacia_pressure_plate
* dark_oak_pressure_plate
* crimson_pressure_plate
* warped_pressure_plate
* polished_blackstone_pressure_plate
* redstone_ore
* redstone_torch
* snow
* ice
* snow_block
* cactus
* clay
* jukebox
* oak_fence
* spruce_fence
* birch_fence
* jungle_fence
* acacia_fence
* dark_oak_fence
* crimson_fence
* warped_fence
* pumpkin
* carved_pumpkin
* netherrack
* soul_sand
* soul_soil
* basalt
* polished_basalt
* soul_torch
* glowstone
* jack_o_lantern
* oak_trapdoor
* spruce_trapdoor
* birch_trapdoor
* jungle_trapdoor
* acacia_trapdoor
* dark_oak_trapdoor
* crimson_trapdoor
* warped_trapdoor
* infested_stone
* infested_cobblestone
* infested_stone_bricks
* infested_mossy_stone_bricks
* infested_cracked_stone_bricks
* infested_chiseled_stone_bricks
* stone_bricks
* mossy_stone_bricks
* cracked_stone_bricks
* chiseled_stone_bricks
* brown_mushroom_block
* red_mushroom_block
* mushroom_stem
* iron_bars
* chain
* glass_pane
* melon
* vine
* oak_fence_gate
* spruce_fence_gate
* birch_fence_gate
* jungle_fence_gate
* acacia_fence_gate
* dark_oak_fence_gate
* crimson_fence_gate
* warped_fence_gate
* brick_stairs
* stone_brick_stairs
* mycelium
* lily_pad
* nether_bricks
* cracked_nether_bricks
* chiseled_nether_bricks
* nether_brick_fence
* nether_brick_stairs
* enchanting_table
* end_portal_frame
* end_stone
* end_stone_bricks
* dragon_egg
* redstone_lamp
* sandstone_stairs
* emerald_ore
* ender_chest
* tripwire_hook
* emerald_block
* spruce_stairs
* birch_stairs
* jungle_stairs
* crimson_stairs
* warped_stairs
* command_block
* beacon
* cobblestone_wall
* mossy_cobblestone_wall
* brick_wall
* prismarine_wall
* red_sandstone_wall
* mossy_stone_brick_wall
* granite_wall
* stone_brick_wall
* nether_brick_wall
* andesite_wall
* red_nether_brick_wall
* sandstone_wall
* end_stone_brick_wall
* diorite_wall
* blackstone_wall
* polished_blackstone_wall
* polished_blackstone_brick_wall
* stone_button
* oak_button
* spruce_button
* birch_button
* jungle_button
* acacia_button
* dark_oak_button
* crimson_button
* warped_button
* polished_blackstone_button
* anvil
* chipped_anvil
* damaged_anvil
* trapped_chest
* light_weighted_pressure_plate
* heavy_weighted_pressure_plate
* daylight_detector
* redstone_block
* nether_quartz_ore
* hopper
* chiseled_quartz_block
* quartz_block
* quartz_bricks
* quartz_pillar
* quartz_stairs
* activator_rail
* dropper
* white_terracotta
* orange_terracotta
* magenta_terracotta
* light_blue_terracotta
* yellow_terracotta
* lime_terracotta
* pink_terracotta
* gray_terracotta
* light_gray_terracotta
* cyan_terracotta
* purple_terracotta
* blue_terracotta
* brown_terracotta
* green_terracotta
* red_terracotta
* black_terracotta
* barrier
* iron_trapdoor
* hay_block
* white_carpet
* orange_carpet
* magenta_carpet
* light_blue_carpet
* yellow_carpet
* lime_carpet
* pink_carpet
* gray_carpet
* light_gray_carpet
* cyan_carpet
* purple_carpet
* blue_carpet
* brown_carpet
* green_carpet
* red_carpet
* black_carpet
* terracotta
* coal_block
* packed_ice
* acacia_stairs
* dark_oak_stairs
* slime_block
* grass_path
* sunflower
* lilac
* rose_bush
* peony
* tall_grass
* large_fern
* white_stained_glass
* orange_stained_glass
* magenta_stained_glass
* light_blue_stained_glass
* yellow_stained_glass
* lime_stained_glass
* pink_stained_glass
* gray_stained_glass
* light_gray_stained_glass
* cyan_stained_glass
* purple_stained_glass
* blue_stained_glass
* brown_stained_glass
* green_stained_glass
* red_stained_glass
* black_stained_glass
* white_stained_glass_pane
* orange_stained_glass_pane
* magenta_stained_glass_pane
* light_blue_stained_glass_pane
* yellow_stained_glass_pane
* lime_stained_glass_pane
* pink_stained_glass_pane
* gray_stained_glass_pane
* light_gray_stained_glass_pane
* cyan_stained_glass_pane
* purple_stained_glass_pane
* blue_stained_glass_pane
* brown_stained_glass_pane
* green_stained_glass_pane
* red_stained_glass_pane
* black_stained_glass_pane
* prismarine
* prismarine_bricks
* dark_prismarine
* prismarine_stairs
* prismarine_brick_stairs
* dark_prismarine_stairs
* sea_lantern
* red_sandstone
* chiseled_red_sandstone
* cut_red_sandstone
* red_sandstone_stairs
* repeating_command_block
* chain_command_block
* magma_block
* nether_wart_block
* warped_wart_block
* red_nether_bricks
* bone_block
* structure_void
* observer
* shulker_box
* white_shulker_box
* orange_shulker_box
* magenta_shulker_box
* light_blue_shulker_box
* yellow_shulker_box
* lime_shulker_box
* pink_shulker_box
* gray_shulker_box
* light_gray_shulker_box
* cyan_shulker_box
* purple_shulker_box
* blue_shulker_box
* brown_shulker_box
* green_shulker_box
* red_shulker_box
* black_shulker_box
* white_glazed_terracotta
* orange_glazed_terracotta
* magenta_glazed_terracotta
* light_blue_glazed_terracotta
* yellow_glazed_terracotta
* lime_glazed_terracotta
* pink_glazed_terracotta
* gray_glazed_terracotta
* light_gray_glazed_terracotta
* cyan_glazed_terracotta
* purple_glazed_terracotta
* blue_glazed_terracotta
* brown_glazed_terracotta
* green_glazed_terracotta
* red_glazed_terracotta
* black_glazed_terracotta
* white_concrete
* orange_concrete
* magenta_concrete
* light_blue_concrete
* yellow_concrete
* lime_concrete
* pink_concrete
* gray_concrete
* light_gray_concrete
* cyan_concrete
* purple_concrete
* blue_concrete
* brown_concrete
* green_concrete
* red_concrete
* black_concrete
* white_concrete_powder
* orange_concrete_powder
* magenta_concrete_powder
* light_blue_concrete_powder
* yellow_concrete_powder
* lime_concrete_powder
* pink_concrete_powder
* gray_concrete_powder
* light_gray_concrete_powder
* cyan_concrete_powder
* purple_concrete_powder
* blue_concrete_powder
* brown_concrete_powder
* green_concrete_powder
* red_concrete_powder
* black_concrete_powder
* turtle_egg
* dead_tube_coral_block
* dead_brain_coral_block
* dead_bubble_coral_block
* dead_fire_coral_block
* dead_horn_coral_block
* tube_coral_block
* brain_coral_block
* bubble_coral_block
* fire_coral_block
* horn_coral_block
* tube_coral
* brain_coral
* bubble_coral
* fire_coral
* horn_coral
* dead_brain_coral
* dead_bubble_coral
* dead_fire_coral
* dead_horn_coral
* dead_tube_coral
* tube_coral_fan
* brain_coral_fan
* bubble_coral_fan
* fire_coral_fan
* horn_coral_fan
* dead_tube_coral_fan
* dead_brain_coral_fan
* dead_bubble_coral_fan
* dead_fire_coral_fan
* dead_horn_coral_fan
* blue_ice
* conduit
* polished_granite_stairs
* smooth_red_sandstone_stairs
* mossy_stone_brick_stairs
* polished_diorite_stairs
* mossy_cobblestone_stairs
* end_stone_brick_stairs
* stone_stairs
* smooth_sandstone_stairs
* smooth_quartz_stairs
* granite_stairs
* andesite_stairs
* red_nether_brick_stairs
* polished_andesite_stairs
* diorite_stairs
* polished_granite_slab
* smooth_red_sandstone_slab
* mossy_stone_brick_slab
* polished_diorite_slab
* mossy_cobblestone_slab
* end_stone_brick_slab
* smooth_sandstone_slab
* smooth_quartz_slab
* granite_slab
* | |
" 3650 -nodes -subj /commonName="
"{hostname}/emailAddress={email}"
" -out /etc/ssl/certs/postfix.pem"
" -keyout /etc/ssl/private/"
"postfix.pem"
.format(hostname=EEVariables.ee_fqdn,
email=EEVariables.ee_email))
EEShellExec.cmd_exec(self, "chmod 0600 /etc/ssl/private"
"/postfix.pem")
EEShellExec.cmd_exec(self, "postconf -e \"smtpd_tls_cert_"
"file = /etc/ssl/certs/postfix.pem\"")
EEShellExec.cmd_exec(self, "postconf -e \"smtpd_tls_key_"
"file = /etc/ssl/private/"
"postfix.pem\"")
except CommandExecutionError as e:
Log.Error(self, "Failed to update Dovecot configuration")
# Sieve configuration
if not os.path.exists('/var/lib/dovecot/sieve/'):
Log.debug(self, 'Creating directory '
'/var/lib/dovecot/sieve/ ')
os.makedirs('/var/lib/dovecot/sieve/')
# Custom sieve configuration by EasyEngine
data = dict()
Log.debug(self, "Writting configuration of EasyEngine into "
"file /var/lib/dovecot/sieve/default.sieve")
ee_sieve = open('/var/lib/dovecot/sieve/default.sieve',
encoding='utf-8', mode='w')
self.app.render((data), 'default-sieve.mustache',
out=ee_sieve)
ee_sieve.close()
# Compile sieve rules
Log.debug(self, "Setting Privileges to dovecot ")
# EEShellExec.cmd_exec(self, "chown -R vmail:vmail /var/lib"
# "/dovecot")
EEFileUtils.chown(self, "/var/lib/dovecot", 'vmail', 'vmail',
recursive=True)
try:
EEShellExec.cmd_exec(self, "sievec /var/lib/dovecot/"
"/sieve/default.sieve")
except CommandExecutionError as e:
raise SiteError("Failed to compile default.sieve")
EEGit.add(self, ["/etc/postfix", "/etc/dovecot"],
msg="Installed mail server")
EEService.restart_service(self, 'dovecot')
EEService.reload_service(self, 'postfix')
if set(EEVariables.ee_mailscanner).issubset(set(apt_packages)):
# Set up Custom amavis configuration
data = dict()
Log.debug(self, "Configuring file /etc/amavis/conf.d"
"/15-content_filter_mode")
ee_amavis = open('/etc/amavis/conf.d/15-content_filter_mode',
encoding='utf-8', mode='w')
self.app.render((data), '15-content_filter_mode.mustache',
out=ee_amavis)
ee_amavis.close()
# Amavis ViMbadmin configuration
if os.path.isfile("/etc/postfix/mysql/virtual_alias_maps.cf"):
vm_host = os.popen("grep hosts /etc/postfix/mysql/virtual_"
"alias_maps.cf | awk \'{ print $3 }\' |"
" tr -d '\\n'").read()
vm_pass = os.popen("grep password /etc/postfix/mysql/"
"virtual_alias_maps.cf | awk \'{ print "
"$3 }\' | tr -d '\\n'").read()
data = dict(host=vm_host, password=vm_pass)
vm_config = open('/etc/amavis/conf.d/50-user',
encoding='utf-8', mode='w')
self.app.render((data), '50-user.mustache', out=vm_config)
vm_config.close()
# Amavis postfix configuration
try:
EEShellExec.cmd_exec(self, "postconf -e \"content_filter ="
" smtp-amavis:[127.0.0.1]:10024\"")
EEShellExec.cmd_exec(self, "sed -i \"s/1 pickup/1 "
" pickup"
"\\n -o content_filter=\\n "
" -o receive_override_options="
"no_header_body"
"_checks/\" /etc/postfix/master.cf")
except CommandExecutionError as e:
raise SiteError("Failed to update Amavis-Postfix config")
amavis_master = ("""smtp-amavis unix - - n - 2 smtp
-o smtp_data_done_timeout=1200
-o smtp_send_xforward_command=yes
-o disable_dns_lookups=yes
-o max_use=20
127.0.0.1:10025 inet n - n - - smtpd
-o content_filter=
-o smtpd_delay_reject=no
-o smtpd_client_restrictions=permit_mynetworks,reject
-o smtpd_helo_restrictions=
-o smtpd_sender_restrictions=
-o smtpd_recipient_restrictions=permit_mynetworks,reject
-o smtpd_data_restrictions=reject_unauth_pipelining
-o smtpd_end_of_data_restrictions=
-o smtpd_restriction_classes=
-o mynetworks=127.0.0.0/8
-o smtpd_error_sleep_time=0
-o smtpd_soft_error_limit=1001
-o smtpd_hard_error_limit=1000
-o smtpd_client_connection_count_limit=0
-o smtpd_client_connection_rate_limit=0
-o local_header_rewrite_clients=""")
with open("/etc/postfix/master.cf",
encoding='utf-8', mode='a') as am_config:
am_config.write(amavis_master)
try:
# Amavis ClamAV configuration
Log.debug(self, "Adding new user clamav amavis")
EEShellExec.cmd_exec(self, "adduser clamav amavis")
Log.debug(self, "Adding new user amavis clamav")
EEShellExec.cmd_exec(self, "adduser amavis clamav")
Log.debug(self, "Setting Privileges to /var/lib/amavis"
"/tmp")
EEFileUtils.chmod(self, "/var/lib/amavis/tmp", 0o755)
# Update ClamAV database
Log.debug(self, "Updating database")
EEShellExec.cmd_exec(self, "freshclam")
except CommandExecutionError as e:
raise SiteError(" Unable to update ClamAV-Amavis config")
EEGit.add(self, ["/etc/amavis"], msg="Adding Amavis into Git")
EEService.restart_service(self, 'dovecot')
EEService.reload_service(self, 'postfix')
EEService.restart_service(self, 'amavis')
if len(packages):
if any('/usr/bin/wp' == x[1] for x in packages):
Log.debug(self, "Setting Privileges to /usr/bin/wp file ")
EEFileUtils.chmod(self, "/usr/bin/wp", 0o775)
if any('/tmp/pma.tar.gz' == x[1]
for x in packages):
EEExtract.extract(self, '/tmp/pma.tar.gz', '/tmp/')
Log.debug(self, 'Extracting file /tmp/pma.tar.gz to '
'location /tmp/')
if not os.path.exists('{0}22222/htdocs/db'
.format(EEVariables.ee_webroot)):
Log.debug(self, "Creating new directory "
"{0}22222/htdocs/db"
.format(EEVariables.ee_webroot))
os.makedirs('{0}22222/htdocs/db'
.format(EEVariables.ee_webroot))
shutil.move('/tmp/phpmyadmin-STABLE/',
'{0}22222/htdocs/db/pma/'
.format(EEVariables.ee_webroot))
shutil.copyfile('{0}22222/htdocs/db/pma/config.sample.inc.php'
.format(EEVariables.ee_webroot),
'{0}22222/htdocs/db/pma/config.inc.php'
.format(EEVariables.ee_webroot))
Log.debug(self, 'Setting Blowfish Secret Key FOR COOKIE AUTH to '
'{0}22222/htdocs/db/pma/config.inc.php file '
.format(EEVariables.ee_webroot))
blowfish_key = ''.join([random.choice
(string.ascii_letters + string.digits)
for n in range(10)])
EEFileUtils.searchreplace(self,
'{0}22222/htdocs/db/pma/config.inc.php'
.format(EEVariables.ee_webroot),
"$cfg[\'blowfish_secret\'] = \'\';","$cfg[\'blowfish_secret\'] = \'{0}\';"
.format(blowfish_key))
Log.debug(self, 'Setting HOST Server For Mysql to '
'{0}22222/htdocs/db/pma/config.inc.php file '
.format(EEVariables.ee_webroot))
EEFileUtils.searchreplace(self,
'{0}22222/htdocs/db/pma/config.inc.php'
.format(EEVariables.ee_webroot),
"$cfg[\'Servers\'][$i][\'host\'] = \'localhost\';","$cfg[\'Servers\'][$i][\'host\'] = \'{0}\';"
.format(EEVariables.ee_mysql_host))
Log.debug(self, 'Setting Privileges of webroot permission to '
'{0}22222/htdocs/db/pma file '
.format(EEVariables.ee_webroot))
EEFileUtils.chown(self, '{0}22222'
.format(EEVariables.ee_webroot),
EEVariables.ee_php_user,
EEVariables.ee_php_user,
recursive=True)
if any('/tmp/memcache.tar.gz' == x[1]
for x in packages):
Log.debug(self, "Extracting memcache.tar.gz to location"
" {0}22222/htdocs/cache/memcache "
.format(EEVariables.ee_webroot))
EEExtract.extract(self, '/tmp/memcache.tar.gz',
'{0}22222/htdocs/cache/memcache'
.format(EEVariables.ee_webroot))
Log.debug(self, "Setting Privileges to "
"{0}22222/htdocs/cache/memcache file"
.format(EEVariables.ee_webroot))
EEFileUtils.chown(self, '{0}22222'
.format(EEVariables.ee_webroot),
EEVariables.ee_php_user,
EEVariables.ee_php_user,
recursive=True)
if any('/tmp/webgrind.tar.gz' == x[1]
for x in packages):
Log.debug(self, "Extracting file webgrind.tar.gz to "
"location /tmp/ ")
EEExtract.extract(self, '/tmp/webgrind.tar.gz', '/tmp/')
if not os.path.exists('{0}22222/htdocs/php'
.format(EEVariables.ee_webroot)):
Log.debug(self, "Creating directroy "
"{0}22222/htdocs/php"
.format(EEVariables.ee_webroot))
os.makedirs('{0}22222/htdocs/php'
.format(EEVariables.ee_webroot))
shutil.move('/tmp/webgrind-master/',
'{0}22222/htdocs/php/webgrind'
.format(EEVariables.ee_webroot))
EEFileUtils.searchreplace(self, "{0}22222/htdocs/php/webgrind/"
"config.php"
.format(EEVariables.ee_webroot),
"/usr/local/bin/dot", "/usr/bin/dot")
EEFileUtils.searchreplace(self, "{0}22222/htdocs/php/webgrind/"
"config.php"
.format(EEVariables.ee_webroot),
"Europe/Copenhagen",
EEVariables.ee_timezone)
EEFileUtils.searchreplace(self, "{0}22222/htdocs/php/webgrind/"
"config.php"
.format(EEVariables.ee_webroot),
"90", "100")
Log.debug(self, "Setting Privileges of webroot permission to "
"{0}22222/htdocs/php/webgrind/ file "
.format(EEVariables.ee_webroot))
EEFileUtils.chown(self, '{0}22222'
.format(EEVariables.ee_webroot),
EEVariables.ee_php_user,
EEVariables.ee_php_user,
recursive=True)
if any('/tmp/anemometer.tar.gz' == x[1]
for x in packages):
Log.debug(self, "Extracting file anemometer.tar.gz to "
"location /tmp/ ")
EEExtract.extract(self, '/tmp/anemometer.tar.gz', '/tmp/')
if not os.path.exists('{0}22222/htdocs/db/'
.format(EEVariables.ee_webroot)):
Log.debug(self, "Creating directory")
os.makedirs('{0}22222/htdocs/db/'
.format(EEVariables.ee_webroot))
shutil.move('/tmp/Anemometer-master',
'{0}22222/htdocs/db/anemometer'
.format(EEVariables.ee_webroot))
chars = ''.join(random.sample(string.ascii_letters, 8))
try:
EEShellExec.cmd_exec(self, 'mysql < {0}22222/htdocs/db'
'/anemometer/install.sql'
.format(EEVariables.ee_webroot))
except CommandExecutionError as e:
raise SiteError("Unable to import Anemometer database")
EEMysql.execute(self, 'grant select on *.* to \'anemometer\''
'@\'{0}\' IDENTIFIED'
' BY \'{1}\''.format(self.app.config.get('mysql',
'grant-host'),chars))
Log.debug(self, "grant all on slow-query-log.*"
" to anemometer@root_user IDENTIFIED BY password ")
EEMysql.execute(self, 'grant all on slow_query_log.* to'
'\'anemometer\'@\'{0}\' IDENTIFIED'
' BY \'{1}\''.format(self.app.config.get(
'mysql', 'grant-host'),
chars),
errormsg="cannot grant priviledges", log=False)
# Custom Anemometer configuration
Log.debug(self, "configration Anemometer")
data = dict(host=EEVariables.ee_mysql_host, port='3306',
user='anemometer', password=chars)
ee_anemometer = open('{0}22222/htdocs/db/anemometer'
'/conf/config.inc.php'
.format(EEVariables.ee_webroot),
encoding='utf-8', mode='w')
self.app.render((data), 'anemometer.mustache',
out=ee_anemometer)
ee_anemometer.close()
if any('/usr/bin/pt-query-advisor' == x[1]
for x in packages):
EEFileUtils.chmod(self, "/usr/bin/pt-query-advisor", 0o775)
if any('/tmp/vimbadmin.tar.gz' == x[1] for x in packages):
# Extract ViMbAdmin
Log.debug(self, "Extracting ViMbAdmin.tar.gz to "
"location /tmp/")
EEExtract.extract(self, '/tmp/vimbadmin.tar.gz', '/tmp/')
if not os.path.exists('{0}22222/htdocs/'
.format(EEVariables.ee_webroot)):
Log.debug(self, "Creating directory "
"{0}22222/htdocs/"
.format(EEVariables.ee_webroot))
os.makedirs('{0}22222/htdocs/'
.format(EEVariables.ee_webroot))
shutil.move('/tmp/ViMbAdmin-{0}/'
.format(EEVariables.ee_vimbadmin),
'{0}22222/htdocs/vimbadmin/'
.format(EEVariables.ee_webroot))
# Donwload composer and install ViMbAdmin
Log.debug(self, "Downloading composer "
"https://getcomposer.org/installer | php ")
try:
EEShellExec.cmd_exec(self, "cd {0}22222/htdocs"
"/vimbadmin; curl"
" -sS https://getcomposer.org/"
"installer |"
" php".format(EEVariables.ee_webroot))
Log.debug(self, "Installating of composer")
EEShellExec.cmd_exec(self, "cd {0}22222/htdocs"
"/vimbadmin && "
"php composer.phar install "
"--prefer-dist"
" --no-dev && rm -f {1}22222/htdocs"
"/vimbadmin/composer.phar"
.format(EEVariables.ee_webroot,
EEVariables.ee_webroot))
except CommandExecutionError as e:
raise SiteError("Failed to setup ViMbAdmin")
# Configure vimbadmin database
vm_passwd = ''.join(random.sample(string.ascii_letters, 8))
Log.debug(self, "Creating vimbadmin database if not exist")
EEMysql.execute(self, "create database if not exists"
" vimbadmin")
Log.debug(self, " grant all privileges on `vimbadmin`.* to"
" `vimbadmin`@`{0}` IDENTIFIED BY"
" ' '".format(self.app.config.get('mysql',
'grant-host')))
EEMysql.execute(self, "grant all privileges on `vimbadmin`.* "
" to `vimbadmin`@`{0}` IDENTIFIED BY"
" '{1}'".format(self.app.config.get('mysql',
'grant-host'), vm_passwd),
errormsg="Cannot grant "
"user privileges", log=False)
vm_salt = (''.join(random.sample(string.ascii_letters +
string.ascii_letters, 64)))
# Custom Vimbadmin configuration by EasyEngine
data = dict(salt=vm_salt, host=EEVariables.ee_mysql_host,
password=<PASSWORD>,
php_user=EEVariables.ee_php_user)
Log.debug(self, 'Writting the ViMbAdmin configuration to '
'file {0}22222/htdocs/vimbadmin/application/'
'configs/application.ini'
.format(EEVariables.ee_webroot))
ee_vmb = open('{0}22222/htdocs/vimbadmin/application/'
'configs/application.ini'
.format(EEVariables.ee_webroot),
encoding='utf-8', mode='w')
self.app.render((data), 'vimbadmin.mustache',
out=ee_vmb)
ee_vmb.close()
shutil.copyfile("{0}22222/htdocs/vimbadmin/public/"
".htaccess.dist"
.format(EEVariables.ee_webroot),
"{0}22222/htdocs/vimbadmin/public/"
".htaccess".format(EEVariables.ee_webroot))
Log.debug(self, "Executing command "
"{0}22222/htdocs/vimbadmin/bin"
"/doctrine2-cli.php orm:schema-tool:"
"create".format(EEVariables.ee_webroot))
try:
EEShellExec.cmd_exec(self, "{0}22222/htdocs/vimbadmin"
"/bin/doctrine2-cli.php "
"orm:schema-tool:create"
.format(EEVariables.ee_webroot))
except CommandExecutionError as e:
raise SiteError("Unable to create ViMbAdmin schema")
EEFileUtils.chown(self, '{0}22222'
.format(EEVariables.ee_webroot),
EEVariables.ee_php_user,
EEVariables.ee_php_user,
recursive=True)
# Copy Dovecot and Postfix templates which are depednet on
# Vimbadmin
if not os.path.exists('/etc/postfix/mysql/'):
Log.debug(self, "Creating directory "
"/etc/postfix/mysql/")
os.makedirs('/etc/postfix/mysql/')
if EEVariables.ee_mysql_host is "localhost":
data = dict(password=<PASSWORD>, host="127.0.0.1")
else:
data = dict(password=<PASSWORD>,
host=EEVariables.ee_mysql_host)
vm_config = open('/etc/postfix/mysql/virtual_alias_maps.cf',
encoding='utf-8', mode='w')
self.app.render((data), 'virtual_alias_maps.mustache',
out=vm_config)
vm_config.close()
Log.debug(self, "Writting configuration to "
"/etc/postfix/mysql"
"/virtual_domains_maps.cf file")
vm_config = open('/etc/postfix/mysql/virtual_domains_maps.cf',
encoding='utf-8', mode='w')
self.app.render((data), 'virtual_domains_maps.mustache',
out=vm_config)
vm_config.close()
Log.debug(self, "Writting configuration to "
"/etc/postfix/mysql"
"/virtual_mailbox_maps.cf file")
vm_config = open('/etc/postfix/mysql/virtual_mailbox_maps.cf',
encoding='utf-8', mode='w')
self.app.render((data), 'virtual_mailbox_maps.mustache',
out=vm_config)
vm_config.close()
Log.debug(self, "Writting configration"
" to /etc/dovecot/dovecot-sql.conf.ext file ")
vm_config = open('/etc/dovecot/dovecot-sql.conf.ext',
encoding='utf-8', mode='w')
self.app.render((data), 'dovecot-sql-conf.mustache',
out=vm_config)
vm_config.close()
# If Amavis is going to be installed then configure Vimabadmin
# Amvis settings
if set(EEVariables.ee_mailscanner).issubset(set(apt_packages)):
vm_config = open('/etc/amavis/conf.d/50-user',
encoding='utf-8', mode='w')
self.app.render((data), '50-user.mustache',
out=vm_config)
vm_config.close()
EEService.restart_service(self, 'dovecot')
EEService.reload_service(self, 'nginx')
if (EEVariables.ee_platform_distro == 'debian' or EEVariables.ee_platform_codename == 'precise'):
EEService.reload_service(self, 'php5-fpm')
else:
EEService.reload_service(self, 'php5.6-fpm')
if EEAptGet.is_installed(self, 'php7.0-fpm'):
EEService.reload_service(self, 'php7.0-fpm')
self.msg = (self.msg + ["Configure ViMbAdmin:\thttps://{0}:"
"22222/vimbadmin".format(EEVariables.ee_fqdn)]
+ ["Security Salt: {0}".format(vm_salt)])
if any('/tmp/roundcube.tar.gz' == x[1] for x in packages):
# Extract RoundCubemail
Log.debug(self, "Extracting file /tmp/roundcube.tar.gz "
"to location /tmp/ ")
EEExtract.extract(self, '/tmp/roundcube.tar.gz', '/tmp/')
if not os.path.exists('{0}roundcubemail'
.format(EEVariables.ee_webroot)):
Log.debug(self, "Creating new directory "
" {0}roundcubemail/"
.format(EEVariables.ee_webroot))
os.makedirs('{0}roundcubemail/'
.format(EEVariables.ee_webroot))
shutil.move('/tmp/roundcubemail-{0}/'
.format(EEVariables.ee_roundcube),
'{0}roundcubemail/htdocs'
.format(EEVariables.ee_webroot))
#Fix pear install config for trusty
if (EEVariables.ee_platform_codename == 'trusty' or EEVariables.ee_platform_codename == 'xenial'or EEVariables.ee_platform_codename == 'bionic'):
EEShellExec.cmd_exec(self, "pear config-set php_dir /usr/share/php")
EEShellExec.cmd_exec(self, "pear config-set doc_dir /lib/php/pear/docs")
EEShellExec.cmd_exec(self, "pear config-set cfg_dir /lib/php/pear/cfg")
EEShellExec.cmd_exec(self, "pear config-set data_dir /lib/php/pear/data")
EEShellExec.cmd_exec(self, "pear config-set test_dir /lib/php/pear/tests")
EEShellExec.cmd_exec(self, "pear config-set www_dir /lib/php/pear/www")
# Install Roundcube depednet pear packages
EEShellExec.cmd_exec(self, "pear install Mail_Mime Net_SMTP"
" Mail_mimeDecode Net_IDNA2-beta "
"Auth_SASL Net_Sieve Crypt_GPG")
# pear install Mail_Mime Net_SMTP Mail_mimeDecode Net_IDNA2-beta Auth_SASL Net_Sieve Crypt_GPG
# Configure roundcube database
rc_passwd = ''.join(random.sample(string.ascii_letters, 8))
Log.debug(self, "Creating Database roundcubemail")
EEMysql.execute(self, "create database if not exists "
" roundcubemail")
Log.debug(self, "grant all privileges"
" on `roundcubemail`.* to "
| |
# Source repository: davisyoshida/finetune-transformer-lm (GitHub)
import os
import time
import math
import json
import joblib
import random
import argparse
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from functools import partial
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score
from .opt import adam, warmup_cosine, warmup_linear, warmup_constant
from .datasets import rocstories
from .analysis import rocstories as rocstories_analysis
from .text_utils import TextEncoder
from .utils import encode_dataset, flatten, iter_data, find_trainable_variables, convert_gradient_to_tensor, shape_list, ResultLogger, assign_to_gpu, average_grads, make_path
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation)."""
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3))
    return 0.5 * x * (1 + tf.tanh(inner))
def swish(x):
    """Swish activation: the input gated by its own sigmoid."""
    gate = tf.nn.sigmoid(x)
    return x * gate
# Registry of supported optimizers, selected via the --opt CLI flag.
opt_fns = {
    'adam':adam,
}
# Activation functions selectable via the --afn CLI flag (see mlp()).
act_fns = {
    'relu':tf.nn.relu,
    'swish':swish,
    'gelu':gelu
}
# Learning-rate schedules selectable via the --lr_schedule CLI flag.
lr_schedules = {
    'warmup_cosine':warmup_cosine,
    'warmup_linear':warmup_linear,
    'warmup_constant':warmup_constant,
}
def _norm(x, g=None, b=None, e=1e-5, axis=(1,)):
    """Layer-normalize `x` over `axis`.

    Subtracts the mean and divides by the standard deviation (epsilon `e`
    for numerical stability), then applies the learned gain `g` and bias
    `b` when both are given.
    """
    # Tuple default instead of a mutable list default (shared across calls);
    # tf reduction ops accept either.
    u = tf.reduce_mean(x, axis=axis, keep_dims=True)
    s = tf.reduce_mean(tf.square(x-u), axis=axis, keep_dims=True)
    x = (x - u) * tf.rsqrt(s + e)
    if g is not None and b is not None:
        x = x*g + b
    return x
def norm(x, scope, axis=(-1,)):
    """Layer normalization with learned per-feature gain/bias under `scope`."""
    # Tuple default instead of a mutable list default (shared across calls).
    with tf.variable_scope(scope):
        n_state = shape_list(x)[-1]
        g = tf.get_variable("g", [n_state], initializer=tf.constant_initializer(1))
        b = tf.get_variable("b", [n_state], initializer=tf.constant_initializer(0))
        return _norm(x, g, b, axis=axis)
def dropout(x, pdrop, train):
    """Apply dropout with drop probability `pdrop`, but only at train time."""
    if not train or pdrop <= 0:
        return x
    # tf.nn.dropout takes the *keep* probability.
    return tf.nn.dropout(x, 1 - pdrop)
def mask_attn_weights(w):
    """Apply a causal mask to attention logits `w` of shape (..., n, n)."""
    n = shape_list(w)[-1]
    # Lower-triangular ones: position i may only attend to positions j <= i.
    mask = tf.reshape(tf.matrix_band_part(tf.ones([n, n]), -1, 0), [1, 1, n, n])
    # Keep allowed logits; push masked ones far negative so softmax zeroes them.
    return w*mask + -1e9*(1-mask)
def _attn(q, k, v, train=False, scale=False):
    """Causal (optionally scaled) dot-product attention over split heads."""
    logits = tf.matmul(q, k)
    if scale:
        # Scale by 1/sqrt(d) where d is the per-head feature size.
        d = shape_list(v)[-1]
        logits = logits * tf.rsqrt(tf.cast(d, tf.float32))
    logits = mask_attn_weights(logits)
    weights = tf.nn.softmax(logits)
    # w = dropout(w, attn_pdrop, train)  # attention dropout left disabled
    return tf.matmul(weights, v)
def split_states(x, n):
    """Reshape the last dim of `x` into [n, last_dim // n] head states."""
    shape = shape_list(x)
    return tf.reshape(x, shape[:-1] + [n, shape[-1] // n])
def merge_states(x):
    """Inverse of split_states: flatten the last two dims into one."""
    shape = shape_list(x)
    return tf.reshape(x, shape[:-2] + [np.prod(shape[-2:])])
def split_heads(x, n, k=False):
    """Split the hidden dim into `n` heads; keys (`k=True`) are transposed
    to [batch, head, feature, seq] so q @ k needs no extra transpose."""
    perm = [0, 2, 3, 1] if k else [0, 2, 1, 3]
    return tf.transpose(split_states(x, n), perm)
def merge_heads(x):
    """Undo split_heads: restore [batch, seq, hidden] layout."""
    seq_major = tf.transpose(x, [0, 2, 1, 3])
    return merge_states(seq_major)
def conv1d(x, scope, nf, rf, w_init=tf.random_normal_initializer(stddev=0.02), b_init=tf.constant_initializer(0), pad='VALID', train=False):
    """1-D convolution producing `nf` features with receptive field `rf`."""
    with tf.variable_scope(scope):
        nx = shape_list(x)[-1]
        w = tf.get_variable("w", [rf, nx, nf], initializer=w_init)
        b = tf.get_variable("b", [nf], initializer=b_init)
        if rf == 1:
            # A 1x1 conv is just a matmul over flattened positions (faster).
            flat = tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf]))
            return tf.reshape(flat + b, shape_list(x)[:-1] + [nf])
        # The rf > 1 path was used to train the original LM.
        return tf.nn.conv1d(x, w, stride=1, padding=pad) + b
def attn(x, scope, n_state, n_head, train=False, scale=False):
    """Multi-head causal self-attention block with output projection."""
    assert n_state % n_head == 0
    with tf.variable_scope(scope):
        # Single projection produces q, k, v stacked along the feature axis.
        qkv = conv1d(x, 'c_attn', n_state*3, 1, train=train)
        q, k, v = tf.split(qkv, 3, 2)
        q = split_heads(q, n_head)
        k = split_heads(k, n_head, k=True)
        v = split_heads(v, n_head)
        out = merge_heads(_attn(q, k, v, train=train, scale=scale))
        out = conv1d(out, 'c_proj', n_state, 1, train=train)
        # a = dropout(a, resid_pdrop, train)  # residual dropout left disabled
        return out
def mlp(x, scope, n_state, afn, train=False):
    """Position-wise feed-forward: expand to `n_state`, activate, project back."""
    with tf.variable_scope(scope):
        nx = shape_list(x)[-1]
        act = act_fns[afn]
        hidden = act(conv1d(x, 'c_fc', n_state, 1, train=train))
        # h2 = dropout(h2, resid_pdrop, train)  # residual dropout left disabled
        return conv1d(hidden, 'c_proj', nx, 1, train=train)
def block(x, scope, n_head, afn, train=False, scale=False):
    """One transformer layer: attention then MLP, each followed by a
    residual add and layer norm (post-norm arrangement)."""
    with tf.variable_scope(scope):
        nx = shape_list(x)[-1]
        attn_out = attn(x, 'attn', nx, n_head, train=train, scale=scale)
        normed = norm(x + attn_out, 'ln_1')
        mlp_out = mlp(normed, 'mlp', nx*4, afn, train=train)
        return norm(normed + mlp_out, 'ln_2')
def embed(X, we):
    """Look up embeddings for X and sum over its last id axis.

    X carries token ids in channel 0 and position ids in channel 1 (see
    transform_roc), so the sum fuses token and positional embeddings.
    """
    we = convert_gradient_to_tensor(we)
    return tf.reduce_sum(tf.gather(we, X), 2)
def clf(x, ny, w_init=tf.random_normal_initializer(stddev=0.02), b_init=tf.constant_initializer(0), train=False):
    """Linear classification head projecting features to `ny` logits."""
    with tf.variable_scope('clf'):
        nx = shape_list(x)[-1]
        w = tf.get_variable("w", [nx, ny], initializer=w_init)
        b = tf.get_variable("b", [ny], initializer=b_init)
        return tf.matmul(x, w) + b
def model(X, M, Y, n_vocab, n_special, n_ctx, n_embd, n_layer, n_head, afn, clf_token, train=False, reuse=False):
    """Build the transformer LM + choice-classifier graph.

    Args:
        X: int ids, reshaped to [-1, n_ctx, 2] (channel 0 = token ids,
           channel 1 = position ids; two rows per example, one per ending).
        M: float mask over real (non-pad) tokens, reshaped to [-1, n_ctx].
        Y: int labels selecting the correct ending.
        clf_token: id of the classify token whose hidden state is pooled.
        Reads the module-level `clf_pdrop` for classifier-input dropout.

    Returns:
        (clf_logits, clf_losses, lm_losses) — callers (mgpu_train /
        mgpu_predict) unpack exactly these three.
    """
    with tf.variable_scope('model', reuse=reuse):
        we = tf.get_variable("we", [n_vocab+n_special+n_ctx, n_embd], initializer=tf.random_normal_initializer(stddev=0.02))
        # we = dropout(we, embd_pdrop, train)
        X = tf.reshape(X, [-1, n_ctx, 2])
        M = tf.reshape(M, [-1, n_ctx])
        h = embed(X, we)
        for layer in range(n_layer):
            h = block(h, 'h%d'%layer, n_head, afn, train=train, scale=True)
        # LM loss: predict each next token, tying output weights to `we`,
        # and average per sequence over the masked (real) positions.
        lm_h = tf.reshape(h[:, :-1], [-1, n_embd])
        lm_logits = tf.matmul(lm_h, we, transpose_b=True)
        lm_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=lm_logits, labels=tf.reshape(X[:, 1:, 0], [-1]))
        lm_losses = tf.reshape(lm_losses, [shape_list(X)[0], shape_list(X)[1]-1])
        lm_losses = tf.reduce_sum(lm_losses*M[:, 1:], 1)/tf.reduce_sum(M[:, 1:], 1)
        # Classifier: pool the hidden state at the clf_token position of
        # each row, then score each of the 2 candidate endings.
        clf_h = tf.reshape(h, [-1, n_embd])
        pool_idx = tf.cast(tf.argmax(tf.cast(tf.equal(X[:, :, 0], clf_token), tf.float32), 1), tf.int32)
        gather_indices = tf.range(shape_list(X)[0], dtype=tf.int32)*n_ctx+pool_idx
        clf_h = tf.gather(clf_h, gather_indices)
        clf_h = tf.reshape(clf_h, [-1, 2, n_embd])
        if train and clf_pdrop > 0:
            # Drop the same features across both candidate endings.
            shape = shape_list(clf_h)
            shape[1] = 1
            clf_h = tf.nn.dropout(clf_h, 1-clf_pdrop, shape)
        clf_h = tf.reshape(clf_h, [-1, n_embd])
        clf_logits = clf(clf_h, 1, train=train)
        clf_logits = tf.reshape(clf_logits, [-1, 2])
        clf_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=clf_logits, labels=Y)
        # BUG FIX: previously returned `clf_h` as the third value, but both
        # mgpu_train and mgpu_predict unpack it as lm_losses (and mgpu_train
        # feeds it into the lm_coef loss term).
        return clf_logits, clf_losses, lm_losses
def mgpu_train(*xs):
    """Shard inputs across `n_gpu` devices, build the per-device training
    graph, average gradients, and return [train_op, logits, clf_loss, lm_loss].

    Reads module-level hyperparameters (n_gpu, lm_coef, opt, lr, ...).
    """
    gpu_ops = []
    gpu_grads = []
    shards = (tf.split(x, n_gpu, 0) for x in xs)
    for i, shard in enumerate(zip(*shards)):
        # Reuse variables on every device after the first.
        do_reuse = True if i > 0 else None
        with tf.device(assign_to_gpu(i, "/gpu:0")), tf.variable_scope(tf.get_variable_scope(), reuse=do_reuse):
            clf_logits, clf_losses, lm_losses = model(*shard, train=True, reuse=do_reuse)
            if lm_coef > 0:
                # Auxiliary LM objective weighted by lm_coef.
                train_loss = tf.reduce_mean(clf_losses) + lm_coef*tf.reduce_mean(lm_losses)
            else:
                train_loss = tf.reduce_mean(clf_losses)
            params = find_trainable_variables("model")
            grads = tf.gradients(train_loss, params)
            gpu_grads.append(list(zip(grads, params)))
            gpu_ops.append([clf_logits, clf_losses, lm_losses])
    ops = [tf.concat(op, 0) for op in zip(*gpu_ops)]
    grads = [g for g, p in average_grads(gpu_grads)]
    train = opt_fns[opt](params, grads, lr, partial(lr_schedules[lr_schedule], warmup=lr_warmup), n_updates_total, l2=l2, max_grad_norm=max_grad_norm, vector_l2=vector_l2, b1=b1, b2=b2, e=e)
    return [train]+ops
def mgpu_predict(*xs):
    """Shard inputs across `n_gpu` devices, run the model in inference mode,
    and return the concatenated [logits, clf_losses, lm_losses]."""
    gpu_ops = []
    shards = (tf.split(x, n_gpu, 0) for x in xs)
    for i, shard in enumerate(zip(*shards)):
        with tf.device(assign_to_gpu(i, "/gpu:0")), tf.variable_scope(tf.get_variable_scope(), reuse=True):
            gpu_ops.append(list(model(*shard, train=False, reuse=True)))
    return [tf.concat(op, 0) for op in zip(*gpu_ops)]
def transform_roc(X1, X2, X3):
    """Encode ROCStories triples (context, ending1, ending2) into model inputs.

    Returns (xmb, mmb): int32 ids of shape [n, 2, n_ctx, 2] (channel 0 =
    token ids, channel 1 = position ids) and a float32 mask [n, 2, n_ctx]
    over the real tokens. Reads module-level encoder/limits.
    """
    n_samples = len(X1)
    xmb = np.zeros((n_samples, 2, n_ctx, 2), dtype=np.int32)
    mmb = np.zeros((n_samples, 2, n_ctx), dtype=np.float32)
    start = encoder['_start_']
    delimiter = encoder['_delimiter_']
    for i, (ctx, end1, end2) in enumerate(zip(X1, X2, X3)):
        # One row per candidate ending: <start> ctx <delim> ending <clf>.
        seq1 = [start]+ctx[:max_len]+[delimiter]+end1[:max_len]+[clf_token]
        seq2 = [start]+ctx[:max_len]+[delimiter]+end2[:max_len]+[clf_token]
        xmb[i, 0, :len(seq1), 0] = seq1
        xmb[i, 1, :len(seq2), 0] = seq2
        mmb[i, 0, :len(seq1)] = 1
        mmb[i, 1, :len(seq2)] = 1
    # Channel 1 carries absolute position ids, offset past the vocabulary
    # and special tokens so they hit the positional rows of the embedding.
    xmb[:, :, :, 1] = np.arange(n_vocab+n_special, n_vocab+n_special+n_ctx)
    return xmb, mmb
def iter_apply(Xs, Ms, Ys):
    """Run evaluation over batches; returns [stacked logits, summed loss].

    NOTE(review): each batch result is multiplied by its batch size `n` —
    correct for averaging the loss later, and harmless for the logits since
    per-row argmax is unchanged by a positive scale.
    """
    reducers = [lambda x: np.concatenate(x, 0), lambda x: float(np.sum(x))]
    per_batch = []
    for xmb, mmb, ymb in iter_data(Xs, Ms, Ys, n_batch=n_batch_train, truncate=False, verbose=True):
        n = len(xmb)
        # Full batches use the multi-GPU graph; the ragged final batch
        # falls back to the single-device graph.
        if n == n_batch_train:
            res = sess.run([eval_mgpu_logits, eval_mgpu_clf_loss], {X_train:xmb, M_train:mmb, Y_train:ymb})
        else:
            res = sess.run([eval_logits, eval_clf_loss], {X:xmb, M:mmb, Y:ymb})
        per_batch.append([r*n for r in res])
    return [fn(vals) for vals, fn in zip(zip(*per_batch), reducers)]
def iter_predict(Xs, Ms):
    """Compute classifier logits for every batch and stack them."""
    chunks = []
    for xmb, mmb in iter_data(Xs, Ms, n_batch=n_batch_train, truncate=False, verbose=True):
        # Full batches use the multi-GPU graph; the ragged final batch
        # falls back to the single-device graph.
        if len(xmb) == n_batch_train:
            chunks.append(sess.run(eval_mgpu_logits, {X_train:xmb, M_train:mmb}))
        else:
            chunks.append(sess.run(eval_logits, {X:xmb, M:mmb}))
    return np.concatenate(chunks, 0)
def save(path):
    """Snapshot the current model parameters to `path` (dirs auto-created)."""
    joblib.dump(sess.run(params), make_path(path))
def log():
    """Evaluate on a train subset and the validation set, log/print the
    metrics, and (in --submit mode) checkpoint the best validation params."""
    global best_score
    tr_logits, tr_cost = iter_apply(trX[:n_valid], trM[:n_valid], trY[:n_valid])
    va_logits, va_cost = iter_apply(vaX, vaM, vaY)
    tr_cost /= len(trY[:n_valid])
    va_cost /= n_valid
    tr_acc = accuracy_score(trY[:n_valid], np.argmax(tr_logits, 1))*100.
    va_acc = accuracy_score(vaY, np.argmax(va_logits, 1))*100.
    logger.log(n_epochs=n_epochs, n_updates=n_updates, tr_cost=tr_cost, va_cost=va_cost, tr_acc=tr_acc, va_acc=va_acc)
    print('%d %d %.3f %.3f %.2f %.2f'%(n_epochs, n_updates, tr_cost, va_cost, tr_acc, va_acc))
    if submit:
        # Keep only the best validation-accuracy parameters on disk.
        if va_acc > best_score:
            best_score = va_acc
            save(os.path.join(save_dir, desc, 'best_params.jl'))
def argmax(x):
    """Predict the index of the max logit along axis 1."""
    # A def instead of a lambda assignment (PEP 8 E731); same callable name.
    return np.argmax(x, 1)

# Per-dataset prediction function, submission filename, and optional
# decoder mapping class indices back to labels (None = use raw indices).
pred_fns = {
    'rocstories': argmax,
}
filenames = {
    'rocstories': 'ROCStories.tsv',
}
label_decoders = {
    'rocstories': None,
}
def predict():
    """Write tab-separated (index, prediction) rows for the test set to
    the submission directory, decoding labels when a decoder exists."""
    filename = filenames[dataset]
    pred_fn = pred_fns[dataset]
    label_decoder = label_decoders[dataset]
    predictions = pred_fn(iter_predict(teX, teM))
    if label_decoder is not None:
        predictions = [label_decoder[p] for p in predictions]
    out_path = os.path.join(submission_dir, filename)
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    with open(out_path, 'w') as f:
        f.write('{}\t{}\n'.format('index', 'prediction'))
        for idx, pred in enumerate(predictions):
            f.write('{}\t{}\n'.format(idx, pred))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--desc', type=str)
parser.add_argument('--dataset', type=str)
parser.add_argument('--log_dir', type=str, default='log/')
parser.add_argument('--save_dir', type=str, default='save/')
parser.add_argument('--data_dir', type=str, default='data/')
parser.add_argument('--submission_dir', type=str, default='submission/')
parser.add_argument('--submit', action='store_true')
parser.add_argument('--analysis', action='store_true')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--n_iter', type=int, default=3)
parser.add_argument('--n_batch', type=int, default=8)
parser.add_argument('--max_grad_norm', type=int, default=1)
parser.add_argument('--lr', type=float, default=6.25e-5)
parser.add_argument('--lr_warmup', type=float, default=0.002)
parser.add_argument('--n_ctx', type=int, default=512)
parser.add_argument('--n_embd', type=int, default=768)
parser.add_argument('--n_head', type=int, default=12)
parser.add_argument('--n_layer', type=int, default=12)
parser.add_argument('--embd_pdrop', type=float, default=0.1)
parser.add_argument('--attn_pdrop', type=float, default=0.1)
parser.add_argument('--resid_pdrop', type=float, default=0.1)
parser.add_argument('--clf_pdrop', type=float, default=0.1)
parser.add_argument('--l2', type=float, default=0.01)
parser.add_argument('--vector_l2', action='store_true')
parser.add_argument('--n_gpu', type=int, default=4)
parser.add_argument('--opt', type=str, default='adam')
parser.add_argument('--afn', type=str, default='gelu')
parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
parser.add_argument('--encoder_path', type=str, default='model/encoder_bpe_40000.json')
parser.add_argument('--bpe_path', type=str, default='model/vocab_40000.bpe')
parser.add_argument('--n_transfer', type=int, default=12)
parser.add_argument('--lm_coef', type=float, default=0.5)
parser.add_argument('--b1', type=float, default=0.9)
parser.add_argument('--b2', type=float, default=0.999)
parser.add_argument('--e', type=float, default=1e-8)
args = parser.parse_args()
print(args)
globals().update(args.__dict__)
random.seed(seed)
np.random.seed(seed)
tf.set_random_seed(seed)
logger = ResultLogger(path=os.path.join(log_dir, '{}.jsonl'.format(desc)), **args.__dict__)
text_encoder = TextEncoder(encoder_path, bpe_path)
encoder = text_encoder.encoder
n_vocab = len(text_encoder.encoder)
(trX1, trX2, trX3, trY), (vaX1, vaX2, vaX3, vaY), (teX1, teX2, teX3) = encode_dataset(rocstories(data_dir), encoder=text_encoder)
n_y = 2
encoder['_start_'] = len(encoder)
encoder['_delimiter_'] = len(encoder)
encoder['_classify_'] = len(encoder)
clf_token = encoder['_classify_']
n_special = 3
max_len = n_ctx//2-2
n_ctx = min(max([len(x1[:max_len])+max(len(x2[:max_len]), len(x3[:max_len])) for x1, x2, x3 in zip(trX1, trX2, trX3)]+[len(x1[:max_len])+max(len(x2[:max_len]), len(x3[:max_len])) for x1, x2, x3 | |
'VIEW_3D'
bl_region_type = 'UI'
bl_category = "Automation Tools"
bl_options = {'DEFAULT_CLOSED'}
bl_parent_id = BrushPanel.bl_idname
def draw(self, context):
layout = self.layout
column = layout.column()
column.operator("brush.draw_brush_template_settings_1", text = "Preset 1")
column.operator("brush.draw_brush_template_settings_2", text = "Preset 2")
class WeightsPanel(Panel):
    """3D View N-panel sub-panel (under Rigging/Skinning) for editing
    vertex-group weights via the scene's vertex_weight_input slider."""
    bl_label = "Weights"
    bl_idname = "OBJECT_PT_AUT_RIG_SK_WEIGHTS_PANEL"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = RiggingSkinningPanel.bl_idname
    def draw(self, context):
        """Draw the weight slider, the Add/Subtract/Replace row and Clamp."""
        layout = self.layout
        layout.prop(bpy.context.scene, 'vertex_weight_input', slider = True)
        column = layout.column(align=True)
        split1 = column.split(factor=0.33, align=True)
        # Fill the active vertex group with the slider value, in one of
        # three mix modes.
        split1.operator("object.fill_active_vg", text = "Add").mode = 'ADD'
        split1.operator("object.fill_active_vg", text = "Subtract").mode = 'SUBTRACT'
        split1.operator("object.fill_active_vg", text = "Replace").mode = 'REPLACE'
        #column = layout.column(align=True)
        #split1 = column.split(factor=0.5, align=True)
        #split1.operator("object.shift_weights", text = "Increase").action = True
        #split1.operator("object.shift_weights", text = "Decrease").action = False
        #split2 = column.split(factor=0.33, align=True)
        #split2.operator("object.fill_active_vg", text = "Sel").only_selected = True
        #split2.operator("object.fill_active_vg", text = "Act").only_selected = False
        #split2.operator("object.fill_all_vg", text = "All")
        column.operator("object.clamp_near_zero_values", text = "Clamp")
class NormalsPanel(Panel):
    # Custom split-normal utilities, nested under the Geometry panel.
    bl_label = "Normals"
    bl_idname = "OBJECT_PT_AUT_NORMALS_PANEL"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = GeometryPanel.bl_idname

    def draw(self, context):
        col = self.layout.column(align=True)
        col.operator("mesh.customdata_custom_splitnormals_clear", text="Split")
        col.operator("object.reset_normals_object", text="Reset")
class TransformPanel(Panel):
    # Object-transform helpers, nested under the Geometry panel.
    bl_label = "Transform"
    bl_idname = "OBJECT_PT_TRANSFORM_PANEL"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = GeometryPanel.bl_idname

    def draw(self, context):
        col = self.layout.column(align=True)
        col.operator("object.move_to_scene_center", text="Move to Origin")
class ModesPanel(Panel):
    # Quick interaction-mode switching (object / edit / weight-paint / pose).
    bl_label = "Modes"
    bl_idname = "OBJECT_PT_AUT_RIG_SK_MODES_PANEL"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        col = self.layout.column(align=True)
        # Object and Edit share one row via a 50/50 split.
        half = col.split(factor=0.5, align=True)
        half.operator("object.object_edit_mode_on", text="Object", icon='OBJECT_DATA').mode = 'OBJECT'
        half.operator("object.object_edit_mode_on", text="Edit", icon='EDITMODE_HLT').mode = 'EDIT'
        col.operator("object.weight_paint_mode_on", icon='MOD_VERTEX_WEIGHT', text="Weight")
        col.operator("object.pose_mode_on", icon='POSE_HLT', text="Pose")
        col.operator("object.toggle_weight_pose_modes", text="Weight / Pose", icon='ARROW_LEFTRIGHT')
class ShadingPanel(Panel):
    # Viewport shading toggles, nested under the Modeling panel.
    bl_label = "Shading"
    bl_idname = "OBJECT_PT_AUT_RIG_SK_SHADING_PANEL"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = ModelingPanel.bl_idname

    def draw(self, context):
        col = self.layout.column()
        col.operator("view3d.toggle_carpaint", text="Car Paint / Basic")
class CurvesPanel(Panel):
    # Curve-creation helpers, nested under the Modeling panel.
    bl_label = "Curves"
    bl_idname = "OBJECT_PT_AUT_RIG_SK_CURVES_PANEL"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = ModelingPanel.bl_idname

    def draw(self, context):
        col = self.layout.column(align=True)
        col.prop(bpy.context.scene, "curve_type")
        col.separator(factor=1.0)
        col.operator("object.curve_between_2_objects", text="Vertices / Objects > Curve")
        col.operator("object.edge_to_curve", text="Edges > Curve")
class TriangulationPanel(Panel):
    # Edge-rotation fixes for triangulated meshes, under the Geometry panel.
    bl_label = "Triangulation"
    bl_idname = "OBJECT_PT_AUT_TRIANGULATION_PANEL"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = GeometryPanel.bl_idname

    def draw(self, context):
        col = self.layout.column(align=True)
        # Same operator twice, parameterised by the quad re-triangulation method.
        for label, method in (('Rotate Edges Beauty', 'BEAUTY'),
                              ('Rotate Edges Fixed', 'FIXED_ALTERNATE')):
            col.operator("mesh.rotate_edge_triangulation_quads", text=label).quad_method = method
        col.separator()
        col.operator("OBJECT_OT_at_wiki", text="Help", icon="HELP").tool = 'fix_triangulation'
class ModifiersPanel(Panel):
    # Bulk modifier operations, nested under the Modeling panel.
    bl_label = "Modifiers"
    bl_idname = "OBJECT_PT_AUT_RIG_SK_MODIFIERS_PANEL"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = ModelingPanel.bl_idname

    def draw(self, context):
        col = self.layout.column(align=True)
        col.operator("view3d.toggle_all_modifiers_visibility", text="Toggle All")
        col.operator("object.transfer_modifiers", text="Transfer")
        col.operator("view3d.copy_apply_modifier", text="Copy and Apply")
        col.operator("object.apply_modifiers_with_shape_keys", text='Apply [with Shape Keys]')
        col.separator()
        col.operator("OBJECT_OT_at_wiki", text="Help", icon="HELP").tool = 'modifiers'
class StandardBatchExportPanel(Panel):
    # Plain batch export, nested under the Export panel.
    bl_label = "Batch Export"
    bl_idname = "OBJECT_PT_Standard_Batch_Export_Panel"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = ExportPanel.bl_idname

    def draw(self, context):
        col = self.layout.column(align=True)
        col.operator("object.standard_batch_export", text="Export")
class BodyExportPanel(Panel):
    # Car-body export (single or all collections), nested under the Export panel.
    bl_label = "Body"
    bl_idname = "OBJECT_PT_AUTOMATION_TOOLS_BODY_EXPORT_PANEL"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = ExportPanel.bl_idname

    def draw(self, context):
        col = self.layout.column(align=True)
        col.operator(BodyExport.bl_idname, text="One Collection")
        col.operator(BodiesBatchExport.bl_idname, text="All Collections")
        col.prop(bpy.context.scene, "if_apply_modifiers")
        col.prop(bpy.context.scene, "debug_mode")
class RimExportPanel(Panel):
    # Rim export (single or all collections), nested under the Export panel.
    bl_label = "Rim"
    bl_idname = "OBJECT_PT_AUTOMATION_TOOLS_RIM_EXPORT_PANEL"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = ExportPanel.bl_idname

    def draw(self, context):
        col = self.layout.column(align=True)
        col.operator("object.rim_export", text="One Collection")
        col.operator("object.rim_batch_export", text="All Collections")
        col.prop(bpy.context.scene, "debug_mode")
class HierarchyExportPanel(Panel):
    # FBX export of a named object hierarchy, nested under the Export panel.
    bl_label = "Hierarchy"
    bl_idname = "OBJECT_PT_AUTOMATION_TOOLS_EXPORT_PANEL_2"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = ExportPanel.bl_idname

    def draw(self, context):
        col = self.layout.column(align=True)
        # Name field takes 3/4 of the row; the Add button the rest.
        row = col.split(factor=0.75, align=True)
        row.prop(bpy.context.scene, "hierarchy_list")
        row.operator("object.get_selected_objects_names", text="Add")
        col.operator("object.fast_auto_fbx_export", text="Export")
        col.prop(bpy.context.scene, "if_lods")
class FixturesExportPanel(Panel):
    # Fixture export (selection / active collection / batch), under Export.
    bl_label = "Fixtures"
    bl_idname = "OBJECT_PT_AUTOMATION_TOOLS_EXPORT_PANEL_3"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = ExportPanel.bl_idname

    def draw(self, context):
        col = self.layout.column(align=True)
        col.operator("object.selected_fixtures_batch_export", text="Selected Meshes")
        col.operator("object.fixture_export", text="Active Collection")
        col.operator("object.fixtures_batch_export", text="Batch Collections")
class OptionsPanel(Panel):
    # Vertex-group behaviour toggles, nested under Rigging & Skinning.
    bl_label = "Options"
    bl_idname = "OBJECT_PT_AUT_RIG_SK_OPTIONS_PANEL"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Automation Tools"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = RiggingSkinningPanel.bl_idname

    def draw(self, context):
        col = self.layout.column(align=True)
        col.label(text='Vertex Groups:')
        for prop_name in ('select_all_vg_vertices',
                          'lock_all_unused_vgs',
                          'auto_add_vertex_group'):
            col.prop(bpy.context.scene, prop_name)
# Menus
class GenerateRigMenu(Menu):
    # Top-level "Add Bone" menu: a few single bones plus one submenu per
    # car-body region (each submenu invokes object.generate_rig).
    bl_label = "Add Bone"
    bl_idname = "OBJECT_MT_generate_rig_menu"
    bl_description = "Add Car Body Bones"

    def draw(self, context):
        layout = self.layout
        layout.operator("object.generate_rig", text="New Bone").name = 'New_Bone'
        layout.menu("OBJECT_MT_fender_sub_menu", text="Fender")
        layout.menu("OBJECT_MT_quarter_sub_menu", text="Quarter")
        layout.menu("OBJECT_MT_side_sub_menu", text="Side")
        layout.menu("OBJECT_MT_rocker_sub_menu", text="Rocker")
        # Fixed misspelled local (was "cabin_wigth").
        cabin_width = layout.operator("object.generate_rig", text="Cabin Width")
        cabin_width.name = 'R_CabinWidth'
        layout.menu("OBJECT_MT_front_sub_menu", text="Front")
        layout.menu("OBJECT_MT_rear_sub_menu", text="Rear")
        layout.menu("OBJECT_MT_pillar_sub_menu", text="Pillar")
        layout.menu("OBJECT_MT_roof_sub_menu", text="Roof")
        layout.menu("OBJECT_MT_cargo_sub_menu", text="Cargo")
        layout.menu("OBJECT_MT_hood_sub_menu", text="Hood")
        layout.menu("OBJECT_MT_boot_sub_menu", text="Boot")
        layout.menu("OBJECT_MT_front_side_sub_menu", text="Front Side")
        layout.menu("OBJECT_MT_rear_side_sub_menu", text="Rear Side")
        layout.menu("OBJECT_MT_front_bumper_sub_menu", text="Front Bumper")
        # Fixed UI label (was "Rear_Bumper"; every other entry uses spaces).
        layout.menu("OBJECT_MT_rear_bumper_sub_menu", text="Rear Bumper")
        # Fixed misspelled local (was "bed_hight").
        bed_height = layout.operator("object.generate_rig", text="Bed Height")
        bed_height.name = 'Bed_Height'
        layout.menu("OBJECT_MT_wheel_well_sub_menu", text="Wheel Well")
class FenderSubMenu(Menu):
    # Fender bones: (button label, bone name, mirrored?).
    bl_label = "Fender Menu"
    bl_idname = "OBJECT_MT_fender_sub_menu"

    def draw(self, context):
        layout = self.layout
        for label, bone_name, mirrored in (("Fender", 'R_Fender', True),
                                           ("Lip", 'R_FenderLip', True),
                                           ("Height", 'R_FenderHeight', True),
                                           ("Arch Size", 'Fender_Arch_Size', False)):
            op = layout.operator("object.generate_rig", text=label)
            op.name = bone_name
            op.symmetry = mirrored
class QuarterSubMenu(Menu):
    # Quarter-panel bones: (button label, bone name, mirrored?).
    bl_label = "Quarter Menu"
    bl_idname = "OBJECT_MT_quarter_sub_menu"

    def draw(self, context):
        layout = self.layout
        for label, bone_name, mirrored in (("Quarter", 'R_Quarter', True),
                                           ("Lip", 'R_QuarterLip', True),
                                           ("Height", 'R_QuarterHeight', True),
                                           ("Arch Size", 'Quarter_Arch_Size', False)):
            op = layout.operator("object.generate_rig", text=label)
            op.name = bone_name
            op.symmetry = mirrored
class SideSubMenu(Menu):
    # Side bones; all four are mirrored.
    bl_label = "Side Menu"
    bl_idname = "OBJECT_MT_side_sub_menu"

    def draw(self, context):
        layout = self.layout
        for label, bone_name in (("Side", 'R_Side'),
                                 ("Detail 1", 'R_SideDetail1'),
                                 ("Detail 2", 'R_SideDetail2'),
                                 ("Detail 3", 'R_SideDetail3')):
            op = layout.operator("object.generate_rig", text=label)
            op.name = bone_name
            op.symmetry = True
class RockerSubMenu(Menu):
    # Rocker bones; both are mirrored.
    bl_label = "Rocker Menu"
    bl_idname = "OBJECT_MT_rocker_sub_menu"

    def draw(self, context):
        layout = self.layout
        for label, bone_name in (("Panel", 'R_RockerPanel'),
                                 ("Detail", 'R_RockerDetail')):
            op = layout.operator("object.generate_rig", text=label)
            op.name = bone_name
            op.symmetry = True
class FrontSubMenu(Menu):
    # Front-end bones: (button label, bone name, mirrored?).
    bl_label = "Front Menu"
    bl_idname = "OBJECT_MT_front_sub_menu"

    def draw(self, context):
        layout = self.layout
        for label, bone_name, mirrored in (("Length", 'Front_Length', False),
                                           ("Angle", 'R_FrontAngle', True),
                                           ("Detail 1", 'Front_Detail_1', False),
                                           ("Detail 2", 'Front_Detail_2', False),
                                           ("Detail 3", 'Front_Detail_3', False)):
            op = layout.operator("object.generate_rig", text=label)
            op.name = bone_name
            op.symmetry = mirrored
class RearSubMenu(Menu):
    # Rear-end bones: (button label, bone name, mirrored?).
    bl_label = "Rear Menu"
    bl_idname = "OBJECT_MT_rear_sub_menu"

    def draw(self, context):
        layout = self.layout
        for label, bone_name, mirrored in (("Length", 'Rear_Length', False),
                                           ("Angle", 'R_RearAngle', True),
                                           ("Detail 1", 'Rear_Detail_1', False),
                                           ("Detail 2", 'Rear_Detail_2', False),
                                           ("Detail 3", 'Rear_Detail_3', False)):
            op = layout.operator("object.generate_rig", text=label)
            op.name = bone_name
            op.symmetry = mirrored
class PillarSubMenu(Menu):
    # A/B/C/D pillar bones; five entries per pillar, none mirrored.
    bl_label = "Pillar Menu"
    bl_idname = "OBJECT_MT_pillar_sub_menu"

    def draw(self, context):
        layout = self.layout
        for pillar in "ABCD":
            # e.g. "A Pillar" -> 'A_Pillar'
            op = layout.operator("object.generate_rig", text=pillar + " Pillar")
            op.name = pillar + "_Pillar"
            op.symmetry = False
            # e.g. "A Adjustment" -> 'A_Pillar_Adjustment', etc.
            for part in ("Adjustment", "Upper", "Mid", "Lower"):
                op = layout.operator("object.generate_rig",
                                     text="%s %s" % (pillar, part))
                op.name = "%s_Pillar_%s" % (pillar, part)
                op.symmetry = False
class RoofSubMenu(Menu):
    # Roof bones; neither is mirrored.
    bl_label = "Roof Menu"
    bl_idname = "OBJECT_MT_roof_sub_menu"

    def draw(self, context):
        layout = self.layout
        for label, bone_name in (("Height", 'Roof_Height'),
                                 ("Detail", 'Roof_Detail')):
            op = layout.operator("object.generate_rig", text=label)
            op.name = bone_name
            op.symmetry = False
class CargoSubMenu(Menu):
    # Cargo bones; neither is mirrored.
    # Fixed copy-paste bug: bl_label was "Roof Menu" (copied from RoofSubMenu).
    bl_label = "Cargo Menu"
    bl_idname = "OBJECT_MT_cargo_sub_menu"

    def draw(self, context):
        layout = self.layout
        bone1 = layout.operator("object.generate_rig", text="Height")
        bone1.name = 'Cargo_Height'
        bone1.symmetry = False
        bone2 = layout.operator("object.generate_rig", text="Detail")
        bone2.name = 'Cargo_Detail'
        bone2.symmetry = False
class HoodSubMenu(Menu):
    # Hood bones: Angle is mirrored; Slant and the Details are not
    # (same pattern as BootSubMenu).
    bl_label = "Hood Menu"
    bl_idname = "OBJECT_MT_hood_sub_menu"

    def draw(self, context):
        layout = self.layout
        angle = layout.operator("object.generate_rig", text="Angle")
        angle.name = 'R_HoodAngle'
        angle.symmetry = True
        slant = layout.operator("object.generate_rig", text="Slant")
        slant.name = 'Hood_Slant'
        # BUGFIX: the original set bone1.symmetry = False here, silently
        # clobbering the mirrored Angle bone and leaving Slant's flag unset.
        slant.symmetry = False
        detail1 = layout.operator("object.generate_rig", text="Detail 1")
        detail1.name = 'Hood_Detail_1'
        # BUGFIX: the original set bone2.symmetry here instead of bone3's.
        detail1.symmetry = False
        detail2 = layout.operator("object.generate_rig", text="Detail 2")
        detail2.name = 'Hood_Detail_2'
        detail2.symmetry = False
        detail3 = layout.operator("object.generate_rig", text="Detail 3")
        detail3.name = 'Hood_Detail_3'
        detail3.symmetry = False
class BootSubMenu(Menu):
    # Boot bones: (button label, bone name, mirrored?).
    bl_label = "Boot Menu"
    bl_idname = "OBJECT_MT_boot_sub_menu"

    def draw(self, context):
        layout = self.layout
        for label, bone_name, mirrored in (("Angle", 'R_BootAngle', True),
                                           ("Slant", 'Boot_Slant', False),
                                           ("Detail 1", 'Boot_Detail_1', False),
                                           ("Detail 2", 'Boot_Detail_2', False),
                                           ("Detail 3", 'Boot_Detail_3', False)):
            op = layout.operator("object.generate_rig", text=label)
            op.name = bone_name
            op.symmetry = mirrored
class FrontBumperSubMenu(Menu):
    # Front-bumper bones; none are mirrored.
    bl_label = "Front Bumper Menu"
    bl_idname = "OBJECT_MT_front_bumper_sub_menu"

    def draw(self, context):
        layout = self.layout
        for label, bone_name in (("Front", 'Front_Bumper'),
                                 ("Lip", 'Front_Bumper_Lip'),
                                 ("Upper", 'Front_Bumper_Upper'),
                                 ("Lower", 'Front_Bumper_Lower'),
                                 ("Detail 1", 'Front_Bumper_Detail_1'),
                                 ("Detail 2", 'Front_Bumper_Detail_2'),
                                 ("Detail 3", 'Front_Bumper_Detail_3')):
            op = layout.operator("object.generate_rig", text=label)
            op.name = bone_name
            op.symmetry = False
class RearBumperSubMenu(Menu):
    # Rear-bumper bones; none are mirrored.
    bl_label = "Rear Bumper Menu"
    bl_idname = "OBJECT_MT_rear_bumper_sub_menu"

    def draw(self, context):
        layout = self.layout
        # NOTE(review): 'RearBumper_Detail_1' (no underscore after "Rear") breaks
        # the naming pattern of its siblings — kept as-is because downstream
        # rigs may reference the bone by this exact name; confirm before fixing.
        for label, bone_name in (("Rear", 'Rear_Bumper'),
                                 ("Lip", 'Rear_Bumper_Lip'),
                                 ("Upper", 'Rear_Bumper_Upper'),
                                 ("Lower", 'Rear_Bumper_Lower'),
                                 ("Detail 1", 'RearBumper_Detail_1'),
                                 ("Detail 2", 'Rear_Bumper_Detail_2'),
                                 ("Detail 3", 'Rear_Bumper_Detail_3')):
            op = layout.operator("object.generate_rig", text=label)
            op.name = bone_name
            op.symmetry = False
class FrontSideSubMenu(Menu):
    # Front-side bones; both are mirrored.
    bl_label = "Front Side Menu"
    bl_idname = "OBJECT_MT_front_side_sub_menu"

    def draw(self, context):
        layout = self.layout
        for label, bone_name in (("Front Side 1", 'R_FrontSide1'),
                                 ("Front Side 2", 'R_FrontSide2')):
            op = layout.operator("object.generate_rig", text=label)
            op.name = bone_name
            op.symmetry = True
class RearSideSubMenu(Menu):
    # Rear-side bones; both are mirrored.
    bl_label = "Rear Side Menu"
    bl_idname = "OBJECT_MT_rear_side_sub_menu"

    def draw(self, context):
        layout = self.layout
        for label, bone_name in (("Rear Side 1", 'R_RearSide1'),
                                 ("Rear Side 2", 'R_RearSide2')):
            op = layout.operator("object.generate_rig", text=label)
            op.name = bone_name
            op.symmetry = True
class WheelWellSubMenu(Menu):
    # Wheel-well bones: Width is mirrored, Position is not.
    # Fixed copy-paste bug: bl_label was "Rear Side Menu" (copied from
    # RearSideSubMenu).
    bl_label = "Wheel Well Menu"
    bl_idname = "OBJECT_MT_wheel_well_sub_menu"

    def draw(self, context):
        layout = self.layout
        bone1 = layout.operator("object.generate_rig", text="Width")
        bone1.name = 'R_Wheel_Well_Width'
        bone1.symmetry = True
        bone2 = layout.operator("object.generate_rig", text="Position")
        bone2.name = 'Wheel_Well_Position'
        bone2.symmetry = False
class SelectBonesMenu(Menu):
    # One menu entry per bone of the active object's armature; each entry runs
    # armature.select_bones_and_mode with the bone's name.
    bl_idname = "OBJECT_MT_select_bones_menu"
    bl_label = "Select Bones"
    bl_description = "Select bones menu"

    def draw(self, context):
        # Guard clauses instead of the original nested ifs.
        if context.object is None:
            return
        # FIX: call get_bones() once — the original called it twice (once for
        # the None check, again for the result). Presumably it is a pure query;
        # confirm it has no side effects.
        bones = get_bones(self)
        if bones is None or len(bones) <= 1:
            return
        layout = self.layout
        for bone in bones:
            layout.operator("armature.select_bones_and_mode",
                            text=bone.name).name = bone.name
class PIE_MT_RigSk_tools(Menu):
bl_label = "Brushes"
def draw(self, context):
layout = self.layout
pie = layout.menu_pie()
pie.operator("brush.draw_brush_blend_toggle", text = | |
94))
night1 = Shift(
"Night_1", "N1", datetime.time(21, 15), 12,
roles=["Night", "Night_1"],
resident=True, shift_type=SHIFT_TYPES.FULL, rgb=(146, 208, 80))
night2 = Shift(
"Night_2", "N2", datetime.time(21, 15), 12,
roles=["Night", "Night_2"],
resident=True, shift_type=SHIFT_TYPES.FULL, rgb=(79, 98, 40))
off = Shift(
"Off", "OFF", datetime.time(9, 15), 7.75, work=False)
shifts = [nwd, late1, late2, late3, night1, night2, off]
# Doctors
# ... so confusing to edit them manually! Just a list
basedoc = Doctor("BASEDOC", [
night1, night1, off, nwd, nwd, # Mon/Tue missing
nwd, nwd, nwd, nwd, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, late1, late1, late1,
off, nwd, nwd, nwd, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, nwd, nwd, nwd,
nwd, nwd, nwd, off, night2, night2, night2,
off, off, nwd, nwd, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, nwd, nwd, nwd,
late2, late2, late2, late2, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, late3, late3, late3,
off, nwd, nwd, nwd, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, nwd, nwd, nwd,
late1, late1, late1, late1, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, nwd, nwd, nwd,
late3, late3, late3, late3, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, nwd, nwd, nwd,
night2, night2, night2, night2, off, nwd, nwd,
nwd, nwd, nwd, nwd, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, late2, late2, late2,
off, nwd, nwd, nwd, nwd, nwd, nwd,
nwd, nwd, nwd, nwd, nwd, nwd, nwd,
night1, night1
], leave_weeks_per_year=0)
doctors = [basedoc] # so we can operate with 1-indexing for a while
for i in range(1, 26 + 1):
if i in [1, 2, 8, 9, 10, 11, 12, 13, 14, 21, 25, 26]:
name = str(i) + "_SpR"
ooh_roles = ["Section 12 approved"]
leave_weeks_per_year = 6
else:
name = str(i) + "_SHO"
ooh_roles = []
leave_weeks_per_year = 5
name
d = basedoc.copy(name=name, rotate=(i - 1) * 7)
d.ooh_roles = ooh_roles
d.leave_weeks_per_year = leave_weeks_per_year
doctors.append(d)
# Now the deviations from a simple pattern
NWD = [nwd]
# LATE1_MTWT = [late1, late1, late1, late1]
LATE1_FSS = [late1, late1, late1, off]
LATE2_MTWT = [late2, late2, late2, late2]
LATE2_FSS = [late2, late2, late2, off]
LATE3_MTWT = [late3, late3, late3, late3]
LATE3_FSS = [late3, late3, late3, off]
NIGHT1_MTWT = [night1, night1, night1, night1, off]
NIGHT1_FSS = [off, night1, night1, night1, off, off]
# NIGHT2_MTWT = [night2, night2, night2, night2, off]
NIGHT2_FSS = [off, night2, night2, night2, off, off]
doctors[1].set_at(135, NIGHT1_FSS)
doctors[2].set_at(142, NIGHT1_FSS)
doctors[3].set_at(121, NIGHT1_FSS)
doctors[4].set_at(58, NWD * 6) # remove NIGHT2_FSS
doctors[4].set_at(128, NIGHT1_FSS)
doctors[4].set_at(177, NIGHT1_FSS) # two NIGHT1_FSS shifts
doctors[5].set_at(107, NIGHT1_FSS)
doctors[6].set_at(90, NWD * 4) # remove LATE2_MTWT
doctors[6].set_at(97, LATE2_MTWT)
doctors[6].set_at(114, NIGHT1_FSS)
doctors[7].set_at(79, NWD * 6) # remove NIGHT2_FSS
doctors[7].set_at(90, LATE2_MTWT)
doctors[7].set_at(97, NWD * 4) # remove LATE2_MTWT
doctors[7].set_at(100, NIGHT1_FSS)
doctors[8].set_at(156, NIGHT1_FSS)
doctors[9].set_at(79, NIGHT2_FSS)
doctors[9].set_at(93, NWD * 6) # remove NIGHT2_FSS
doctors[9].set_at(163, NIGHT1_FSS)
doctors[10].set_at(45, NWD * 4) # remove LATE2_FSS
doctors[10].set_at(52, LATE2_FSS)
doctors[10].set_at(170, NIGHT1_FSS)
doctors[11].set_at(45, LATE2_FSS)
doctors[11].set_at(52, NWD * 4) # remove LATE2_FSS
# no NIGHT1_FSS for doctor 11
doctors[11].set_at(58, NIGHT2_FSS) # but one of these
doctors[12].set_at(2, NIGHT1_FSS)
doctors[13].set_at(9, NIGHT1_FSS)
doctors[14].set_at(16, NIGHT1_FSS)
doctors[15].set_at(23, NIGHT1_FSS)
doctors[15].set_at(41, NWD * 4) # remove LATE3_MTWT
doctors[15].set_at(48, LATE3_MTWT)
doctors[15].set_at(143, LATE2_FSS)
doctors[16].set_at(30, NIGHT1_FSS)
doctors[16].set_at(41, LATE3_MTWT)
doctors[16].set_at(48, NWD * 4) # remove LATE3_MTWT
doctors[17].set_at(37, NIGHT1_FSS)
doctors[18].set_at(44, NIGHT1_FSS)
doctors[18].set_at(10, NWD * 4) # remove LATE3_FSS
doctors[18].set_at(17, LATE3_FSS)
doctors[18].set_at(10, LATE3_FSS)
doctors[18].set_at(17, NWD * 4) # remove LATE3_FSS
doctors[19].set_at(51, NIGHT1_FSS)
doctors[20].set_at(58, NIGHT1_FSS)
doctors[21].set_at(65, NIGHT1_FSS)
doctors[21].set_at(139, NWD * 6) # remove NIGHT1_MTWT
doctors[21].set_at(150, LATE2_FSS)
doctors[21].set_at(157, NWD * 4) # remove LATE1_FSS
doctors[21].set_at(160, NIGHT1_MTWT)
doctors[22].set_at(72, NIGHT1_FSS)
doctors[23].set_at(79, NIGHT1_FSS)
doctors[23].set_at(97, NWD * 4) # remove LATE3_MTWT, *then*...
doctors[23].set_at(93, NIGHT2_FSS)
doctors[23].set_at(104, LATE3_MTWT)
doctors[24].set_at(86, NIGHT1_FSS)
doctors[24].set_at(97, LATE3_MTWT)
doctors[24].set_at(104, NWD * 4) # remove LATE3_MTWT
doctors[24].set_at(143, NWD * 4) # remove LATE2_FSS, *then*...
doctors[24].set_at(139, NIGHT1_MTWT)
doctors[24].set_at(160, NWD * 4) # remove NIGHT1_MTWT
doctors[25].set_at(93, NIGHT1_FSS)
doctors[25].set_at(150, NWD * 4) # remove LATE2_FSS
doctors[25].set_at(157, LATE1_FSS)
doctors[26].set_at(149, NIGHT1_FSS)
# logger.info("\n" + doctors[23].get_quick_pattern_string())
# Remove the basedoc (only present for temporary 1-based indexing!)
doctors = doctors[1:]
# Rota
logger.warning("Overriding specified start/end date")
return Rota(
"CPFT Aug 2015 South", shifts, doctors,
start_date=datetime.date(2015, 8, 5),
end_date=datetime.date(2016, 2, 2),
doctor_patterns_weekday_based=False,
nwd_shifts=[nwd],
comments=[
"<b>Author:</b> CPFT Medical Staffing, Aug 2015.",
"<b>Banding:</b> Band 1B (40%).",
"<b>Supplement cost:</b> 14 × 0.4 = 5.6 SHOs + 12 × 0.4 = 4.8 "
"SpRs, or approx. £360,000 pa.",
],
)
def cpft_actual_aug2015_north(**kwargs):
    """CPFT North rota, Aug 2015 - Feb 2016.

    Builds the rota from one repeating BASEDOC pattern, rotated by a week per
    doctor, then applies a handful of manual shift swaps. Extra keyword
    arguments are accepted (and ignored) for signature compatibility with the
    other rota factory functions.
    """
    # Shifts
    # ... colours to match the original
    nwd = Shift(
        "Normal_working_day", "nwd", datetime.time(9), 8, nwd_only=True,
        resident=True, rgb=(217, 150, 148))
    late1 = Shift(
        "Late_1", "L1", datetime.time(9), 12.5,
        roles=["Late", "Late_1"],
        resident=True, shift_type=SHIFT_TYPES.FULL, rgb=(142, 180, 227))
    late2 = Shift(
        "Late_2", "L2", datetime.time(9), 12.5,
        roles=["Late", "Late_2"],
        resident=True, shift_type=SHIFT_TYPES.FULL, rgb=(0, 176, 240))
    late3 = Shift(
        "Late_3", "L3", datetime.time(9), 12.5,
        roles=["Late", "Late_3"],
        resident=True, shift_type=SHIFT_TYPES.FULL, rgb=(23, 55, 94))
    night1 = Shift(
        "Night_1", "N1", datetime.time(21, 15), 12,
        roles=["Night", "Night_1"],
        resident=True, shift_type=SHIFT_TYPES.FULL, rgb=(146, 208, 80))
    night2 = Shift(
        "Night_2", "N2", datetime.time(21, 15), 12,
        roles=["Night", "Night_2"],
        resident=True, shift_type=SHIFT_TYPES.FULL, rgb=(79, 98, 40))
    off = Shift(
        "Off", "OFF", datetime.time(9, 15), 7.75, work=False)
    shifts = [nwd, late1, late2, late3, night1, night2, off]
    # Doctors
    # ... so confusing to edit them manually! Just a list
    # 18-week repeating pattern, 7 entries per row (one row per week).
    basedoc = Doctor("BASEDOC", [
        night1, night1, night1, night1, off, nwd, nwd,
        nwd, nwd, nwd, nwd, late3, late3, late3,
        off, nwd, nwd, nwd, nwd, nwd, nwd,
        late1, late1, late1, late1, nwd, nwd, nwd,
        nwd, nwd, nwd, nwd, nwd, nwd, nwd,
        nwd, nwd, nwd, off, night2, night2, night2,
        off, off, off, nwd, nwd, nwd, nwd,
        nwd, nwd, nwd, nwd, late1, late1, late1,
        off, nwd, nwd, nwd, nwd, nwd, nwd,
        night2, night2, night2, night2, off, nwd, nwd,
        nwd, nwd, nwd, nwd, late2, late2, late2,
        off, nwd, nwd, nwd, nwd, nwd, nwd,
        late3, late3, late3, late3, nwd, nwd, nwd,
        nwd, nwd, nwd, nwd, nwd, nwd, nwd,
        nwd, nwd, nwd, off, night1, night1, night1,
        off, off, off, nwd, nwd, nwd, nwd,
        late2, late2, late2, late2, nwd, nwd, nwd,
        nwd, nwd, nwd, nwd, nwd, nwd, nwd,
    ], leave_weeks_per_year=0)
    basedoc.rotate(-2)  # move from a Monday to a Wednesday start
    doctors = [basedoc]  # so we can operate with 1-indexing for a while
    for i in range(1, 18 + 1):
        # SpRs get Section 12 approval and an extra week of leave.
        if i in [2, 3, 9, 10, 15, 17]:
            name = str(i) + "_SpR"
            ooh_roles = ["Section 12 approved"]
            leave_weeks_per_year = 6
        else:
            name = str(i) + "_SHO"
            ooh_roles = []
            leave_weeks_per_year = 5
        # FIX: removed a stray no-op expression statement ("name") that the
        # original left here — it evaluated the variable and discarded it.
        d = basedoc.copy(name=name, rotate=(i - 1) * 7)
        d.ooh_roles = ooh_roles
        d.leave_weeks_per_year = leave_weeks_per_year
        doctors.append(d)
    # Now the deviations from a simple pattern
    NWD = [nwd]
    # LATE1_MTWT = [late1, late1, late1, late1]
    LATE1_FSS = [late1, late1, late1, off]
    # LATE2_MTWT = [late2, late2, late2, late2]
    LATE2_FSS = [late2, late2, late2, off]
    # LATE3_MTWT = [late3, late3, late3, late3]
    # LATE3_FSS = [late3, late3, late3, off]
    # NIGHT1_MTWT = [night1, night1, night1, night1, off]
    # NIGHT1_FSS = [off, night1, night1, night1, off, off]
    # NIGHT2_MTWT = [night2, night2, night2, night2, off]
    # NIGHT2_FSS = [off, night2, night2, night2, off, off]
    # Doctors 4 and 16 swap a late weekend pair.
    doctors[4].set_at(52, LATE2_FSS)
    doctors[16].set_at(52, NWD * 4)  # remove LATE2_FSS
    doctors[4].set_at(73, NWD * 4)  # remove LATE1_FSS
    doctors[16].set_at(73, LATE1_FSS)
    # Remove the basedoc (only present for temporary 1-based indexing!)
    doctors = doctors[1:]
    # Rota
    logger.warning("Overriding specified start/end date")
    return Rota(
        "CPFT Aug 2015 North", shifts, doctors,
        start_date=datetime.date(2015, 8, 5),
        end_date=datetime.date(2016, 2, 2),
        doctor_patterns_weekday_based=False,
        nwd_shifts=[nwd],
        comments=[
            "<b>Author:</b> CPFT Medical Staffing, Aug 2015.",
            "<b>Banding:</b> Band 2B (50%).",
            "<b>Supplement cost:</b> 12 × 0.5 = 6 SHOs + 6 × 0.5 = 3 SpRs, or "
            "approx. £305,000.",
        ],
        # allow_wtr_breach=True,
    )
def cpft_actual_aug2015_south_ltft(**kwargs):
"""CPFT South rota, Aug 2015 - Feb 2016: pair of LTFT job-sharing
doctors making up one element of the main CPFT South rota."""
# Shifts
# ... colours to match the original
nwd = Shift(
"Normal_working_day", "nwd", datetime.time(9), 8, nwd_only=True,
resident=True, rgb=(217, 150, 148))
late1 = Shift(
"Late_1", "L1", datetime.time(9), 12.5,
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy3 Experiment Builder (v2021.2.1),
on Thu Aug 5 14:03:29 2021
If you publish work using this script the most relevant publication is:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. (2019)
PsychoPy2: Experiments in behavior made easy Behav Res 51: 195.
https://doi.org/10.3758/s13428-018-01193-y
"""
from __future__ import absolute_import, division
from psychopy import locale_setup
from psychopy import prefs
prefs.hardware['audioLib'] = 'ptb'
from psychopy import sound, gui, visual, core, data, event, logging, clock, colors
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle, choice as randchoice
import os # handy system and path functions
import sys # to get file system encoding
from psychopy.hardware import keyboard
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
psychopyVersion = '2021.2.1'
expName = 'psychopy_tutorial_test'  # from the Builder filename that created this script
expInfo = {'participant': '', 'session': '001'}
# Ask the experimenter for participant/session info before starting.
dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)
if not dlg.OK:  # user pressed cancel (idiomatic truthiness check instead of `== False`)
    core.quit()
expInfo['date'] = data.getDateStr()  # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    originPath='/Users/martinzettersten/Desktop/psychopy_tutorial/animal_words_2afc.py',
    savePickle=True, saveWideText=True,
    dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING)  # this outputs to the screen, not a file
endExpNow = False  # flag for 'escape' or other condition => quit the exp
frameTolerance = 0.001  # how close to onset before 'same' frame
# Start Code - component code to be run after the window creation
# Setup the Window
win = visual.Window(
    size=[1440, 900], fullscr=True, screen=0,
    winType='pyglet', allowGUI=False, allowStencil=False,
    monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
    blendMode='avg', useFBO=True,
    units='height')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] is not None:  # identity check: measurement can fail and yield None
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0  # could not measure, so guess
# Setup eyetracking
ioDevice = ioConfig = ioSession = ioServer = eyetracker = None
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
# Initialize components for Routine "start"
startClock = core.Clock()
# Yellow star drawn at the center; in the run loop below it is the
# clickable object (clickableList = [star]) the participant must press.
star = visual.ShapeStim(
    win=win, name='star', vertices='star7',
    size=(0.2, 0.2),
    ori=0.0, pos=(0, 0),
    lineWidth=1.0, colorSpace='rgb', lineColor='yellow', fillColor='yellow',
    opacity=None, depth=0.0, interpolate=True)
# White backing rectangles behind the right/left choice images.
right_rectangle = visual.Rect(
    win=win, name='right_rectangle',
    width=(0.5, 0.5)[0], height=(0.5, 0.5)[1],
    ori=0.0, pos=(0.5, 0),
    lineWidth=1.0, colorSpace='rgb', lineColor='white', fillColor='white',
    opacity=None, depth=-1.0, interpolate=True)
left_rectangle = visual.Rect(
    win=win, name='left_rectangle',
    width=(0.5, 0.5)[0], height=(0.5, 0.5)[1],
    ori=0.0, pos=(-0.5, 0),
    lineWidth=1.0, colorSpace='rgb', lineColor='white', fillColor='white',
    opacity=None, depth=-2.0, interpolate=True)
# Choice images; 'sin' is only a placeholder texture — the real images
# are assigned per trial via setImage(r_image) / setImage(l_image).
right_image = visual.ImageStim(
    win=win,
    name='right_image',
    image='sin', mask=None,
    ori=0.0, pos=(0.5, 0), size=(0.6, 0.6),
    color=[1,1,1], colorSpace='rgb', opacity=None,
    flipHoriz=False, flipVert=False,
    texRes=128.0, interpolate=True, depth=-3.0)
left_image = visual.ImageStim(
    win=win,
    name='left_image',
    image='sin', mask=None,
    ori=0.0, pos=(-0.5, 0), size=(0.6,0.6),
    color=[1,1,1], colorSpace='rgb', opacity=None,
    flipHoriz=False, flipVert=False,
    texRes=128.0, interpolate=True, depth=-4.0)
# Mouse that collects the click ending the "start" routine; position and
# button lists for it are (re)initialised at the top of each repeat.
mouse = event.Mouse(win=win)
x, y = [None, None]
mouse.mouseClock = core.Clock()
# Initialize components for Routine "trial"
trialClock = core.Clock()
# Same left/right rectangle + image layout as the "start" routine, but
# with separate component instances for the trial phase.
left_rectangle_trial = visual.Rect(
    win=win, name='left_rectangle_trial',
    width=(0.5, 0.5)[0], height=(0.5, 0.5)[1],
    ori=0.0, pos=(-0.5, 0),
    lineWidth=1.0, colorSpace='rgb', lineColor='white', fillColor='white',
    opacity=None, depth=0.0, interpolate=True)
right_rectangle_trial = visual.Rect(
    win=win, name='right_rectangle_trial',
    width=(0.5, 0.5)[0], height=(0.5, 0.5)[1],
    ori=0.0, pos=(0.5, 0),
    lineWidth=1.0, colorSpace='rgb', lineColor='white', fillColor='white',
    opacity=None, depth=-1.0, interpolate=True)
# Placeholder 'sin' textures — actual trial images are set per repeat.
left_image_trial = visual.ImageStim(
    win=win,
    name='left_image_trial',
    image='sin', mask=None,
    ori=0.0, pos=(-0.5, 0), size=(0.6, 0.6),
    color=[1,1,1], colorSpace='rgb', opacity=None,
    flipHoriz=False, flipVert=False,
    texRes=128.0, interpolate=True, depth=-2.0)
right_image_trial = visual.ImageStim(
    win=win,
    name='right_image_trial',
    image='sin', mask=None,
    ori=0.0, pos=(0.5, 0), size=(0.6, 0.6),
    color=[1,1,1], colorSpace='rgb', opacity=None,
    flipHoriz=False, flipVert=False,
    texRes=128.0, interpolate=True, depth=-3.0)
# Separate mouse instance for response collection during the trial.
trial_mouse = event.Mouse(win=win)
x, y = [None, None]
trial_mouse.mouseClock = core.Clock()
# Audio prompt played during the trial; 'A' is a placeholder tone —
# presumably replaced per trial from the conditions file (verify against
# the trial routine, which is outside this view).
trial_sound = sound.Sound('A', secs=1.5, stereo=True, hamming=True,
    name='trial_sound')
trial_sound.setVolume(1.0)
# Create some handy timers
globalClock = core.Clock()  # to track the time since experiment started
routineTimer = core.CountdownTimer()  # to track time remaining of each (non-slip) routine
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=1.0, method='random',
    extraInfo=expInfo, originPath=-1,
    trialList=data.importConditions('psychopy_tutorial_trials.csv'),
    seed=None, name='trials')
thisExp.addLoop(trials)  # add the loop to the experiment
thisTrial = trials.trialList[0]  # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial is not None:  # identity check instead of `!= None`
    for paramName in thisTrial:
        # NOTE: Builder-generated pattern — exec() injects each condition
        # column (e.g. r_image, l_image) as a module-level variable. Only
        # acceptable because the conditions CSV is trusted local input.
        exec('{} = thisTrial[paramName]'.format(paramName))
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
# ------Prepare to start Routine "start"-------
continueRoutine = True
# update component parameters for each repeat
right_image.setImage(r_image)
left_image.setImage(l_image)
# setup some python lists for storing info about the mouse
mouse.x = []
mouse.y = []
mouse.leftButton = []
mouse.midButton = []
mouse.rightButton = []
mouse.time = []
mouse.clicked_name = []
gotValidClick = False # until a click is received
mouse.mouseClock.reset()
# keep track of which components have finished
startComponents = [star, right_rectangle, left_rectangle, right_image, left_image, mouse]
for thisComponent in startComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
startClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "start"-------
while continueRoutine:
# get current time
t = startClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=startClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *star* updates
if star.status == NOT_STARTED and tThisFlip >= 0.3-frameTolerance:
# keep track of start time/frame for later
star.frameNStart = frameN # exact frame index
star.tStart = t # local t and not account for scr refresh
star.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(star, 'tStartRefresh') # time at next scr refresh
star.setAutoDraw(True)
# *right_rectangle* updates
if right_rectangle.status == NOT_STARTED and tThisFlip >= 0.3-frameTolerance:
# keep track of start time/frame for later
right_rectangle.frameNStart = frameN # exact frame index
right_rectangle.tStart = t # local t and not account for scr refresh
right_rectangle.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(right_rectangle, 'tStartRefresh') # time at next scr refresh
right_rectangle.setAutoDraw(True)
# *left_rectangle* updates
if left_rectangle.status == NOT_STARTED and tThisFlip >= 0.3-frameTolerance:
# keep track of start time/frame for later
left_rectangle.frameNStart = frameN # exact frame index
left_rectangle.tStart = t # local t and not account for scr refresh
left_rectangle.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(left_rectangle, 'tStartRefresh') # time at next scr refresh
left_rectangle.setAutoDraw(True)
# *right_image* updates
if right_image.status == NOT_STARTED and tThisFlip >= 0.3-frameTolerance:
# keep track of start time/frame for later
right_image.frameNStart = frameN # exact frame index
right_image.tStart = t # local t and not account for scr refresh
right_image.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(right_image, 'tStartRefresh') # time at next scr refresh
right_image.setAutoDraw(True)
# *left_image* updates
if left_image.status == NOT_STARTED and tThisFlip >= 0.3-frameTolerance:
# keep track of start time/frame for later
left_image.frameNStart = frameN # exact frame index
left_image.tStart = t # local t and not account for scr refresh
left_image.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(left_image, 'tStartRefresh') # time at next scr refresh
left_image.setAutoDraw(True)
# *mouse* updates
if mouse.status == NOT_STARTED and t >= 0.3-frameTolerance:
# keep track of start time/frame for later
mouse.frameNStart = frameN # exact frame index
mouse.tStart = t # local t and not account for scr refresh
mouse.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(mouse, 'tStartRefresh') # time at next scr refresh
mouse.status = STARTED
prevButtonState = [0, 0, 0] # if now button is down we will treat as 'new' click
if mouse.status == STARTED: # only update if started and not finished!
buttons = mouse.getPressed()
if buttons != prevButtonState: # button state changed?
prevButtonState = buttons
if sum(buttons) > 0: # state changed to a new click
# check if the mouse was inside our 'clickable' objects
gotValidClick = False
try:
iter(star)
clickableList = star
except:
clickableList = [star]
for obj in clickableList:
if obj.contains(mouse):
gotValidClick = True
mouse.clicked_name.append(obj.name)
x, y = mouse.getPos()
mouse.x.append(x)
mouse.y.append(y)
buttons = mouse.getPressed()
mouse.leftButton.append(buttons[0])
mouse.midButton.append(buttons[1])
mouse.rightButton.append(buttons[2])
mouse.time.append(mouse.mouseClock.getTime())
if gotValidClick: # | |
<reponame>govvijaycal/carla<filename>PythonAPI/util/raycast_sensor_testing.py
#!/usr/bin/env python
# Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Raycast sensor profiler
This script can be used to test, visualize and profile the raycast sensors,
LiDARs and Radar (radar visualization not available, sorry).
By default, the script render one RGB Camera and three LiDARS whose output
can be visualized in a window just running:
python raycast_sensor_testing.py
For profiling, you can choose the number of LiDARs and Radars and then use
the profiling option to run a series of simulations for points from 100k
to 1.5M per second. In this mode we do not render anything but processing
of the data is done.
For example for profiling one lidar:
python raycast_sensor_testing.py -ln 1 --profiling
And for profiling one radar:
python raycast_sensor_testing.py -rn 1 --profiling
"""
import glob
import os
import sys
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import argparse
import random
import time
import numpy as np
try:
import pygame
from pygame.locals import K_ESCAPE
from pygame.locals import K_q
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
class CustomTimer:
    """Small clock wrapper returning timestamps in fractional seconds.

    Prefers the high-resolution ``time.perf_counter`` and falls back to
    ``time.time`` when it is unavailable.
    """

    def __init__(self):
        # Equivalent to the try/except AttributeError pattern: bind
        # perf_counter when the attribute exists, otherwise time.time.
        self.timer = getattr(time, "perf_counter", time.time)

    def time(self):
        """Return the current timestamp from the selected clock."""
        return self.timer()
class DisplayManager:
    """Arranges sensor outputs on a grid inside a single pygame window.

    When ``show_window`` is False no window is created and rendering is a
    no-op — this is what the profiling mode relies on.
    """

    def __init__(self, grid_size, window_size, show_window=True):
        if show_window:
            pygame.init()
            pygame.font.init()
            self.display = pygame.display.set_mode(window_size, pygame.HWSURFACE | pygame.DOUBLEBUF)
        else:
            self.display = None  # headless: render()/render_enabled() become no-ops
        self.grid_size = grid_size      # [columns, rows] of the sensor grid
        self.window_size = window_size  # [width, height] in pixels
        self.sensor_list = []

    def get_window_size(self):
        """Return the full window size as [width, height]."""
        return [int(self.window_size[0]), int(self.window_size[1])]

    def get_display_size(self):
        """Return the size of one grid cell as [width, height]."""
        return [int(self.window_size[0]/self.grid_size[0]), int(self.window_size[1]/self.grid_size[1])]

    def get_display_offset(self, gridPos):
        """Return the pixel offset of grid cell ``gridPos`` ([col, row])."""
        dis_size = self.get_display_size()
        return [int(gridPos[0] * dis_size[0]), int(gridPos[1] * dis_size[1])]

    def add_sensor(self, sensor):
        """Register a sensor so render()/destroy() include it."""
        self.sensor_list.append(sensor)

    def get_sensor_list(self):
        return self.sensor_list

    def render(self):
        """Blit every registered sensor and flip the display (no-op headless)."""
        if not self.render_enabled():
            return
        for s in self.sensor_list:
            s.render()
        pygame.display.flip()

    def destroy(self):
        """Destroy every registered sensor."""
        for s in self.sensor_list:
            s.destroy()

    def render_enabled(self):
        # Idiom fix: identity comparison (`is not None`) instead of `!= None`.
        return self.display is not None
class SensorManager:
    """Wraps one CARLA sensor (RGB camera, LiDAR or Radar).

    Spawns the sensor attached to a parent actor, receives its data on a
    listen() callback, converts the data to a pygame surface when rendering
    is enabled, and accumulates per-callback processing time for profiling.
    Registers itself with the DisplayManager on construction.
    """
    def __init__(self, world, display_man, sensor_type, transform, attached, sensor_options, display_pos):
        self.surface = None  # latest pygame surface produced by a callback
        self.world = world
        self.display_man = display_man
        self.display_pos = display_pos  # [col, row] cell in the display grid
        self.sensor = self.init_sensor(sensor_type, transform, attached, sensor_options)
        self.sensor_options = sensor_options
        self.timer = CustomTimer()
        # Profiling accumulators: total seconds spent in callbacks and the
        # number of callbacks processed.
        self.time_processing = 0.0
        self.tics_processing = 0
        self.display_man.add_sensor(self)
    def init_sensor(self, sensor_type, transform, attached, sensor_options):
        """Spawn and return the requested sensor actor attached to `attached`.

        Every key/value in `sensor_options` is applied as a blueprint
        attribute. Returns None for an unknown sensor_type.
        """
        if sensor_type == 'RGBCamera':
            camera_bp = self.world.get_blueprint_library().find('sensor.camera.rgb')
            # Match the camera resolution to the size of one grid cell.
            disp_size = self.display_man.get_display_size()
            camera_bp.set_attribute('image_size_x', str(disp_size[0]))
            camera_bp.set_attribute('image_size_y', str(disp_size[1]))
            for key in sensor_options:
                camera_bp.set_attribute(key, sensor_options[key])
            camera = self.world.spawn_actor(camera_bp, transform, attach_to=attached)
            camera.listen(self.save_rgb_image)
            return camera
        elif sensor_type == 'LiDAR':
            lidar_bp = self.world.get_blueprint_library().find('sensor.lidar.ray_cast')
            # Default range; callers may override it via sensor_options below.
            lidar_bp.set_attribute('range', '100')
            for key in sensor_options:
                lidar_bp.set_attribute(key, sensor_options[key])
            lidar = self.world.spawn_actor(lidar_bp, transform, attach_to=attached)
            lidar.listen(self.save_lidar_image)
            return lidar
        elif sensor_type == "Radar":
            radar_bp = self.world.get_blueprint_library().find('sensor.other.radar')
            for key in sensor_options:
                radar_bp.set_attribute(key, sensor_options[key])
            radar = self.world.spawn_actor(radar_bp, transform, attach_to=attached)
            radar.listen(self.save_radar_image)
            return radar
        else:
            return None
    def get_sensor(self):
        return self.sensor
    def save_rgb_image(self, image):
        """Camera callback: convert the raw 4-channel frame to an RGB surface."""
        t_start = self.timer.time()
        image.convert(carla.ColorConverter.Raw)
        array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
        # Raw data arrives as height x width x 4 bytes per pixel.
        array = np.reshape(array, (image.height, image.width, 4))
        array = array[:, :, :3]  # drop the 4th (alpha) channel
        array = array[:, :, ::-1]  # reverse channel order for pygame
        if self.display_man.render_enabled():
            self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
        t_end = self.timer.time()
        self.time_processing += (t_end-t_start)
        self.tics_processing += 1
    def save_lidar_image(self, image):
        """LiDAR callback: project the point cloud onto a 2D top-down image."""
        t_start = self.timer.time()
        disp_size = self.display_man.get_display_size()
        # Full span is twice the configured range (+range to -range).
        lidar_range = 2.0*float(self.sensor_options['range'])
        points = np.frombuffer(image.raw_data, dtype=np.dtype('f4'))
        # Each measurement is 4 floats; only the first two columns are kept.
        points = np.reshape(points, (int(points.shape[0] / 4), 4))
        lidar_data = np.array(points[:, :2])
        # Scale to the cell size and shift the origin to the cell center.
        lidar_data *= min(disp_size) / lidar_range
        lidar_data += (0.5 * disp_size[0], 0.5 * disp_size[1])
        lidar_data = np.fabs(lidar_data) # pylint: disable=E1111
        lidar_data = lidar_data.astype(np.int32)
        lidar_data = np.reshape(lidar_data, (-1, 2))
        lidar_img_size = (disp_size[0], disp_size[1], 3)
        lidar_img = np.zeros((lidar_img_size), dtype=np.uint8)
        # Paint each projected point white on a black image.
        lidar_img[tuple(lidar_data.T)] = (255, 255, 255)
        if self.display_man.render_enabled():
            self.surface = pygame.surfarray.make_surface(lidar_img)
        t_end = self.timer.time()
        self.time_processing += (t_end-t_start)
        self.tics_processing += 1
    def save_radar_image(self, radar_data):
        """Radar callback: parse the raw buffer (no visualization available).

        Only timing is recorded; the parsed array is intentionally unused.
        """
        t_start = self.timer.time()
        #print("Hola, saving Radar data!!")
        # To get a numpy [[vel, altitude, azimuth, depth],...[,,,]]:
        points = np.frombuffer(radar_data.raw_data, dtype=np.dtype('f4'))
        points = np.reshape(points, (len(radar_data), 4))
        t_end = self.timer.time()
        self.time_processing += (t_end-t_start)
        self.tics_processing += 1
    def render(self):
        """Blit the latest surface into this sensor's cell of the window."""
        if self.surface is not None:
            offset = self.display_man.get_display_offset(self.display_pos)
            self.display_man.display.blit(self.surface, offset)
    def destroy(self):
        self.sensor.destroy()
def one_run(args, client):
    """Perform one test run using the `args` parameters on the given client.

    Spawns an autopilot vehicle, attaches the requested camera/LiDAR/Radar
    sensors, then loops ticking the world. In normal mode it prints FPS
    stats every 30 frames until the window is closed; in profiling mode it
    measures for a fixed interval and returns a formatted stats string
    (empty string otherwise). Actors and world settings are restored in the
    `finally` block.
    """
    display_manager = None
    vehicle = None
    vehicle_list = []
    prof_str = ""
    timer = CustomTimer()
    try:
        # Getting the world and
        world = client.get_world()
        if args.sync:
            traffic_manager = client.get_trafficmanager(8000)
            settings = world.get_settings()
            traffic_manager.set_synchronous_mode(True)
            settings.synchronous_mode = True
            settings.fixed_delta_seconds = 0.05
            world.apply_settings(settings)
        if args.profiling:
            # NOTE(review): `settings` is mutated *after* apply_settings()
            # above, so no_rendering_mode is never sent to the simulator
            # (and `settings` is undefined here when --async is used) —
            # confirm whether apply_settings() should be called again.
            settings.no_rendering_mode = True
        # Instanciating the vehicle to which we attached the sensors
        bp = world.get_blueprint_library().filter('vehicle')[0]
        if bp.has_attribute('color'):
            color = random.choice(bp.get_attribute('color').recommended_values)
            bp.set_attribute('color', color)
        vehicle = world.spawn_actor(bp, world.get_map().get_spawn_points()[0])
        vehicle_list.append(vehicle)
        vehicle.set_autopilot(True)
        # Display Manager organize all the sensors an its display in a window
        display_manager = DisplayManager(grid_size=[2, 2], window_size=[args.width, args.height], show_window=args.render_window)
        # If require, we instanciate the RGB camera
        if args.render_cam:
            SensorManager(world, display_manager, 'RGBCamera', carla.Transform(carla.Location(x=1.5, z=2.4)), vehicle, {}, [0, 0])
        # If any, we instanciate the required lidars
        lidar_points_per_second = args.lidar_points
        if args.lidar_number >= 3:
            SensorManager(world, display_manager, 'LiDAR', carla.Transform(carla.Location(x=0, z=2.4)), vehicle, {'channels' : '64', 'range' : '50', 'points_per_second': lidar_points_per_second}, [1, 0])
        if args.lidar_number >= 2:
            SensorManager(world, display_manager, 'LiDAR', carla.Transform(carla.Location(x=0, z=2.4)), vehicle, {'channels' : '64', 'range' : '100', 'points_per_second': lidar_points_per_second}, [0, 1])
        if args.lidar_number >= 1:
            SensorManager(world, display_manager, 'LiDAR', carla.Transform(carla.Location(x=0, z=2.4)), vehicle, {'channels' : '64', 'range' : '200', 'points_per_second': lidar_points_per_second}, [1, 1])
        # If any, we instanciate the required radars
        radar_points_per_second = args.radar_points
        # NOTE(review): all radars share grid cell [2, 2], which is outside
        # the 2x2 grid — radar output is never rendered (see module docstring).
        if args.radar_number >= 3:
            SensorManager(world, display_manager, 'Radar', carla.Transform(carla.Location(x=0, z=2.4), carla.Rotation(pitch=5, yaw=90)), vehicle, {'points_per_second': radar_points_per_second}, [2, 2])
        if args.radar_number >= 2:
            SensorManager(world, display_manager, 'Radar', carla.Transform(carla.Location(x=0, z=2.4), carla.Rotation(pitch=5, yaw=-90)), vehicle, {'points_per_second': radar_points_per_second}, [2, 2])
        if args.radar_number >= 1:
            SensorManager(world, display_manager, 'Radar', carla.Transform(carla.Location(x=0, z=2.4), carla.Rotation(pitch=5)), vehicle, {'points_per_second': radar_points_per_second}, [2, 2])
        call_exit = False
        time_init_sim = timer.time()
        frame = 0
        time0 = timer.time()
        while True:
            frame += 1
            # Carla Tick
            if args.sync:
                world.tick()
            else:
                world.wait_for_tick()
            # Render received data
            display_manager.render()
            # Time measurement for profiling or to output
            if not args.profiling:
                # Interactive mode: print FPS stats every 30 frames, then
                # reset the per-sensor accumulators.
                if frame == 30:
                    time_frames = timer.time() - time0
                    time_procc = 0
                    for sensor in display_manager.sensor_list:
                        time_procc += sensor.time_processing
                    print("FPS: %.3f %.3f %.3f" % (time_frames, 1.0/time_frames * 30, time_procc/time_frames))
                    frame = 0
                    for sensor in display_manager.sensor_list:
                        sensor.time_processing = 0
                        sensor.tics_processing = 0
                    time0 = timer.time()
                if args.render_window:
                    # Exit on window close, Escape or Q.
                    for event in pygame.event.get():
                        if event.type == pygame.QUIT:
                            call_exit = True
                        elif event.type == pygame.KEYDOWN:
                            if event.key == K_ESCAPE or event.key == K_q:
                                call_exit = True
                                break
            else:
                # Profiling mode: discard the first 5 s as warm-up, then
                # measure for 10 s and emit one formatted stats line.
                if (timer.time() - time_init_sim) < 5.0:
                    frame = 0
                    for sensor in display_manager.sensor_list:
                        sensor.time_processing = 0
                        sensor.tics_processing = 0
                    time0 = timer.time()
                if (timer.time() - time0) > 10.0:
                    time_frames = timer.time() - time0
                    time_procc = 0
                    for sensor in display_manager.sensor_list:
                        time_procc += sensor.time_processing
                    prof_str = "%-10s %-9s %-15s %-7.2f %-20.3f" % (args.lidar_number, args.radar_number, lidar_points_per_second, float(frame) / time_frames, time_procc/time_frames)
                    break
            if call_exit:
                break
    finally:
        # Clean up sensors/vehicle and restore asynchronous world settings.
        if display_manager:
            display_manager.destroy()
        client.apply_batch([carla.command.DestroyActor(x) for x in vehicle_list])
        if args.sync:
            settings = world.get_settings()
            settings.synchronous_mode = False
            settings.fixed_delta_seconds = None
            world.apply_settings(settings)
    return prof_str
def main():
argparser = argparse.ArgumentParser(
description='CARLA Sensor tutorial')
argparser.add_argument(
'--host',
metavar='H',
default='127.0.0.1',
help='IP of the host server (default: 127.0.0.1)')
argparser.add_argument(
'-p', '--port',
metavar='P',
default=2000,
type=int,
help='TCP port to listen to (default: 2000)')
argparser.add_argument(
'--sync',
action='store_true',
help='Synchronous mode execution')
argparser.add_argument(
'--async',
dest='sync',
action='store_false',
help='Asynchronous mode execution')
argparser.set_defaults(sync=True)
argparser.add_argument(
'--res',
metavar='WIDTHxHEIGHT',
default='1280x720',
help='window resolution (default: 1280x720)')
argparser.add_argument(
'-lp', '--lidar_points',
metavar='LP',
default='100000',
help='lidar points per second (default: "100000")')
argparser.add_argument(
'-ln', '--lidar_number',
metavar='LN',
default=3,
type=int,
choices=range(0, 4),
help='Number of lidars to render (from zero to three)')
argparser.add_argument(
'-rp', '--radar_points',
metavar='RP',
default='100000',
help='radar points per second (default: "100000")')
argparser.add_argument(
'-rn', '--radar_number',
metavar='LN',
default=0,
type=int,
choices=range(0, 4),
help='Number of radars to render (from zero to three)')
argparser.add_argument(
'--camera',
dest='render_cam', action='store_true',
help='render also RGB camera (camera enable by default)')
argparser.add_argument('--no-camera',
dest='render_cam', action='store_false',
help='no render RGB camera (camera disable by default)')
argparser.set_defaults(render_cam=True)
argparser.add_argument(
'--profiling',
action='store_true',
help='Use the script in profiling mode. It measures the performance of \
the lidar for different number of points.')
argparser.set_defaults(profiling=False)
argparser.add_argument(
'--no-render-window',
action='store_false',
dest='render_window',
help='Render visualization window.')
argparser.set_defaults(render_window=True)
args = argparser.parse_args()
args.width, args.height = [int(x) for x in args.res.split('x')]
try:
client = carla.Client(args.host, args.port)
client.set_timeout(5.0)
if args.profiling:
print("-------------------------------------------------------")
print("# Running profiling with %s lidars and %s radars." % (args.lidar_number, args.radar_number))
args.render_cam = False
args.render_window = False
runs_output = []
points_range = ['100000', '200000', '300000', '400000', '500000',
'600000', '700000', '800000', '900000', '1000000',
'1100000', '1200000', '1300000', '1400000', '1500000']
for points in points_range:
args.lidar_points = points
args.radar_points = points
run_str = one_run(args, client)
runs_output.append(run_str)
| |
5.111 8.080 7.645 1.00 20.00 N
ATOM 2 CA THRA1 2 5.000 6.722 7.125 1.00 20.00 C
ATOM 3 C THRA1 3 5.075 5.694 8.249 1.00 20.00 C
TER
ATOM 4 O THRB1 4 5.890 5.818 9.163 1.00 20.00 O
ATOM 5 CB THRB1 5 6.101 6.421 6.092 1.00 20.00 C
TER
ATOM 6 OG1 THRA1 6 6.001 7.343 5.000 1.00 20.00 O
ATOM 7 CG2 THRA1 7 5.964 5.000 5.565 1.00 20.00 C
TER
ATOM 1 N THRA2 1 1.790 4.994 11.032 1.00 20.00 N
ATOM 2 CA THRA2 2 2.306 4.556 9.740 1.00 20.00 C
ATOM 3 C THRA2 3 3.660 3.870 9.892 1.00 20.00 C
TER
ATOM 4 O THRB2 4 4.517 4.327 10.648 1.00 20.00 O
ATOM 5 CB THRB2 5 2.445 5.734 8.759 1.00 20.00 C
TER
ATOM 6 OG1 THRA2 6 1.166 6.348 8.560 1.00 20.00 O
ATOM 7 CG2 THRA2 7 2.985 5.251 7.420 1.00 20.00 C
TER
ATOM 1 N THRA3 1 4.100 -0.147 11.534 1.00 20.00 N
ATOM 2 CA THRA3 2 3.886 0.555 10.273 1.00 20.00 C
ATOM 3 C THRA3 3 5.088 1.422 9.915 1.00 20.00 C
TER
ATOM 4 O THRB3 4 5.659 2.093 10.775 1.00 20.00 O
ATOM 5 CB THRB3 5 2.625 1.437 10.325 1.00 20.00 C
TER
ATOM 6 OG1 THRA3 6 1.479 0.622 10.600 1.00 20.00 O
ATOM 7 CG2 THRA3 7 2.424 2.158 9.000 1.00 20.00 C
TER
END
"""
pdb_str_2b="""
HELIX 1 1 THR A 1 THR A 2 1 6
SHEET 1 A 2 THR A 1 THR A 3 0
SHEET 2 A 2 THR B 4 THR B 5 -1 O THR B 4 N THR A 2
MTRIX1 1 1.000000 0.000000 0.000000 0.00000 1
MTRIX2 1 0.000000 1.000000 0.000000 0.00000 1
MTRIX3 1 0.000000 0.000000 1.000000 0.00000 1
MTRIX1 2 0.496590 -0.643597 0.582393 0.00000
MTRIX2 2 0.867925 0.376088 -0.324443 0.00000
MTRIX3 2 -0.010221 0.666588 0.745356 0.00000
MTRIX1 3 -0.317946 -0.173437 0.932111 0.00000
MTRIX2 3 0.760735 -0.633422 0.141629 0.00000
MTRIX3 3 0.565855 0.754120 0.333333 0.00000
ATOM 1 N THR A 1 5.111 8.080 7.645 1.00 20.00 N
ATOM 2 CA THR A 2 5.000 6.722 7.125 1.00 20.00 C
ATOM 3 C THR A 3 5.075 5.694 8.249 1.00 20.00 C
TER
ATOM 4 O THR B 4 5.890 5.818 9.163 1.00 20.00 O
ATOM 5 CB THR B 5 6.101 6.421 6.092 1.00 20.00 C
TER
ATOM 6 OG1 THR A 6 6.001 7.343 5.000 1.00 20.00 O
ATOM 7 CG2 THR A 7 5.964 5.000 5.565 1.00 20.00 C
TER
END
"""
pdb_str_3="""
data_1A37
#
loop_
_database_PDB_rev_record.rev_num
_database_PDB_rev_record.type
_database_PDB_rev_record.details
2 SOURCE ?
2 COMPND ?
2 REMARK ?
2 SEQRES ?
2 KEYWDS ?
2 HEADER ?
3 VERSN ?
4 MTRIX1 ?
4 MTRIX2 ?
4 MTRIX3 ?
#
_cell.entry_id 1A37
_cell.length_a 94.730
_cell.length_b 94.730
_cell.length_c 250.870
_cell.angle_alpha 90.00
_cell.angle_beta 90.00
_cell.angle_gamma 120.00
_cell.Z_PDB 12
#
_symmetry.entry_id 1A37
_symmetry.space_group_name_H-M 'P 65'
_symmetry.pdbx_full_space_group_name_H-M ?
_symmetry.cell_setting ?
_symmetry.Int_Tables_number ?
_symmetry.space_group_name_Hall ?
#
loop_
_struct_ncs_oper.id
_struct_ncs_oper.code
_struct_ncs_oper.details
_struct_ncs_oper.matrix[1][1]
_struct_ncs_oper.matrix[1][2]
_struct_ncs_oper.matrix[1][3]
_struct_ncs_oper.matrix[2][1]
_struct_ncs_oper.matrix[2][2]
_struct_ncs_oper.matrix[2][3]
_struct_ncs_oper.matrix[3][1]
_struct_ncs_oper.matrix[3][2]
_struct_ncs_oper.matrix[3][3]
_struct_ncs_oper.vector[1]
_struct_ncs_oper.vector[2]
_struct_ncs_oper.vector[3]
1 given ? 1.000000 0.000000 0.000000 0.000000 1.000000 0.000000 0.000000 0.000000 1.000000 0.00000 0.00000 0.00000
2 generate ? -0.997443 0.000760 -0.071468 -0.000162 -0.999965 -0.008376 -0.071472 -0.008343 0.997408 59.52120 80.32820 2.38680
#
loop_
_atom_site.group_PDB
_atom_site.id
_atom_site.type_symbol
_atom_site.label_atom_id
_atom_site.label_alt_id
_atom_site.label_comp_id
_atom_site.label_asym_id
_atom_site.label_entity_id
_atom_site.label_seq_id
_atom_site.pdbx_PDB_ins_code
_atom_site.Cartn_x
_atom_site.Cartn_y
_atom_site.Cartn_z
_atom_site.occupancy
_atom_site.B_iso_or_equiv
_atom_site.Cartn_x_esd
_atom_site.Cartn_y_esd
_atom_site.Cartn_z_esd
_atom_site.occupancy_esd
_atom_site.B_iso_or_equiv_esd
_atom_site.pdbx_formal_charge
_atom_site.auth_seq_id
_atom_site.auth_comp_id
_atom_site.auth_asym_id
_atom_site.auth_atom_id
_atom_site.pdbx_PDB_model_num
ATOM 1 N N . MET A 1 1 ? 10.710 38.460 14.825 1.00 89.21 ? ? ? ? ? ? 1 MET A N 1
ATOM 2 C CA . MET A 1 1 ? 11.257 39.553 13.961 1.00 89.21 ? ? ? ? ? ? 1 MET A CA 1
ATOM 3 C C . MET A 1 1 ? 11.385 40.985 14.516 1.00 89.21 ? ? ? ? ? ? 1 MET A C 1
ATOM 4 O O . MET A 1 1 ? 12.376 41.648 14.218 1.00 89.21 ? ? ? ? ? ? 1 MET A O 1
ATOM 5 C CB . MET A 1 1 ? 10.514 39.584 12.633 1.00 72.05 ? ? ? ? ? ? 1 MET A CB 1
ATOM 6 C CG . MET A 1 1 ? 11.115 38.664 11.596 1.00 72.05 ? ? ? ? ? ? 1 MET A CG 1
ATOM 7 S SD . MET A 1 1 ? 12.048 39.609 10.386 1.00 72.05 ? ? ? ? ? ? 1 MET A SD 1
ATOM 8 C CE . MET A 1 1 ? 13.456 40.084 11.391 1.00 72.05 ? ? ? ? ? ? 1 MET A CE 1
ATOM 9 N N . ASP A 1 2 ? 10.381 41.467 15.263 1.00 81.99 ? ? ? ? ? ? 2 ASP A N 1
ATOM 10 C CA . ASP A 1 2 ? 10.350 42.822 15.886 1.00 81.99 ? ? ? ? ? ? 2 ASP A CA 1
ATOM 11 C C . ASP A 1 2 ? 10.651 44.060 15.038 1.00 81.99 ? ? ? ? ? ? 2 ASP A C 1
ATOM 12 O O . ASP A 1 2 ? 11.725 44.645 15.140 1.00 81.99 ? ? ? ? ? ? 2 ASP A O 1
ATOM 13 C CB . ASP A 1 2 ? 11.208 42.882 17.167 1.00 70.41 ? ? ? ? ? ? 2 ASP A CB 1
ATOM 14 C CG . ASP A 1 2 ? 11.000 44.178 17.963 1.00 70.41 ? ? ? ? ? ? 2 ASP A CG 1
ATOM 15 O OD1 . ASP A 1 2 ? 10.015 44.907 17.702 1.00 70.41 ? ? ? ? ? ? 2 ASP A OD1 1
ATOM 16 O OD2 . ASP A 1 2 ? 11.821 44.453 18.866 1.00 70.41 ? ? ? ? ? ? 2 ASP A OD2 1
#
"""
pdb_str_3a="""
data_default
_cell.angle_beta 90.000
_cell.angle_gamma 120.000
_cell.length_b 94.730
_cell.length_c 250.870
_cell.angle_alpha 90.000
_cell.volume 1949640.043
_cell.length_a 94.730
_space_group.crystal_system hexagonal
_space_group.name_H-M_alt 'P 65'
_space_group.IT_number 170
_space_group.name_Hall ' P 65'
_symmetry.space_group_name_H-M 'P 65'
_symmetry.Int_Tables_number 170
_symmetry.space_group_name_Hall ' P 65'
loop_
_space_group_symop.id
_space_group_symop.operation_xyz
1 x,y,z
2 x-y,x,z+5/6
3 y,-x+y,z+1/6
4 -y,x-y,z+2/3
5 -x+y,-x,z+1/3
6 -x,-y,z+1/2
loop_
_atom_site.group_PDB
_atom_site.id
_atom_site.label_atom_id
_atom_site.label_alt_id
_atom_site.label_comp_id
_atom_site.auth_asym_id
_atom_site.auth_seq_id
_atom_site.pdbx_PDB_ins_code
_atom_site.Cartn_x
_atom_site.Cartn_y
_atom_site.Cartn_z
_atom_site.occupancy
_atom_site.B_iso_or_equiv
_atom_site.type_symbol
_atom_site.pdbx_formal_charge
_atom_site.label_asym_id
_atom_site.label_entity_id
_atom_site.label_seq_id
_atom_site.pdbx_PDB_model_num
ATOM 1 N . MET A1 1 ? 10.71000 38.46000 14.82500 1.000 89.21000 N ? A ? 1 1
ATOM 2 CA . MET A1 1 ? 11.25700 39.55300 13.96100 1.000 89.21000 C ? A ? 1 1
ATOM 3 C . MET A1 1 ? 11.38500 40.98500 14.51600 1.000 89.21000 C ? A ? 1 1
ATOM 4 O . MET A1 1 ? 12.37600 41.64800 14.21800 1.000 89.21000 O ? A ? 1 1
ATOM 5 CB . MET A1 1 ? 10.51400 39.58400 12.63300 1.000 72.05000 C ? A ? 1 1
ATOM 6 CG . MET A1 1 ? 11.11500 38.66400 11.59600 1.000 72.05000 C ? A ? 1 1
ATOM 7 SD . MET A1 1 ? 12.04800 39.60900 10.38600 1.000 72.05000 S ? A ? 1 1
ATOM 8 CE . MET A1 1 ? 13.45600 40.08400 11.39100 1.000 72.05000 C ? A ? 1 1
ATOM 9 N . ASP A1 2 ? 10.38100 41.46700 15.26300 1.000 81.99000 N ? A ? 2 1
ATOM 10 CA . ASP A1 2 ? 10.35000 42.82200 15.88600 1.000 81.99000 C ? A ? 2 1
ATOM 11 C . ASP A1 2 ? 10.65100 44.06000 15.03800 1.000 81.99000 C ? A ? 2 1
ATOM 12 O . ASP A1 2 ? 11.72500 44.64500 15.14000 1.000 81.99000 O ? A ? 2 1
ATOM 13 CB . ASP A1 2 ? 11.20800 42.88200 17.16700 1.000 70.41000 C ? A ? 2 1
ATOM 14 CG . ASP A1 2 ? 11.00000 44.17800 17.96300 1.000 70.41000 C ? A ? 2 1
ATOM 15 OD1 . ASP A1 2 ? 10.01500 44.90700 17.70200 1.000 70.41000 O ? A ? 2 1
ATOM 16 OD2 . ASP A1 2 ? 11.82100 44.45300 18.86600 1.000 70.41000 O ? A ? 2 1
ATOM 1 N . MET A2 1 ? 47.80830 41.74364 16.08704 1.000 89.21000 N ? B ? 3 1
ATOM 2 CA . MET A2 1 ? 47.32528 40.65782 15.17706 1.000 89.21000 C ? B ? 3 1
ATOM 3 C . MET A2 1 ? 47.15903 39.22120 15.70953 1.000 89.21000 C ? B ? 3 1
ATOM 4 O . MET A2 1 ? 46.19237 38.56056 15.33594 1.000 89.21000 O ? B ? 3 1
ATOM 5 CB . MET A2 1 ? 48.16131 40.63807 13.90535 1.000 72.05000 C ? | |
<filename>characterClass.py<gh_stars>1-10
import intro
import itemGen
from msvcrt import getch, kbhit
from time import sleep
from random import randint
profList = ["Warrior", "Mage", "Thief", "Innkeeper", "Baker", "Fisherman", "Doctor"]
class Character(object):
    """Base class for every game actor (players, NPCs, monsters).

    Tracks name, hit points (HP), mana points (MP) and a profession
    string.  A class-level counter records how many characters have
    ever been created.
    """

    # Running total of Character instances ever created.
    totalChars = 0

    # Static method to keep track of total number of characters.
    @staticmethod
    def totalCount():
        """Return the total number of characters created so far."""
        return Character.totalChars

    def __init__(self, name, hp, mp, prof):
        self.__name = name          # display name
        self.__hp = hp              # current hit points
        self.__mp = mp              # current mana points
        self.__profession = prof    # e.g. an entry of profList, or "Monster"
        Character.totalChars += 1

    # toString
    def __str__(self):
        """Multi-line summary: name, then HP and MP."""
        result = ""
        result += "{}\n".format(self.__name)
        result += "HP: {}\n".format(str(self.__hp))
        result += "MP: {}\n".format(str(self.__mp))
        return result

    # --- Accessors ---------------------------------------------------
    def getName(self):
        return self.__name

    def getHP(self):
        return self.__hp

    def getMP(self):
        return self.__mp

    def getProf(self):
        return self.__profession

    # --- Mutators ----------------------------------------------------
    def setName(self, newName):
        # Input validation: 1-15 characters.
        # BUG FIX: the original condition `0 < len(newName) < 15` rejected
        # 15-character names even though the message promises "1 - 15".
        if 1 <= len(newName) <= 15:
            self.__name = newName
        else:
            print("Try again, names must be between 1 - 15 characters.")

    def setProf(self, newProf):
        # Only the three playable professions may be chosen.
        if newProf in profList[:3]:
            self.__profession = newProf
        else:
            print("Invalid profession.")

    def setHP(self, newHP):
        from time import sleep
        # Input validation: health above 0.
        if newHP > 0:
            self.__hp = newHP
            # Players cannot heal past their maximum HP.
            if isinstance(self, Player) and newHP > self.getMaxHP():
                self.__hp = self.getMaxHP()
        elif newHP <= 0:
            self.__hp = newHP
            # Dropping to 0 or below kills the character; monsters and
            # named characters get different death messages.
            if self.getProf() == "Monster":
                intro.slow_type(["\n\tThe " + self.__name.lower() + " has died.\n"])
            else:
                intro.slow_type(["\n\t" + self.__name + " has died.\n"])
            sleep(.5)
            if isinstance(self, Player):
                sleep(1)
                Player.callDeath()

    def setMP(self, newMP):
        self.__mp = newMP
        # Players cannot regain mana past their maximum MP.
        if isinstance(self, Player) and self.getMP() > self.getMaxMP():
            self.__mp = self.getMaxMP()
class Player(Character):
# Constructor
def __init__(self, name, hp, mp, prof, xMap=18, yMap=13, location=0):
super().__init__(name, hp, mp, prof)
self.__xMap = xMap
self.__yMap = yMap
self.__location = location
self.__maxHP = self.getHP()
self.__maxMP = self.getMP()
self.__inv = []
self.armor = []
self.dodgeChance = 0
self.didBattle = False
self.__didAction = False
self.__stopCombat = False
self.__keyCount = 0
self.__buffs = {}
self.__level = 1
self.__exp = 0
self.__expRequired = 100
Character.totalChars += 1
def __str__(self):
result = ""
result += "{} the {}\n".format(self.__name, self.__profession)
result += "HP: {}\n".format(str(self.__hp))
result += "MP: {}\n".format(str(self.__mp))
return result
    @staticmethod
    def callDeath():
        # Game-over handler: fade out the music, clear the console and
        # offer to restart the game.
        from os import system
        from pygame import mixer
        mixer.music.fadeout(5000)
        sleep(2)
        system('cls')  # Windows-only clear screen (game uses msvcrt anyway)
        intro.slow_type(["\n\n\tThanks for playing!",
                         "\n", "\tPlay again?:"])
        ans = input("\t >> ")
        if "y" in ans:
            # Restart by re-entering the game's main loop.
            from TextRPG import main
            main()
        else:
            quit()
# Getters
    def getLevel(self):
        # Current player level (starts at 1, capped at 5 by addExp).
        return self.__level
    def getMaxHP(self):
        # Upper bound used by Character.setHP when healing.
        return self.__maxHP
    def getMaxMP(self):
        # Upper bound used by Character.setMP when restoring mana.
        return self.__maxMP
    def getX(self):
        # Player's map column.
        return self.__xMap
    def getY(self):
        # Player's map row.
        return self.__yMap
    def getLoc(self):
        # Current area/zone id.
        return self.__location
    def getInv(self):
        # Inventory list; index 0 is the equipped weapon (see addItem).
        return self.__inv
    def getKeys(self):
        # Number of keys currently held.
        return self.__keyCount
    def getExp(self):
        # Experience accumulated toward the next level.
        return self.__exp
    def getExpReq(self):
        # Experience required for the next level-up.
        return self.__expRequired
    def getArmor(self):
        # Damage/defence value of the equipped shield.
        # NOTE(review): raises IndexError when self.armor is empty —
        # presumably callers check `len(self.armor)` first; confirm.
        return self.armor[0].getDam()
    def actionCheck(self):
        # True once the player has used up their combat turn.
        return self.__didAction
    def isCombatStopped(self):
        # True when combat has been flagged to end (see stopCombat).
        return self.__stopCombat
# Setters
    def setMaxHP(self, newMax):
        # Replace the healing cap (does not change current HP).
        self.__maxHP = newMax
    def didAction(self, action):
        # Mark (or clear) that the player consumed their combat turn.
        self.__didAction = action
    def stopCombat(self):
        # Flag the current combat loop to terminate.
        self.__stopCombat = True
    def resetCombat(self):
        # Clear the combat-stop flag before a new encounter.
        self.__stopCombat = False
    def addKey(self):
        # Pick up one key.
        self.__keyCount += 1
    def removeKey(self):
        # Spend one key (no underflow guard; callers check getKeys()).
        self.__keyCount -= 1
    def setLoc(self, newLoc):
        # Move the player to a different area/zone id.
        self.__location = newLoc
    def setX(self, newX):
        # Set the player's map column.
        self.__xMap = newX
    def setY(self, newY):
        # Set the player's map row.
        self.__yMap = newY
    def addItem(self, item):
        # Potions are appended to the end of the inventory; everything
        # else is inserted just after slot 0 (the equipped weapon).
        if "Potion" in item.getName():
            self.__inv.append(item)
        else:
            self.__inv.insert(1, item)
    def addBuff(self, buff, length):
        # Register (or refresh) a buff lasting `length` turns.
        self.__buffs[buff] = length
    def getBuffs(self):
        # Mapping of active buff name -> remaining turns.
        return self.__buffs
    def clearBuffs(self):
        # Remove every active buff (e.g. at end of combat).
        self.__buffs.clear()
    def removeBuff(self, buff):
        # Remove one buff; raises KeyError if it is not active.
        del self.__buffs[buff]
def buffDown(self):
if len(self.__buffs) > 0:
for buffLength in self.__buffs.values():
buffLength -= 1
for i in self.__buffs:
if self.__buffs[i] == 0:
self.__buffs.pop(i)
if i == "Shadow":
if self.getLevel() == 5:
self.dodgeChance = 30
else:
self.dodgeChance = 15
    def levelUp(self):
        # Raise the level by one, grow max HP/MP, and restore half of the
        # missing HP and MP.  At the cap (level 5) the passive doubles.
        self.__level += 1
        intro.slow_type(["You leveled up! You are now Level {}!".format(self.getLevel()),
                         "\nYou regained half your missing HP & MP!"])
        if self.getLevel() == 5:
            intro.slow_type(["You've reached the maximum level of 5, your passive trait has doubled."])
            # NOTE(review): dodgeChance is set for every profession here,
            # though it appears to be the Thief passive — confirm intent.
            self.dodgeChance = 30
        self.__maxHP += 20
        self.__maxMP += 10
        # Heal half the gap to the new maxima; setHP/setMP cap at max.
        self.setHP(self.getHP() + ((self.__maxHP - self.getHP()) // 2))
        self.setMP(self.getMP() + ((self.__maxMP - self.getMP()) // 2))
        sleep(.5)
def addExp(self, target):
if self.getLevel() < 5:
intro.slow_type(["You gained {} experience.".format(target.expValue)])
sleep(.5)
self.__exp += target.expValue
if self.__exp >= self.__expRequired:
self.levelUp()
overflow = self.__exp - self.__expRequired
self.__expRequired += 50
self.__exp = overflow
    def castSpell(self, E):
        """Interactive spell menu used in combat against enemy `E`.

        Spell lists come from spellData.txt: indices 0-3 Warrior,
        0 + 4-6 Mage, 0 + 7+ Thief; the number of choices is capped by
        the player's level.  Returns False when the player backs out,
        otherwise falls through after marking the turn as used.
        Input is read one keypress at a time via msvcrt.getch.
        """
        importName, importDesc = getSpellData("spellData.txt")
        spellNames = []
        spellDescs = []
        if self.getProf() == "Warrior":
            spellNames = importName[0:4]
            spellDescs = importDesc[0:4]
        elif self.getProf() == "Mage":
            if self.getLevel() > 1:
                spellNames = importName[4:7]
                spellDescs = importDesc[4:7]
            spellNames.insert(0, importName[0])
            spellDescs.insert(0, importDesc[0])
        else:
            if self.getLevel() > 1:
                spellNames = importName[7:]
                spellDescs = importDesc[7:]
            spellNames.insert(0, importName[0])
            spellDescs.insert(0, importDesc[0])
        # One spell unlocked per level.
        spellNames = spellNames[:self.getLevel()]
        spellDescs = spellDescs[:self.getLevel()]
        print("\nSpells:")
        for spell in range(len(spellNames)):
            print("\t{}) {} {}".format(spell + 1, spellNames[spell], spellDescs[spell]))
        print("\n0) Return to previous menu")
        # Practicing some list comprehension
        # This little guy cut down on a TON of lines!
        cond = [" the" if E.getProf() == "Monster" else ""]
        name = [E.getName().lower() if E.getProf() == "Monster" else E.getName().title()]
        finished = False
        while not finished:
            try:
                # Flush input
                while kbhit():
                    getch()
                ans = getch().decode()
                ans = int(ans)
                # ans = int(input(">> "))
                while ans > len(spellNames):
                    ans = int(getch().decode())
                if ans == 0:
                    return False
                elif ans == 1:
                    # Spell 1 (all classes): heal, 10 MP.
                    if self.getMP() >= 10:
                        healAmount = randint(10, 20)
                        self.setMP(self.getMP() - 10)
                        intro.slow_type(["\n\tA restorative mist begins to rise around you...",
                                         "\n\tYou cast a healing spell, restoring {} health.".format(str(healAmount))])
                        self.setHP(self.getHP() + healAmount)
                        sleep(.5)
                        self.didAction(True)
                    else:
                        print("Not enough mana!")
                        sleep(1)
                elif ans == 2:
                    # Spell 2: class attack, 15 MP.
                    if self.getMP() >= 15:
                        damage = randint(20, 30)
                        self.setMP(self.getMP() - 15)
                        if self.getProf() == "Warrior":
                            intro.slow_type(["\n\tYou wind up an attack, preparing to cut deep into your enemy...",
                                             "\n\tYou strike{} {} with expert precision, doing {} damage!"
                                             .format(cond[0], name[0], str(damage))])
                            E.setHP(E.getHP() - damage)
                            self.addBuff("Bleed", 2)
                        elif self.getProf() == "Mage":
                            intro.slow_type(["\n\tFire swirls amongst your fingertips as you begin to concentrate...",
                                             "\n\tYou cast a fireball at{} {}, doing {} damage!"
                                             .format(cond[0], name[0], str(damage))])
                            E.setHP(E.getHP() - damage)
                            self.addBuff("Burn", 2)
                        elif self.getProf() == "Thief":
                            intro.slow_type(["\n\tYou begin to sink into the shadows surrounding you...",
                                             "\n\tYou appear behind{} {} and strike, doing {} damage!"
                                             .format(cond[0], name[0], round(self.getDamage() * 1.5))])
                            E.setHP(E.getHP() - round(self.getDamage() * 1.5))
                        sleep(.5)
                        self.didAction(True)
                    else:
                        print("Not enough mana!")
                        sleep(1)
                elif ans == 3:
                    # Spell 3: class utility/attack, 20 MP.
                    if self.getMP() >= 20:
                        self.setMP(self.getMP() - 20)
                        if self.getProf() == "Warrior":
                            intro.slow_type(["\n\tReflecting on your years of battle, your posture stiffens.",
                                             "\n\tYou brace yourself for incoming attacks."])
                            self.addBuff("Brace", 3)
                        elif self.getProf() == "Mage":
                            damage = randint(15, 25)
                            intro.slow_type(["\n\tThe cold vapor in the air around you begins to crystallize...",
                                             "\n\tIce materializes around you, barraging{} {} for {} damage."
                                             .format(cond[0], name[0], damage)])
                            E.setHP(E.getHP() - damage)
                            self.addBuff("Freeze", 2)
                        elif self.getProf() == "Thief":
                            intro.slow_type(["\n\tThe lines between your body and the light begin to fade...",
                                             "\n\tYou become seemingly invisible amongst the darkness."])
                            self.addBuff("Shadow", 3)
                        sleep(.5)
                        self.didAction(True)
                    else:
                        print("Not enough mana!")
                        sleep(1)
                elif ans == 4:
                    # Spell 4: class ultimate, 25 MP.
                    if self.getMP() >= 25:
                        self.setMP(self.getMP() - 25)
                        if self.getProf() == "Warrior":
                            intro.slow_type(["\n\tYour experience tells you that{} {} will soon expose itself.".format(cond[0], name[0]),
                                             "\n\tYou assume a defensive stance, preparing to counterattack."])
                            self.addBuff("Counter", 10)
                        elif self.getProf() == "Mage":
                            # NOTE(review): the text promises damage reduction,
                            # yet the code damages the enemy, and the .format
                            # call has no placeholders to fill — this looks
                            # like a copy-paste remnant of the ans == 3 Mage
                            # branch; confirm intended behavior.
                            damage = randint(15, 25)
                            intro.slow_type(["\n\tYou conjure a slowly fading protective bubble around yourself...",
                                             "\n\tIncoming damage is reduced for the next three turns."
                                             .format(cond[0], name[0], damage)])
                            E.setHP(E.getHP() - damage)
                            self.addBuff("Bubble", 4)
                        elif self.getProf() == "Thief":
                            intro.slow_type(["\n\tYou coat your equipped weapon with a deadly poison...",
                                             "\n\tYour attacks become even more efficient and deadly."])
                            self.addBuff("Poison", 3)
                        sleep(.5)
                        self.didAction(True)
                    else:
                        print("Not enough mana!")
                        sleep(1)
                finished = True
            except ValueError:
                print("Invalid input.")
def checkInv(self):
print("\n\nSelect a weapon to use/equip:\n\n\tCurrently equipped weapon:")
print("\n\t\t" + str(self.__inv[0]) + "\n")
if len(self.armor) > 0:
print("\tCurrently equipped shield:")
print("\n\t\t" + str(self.armor[0]) + "\n")
for i in range(len(self.__inv[1:])):
print("\t{}) {}".format(i + 1, self.__inv[i + 1]))
print("\nD: Discard Item\nU: Unequip Offhand\n0) Return to previous menu")
finished = False
while not finished:
try:
# ans = input("\t >> ")
ans = getch().decode()
if ans in "du0123456789":
if ans == "0":
break
elif ans == "d":
intro.slow_type(["\t\tPress number of the item to discard."])
print("\t (This cannot be undone, 0 to return to previous menu.)")
tempAns = getch().decode()
if int(tempAns) != 0 and int(tempAns) <= len(self.__inv):
self.__inv.pop(int(tempAns))
elif ans == "u":
if len(self.__inv) <= 8:
self.addItem(self.armor.pop(0))
elif "Potion" in self.__inv[int(ans)].getName():
self.__inv[int(ans)].usePotion(self)
temp = ["HP" if "Health" in self.__inv[int(ans)].getName() else "MP"]
intro.slow_type(["\tYou drank a {}, restoring {} {}.".format(
self.__inv[int(ans)].getName().lower(), self.__inv[int(ans)].getPower(), temp[0])])
self.__inv.pop(int(ans))
sleep(.5)
self.didAction(True)
elif "Great" in self.__inv[int(ans)].getName() and len(self.armor) > 0:
if len(self.__inv) <= 8:
# Add the armor | |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
from knack.help_files import helps
helps['datamigration'] = '''
type: group
short-summary: Manage Data Migration
'''
helps['datamigration sql-managed-instance'] = """
type: group
short-summary: Manage database migrations to SQL Managed Instance.
"""
helps['datamigration sql-managed-instance show'] = """
type: command
short-summary: "Retrieve the specified database migration for a given SQL Managed Instance."
examples:
- name: Get Database Migration resource.
text: |-
az datamigration sql-managed-instance show --managed-instance-name "managedInstance1" --resource-group \
"testrg" --target-db-name "db1"
"""
helps['datamigration sql-managed-instance create'] = """
type: command
short-summary: "Create a new database migration to a given SQL Managed Instance."
parameters:
- name: --source-sql-connection
short-summary: "Source SQL Server connection details."
long-summary: |
Usage: --source-sql-connection data-source=XX authentication=XX user-name=XX password=XX \
encrypt-connection=XX trust-server-certificate=XX
data-source: Data source.
authentication: Authentication type.
user-name: User name to connect to source SQL.
password: Password to connect to source SQL.
encrypt-connection: Whether to encrypt connection or not.
trust-server-certificate: Whether to trust server certificate or not.
- name: --offline-configuration
short-summary: "Offline configuration."
long-summary: |
Usage: --offline-configuration offline=XX last-backup-name=XX
offline: Offline migration
last-backup-name: Last backup name for offline migration. This is optional for migrations from file share. \
If it is not provided, then the service will determine the last backup file name based on latest backup files present \
in file share.
- name: --target-location
short-summary: "Target location for copying backups."
long-summary: |
Usage: --target-location storage-account-resource-id=XX account-key=XX
storage-account-resource-id: Resource Id of the storage account copying backups.
account-key: Storage Account Key.
examples:
- name: Create or Update Database Migration resource with Maximum parameters.
text: |-
az datamigration sql-managed-instance create --managed-instance-name "managedInstance1" \
--source-location "{\\"fileShare\\":{\\"path\\":\\"C:\\\\\\\\aaa\\\\\\\\bbb\\\\\\\\ccc\\",\\"password\\":\\"placeholder\
\\",\\"username\\":\\"name\\"}}" --target-location account-key="abcd" storage-account-resource-id="account.database.win\
dows.net" --migration-service "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg/providers/Micr\
osoft.DataMigration/sqlMigrationServices/testagent" --offline-configuration last-backup-name="last_backup_file_name" \
offline=true --scope "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg/providers/Microsoft.Sql\
/managedInstances/instance" --source-database-name "aaa" --source-sql-connection authentication="WindowsAuthentication"\
data-source="aaa" encrypt-connection=true password="<PASSWORD>" trust-server-certificate=true user-name="bbb" \
--resource-group "testrg" --target-db-name "db1"
- name: Create or Update Database Migration resource with Minimum parameters.
text: |-
az datamigration sql-managed-instance create --managed-instance-name "managedInstance1" \
--source-location "{\\"fileShare\\":{\\"path\\":\\"C:\\\\\\\\aaa\\\\\\\\bbb\\\\\\\\ccc\\",\\"password\\":\\"placeholder\
\\",\\"username\\":\\"name\\"}}" --target-location account-key="abcd" storage-account-resource-id="account.database.win\
dows.net" --migration-service "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg/providers/Micr\
osoft.DataMigration/sqlMigrationServices/testagent" --offline-configuration last-backup-name="last_backup_file_name" \
offline=true --scope "/subscriptions/00000000-1111-2222-3333-444444444444/resourceGroups/testrg/providers/Microsoft.Sql\
/managedInstances/instance" --source-database-name "aaa" --source-sql-connection authentication="WindowsAuthentication"\
data-source="aaa" encrypt-connection=true password="<PASSWORD>" trust-server-certificate=true user-name="bbb" \
--resource-group "testrg" --target-db-name "db1"
"""
helps['datamigration sql-managed-instance cancel'] = """
type: command
short-summary: "Stop in-progress database migration to SQL Managed Instance."
examples:
- name: Stop ongoing migration for the database.
text: |-
az datamigration sql-managed-instance cancel --managed-instance-name "managedInstance1" \
--migration-operation-id "4124fe90-d1b6-4b50-b4d9-46d02381f59a" --resource-group "testrg" --target-db-name "db1"
"""
helps['datamigration sql-managed-instance cutover'] = """
type: command
short-summary: "Initiate cutover for in-progress online database migration to SQL Managed Instance."
examples:
- name: Cutover online migration operation for the database.
text: |-
az datamigration sql-managed-instance cutover --managed-instance-name "managedInstance1" \
--migration-operation-id "4124fe90-d1b6-4b50-b4d9-46d02381f59a" --resource-group "testrg" --target-db-name "db1"
"""
helps['datamigration sql-managed-instance wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the datamigration sql-managed-instance is \
met.
examples:
- name: Pause executing next line of CLI script until the datamigration sql-managed-instance is successfully \
created.
text: |-
az datamigration sql-managed-instance wait --managed-instance-name "managedInstance1" --resource-group \
"testrg" --target-db-name "db1" --created
"""
helps['datamigration sql-vm'] = """
type: group
short-summary: Manage database migrations to SQL VM.
"""
helps['datamigration sql-vm show'] = """
type: command
short-summary: "Retrieve the specified database migration for a given SQL VM."
examples:
- name: Get Database Migration resource.
text: |-
az datamigration sql-vm show --resource-group "testrg" --sql-vm-name "testvm" --target-db-name "db1"
"""
helps['datamigration sql-vm create'] = """
type: command
short-summary: "Create a new database migration to a given SQL VM."
parameters:
- name: --source-sql-connection
short-summary: "Source SQL Server connection details."
long-summary: |
Usage: --source-sql-connection data-source=XX authentication=XX user-name=XX password=XX \
encrypt-connection=XX trust-server-certificate=XX
data-source: Data source.
authentication: Authentication type.
user-name: User name to connect to source SQL.
password: Password to connect to source SQL.
encrypt-connection: Whether to encrypt connection or not.
trust-server-certificate: Whether to trust server certificate or not.
- name: --offline-configuration
short-summary: "Offline configuration."
long-summary: |
Usage: --offline-configuration offline=XX last-backup-name=XX
offline: Offline migration
last-backup-name: Last backup name for offline migration. This is optional for migrations from file share. \
If it is not provided, then the service will determine the last backup file name based on latest backup files present \
in file share.
- name: --target-location
short-summary: "Target location for copying backups."
long-summary: |
Usage: --target-location storage-account-resource-id=XX account-key=XX
storage-account-resource-id: Resource Id of the storage account copying backups.
account-key: Storage Account Key.
examples:
- name: Create or Update Database Migration resource with Maximum parameters.
text: |-
az datamigration sql-vm create --source-location "{\\"fileShare\\":{\\"path\\":\\"C:\\\\\\\\aaa\\\\\\\\b\
bb\\\\\\\\ccc\\",\\"password\\":\\"<PASSWORD>\\",\\"username\\":\\"name\\"}}" --target-location account-key="abcd" \
storage-account-resource-id="account.database.windows.net" --migration-service "/subscriptions/00000000-1111-2222-3333-\
444444444444/resourceGroups/testrg/providers/Microsoft.DataMigration/sqlMigrationServices/testagent" \
--offline-configuration last-backup-name="last_backup_file_name" offline=true --scope "/subscriptions/00000000-1111-222\
2-3333-444444444444/resourceGroups/testrg/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/testvm" \
--source-database-name "aaa" --source-sql-connection authentication="WindowsAuthentication" data-source="aaa" \
encrypt-connection=true password="<PASSWORD>" trust-server-certificate=true user-name="bbb" --resource-group "testrg" \
--sql-vm-name "testvm" --target-db-name "db1"
- name: Create or Update Database Migration resource with Minimum parameters.
text: |-
az datamigration sql-vm create --source-location "{\\"fileShare\\":{\\"path\\":\\"C:\\\\\\\\aaa\\\\\\\\b\
bb\\\\\\\\ccc\\",\\"password\\":\\"<PASSWORD>\\",\\"username\\":\\"name\\"}}" --target-location account-key="abcd" \
storage-account-resource-id="account.database.windows.net" --migration-service "/subscriptions/00000000-1111-2222-3333-\
444444444444/resourceGroups/testrg/providers/Microsoft.DataMigration/sqlMigrationServices/testagent" \
--offline-configuration last-backup-name="last_backup_file_name" offline=true --scope "/subscriptions/00000000-1111-222\
2-3333-444444444444/resourceGroups/testrg/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/testvm" \
--source-database-name "aaa" --source-sql-connection authentication="WindowsAuthentication" data-source="aaa" \
encrypt-connection=true password="<PASSWORD>" trust-server-certificate=true user-name="bbb" --resource-group "testrg" \
--sql-vm-name "testvm" --target-db-name "db1"
"""
helps['datamigration sql-vm cancel'] = """
type: command
short-summary: "Stop in-progress database migration to SQL VM."
examples:
- name: Stop ongoing migration for the database.
text: |-
az datamigration sql-vm cancel --migration-operation-id "4124fe90-d1b6-4b50-b4d9-46d02381f59a" \
--resource-group "testrg" --sql-vm-name "testvm" --target-db-name "db1"
"""
helps['datamigration sql-vm cutover'] = """
type: command
short-summary: "Initiate cutover for in-progress online database migration to SQL VM."
examples:
- name: Cutover online migration operation for the database.
text: |-
az datamigration sql-vm cutover --migration-operation-id "4124fe90-d1b6-4b50-b4d9-46d02381f59a" \
--resource-group "testrg" --sql-vm-name "testvm" --target-db-name "db1"
"""
helps['datamigration sql-vm wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of the datamigration sql-vm is met.
examples:
- name: Pause executing next line of CLI script until the datamigration sql-vm is successfully created.
text: |-
az datamigration sql-vm wait --resource-group "testrg" --sql-vm-name "testvm" --target-db-name "db1" \
--created
"""
helps['datamigration sql-service'] = """
type: group
short-summary: Manage Database Migration Service.
"""
helps['datamigration sql-service list'] = """
type: command
short-summary: "Retrieve all Database Migration Services in the resource group. And Retrieve all Database \
Migration Services in the subscription."
examples:
- name: Get Migration Services in the Resource Group.
text: |-
az datamigration sql-service list --resource-group "testrg"
- name: Get Services in the Subscriptions.
text: |-
az datamigration sql-service list
"""
helps['datamigration sql-service show'] = """
type: command
short-summary: "Retrieve the Database Migration Service."
examples:
- name: Get Migration Service.
text: |-
az datamigration sql-service show --resource-group "testrg" --name "service1"
"""
helps['datamigration sql-service create'] = """
type: command
short-summary: "Create Database Migration Service."
examples:
- name: Create or Update SQL Migration Service with maximum parameters.
text: |-
az datamigration sql-service create --location "northeurope" --resource-group "testrg" --name \
"testagent"
- name: Create or Update SQL Migration Service with minimum parameters.
text: |-
az datamigration sql-service create --location "northeurope" --resource-group "testrg" --name \
"testagent"
"""
helps['datamigration sql-service update'] = """
type: command
short-summary: "Update Database Migration Service."
examples:
- name: Update SQL Migration Service.
text: |-
az datamigration sql-service update --tags mytag="myval" --resource-group "testrg" --name "testagent"
"""
helps['datamigration sql-service delete'] = """
type: command
short-summary: "Delete Database Migration Service."
examples:
- name: Delete SQL Migration Service.
text: |-
az datamigration sql-service delete --resource-group "testrg" --name "service1"
"""
helps['datamigration sql-service delete-node'] = """
type: command
short-summary: "Delete the integration runtime node."
examples:
- name: Delete the integration runtime node.
text: |-
az datamigration sql-service delete-node --ir-name "IRName" --node-name "nodeName" --resource-group \
"testrg" --name "service1"
"""
helps['datamigration sql-service list-auth-key'] = """
type: command
short-summary: "Retrieve the List of Authentication Keys for Self Hosted Integration Runtime."
examples:
- name: Retrieve the List of Authentication Keys.
text: |-
az datamigration sql-service list-auth-key --resource-group "testrg" --name "service1"
"""
helps['datamigration sql-service list-integration-runtime-metric'] = """
type: command
short-summary: "Retrieve the registered Integration Runtine nodes and their monitoring data for a given Database \
Migration Service."
examples:
- name: Retrieve the Monitoring Data.
text: |-
az datamigration sql-service list-integration-runtime-metric --resource-group "testrg" --name \
"service1"
"""
helps['datamigration sql-service list-migration'] = """
type: command
short-summary: "Retrieve the List of database migrations attached to the service."
examples:
- name: List database migrations attached to the service.
text: |-
az datamigration sql-service list-migration --resource-group "testrg" --name "service1"
"""
helps['datamigration sql-service regenerate-auth-key'] = """
type: command
short-summary: "Regenerate a new set of | |
is None:
umsg ('Segmentation map not opened')
return
if len(smod.regions) <= 1:
umsg ('%s has %d regions' % (smod.name, len(smod.regions)))
return
csyms = None
if self.useSymmetry.get() :
print "Using symmetry..."
self.DetectSym ()
csyms = [self.scenters, self.syms]
regions = None
if 0 and self.groupByConsOnlyVis.get() :
regions = smod.visible_regions()
if len(regions) == 0 :
umsg ("Grouping by connections: no visible regions found or they are from a different model" )
return
umsg ("Grouping by connections: applying only to %d regions visible" % len(regions) )
if 1 :
from chimera import tasks, CancelOperation
task = tasks.Task('Group by connections', modal = True)
try :
newRegs, removedRegs = smod.group_connected_n ( 1, 1, regions, csyms, task )
#self.RegsDispUpdate ( task ) # Display region surfaces
except CancelOperation :
umsg('Cancelled group by connections')
return
finally:
task.finished()
else :
newRegs, removedRegs = smod.group_connected_n ( 1, 1, regions, csyms, task )
for r in newRegs : r.make_surface (None, None, smod.regions_scale)
print " - removig %d surfs" % len(removedRegs)
for r in removedRegs : r.remove_surface()
self.ReportRegionCount(smod)
if smod.adj_graph :
graph.create_graph ( smod, smod.graph_links )
umsg ( "Got %d regions after grouping by connections" % (len(smod.regions)) )
    def SmoothAndGroupOneStep ( self ) :
        # Perform one smooth-and-group pass on the current segmentation,
        # increasing the smoothing std-dev by the user step until new
        # groups form (up to 10 attempts), then refresh the display.
        smod = self.CurrentSegmentation()
        if smod is None:
            return
        if smod.volume_data() is None:
            umsg ('Segmentation map not opened')
            return
        if len(smod.regions) <= 1:
            umsg ('%s has %d regions' % (smod.name, len(smod.regions)))
            return
        try :
            step = float ( self.stepSize.get() )
        except :
            umsg ( "Enter <float> for step size" )
            return
        sdev = step + smod.smoothing_level
        csyms = None
        if self.useSymmetry.get() :
            print "Using symmetry..."
            self.DetectSym ()
            csyms = [self.scenters, self.syms]
        umsg ( "Smoothing and grouping, standard deviation %.3g voxels" % sdev)
        from chimera import tasks, CancelOperation
        task = tasks.Task('Smooth and group', modal = True)
        try:
            for i in range ( 10 ) :
                new_regs = len(smod.smooth_and_group(1, sdev, 1, csyms, task))
                # if symmetry is being used we should stop after one step
                # since symmetry can block regions from joining indefinitely
                if new_regs > 0 : break
                umsg ('No new groups smoothing %.3g voxels' % sdev)
                sdev += step
            self.RegsDispUpdate ( task ) # Display region surfaces
        except CancelOperation:
            umsg('Cancelled smooth and group')
            return
        finally:
            task.finished()
        self.ReportRegionCount(smod)
        if smod.adj_graph :
            graph.create_graph ( smod, smod.graph_links )
        umsg ( "Got %d regions after smoothing %.3g voxels." %
               (len(smod.regions), sdev) )
    def Overlapping ( self ) :
        # Select the regions whose grid points overlap the current atom
        # selection (or a single selected model's contour points) by more
        # than the user-set percentage.
        dmap = self.SegmentationMap()
        if dmap == None :
            umsg ( "No map selected" )
            return
        smod = self.CurrentSegmentation()
        if smod == None :
            umsg ( "No segmentation selected" )
            return
        if len(smod.regions) == 0 :
            umsg ( "No regions found in %s" % smod.name )
            return
        selatoms = chimera.selection.currentAtoms()
        spoints = None
        if len ( selatoms ) > 0 :
            spoints = _multiscale.get_atom_coordinates ( selatoms, transformed = True )
        else :
            # No atoms selected: fall back to a single selected model's
            # contour points, transformed into world coordinates.
            mods = chimera.selection._currentSelection.models()
            if len(mods) == 1 :
                mod = mods[0]
                print "Using for selection:", mod.name
                import axes
                spoints, weights = axes.map_points ( mod, True )
                print " - map - got %d points in contour" % len (spoints)
                from _contour import affine_transform_vertices as transform_vertices
                transform_vertices( spoints, Matrix.xform_matrix( mod.openState.xform ) )
            else :
                umsg ("0 or more than 1 model selected")
                return
        simap = self.PointIndexesInMap ( spoints, dmap )
        umsg ( "Overlapping %d atoms with %d regions" % (
            len(selatoms), len(smod.regions) ) )
        ovRatio = float ( self.overlappingPercentage.get() ) / 100.0
        print " - overlap ratio: %f" % ovRatio
        oregs = []
        for ri, r in enumerate ( smod.regions ) :
            # Count how many of the region's grid points fall inside the
            # selection's index map; EAFP lookup, missing keys skipped.
            ipoints = r.points()
            noverlap = 0
            for i,j,k in ipoints :
                try : simap[i][j][k]
                except: continue
                noverlap += 1
            ov = float ( noverlap ) / float ( len(ipoints) )
            if ov > ovRatio : oregs.append ( r )
            #if noverlap > 0 : oregs.append ( r )
        regions.select_regions ( oregs )
        umsg ( "Selected %d regions" % ( len(oregs) ) )
    def GroupUsingFits ( self ) :
        # Build per-fitted-molecule index maps (chain maps) over the
        # segmentation map, giving each a random surface color, as a
        # precursor to grouping regions by fitted structures.
        dmap = self.SegmentationMap()
        if dmap == None : print "Map %s not open" % self.dmap.get(); return
        smod = self.CurrentSegmentation()
        if smod == None : return
        if len(smod.regions) == 0 : print "No regions in", smod.name; return
        try : dmap.fitted_mols
        except : dmap.fitted_mols = []
        if len(dmap.fitted_mols) == 0 : print "No fits found for", dmap.name; return
        print "Grouping %d regions by overlap to %d fitted structures" % (
            len(smod.regions), len(dmap.fitted_mols) )
        dmap.chain_maps = []
        for mol in dmap.fitted_mols :
            # Cache each fit map's index map lazily (EAFP).
            try : mol.fmap.imap
            except : mol.fmap.imap = self.MapIndexesInMap ( dmap, mol.fmap )
            from random import random as rand
            mol.fmap.surf_color = ( rand(), rand(), rand(), 1 )
            dmap.chain_maps.append ( mol.fmap )
        # self.SegAccuracy ( "_fits_acc", True )
    def RegSurfsShowNone ( self ) :
        # Hide every region surface in the current segmentation.
        smod = self.CurrentSegmentation()
        if smod == None : return
        for reg in smod.regions :
            if reg.surface_piece:
                reg.surface_piece.display = False
    def RegSurfsShowAll ( self ) :
        # Rebuild/show all region surfaces under a cancellable task.
        smod = self.CurrentSegmentation()
        if smod == None : return
        from chimera import tasks, CancelOperation
        task = tasks.Task('Showing all regions', modal = True)
        try:
            self.RegsDispUpdate(task)
        except CancelOperation:
            pass
        finally:
            task.finished()
    def RegSurfsShowOnlySelected ( self ) :
        # Show only the currently selected regions, hiding the rest.
        smod = self.CurrentSegmentation()
        if smod == None : return
        regions.show_only_regions(smod.selected_regions())
    def RegSurfsHide ( self ) :
        # Hide the surfaces of the selected regions only.
        smod = self.CurrentSegmentation()
        if smod == None : return
        sregs = smod.selected_regions()
        #if len(sregs) == 0 : sregs = smod.all_regions()
        for r in sregs : r.hide_surface()
    def RegSurfsShow ( self ) :
        # Show the surfaces of the selected regions only.
        smod = self.CurrentSegmentation()
        if smod == None : return
        sregs = smod.selected_regions()
        #if len(sregs) == 0 : sregs = smod.all_regions()
        for r in sregs : r.show_surface()
    def RegSurfsShowAdjacent ( self ) :
        # Show the surfaces of every region touching a selected region.
        smod = self.CurrentSegmentation()
        if smod == None : return
        sregs = smod.selected_regions()
        if len(sregs) == 0 :
            return
        cr = set()
        for r in sregs :
            cr.update(r.contacting_regions())
        umsg ( "Region has %d adjacent regions" % len(cr) )
        for r in cr :
            r.show_surface()
def RegSurfsShowNotGrouped ( self ) :
print "Showing not-grouped regions..."
smod = self.CurrentSegmentation()
if smod == None : return
for reg in smod.regions :
if len(reg.cregs) == 0 :
if reg.surface_piece:
reg.surface_piece.display = True
else :
if reg.surface_piece:
reg.surface_piece.display = False
    def SelectGrouped ( self ) :
        # Replace the Chimera selection with surfaces of grouped regions
        # (regions that have child regions).
        print "Selecting grouped regions..."
        smod = self.CurrentSegmentation()
        if smod == None : return
        surfs = []
        for reg in smod.regions :
            if len(reg.cregs) > 0 :
                if reg.surface_piece:
                    surfs.append ( reg.surface_piece )
        chimera.selection.clearCurrent ()
        chimera.selection.addCurrent ( surfs )
    def SelectVisible ( self ) :
        # Replace the Chimera selection with all currently displayed
        # region surfaces.
        print "Selecting visible regions..."
        smod = self.CurrentSegmentation()
        if smod == None : return
        surfs = []
        for reg in smod.regions :
            if reg.surface_piece and reg.surface_piece.display:
                surfs.append ( reg.surface_piece )
        chimera.selection.clearCurrent ()
        chimera.selection.addCurrent ( surfs )
def SelectNotGrouped(self):
    """Select the surface pieces of all regions that are not in any group.

    Bug fix: the status message previously read "Showing not-grouped
    regions..." (copied from RegSurfsShowNotGrouped) even though this
    method selects rather than shows.
    """
    print("Selecting not-grouped regions...")
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    surfs = [reg.surface_piece for reg in seg.regions
             if len(reg.cregs) == 0 and reg.surface_piece]
    chimera.selection.clearCurrent()
    chimera.selection.addCurrent(surfs)
def RegSurfsShowGrouped(self):
    """Show only the regions that belong to a group."""
    print("Showing grouped regions...")
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    grouped = seg.grouped_regions()
    if not grouped:
        umsg("No grouped regions")
        return
    umsg("Showing %d grouped regions" % len(grouped))
    regions.show_only_regions(grouped)
def RegSurfsTransparent(self):
    """Make selected regions' surfaces translucent (all regions if none selected)."""
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    # Fall back to every region when nothing is selected.
    targets = seg.selected_regions() or seg.all_regions()
    for region in targets:
        if not region.has_surface():
            continue
        piece = region.surface_piece
        red, grn, blu = piece.color[:3]
        piece.color = (red, grn, blu, REG_OPACITY)
        piece.displayStyle = piece.Solid
def RegSurfsOpaque(self):
    """Make selected regions' surfaces fully opaque (all regions if none selected)."""
    seg = self.CurrentSegmentation()
    if seg is None:
        return
    # Fall back to every region when nothing is selected.
    targets = seg.selected_regions() or seg.all_regions()
    for region in targets:
        if not region.has_surface():
            continue
        piece = region.surface_piece
        red, grn, blu = piece.color[:3]
        piece.color = (red, grn, blu, 1.0)
        piece.displayStyle = piece.Solid
def RegSurfsMesh ( self ) :
smod = self.CurrentSegmentation()
if smod == None : return
sregs = smod.selected_regions()
if len(sregs) == 0 : sregs = smod.all_regions()
for r in sregs :
| |
from collections import OrderedDict
import ujson
from django.conf import settings
from utils import solr
from utils.camel_case import render, parser
from .uris import APIUris
from .simpleserializers import SimpleSerializer, SimpleSerializerWithLookups
import logging
# Module-level logger for ad-hoc debugging in the custom serializers.
logger = logging.getLogger('sierra.custom')
class APIUserSerializer(SimpleSerializer):
    """Serializes API user records, including their permissions blob."""
    # Ordered so serialized output fields appear in a stable order.
    fields = OrderedDict((fname, {'type': ftype}) for fname, ftype in (
        ('username', 'str'),
        ('first_name', 'str'),
        ('last_name', 'str'),
        ('email', 'str'),
        ('permissions', 'compound'),
    ))

    def render_field_name(self, field_name):
        """Convert a snake_case field name to camelCase for API output.

        Names starting with an underscore (e.g. ``_links``) are returned
        unchanged. Fix: uses startswith(), which is safe for an empty
        string, instead of field_name[0] (IndexError on '').
        """
        if field_name.startswith('_'):
            return field_name
        return render.underscoreToCamel(field_name)

    def process_permissions(self, value, obj):
        """Decode the JSON permissions of obj.apiuser, camelCasing keys.

        Fix: iterate with items() (works on Python 2 and 3) instead of
        the Python-2-only iteritems().
        """
        permissions = ujson.decode(obj.apiuser.permissions)
        return {self.render_field_name(key): val
                for key, val in permissions.items()}
class ItemSerializer(SimpleSerializerWithLookups):
    """Serializes item records, resolving coded fields via Solr lookups."""
    # Field order here fixes the order of keys in serialized output.
    fields = OrderedDict((fname, {'type': ftype}) for fname, ftype in (
        ('_links', 'compound'),
        ('id', 'int'),
        ('parent_bib_record_number', 'int'),
        ('parent_bib_title', 'str'),
        ('parent_bib_main_author', 'str'),
        ('parent_bib_publication_year', 'str'),
        ('record_number', 'str'),
        ('call_number', 'str'),
        ('call_number_type', 'str'),
        ('call_number_sort', 'str'),
        ('call_number_search', 'str'),
        ('volume', 'str'),
        ('volume_sort', 'str'),
        ('copy_number', 'int'),
        ('barcode', 'str'),
        ('long_messages', 'str'),
        ('internal_notes', 'str'),
        ('public_notes', 'str'),
        ('local_code1', 'int'),
        ('number_of_renewals', 'int'),
        ('item_type_code', 'str'),
        ('item_type', 'str'),
        ('price', 'str'),
        ('internal_use_count', 'int'),
        ('copy_use_count', 'int'),
        ('iuse3_count', 'int'),
        ('total_checkout_count', 'int'),
        ('total_renewal_count', 'int'),
        ('year_to_date_checkout_count', 'int'),
        ('last_year_to_date_checkout_count', 'int'),
        ('location_code', 'str'),
        ('location', 'str'),
        ('status_code', 'str'),
        ('status', 'str'),
        ('due_date', 'datetime'),
        ('checkout_date', 'datetime'),
        ('last_checkin_date', 'datetime'),
        ('overdue_date', 'datetime'),
        ('recall_date', 'datetime'),
        ('record_creation_date', 'datetime'),
        ('record_last_updated_date', 'datetime'),
        # NOTE(review): typed 'datetime' in the original although the name
        # suggests an integer revision number -- preserved as-is; confirm.
        ('record_revision_number', 'datetime'),
        ('suppressed', 'bool'),
    ))

    def render_field_name(self, field_name):
        """Convert a snake_case field name to camelCase for API output.

        Underscore-prefixed names (``_links``) are kept unchanged. Fix:
        startswith() is safe for the empty string, unlike field_name[0].
        """
        if field_name.startswith('_'):
            return field_name
        return render.underscoreToCamel(field_name)

    def restore_field_name(self, field_name):
        """Convert a camelCase API field name back to snake_case."""
        return parser.camel_to_underscore(field_name)

    def cache_all_lookups(self):
        """Prefetch Location/ItemStatus/Itype code->label maps from Solr.

        Fix: replaces the nested try/except KeyError cascade with explicit
        key checks -- unknown record types and records without a code are
        skipped, and a missing label is stored as None (same outcomes as
        before, but a missing 'code' can no longer raise).
        """
        types = ['Location', 'ItemStatus', 'Itype']
        qs = solr.Queryset(page_by=1000).filter(type__in=types)
        qs = qs.only('type', 'code', 'label')
        lookups = {'Location': {}, 'ItemStatus': {}, 'Itype': {}}
        for result in qs:
            type_map = lookups.get(result.get('type'))
            if type_map is None or 'code' not in result:
                continue
            type_map[result['code']] = result.get('label')
        self.cache_lookup('location', lookups['Location'])
        self.cache_lookup('status', lookups['ItemStatus'])
        self.cache_lookup('item_type', lookups['Itype'])

    def process_location(self, value, obj):
        """Return a location's label based on the obj's location_code."""
        return self.get_lookup_value('location',
                                     getattr(obj, 'location_code', None))

    def process_status(self, value, obj):
        """Return a status label based on the status_code.

        A '-' status with a due date present is reported as CHECKED OUT.
        """
        value = getattr(obj, 'status_code', None)
        if value == '-' and getattr(obj, 'due_date', None) is not None:
            return 'CHECKED OUT'
        return self.get_lookup_value('status', value)

    def process_item_type(self, value, obj):
        """Return the item_type label based on item_type_code."""
        return self.get_lookup_value('item_type',
                                     getattr(obj, 'item_type_code', None))

    def process__links(self, value, obj):
        """Generate the ``_links`` block for an item.

        Uses APIUris instead of Django's reverse() because reverse URL
        lookups get very slow with many objects; APIUris is a faster
        hand-rolled equivalent (see api.urls).
        """
        req = self.context.get('request', None)
        view = self.context.get('view', None)
        ret = OrderedDict()
        if req is None or view is None:
            return ret
        ret['self'] = {
            'href': APIUris.get_uri('items-detail', req=req, absolute=True,
                                    v=view.api_version,
                                    id=getattr(obj, 'id', None))
        }
        ret['parentBib'] = {
            'href': APIUris.get_uri('bibs-detail', req=req, absolute=True,
                                    v=view.api_version,
                                    id=getattr(obj, 'parent_bib_id', None))
        }
        ret['location'] = {
            'href': APIUris.get_uri('locations-detail', req=req,
                                    absolute=True, v=view.api_version,
                                    code=getattr(obj, 'location_code', None))
        }
        ret['itemtype'] = {
            'href': APIUris.get_uri('itemtypes-detail', req=req,
                                    absolute=True, v=view.api_version,
                                    code=getattr(obj, 'item_type_code', None))
        }
        ret['itemstatus'] = {
            'href': APIUris.get_uri('itemstatuses-detail', req=req,
                                    absolute=True, v=view.api_version,
                                    code=getattr(obj, 'status_code', None))
        }
        return ret
class BibSerializer(SimpleSerializer):
    """Serializes bibliographic (bib) records for the API."""
    # Field order here fixes the order of keys in serialized output.
    fields = OrderedDict((fname, {'type': ftype}) for fname, ftype in (
        ('_links', 'compound'),
        ('id', 'int'),
        ('record_number', 'str'),
        ('timestamp', 'datetime'),
        ('suppressed', 'bool'),
        ('language', 'str'),
        ('format', 'str'),
        ('material_type', 'str'),
        ('main_call_number', 'str'),
        ('main_call_number_sort', 'str'),
        ('loc_call_numbers', 'str'),
        ('dewey_call_numbers', 'str'),
        ('other_call_numbers', 'str'),
        ('sudoc_numbers', 'str'),
        ('isbn_numbers', 'str'),
        ('issn_numbers', 'str'),
        ('lccn_numbers', 'str'),
        ('oclc_numbers', 'str'),
        ('full_title', 'str'),
        ('main_title', 'str'),
        ('subtitle', 'str'),
        ('statement_of_responsibility', 'str'),
        ('uniform_title', 'str'),
        ('alternate_titles', 'str'),
        ('related_titles', 'str'),
        ('series', 'str'),
        ('series_exact', 'str'),
        ('creator', 'str'),
        ('contributors', 'str'),
        ('series_creators', 'str'),
        ('people', 'str'),
        ('corporations', 'str'),
        ('meetings', 'str'),
        ('imprints', 'str'),
        ('publication_country', 'str'),
        ('publication_places', 'str'),
        ('publishers', 'str'),
        ('publication_dates', 'str'),
        ('full_subjects', 'str'),
        ('general_terms', 'str'),
        ('topic_terms', 'str'),
        ('genre_terms', 'str'),
        ('geographic_terms', 'str'),
        ('era_terms', 'str'),
        ('form_terms', 'str'),
        ('other_terms', 'str'),
        ('physical_characteristics', 'str'),
        ('toc_notes', 'str'),
        ('context_notes', 'str'),
        ('summary_notes', 'str'),
        ('urls', 'str'),
        ('url_labels', 'str'),
        ('people_facet', 'str'),
        ('corporations_facet', 'str'),
        ('meetings_facet', 'str'),
        ('topic_terms_facet', 'str'),
        ('general_terms_facet', 'str'),
        ('genre_terms_facet', 'str'),
        ('geographic_terms_facet', 'str'),
        ('era_terms_facet', 'str'),
        ('form_terms_facet', 'str'),
    ))

    def render_field_name(self, field_name):
        """Convert a snake_case field name to camelCase for API output.

        Underscore-prefixed names (``_links``) are kept unchanged. Fix:
        startswith() is safe for the empty string, unlike field_name[0].
        """
        if field_name.startswith('_'):
            return field_name
        return render.underscoreToCamel(field_name)

    def restore_field_name(self, field_name):
        """Convert a camelCase API field name back to snake_case."""
        return parser.camel_to_underscore(field_name)

    def process__links(self, value, obj):
        """Generate ``_links`` for a bib: self, marc, and attached items."""
        req = self.context.get('request', None)
        view = self.context.get('view', None)
        ret = OrderedDict()
        if req is None or view is None:
            return ret
        obj_id = getattr(obj, 'id', None)
        ret['self'] = {
            'href': APIUris.get_uri('bibs-detail', req=req, absolute=True,
                                    v=view.api_version, id=obj_id)
        }
        ret['marc'] = {
            'href': APIUris.get_uri('marc-detail', req=req, absolute=True,
                                    v=view.api_version, id=obj_id)
        }
        item_ids = getattr(obj, 'item_ids', None)
        if item_ids is not None:
            ret['items'] = [
                {'href': APIUris.get_uri('items-detail', req=req,
                                         absolute=True, v=view.api_version,
                                         id=item_id)}
                for item_id in item_ids
            ]
        return ret
class MarcSerializer(SimpleSerializer):
    """Serializes MARC records, embedding the full decoded JSON record."""
    fields = OrderedDict((fname, {'type': ftype}) for fname, ftype in (
        ('_links', 'compound'),
        ('id', 'int'),
        ('record_number', 'str'),
        ('timestamp', 'datetime'),
        ('record', 'compound'),
    ))

    def render_field_name(self, field_name):
        """Convert a snake_case field name to camelCase for API output.

        Underscore-prefixed names (``_links``) are kept unchanged. Fix:
        startswith() is safe for the empty string, unlike field_name[0].
        """
        if field_name.startswith('_'):
            return field_name
        return render.underscoreToCamel(field_name)

    def restore_field_name(self, field_name):
        """Convert a camelCase API field name back to snake_case."""
        return parser.camel_to_underscore(field_name)

    def process_record(self, value, obj):
        """Decode and return the stored JSON representation of the record."""
        return ujson.loads(obj.json)

    def process__links(self, value, obj):
        """Generate ``_links`` for a MARC record: self and its bib.

        NOTE(review): the bib link reuses the marc object's own id --
        presumably marc and bib records share ids; confirm against the
        URL configuration.
        """
        req = self.context.get('request', None)
        view = self.context.get('view', None)
        ret = OrderedDict()
        if req is None or view is None:
            return ret
        obj_id = getattr(obj, 'id', None)
        ret['self'] = {
            'href': APIUris.get_uri('marc-detail', req=req, absolute=True,
                                    v=view.api_version, id=obj_id)
        }
        ret['bib'] = {
            'href': APIUris.get_uri('bibs-detail', req=req, absolute=True,
                                    v=view.api_version, id=obj_id)
        }
        return ret
class EResourceSerializer(SimpleSerializerWithLookups):
    """Serializes electronic resource records for the API."""
    fields = OrderedDict((fname, {'type': ftype}) for fname, ftype in (
        ('_links', 'compound'),
        ('id', 'int'),
        ('record_number', 'str'),
        ('title', 'str'),
        ('alternate_titles', 'str'),
        ('eresource_type', 'str'),
        ('publisher', 'str'),
        ('subjects', 'str'),
        ('summary', 'str'),
        ('internal_notes', 'str'),
        ('public_notes', 'str'),
        ('alert', 'str'),
        ('holdings', 'str'),
        ('external_url', 'str'),
        ('record_creation_date', 'datetime'),
        ('record_last_updated_date', 'datetime'),
        # NOTE(review): typed 'datetime' in the original although the name
        # suggests an integer revision number -- preserved as-is; confirm.
        ('record_revision_number', 'datetime'),
        ('suppressed', 'bool'),
    ))

    def render_field_name(self, field_name):
        """Convert a snake_case field name to camelCase for API output.

        Underscore-prefixed names (``_links``) are kept unchanged. Fix:
        startswith() is safe for the empty string, unlike field_name[0].
        """
        if field_name.startswith('_'):
            return field_name
        return render.underscoreToCamel(field_name)

    def restore_field_name(self, field_name):
        """Convert a camelCase API field name back to snake_case."""
        return parser.camel_to_underscore(field_name)

    def process__links(self, value, obj):
        """Generate the ``_links`` block (self only) for an eresource."""
        req = self.context.get('request', None)
        view = self.context.get('view', None)
        ret = OrderedDict()
        if req is None or view is None:
            return ret
        ret['self'] = {
            'href': APIUris.get_uri('eresources-detail', req=req,
                                    absolute=True, v=view.api_version,
                                    id=getattr(obj, 'id', None))
        }
        return ret
class LocationSerializer(SimpleSerializer):
    """Serializes location code/label records for the API."""
    # Item records reference a location through this field; used to build
    # the filtered items-list link below.
    foreign_key_field = 'location_code'
    fields = OrderedDict((fname, {'type': ftype}) for fname, ftype in (
        ('_links', 'compound'),
        ('code', 'str'),
        ('label', 'str'),
    ))

    def render_field_name(self, field_name):
        """Convert a snake_case field name to camelCase for API output.

        Underscore-prefixed names (``_links``) are kept unchanged. Fix:
        startswith() is safe for the empty string, unlike field_name[0].
        """
        if field_name.startswith('_'):
            return field_name
        return render.underscoreToCamel(field_name)

    def restore_field_name(self, field_name):
        """Convert a camelCase API field name back to snake_case."""
        return parser.camel_to_underscore(field_name)

    def process__links(self, value, obj):
        """Generate ``_links``: self plus an items list filtered by code."""
        req = self.context.get('request', None)
        view = self.context.get('view', None)
        ret = OrderedDict()
        if req is None or view is None:
            return ret
        code = getattr(obj, 'code', None)
        fk = self.render_field_name(self.foreign_key_field)
        ret['self'] = {
            'href': APIUris.get_uri('locations-detail', req=req,
                                    absolute=True, v=view.api_version,
                                    code=code)
        }
        ret['items'] = {
            'href': '{}?{}={}'.format(
                APIUris.get_uri('items-list', req=req, absolute=True,
                                v=view.api_version), fk, code
            )
        }
        return ret
class ItemTypeSerializer(SimpleSerializer):
foreign_key_field = 'item_type_code'
fields = OrderedDict()
fields['_links'] = {'type': 'compound'}
fields['code'] = {'type': 'str'}
fields['label'] = {'type': 'str'}
def render_field_name(self, field_name):
ret_val = field_name
if field_name[0] != '_':
ret_val = render.underscoreToCamel(field_name)
return ret_val
def restore_field_name(self, field_name):
return parser.camel_to_underscore(field_name)
def process__links(self, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2012-2020 iSolver Software Solutions (C) 2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from __future__ import division, absolute_import, print_function
import os
import atexit
import numpy as np
from builtins import str
from builtins import object
from pkg_resources import parse_version
from ..server import DeviceEvent
from ..constants import EventConstants
from ..errors import ioHubError, printExceptionDetailsToStdErr, print2err
import tables
from tables import parameters, StringCol, UInt32Col, UInt16Col, NoSuchNodeError
# PyTables renamed its camelCase API to snake_case in 3.x. Resolve the
# correct attribute names once, so the rest of the module can call
# getattr(obj, name)(...) regardless of the installed version.
if parse_version(tables.__version__) < parse_version('3'):
    from tables import openFile as open_file
    create_table = "createTable"
    create_group = "createGroup"
    # Bug fix: this branch previously bound only ``f_get_child`` (no
    # leading underscore) while the 3.x branch and every call site use
    # ``_f_get_child``, so PyTables < 3 raised NameError at runtime.
    # The old name is kept for backward compatibility.
    f_get_child = "_f_getChild"
    _f_get_child = "_f_getChild"
else:
    from tables import open_file
    create_table = "create_table"
    create_group = "create_group"
    _f_get_child = "_f_get_child"
# Let PyTables choose thread counts automatically (None == core count).
parameters.MAX_NUMEXPR_THREADS = None
"""The maximum number of threads that PyTables should use internally in
Numexpr. If `None`, it is automatically set to the number of cores in
your machine. In general, it is a good idea to set this to the number of
cores in your machine or, when your machine has many of them (e.g. > 4),
perhaps one less than this. < <NAME> Note: These are 'not' GIL bound
threads and therefore actually improve performance > """
parameters.MAX_BLOSC_THREADS = None
"""The maximum number of threads that PyTables should use internally in
Blosc. If `None`, it is automatically set to the number of cores in
your machine. In general, it is a good idea to set this to the number of
cores in your machine or, when your machine has many of them (e.g. > 4),
perhaps one less than this. < <NAME> Note: These are 'not' GIL bound
threads and therefore actually improve performance > """
# Metadata stamped into every new DataStore HDF5 file (see
# DataStoreFile.buildOutTemplate).
DATA_FILE_TITLE = "ioHub DataStore - Experiment Data File."
FILE_VERSION = '0.9.1.1'
SCHEMA_AUTHORS = '<NAME>'
SCHEMA_MODIFIED_DATE = 'March 19th, 2021'
class DataStoreFile(object):
    """Manages the ioHub HDF5 (PyTables) data file.

    One instance wraps one .hdf5 file: it builds the group/table template
    on first use, maps ioHub event classes to PyTables tables, and appends
    experiment, session, condition-variable, and device-event rows as
    they arrive from the ioHub server.
    """

    def __init__(self, fileName, folderPath, fmode='a', iohub_settings=None):
        self.fileName = fileName
        self.folderPath = folderPath
        self.filePath = os.path.join(folderPath, fileName)
        # Single-session experiments overwrite any existing file.
        if iohub_settings.get('multiple_sessions', False) is False:
            fmode = 'w'
        self.settings = iohub_settings
        self.active_experiment_id = None
        self.active_session_id = None
        # Flush to disk every `flush_interval` events; < 0 means flush
        # only when explicitly requested (see bufferedFlush()).
        self.flushCounter = self.settings.get('flush_interval', 32)
        self._eventCounter = 0
        self.TABLES = dict()
        self._eventGroupMappings = dict()
        # Fix: initialize condition-variable state up front so methods
        # that test these attributes (extendConditionVariableTable) do
        # not raise AttributeError before initConditionVariableTable().
        self._EXP_COND_DTYPE = None
        self._activeRunTimeConditionVariableTable = None
        self.emrtFile = open_file(self.filePath, mode=fmode)
        atexit.register(close_open_data_files, False)
        if len(self.emrtFile.title) == 0:
            # A brand-new file has no title yet: build the template.
            self.buildOutTemplate()
            self.flush()
        else:
            self.loadTableMappings()

    def loadTableMappings(self):
        """Cache references to the meta-data tables of an existing file."""
        self.TABLES['EXPERIMENT_METADETA'] = self.emrtFile.root.data_collection.experiment_meta_data
        self.TABLES['SESSION_METADETA'] = self.emrtFile.root.data_collection.session_meta_data
        self.TABLES['CLASS_TABLE_MAPPINGS'] = self.emrtFile.root.class_table_mapping

    def buildOutTemplate(self):
        """Create the group/table skeleton in a freshly opened file."""
        self.emrtFile.title = DATA_FILE_TITLE
        self.emrtFile.FILE_VERSION = FILE_VERSION
        self.emrtFile.SCHEMA_DESIGNER = SCHEMA_AUTHORS
        self.emrtFile.SCHEMA_MODIFIED = SCHEMA_MODIFIED_DATE
        # Event-class <-> table mapping registry.
        self.TABLES['CLASS_TABLE_MAPPINGS'] = getattr(self.emrtFile, create_table)(
            self.emrtFile.root,
            'class_table_mapping',
            ClassTableMappings,
            title='ioHub DeviceEvent Class to DataStore Table Mappings.')
        getattr(self.emrtFile, create_group)(
            self.emrtFile.root,
            'data_collection',
            title='Data Collected using the ioHub Event Framework.'
        )
        self.flush()
        getattr(self.emrtFile, create_group)(
            self.emrtFile.root.data_collection,
            'events',
            title='Data Collected using the ioHub Event Framework.'
        )
        getattr(self.emrtFile, create_group)(
            self.emrtFile.root.data_collection,
            'condition_variables',
            title="Experiment Session DV and IV's Values."
        )
        self.flush()
        self.TABLES['EXPERIMENT_METADETA'] = getattr(self.emrtFile, create_table)(
            self.emrtFile.root.data_collection,
            'experiment_meta_data',
            ExperimentMetaData,
            title='Information About Experiments Saved to This ioHub DataStore File.'
        )
        self.TABLES['SESSION_METADETA'] = getattr(self.emrtFile, create_table)(
            self.emrtFile.root.data_collection,
            'session_meta_data',
            SessionMetaData,
            title='Information About Sessions Saved to This ioHub DataStore File.'
        )
        self.flush()
        # Pre-create one group per known device type.
        getattr(self.emrtFile, create_group)(self.emrtFile.root.data_collection.events, 'experiment', title='Experiment Device Events.')
        getattr(self.emrtFile, create_group)(self.emrtFile.root.data_collection.events, 'keyboard', title='Keyboard Device Events.')
        getattr(self.emrtFile, create_group)(self.emrtFile.root.data_collection.events, 'mouse', title='Mouse Device Events.')
        getattr(self.emrtFile, create_group)(self.emrtFile.root.data_collection.events, 'wintab', title='Wintab Device Events.')
        getattr(self.emrtFile, create_group)(self.emrtFile.root.data_collection.events, 'eyetracker', title='EyeTracker Device Events.')
        getattr(self.emrtFile, create_group)(self.emrtFile.root.data_collection.events, 'serial', title='Serial Interface Events.')
        getattr(self.emrtFile, create_group)(self.emrtFile.root.data_collection.events, 'pstbox', title='Serial Pstbox Device Events.')
        self.flush()

    @staticmethod
    def eventTableLabel2ClassName(event_table_label):
        """Convert a table label like 'KEYBOARD_KEY' to 'KeyboardKeyEvent'."""
        label = event_table_label[0] + event_table_label[1:].lower() + 'Event'
        tokens = str(label).split('_')
        return ''.join([t[0].upper() + t[1:] for t in tokens])

    def groupNodeForEvent(self, event_cls):
        """Return (creating if needed) the HDF5 group for event_cls's device."""
        evt_group_label = event_cls.PARENT_DEVICE.DEVICE_TYPE_STRING.lower()
        datevts_node = self.emrtFile.root.data_collection.events
        try:
            # If the group for the event table already exists, return it.
            return getattr(datevts_node, _f_get_child)(evt_group_label)
        except tables.NoSuchNodeError:
            egtitle = "%s%s Device Events." % (evt_group_label[0].upper(),
                                               evt_group_label[1:])
            # Fix: use the version-resolved create_group attribute; the
            # camelCase File.createGroup() was removed in PyTables 3.
            getattr(self.emrtFile, create_group)(datevts_node,
                                                 evt_group_label,
                                                 title=egtitle)
            return getattr(datevts_node, _f_get_child)(evt_group_label)

    def updateDataStoreStructure(self, device_instance, event_class_dict):
        """Ensure a table (and class mapping) exists for each event class."""
        dfilter = tables.Filters(
            complevel=0,
            complib='zlib',
            shuffle=False,
            fletcher32=False)
        for event_cls_name, event_cls in event_class_dict.items():
            if not event_cls.IOHUB_DATA_TABLE:
                continue
            event_table_label = event_cls.IOHUB_DATA_TABLE
            if event_table_label not in self.TABLES:
                try:
                    self.TABLES[event_table_label] = getattr(self.emrtFile, create_table)(
                        self.groupNodeForEvent(event_cls),
                        self.eventTableLabel2ClassName(event_table_label),
                        event_cls.NUMPY_DTYPE,
                        title='%s Data' %
                        (device_instance.__class__.__name__,
                         ),
                        filters=dfilter.copy())
                    self.flush()
                except tables.NodeError:
                    # Table already exists on disk: reuse it. (getattr via
                    # _f_get_child keeps this PyTables-version neutral.)
                    self.TABLES[event_table_label] = getattr(
                        self.groupNodeForEvent(event_cls), _f_get_child)(
                        self.eventTableLabel2ClassName(event_table_label))
                except Exception as e:
                    print2err('---------------ERROR------------------')
                    print2err(
                        'Exception %s in iohub.datastore.updateDataStoreStructure:' %
                        (e.__class__.__name__))
                    print2err('\tevent_cls: {0}'.format(event_cls))
                    print2err(
                        '\tevent_cls_name: {0}'.format(event_cls_name))
                    print2err(
                        '\tevent_table_label: {0}'.format(event_table_label))
                    print2err(
                        '\teventTableLabel2ClassName: {0}'.format(
                            self.eventTableLabel2ClassName(event_table_label)))
                    print2err(
                        '\tgroupNodeForEvent(event_cls): {0}'.format(
                            self.groupNodeForEvent(event_cls)))
                    print2err('\nException:')
                    printExceptionDetailsToStdErr()
                    print2err('--------------------------------------')
            if event_table_label in self.TABLES:
                self.addClassMapping(event_cls,
                                     self.TABLES[event_table_label])
            else:
                print2err(
                    '---- IOHUB.DATASTORE CANNOT ADD CLASS MAPPING ----')
                print2err(
                    '\t** TABLES missing key: {0}'.format(event_table_label))
                print2err('\tevent_cls: {0}'.format(event_cls))
                print2err('\tevent_cls_name: {0}'.format(event_cls_name))
                print2err(
                    '\teventTableLabel2ClassName: {0}'.format(
                        self.eventTableLabel2ClassName(event_table_label)))
                print2err('----------------------------------------------')

    def addClassMapping(self, ioClass, ctable):
        """Record the event-class -> table mapping, once per class id."""
        names = [
            x['class_id'] for x in self.TABLES['CLASS_TABLE_MAPPINGS'].where(
                '(class_id == %d)' %
                (ioClass.EVENT_TYPE_ID))]
        if len(names) == 0:
            trow = self.TABLES['CLASS_TABLE_MAPPINGS'].row
            trow['class_id'] = ioClass.EVENT_TYPE_ID
            trow['class_type_id'] = 1  # Device or Event etc.
            trow['class_name'] = ioClass.__name__
            trow['table_path'] = ctable._v_pathname
            trow.append()
            self.flush()

    def createOrUpdateExperimentEntry(self, experimentInfoList):
        """Return the id of the experiment row, creating it if needed.

        Experiments are matched by their 'code' (index 1 of the list).
        """
        experiment_metadata = self.TABLES['EXPERIMENT_METADETA']
        result = [row for row in experiment_metadata.iterrows() if row[
            'code'] == experimentInfoList[1]]
        if len(result) > 0:
            result = result[0]
            self.active_experiment_id = result['experiment_id']
            return self.active_experiment_id
        # New experiment: allocate the next free id.
        max_id = 0
        id_col = experiment_metadata.col('experiment_id')
        if len(id_col) > 0:
            max_id = np.amax(id_col)
        self.active_experiment_id = max_id + 1
        experimentInfoList[0] = self.active_experiment_id
        experiment_metadata.append([tuple(experimentInfoList), ])
        self.flush()
        return self.active_experiment_id

    def createExperimentSessionEntry(self, sessionInfoDict):
        """Append a session row for the active experiment; return its id."""
        session_metadata = self.TABLES['SESSION_METADETA']
        max_id = 0
        id_col = session_metadata.col('session_id')
        if len(id_col) > 0:
            max_id = np.amax(id_col)
        self.active_session_id = int(max_id + 1)
        values = (
            self.active_session_id,
            self.active_experiment_id,
            sessionInfoDict['code'],
            sessionInfoDict['name'],
            sessionInfoDict['comments'],
            sessionInfoDict['user_variables']
        )
        session_metadata.append([values, ])
        self.flush()
        return self.active_session_id

    def initConditionVariableTable(
            self, experiment_id, session_id, np_dtype):
        """Create/open the per-experiment condition-variable table.

        Returns True on success, False otherwise.
        """
        # Prepend the experiment/session id columns to the caller's dtype.
        exp_session = [('EXPERIMENT_ID', 'i4'), ('SESSION_ID', 'i4')]
        exp_session.extend(np_dtype)
        # Normalize dtype entries: field names must be native str, and no
        # element may be bytes, or numpy/PyTables reject the spec.
        np_dtype = []
        for npctype in exp_session:
            if isinstance(npctype[0], str):
                nv = [str(npctype[0]), ]
                nv.extend(npctype[1:])
                np_dtype.append(tuple(nv))
            else:
                np_dtype.append(npctype)
        np_dtype2 = []
        for adtype in np_dtype:
            adtype2 = []
            for a in adtype:
                if isinstance(a, bytes):
                    a = str(a, 'utf-8')
                adtype2.append(a)
            np_dtype2.append(tuple(adtype2))
        self._EXP_COND_DTYPE = np.dtype(np_dtype2)
        # Defined before the try so the outer except can always report it.
        expCondTableName = "EXP_CV_%d" % (experiment_id)
        try:
            expcv_table = getattr(
                self.emrtFile.root.data_collection.condition_variables,
                _f_get_child)(expCondTableName)
            self.TABLES['EXP_CV'] = expcv_table
        except NoSuchNodeError:
            try:
                expcv_table = getattr(self.emrtFile, create_table)(
                    self.emrtFile.root.data_collection.condition_variables,
                    expCondTableName,
                    self._EXP_COND_DTYPE,
                    title='Condition Variable Values for Experiment ID %d' % (experiment_id))
                self.TABLES['EXP_CV'] = expcv_table
                self.emrtFile.flush()
            except Exception:
                printExceptionDetailsToStdErr()
                return False
        except Exception:
            print2err(
                'Error getting expcv_table for experiment %d, table name: %s' %
                (experiment_id, expCondTableName))
            printExceptionDetailsToStdErr()
            return False
        # Fix: the original assigned a stale local initialized to None,
        # so the active table reference was always None.
        self._activeRunTimeConditionVariableTable = expcv_table
        return True

    def extendConditionVariableTable(self, experiment_id, session_id, data):
        """Append one condition-variable row; returns True on success."""
        if self._EXP_COND_DTYPE is None:
            return False
        if self.emrtFile and 'EXP_CV' in self.TABLES:
            temp = [experiment_id, session_id]
            temp.extend(data)
            data = temp
            try:
                etable = self.TABLES['EXP_CV']
                # Nested sequences must be tuples for structured arrays.
                for i, d in enumerate(data):
                    if isinstance(d, (list, tuple)):
                        data[i] = tuple(d)
                np_array = np.array([tuple(data), ],
                                    dtype=self._EXP_COND_DTYPE)
                etable.append(np_array)
                self.bufferedFlush()
                return True
            except Exception:
                printExceptionDetailsToStdErr()
                return False

    def addMetaDataToFile(self, metaData):
        """Placeholder; extra file meta-data is not currently stored."""
        pass

    def checkForExperimentAndSessionIDs(self, event=None):
        """Return True only when both active experiment and session ids are set."""
        return (self.active_experiment_id is not None and
                self.active_session_id is not None)

    def checkIfSessionCodeExists(self, sessionCode):
        """Return True if the active experiment already has this session code."""
        if self.emrtFile:
            sessionsForExperiment = self.emrtFile.root.data_collection.session_meta_data.where(
                'experiment_id == %d' % (self.active_experiment_id,))
            sessionCodeMatch = [
                sess for sess in sessionsForExperiment if sess['code'] == sessionCode]
            if len(sessionCodeMatch) > 0:
                return True
        return False

    def _handleEvent(self, event):
        """Append a single event (list form) to its class's table."""
        try:
            if self.checkForExperimentAndSessionIDs(event) is False:
                return False
            etype = event[DeviceEvent.EVENT_TYPE_ID_INDEX]
            eventClass = EventConstants.getClass(etype)
            etable = self.TABLES[eventClass.IOHUB_DATA_TABLE]
            event[DeviceEvent.EVENT_EXPERIMENT_ID_INDEX] = self.active_experiment_id
            event[DeviceEvent.EVENT_SESSION_ID_INDEX] = self.active_session_id
            np_array = np.array([tuple(event), ], dtype=eventClass.NUMPY_DTYPE)
            etable.append(np_array)
            self.bufferedFlush()
        except Exception:
            print2err("Error saving event: ", event)
            printExceptionDetailsToStdErr()

    def _handleEvents(self, events):
        """Append a batch of same-type events in one table write."""
        try:
            if self.checkForExperimentAndSessionIDs(len(events)) is False:
                return False
            event = events[0]
            etype = event[DeviceEvent.EVENT_TYPE_ID_INDEX]
            eventClass = EventConstants.getClass(etype)
            etable = self.TABLES[eventClass.IOHUB_DATA_TABLE]
            np_events = []
            for event in events:
                event[DeviceEvent.EVENT_EXPERIMENT_ID_INDEX] = self.active_experiment_id
                event[DeviceEvent.EVENT_SESSION_ID_INDEX] = self.active_session_id
                np_events.append(tuple(event))
            np_array = np.array(np_events, dtype=eventClass.NUMPY_DTYPE)
            etable.append(np_array)
            self.bufferedFlush(len(np_events))
        except ioHubError as e:
            print2err(e)
        except Exception:
            printExceptionDetailsToStdErr()

    def bufferedFlush(self, eventCount=1):
        """Flush after every flushCounter events; return True if flushed.

        If flushCounter is >= 0 then do threshold checks. If it is < 0,
        a flush only occurs when a command is sent to ioHub, so do
        nothing here.
        """
        if self.flushCounter >= 0:
            if self.flushCounter == 0:
                self.flush()
                return True
            if self.flushCounter <= self._eventCounter:
                self.flush()
                self._eventCounter = 0
                return True
        self._eventCounter += eventCount
        return False

    def flush(self):
        """Flush pending writes; silently ignores an already-closed file."""
        try:
            if self.emrtFile:
                self.emrtFile.flush()
        except tables.ClosedFileError:
            pass
        except Exception:
            printExceptionDetailsToStdErr()

    def close(self):
        """Flush and close the underlying HDF5 file."""
        self.flush()
        self._activeRunTimeConditionVariableTable = None
        self.emrtFile.close()

    def __del__(self):
        # Best-effort close; the interpreter may already be tearing down.
        try:
            self.close()
        except Exception:
            pass
## -------------------- Utility Functions ------------------------ ##
def close_open_data_files(verbose):
    """Close any HDF5 files PyTables still has open (atexit safety net)."""
    open_files = tables.file._open_files
    if hasattr(open_files, 'close_all'):
        # Modern PyTables tracks its handles and can close them all.
        open_files.close_all()
        return
    # Older PyTables: walk the open-file registry manually.
    if verbose and len(open_files) > 0:
        print2err('Closing remaining open data files:')
    for handle in open_files:
        if verbose:
            print2err('%s...' % (open_files[handle].filename,))
        open_files[handle].close()
        if verbose:
            print2err('done')
# Flag recording that the module-level atexit hook below is installed.
registered_close_open_data_files = True
# Ensure all DataStore files are closed when the interpreter exits.
atexit.register(close_open_data_files, False)
## ---------------------- Pytable Definitions ------------------- ##
class ClassTableMappings(tables.IsDescription):
    # PyTables row schema mapping an ioHub event class to the HDF5 table
    # storing its events. ``pos`` fixes the on-disk column order, so the
    # declarations below must not be reordered.
    class_id = UInt32Col(pos=1)
    class_type_id = UInt32Col(pos=2) # Device or Event etc.
    class_name = StringCol(32, pos=3)
    table_path = StringCol(128, pos=4)
class ExperimentMetaData(tables.IsDescription):
experiment_id = UInt32Col(pos=1)
code = StringCol(256, pos=2)
title = | |
so_connections['name'] = "Sales Order"
so_connections['count'] = sales_order_count
so_connections['icon'] = "https://erpcloud.systems/icons/sales_order.png"
connections.append(so_connections)
if delivery_note_count > 0 and doc_data:
dn_connections['name'] = "Delivery Note"
dn_connections['count'] = delivery_note_count
dn_connections['icon'] = "https://erpcloud.systems/icons/delivery_note.png"
connections.append(dn_connections)
if payment_entry_count > 0 and doc_data:
pe_connections['name'] = "Payment Entry"
pe_connections['count'] = payment_entry_count
pe_connections['icon'] = "https://erpcloud.systems/icons/payment_entry.png"
connections.append(pe_connections)
sinv['conn'] = connections
if doc_data:
return sinv
else:
return "لا يوجد فاتورة مبيعات بهذا الاسم"
@frappe.whitelist()
def payment_entry(name):
    """Return a summary dict for the Payment Entry *name*.

    Includes the main document fields plus its attachments, comments and
    available print formats. Returns an Arabic "not found" message string
    when no Payment Entry with that name exists.

    Security fix: *name* comes from the HTTP client; the original spliced
    it into SQL with str.format (SQL injection). All queries now use
    parameterized values. The literal ``%`` characters inside
    DATE_FORMAT are escaped as ``%%`` because values are passed.
    """
    pe = {}
    doc_data = frappe.db.get_list('Payment Entry', filters={'name': name},
        fields=['name',
                'party_type',
                'party',
                'party_name',
                'posting_date',
                'status',
                'reference_no',
                'reference_date',
                'payment_type',
                'mode_of_payment',
                'mode_of_payment_2',
                'paid_from_account_balance',
                'paid_to_account_balance',
                'paid_from',
                'paid_to',
                'paid_amount',
                'docstatus'
                ])
    # 'name' is unique, so this loop runs at most once.
    for x in doc_data:
        pe['name'] = x.name
        pe['party_type'] = x.party_type
        pe['party'] = x.party
        pe['party_name'] = x.party_name
        pe['posting_date'] = x.posting_date
        pe['status'] = x.status
        pe['reference_no'] = x.reference_no
        pe['reference_date'] = x.reference_date
        pe['payment_type'] = x.payment_type
        pe['mode_of_payment'] = x.mode_of_payment
        pe['mode_of_payment_2'] = x.mode_of_payment_2
        pe['paid_from'] = x.paid_from
        pe['paid_from_account_balance'] = x.paid_from_account_balance
        pe['paid_to'] = x.paid_to
        pe['paid_to_account_balance'] = x.paid_to_account_balance
        pe['paid_amount'] = x.paid_amount
        pe['docstatus'] = x.docstatus
    attachments = frappe.db.sql("""
        SELECT file_name, file_url,
               DATE_FORMAT(creation, '%%d/%%m/%%Y') AS date_added
        FROM `tabFile`
        WHERE `tabFile`.attached_to_doctype = 'Payment Entry'
          AND `tabFile`.attached_to_name = %(name)s
        """, {'name': name}, as_dict=1)
    pe['attachments'] = attachments
    comments = frappe.db.sql("""
        SELECT creation,
               (SELECT `tabUser`.full_name FROM `tabUser`
                WHERE `tabUser`.name = `tabComment`.owner) AS owner,
               content
        FROM `tabComment`
        WHERE `tabComment`.reference_doctype = 'Payment Entry'
          AND `tabComment`.reference_name = %(name)s
          AND `tabComment`.comment_type = 'Comment'
        """, {'name': name}, as_dict=1)
    pe['comments'] = comments
    print_formats = frappe.db.sql(
        """ Select name from `tabPrint Format` where doc_type = "Payment Entry" and disabled = 0 """, as_dict=1)
    pe['print_formats'] = print_formats
    # The built-in "Standard" format is not stored in tabPrint Format.
    pf_standard = {'name': 'Standard'}
    print_formats.append(pf_standard)
    if doc_data:
        return pe
    return "لا يوجد مدفوعات ومقبوضات بهذا الاسم"
@frappe.whitelist()
def item(name):
    """Return an Item master record as a plain dict for the mobile client.

    Args:
        name: Item docname / item_code supplied by the API caller (untrusted).

    Returns:
        dict with the item header fields plus optional ``uoms``,
        ``selling_price_lists_rate`` and ``stock_balances`` lists,
        ``attachments``, ``comments``, ``print_formats`` and ``conn``
        (per-doctype usage counters), or the Arabic "no item with this
        name" message when nothing matches.
    """
    doc_data = frappe.db.get_list('Item', filters={'name': name},
        fields=['name',
                'item_code',
                'item_name',
                'item_group',
                'brand',
                'stock_uom',
                'description',
                'image',
                'disabled',
                'is_stock_item',
                'include_item_in_manufacturing',
                'is_fixed_asset',
                'asset_category',
                'is_purchase_item',
                'purchase_uom',
                'is_sales_item',
                'sales_uom',
                ])
    if not doc_data:
        # Same Arabic "not found" message the client already expects.
        return "لا يوجد صنف بهذا الاسم"
    item_ = {}
    # Only the requested fields were fetched, so copying the row wholesale
    # produces exactly the keys the old field-by-field assignments built.
    item_.update(doc_data[0])
    # Child tables are attached only when non-empty, as before.
    uoms = frappe.db.get_list('UOM Conversion Detail', filters={'parent': name},
        order_by='idx',
        fields=['idx', 'uom', 'conversion_factor'])
    if uoms:
        item_['uoms'] = uoms
    selling_prices = frappe.db.get_list('Item Price',
        filters={'item_code': name, 'selling': 1}, order_by='price_list',
        fields=['price_list', 'price_list_rate', 'currency'])
    if selling_prices:
        item_['selling_price_lists_rate'] = selling_prices
    # SECURITY: `name` used to be spliced into the raw SQL below with
    # str.format, allowing SQL injection from a whitelisted endpoint.
    # Use bound parameters instead.
    balances = frappe.db.sql("""
        select
            tabBin.warehouse as warehouse,
            (select warehouse_type from tabWarehouse
             where tabWarehouse.name = tabBin.warehouse) as warehouse_type,
            tabBin.actual_qty as actual_qty,
            tabBin.reserved_qty as reserved_qty,
            tabBin.ordered_qty as ordered_qty,
            tabBin.indented_qty as indented_qty,
            tabBin.projected_qty as projected_qty
        from tabBin
        inner join tabItem on tabBin.item_code = tabItem.item_code
        where tabBin.item_code = %(name)s
          and tabItem.has_variants = 0
          and tabBin.actual_qty > 0
        """, {'name': name}, as_dict=1)
    if balances:
        # Plain dicts (not frappe._dict), matching the previous payload.
        item_['stock_balances'] = [dict(row) for row in balances]
    item_['attachments'] = frappe.db.sql("""
        select file_name, file_url,
               DATE_FORMAT(creation, '%%d/%%m/%%Y') as date_added
        from `tabFile`
        where attached_to_doctype = 'Item'
          and attached_to_name = %(name)s
        """, {'name': name}, as_dict=1)
    item_['comments'] = frappe.db.sql("""
        select creation,
               (select full_name from `tabUser`
                where `tabUser`.name = `tabComment`.owner) as owner,
               content
        from `tabComment`
        where reference_doctype = 'Item'
          and reference_name = %(name)s
          and comment_type = 'Comment'
        """, {'name': name}, as_dict=1)
    print_formats = frappe.db.sql(
        """ select name from `tabPrint Format`
            where doc_type = 'Item' and disabled = 0 """, as_dict=1)
    # The built-in "Standard" format is not stored in `tabPrint Format`.
    print_formats.append({'name': 'Standard'})
    item_['print_formats'] = print_formats
    # "Connections": count of distinct parent documents per transactional
    # doctype that reference this item (grouped by parent, as before).
    connections = []
    for child_doctype, label, icon in (
            ('Quotation Item', 'Quotation', 'quotation'),
            ('Sales Order Item', 'Sales Order', 'sales_order'),
            ('Delivery Note Item', 'Delivery Note', 'delivery_note'),
            ('Sales Invoice Item', 'Sales Invoice', 'sales_invoice'),
            ('Material Request Item', 'Material Request', 'material_request'),
            ('Supplier Quotation Item', 'Supplier Quotation', 'supplier_quotation'),
            ('Purchase Order Item', 'Purchase Order', 'purchase_order'),
            ('Purchase Receipt Item', 'Purchase Receipt', 'purchase_receipt'),
            ('Purchase Invoice Item', 'Purchase Invoice', 'purchase_invoice'),
            ('Stock Entry Detail', 'Stock Entry', 'stock_entry')):
        count = len(frappe.db.get_list(child_doctype, filters={'item_code': name},
                                       fields=['item_code'], group_by='parent'))
        if count > 0:
            connections.append({
                'name': label,
                'count': count,
                'icon': "https://erpcloud.systems/icons/{0}.png".format(icon),
            })
    item_['conn'] = connections
    return item_
@frappe.whitelist()
def stock_entry(name):
    """Return a single Stock Entry as a plain dict for the mobile client.

    Args:
        name: Stock Entry docname supplied by the API caller (untrusted).

    Returns:
        dict with the header fields, a human-readable ``status`` derived
        from ``docstatus``, plus ``items``, ``attachments``, ``comments``
        and ``print_formats`` lists, or the Arabic "no stock entry with
        this name" message when nothing matches.
    """
    doc_data = frappe.db.get_list('Stock Entry', filters={'name': name},
        fields=['name',
                'stock_entry_type',
                'purpose',
                'posting_date',
                'from_warehouse',
                'to_warehouse',
                'project',
                'docstatus'  # was listed twice in the old field list
                ])
    if not doc_data:
        # Same Arabic "not found" message the client already expects.
        return "لا يوجد حركة مخزنية بهذا الاسم"
    se = {}
    se.update(doc_data[0])
    # Map the numeric workflow state onto the label the client displays.
    status_map = {0: "Draft", 1: "Submitted", 2: "Cancelled"}
    if doc_data[0].docstatus in status_map:
        se['status'] = status_map[doc_data[0].docstatus]
    child_data = frappe.db.get_list('Stock Entry Detail', filters={'parent': name},
        order_by='idx',
        fields=['name',
                'idx',
                'item_code',
                'item_name',
                'description',
                'item_group',
                'image',
                'qty',
                'transfer_qty',
                'stock_uom',
                'uom',
                'conversion_factor',
                's_warehouse',
                't_warehouse',
                'cost_center',
                'project',
                'actual_qty',
                'transferred_qty',
                ])
    if child_data:
        se['items'] = child_data
    # SECURITY: `name` used to be spliced into these queries with
    # str.format, allowing SQL injection from a whitelisted endpoint.
    # Use bound parameters; literal '%' in DATE_FORMAT must be doubled.
    se['attachments'] = frappe.db.sql("""
        select file_name, file_url,
               DATE_FORMAT(creation, '%%d/%%m/%%Y') as date_added
        from `tabFile`
        where attached_to_doctype = 'Stock Entry'
          and attached_to_name = %(name)s
        """, {'name': name}, as_dict=1)
    se['comments'] = frappe.db.sql("""
        select creation,
               (select full_name from `tabUser`
                where `tabUser`.name = `tabComment`.owner) as owner,
               content
        from `tabComment`
        where reference_doctype = 'Stock Entry'
          and reference_name = %(name)s
          and comment_type = 'Comment'
        """, {'name': name}, as_dict=1)
    print_formats = frappe.db.sql(
        """ select name from `tabPrint Format`
            where doc_type = 'Stock Entry' and disabled = 0 """, as_dict=1)
    # The built-in "Standard" format is not stored in `tabPrint Format`.
    print_formats.append({'name': 'Standard'})
    se['print_formats'] = print_formats
    return se
@frappe.whitelist()
def delivery_note(name):
dn = {}
doc_data = frappe.db.get_list('Delivery Note', filters={'name': name},
fields=['name',
'customer',
'customer_name',
'posting_date',
'status',
'is_return',
'tax_id',
'customer_group',
'territory',
'customer_address',
'address_display',
'contact_display',
'contact_mobile',
'contact_email',
'project',
'cost_center',
'currency',
'conversion_rate',
'selling_price_list',
'price_list_currency',
'plc_conversion_rate',
'ignore_pricing_rule',
'set_warehouse',
'set_target_warehouse',
'tc_name',
'sales_partner',
'commission_rate',
'total_commission',
'total_qty',
'base_total',
'base_net_total',
'total',
'net_total',
'base_total_taxes_and_charges',
'total_taxes_and_charges',
'apply_discount_on',
'base_discount_amount',
'additional_discount_percentage',
'discount_amount',
'base_grand_total',
'base_in_words',
'grand_total',
'in_words',
'docstatus'
])
for x in doc_data:
dn['name'] = x.name
dn['customer'] = x.customer
dn['customer_name'] = x.customer_name
dn['posting_date'] = x.posting_date
dn['status'] = x.status
dn['is_return'] = x.is_return
dn['tax_id'] = x.order_type
dn['customer_group'] = x.customer_group
dn['territory'] = x.territory
dn['customer_address'] = x.customer_address
dn['address_display'] = x.address_display
dn['contact_display'] = x.contact_display
dn['contact_mobile'] = x.contact_mobile
dn['contact_email'] = x.contact_email
dn['project'] = x.project
dn['cost_center'] = x.cost_center
dn['currency'] = x.currency
dn['conversion_rate'] = x.conversion_rate
dn['selling_price_list'] = x.selling_price_list
dn['price_list_currency'] = x.price_list_currency
dn['plc_conversion_rate'] = x.plc_conversion_rate
dn['set_warehouse'] = x.set_warehouse
dn['set_target_warehouse'] = x.set_target_warehouse
dn['tc_name'] = x.tc_name
dn['sales_partner'] = x.sales_partner
dn['commission_rate'] = x.commission_rate
dn['total_commission'] = x.total_commission
dn['total_qty'] = x.total_qty
dn['base_total'] = x.base_total
dn['base_net_total'] = x.base_net_total
dn['total'] = x.total
dn['net_total'] = x.net_total
dn['base_total_taxes_and_charges'] = x.base_total_taxes_and_charges
dn['total_taxes_and_charges'] = x.total_taxes_and_charges
dn['apply_discount_on'] = x.apply_discount_on
dn['base_discount_amount'] = x.base_discount_amount
dn['additional_discount_percentage'] = x.additional_discount_percentage
dn['discount_amount'] = x.discount_amount
dn['base_grand_total'] = x.base_grand_total
dn['base_in_words'] = x.base_in_words
dn['grand_total'] = x.grand_total
dn['in_words'] = x.in_words
dn['docstatus'] | |
import os
import platform
import shutil
import time
from utils import FilterBlock, FilterManager
import settings
# A filter rule is emitted with a Show/Hide keyword.  DEBUG resolves to
# Show when settings.DEBUG is set, so normally-hidden rules stay visible
# for inspection.
SHOW = 'Show'
HIDE = 'Hide'
DEBUG = SHOW if settings.DEBUG else HIDE
# Item-class value strings for the filter's `Class` condition.
CLASS_TWO_HAND = '"Bows" "Two Hand" "Staves"'
CLASS_SMALL_ONE_HAND = '"Daggers" "Wands"'
CLASS_BIG_ONE_HAND = '"Quivers" "Shields" "Claws" "Sceptres" "One Hand"'
CLASS_HAND = ' '.join([CLASS_TWO_HAND, CLASS_SMALL_ONE_HAND, CLASS_BIG_ONE_HAND])
CLASS_ACCESSORY = '"Belts" "Amulets" "Rings"'
# Body-armour base types grouped by defence kind
# (pure armour / evasion / energy shield / hybrid evasion+ES).
BASE_TYPE_BODY_STR = '"Plate Vest" "Chestplate" "Plate"'
BASE_TYPE_BODY_EVA = '"Shabby Jerkin" "Leather" "Buckskin Tunic" "Eelskin Tunic" "Sharkskin Tunic" ' \
    '"Thief\'s Garb" "Cutthroat\'s Garb" "Assassin\'s Garb"'
BASE_TYPE_BODY_ES = '"Robe" "Silken Vest" "Silken Garb" "Vestment" "Regalia" "Silken Wrap" "Necromancer Silks"'
BASE_TYPE_BODY_EE = '"Padded Vest" "Oiled Vest" "Jacket" "Oiled Coat" "Sleek Coat" "Varnished Coat" "Raiment" ' \
    '"Waxed Garb" "Lacquered Garb" "Sadist Garb" "Bone Armour" "Crypt Armour" "Carnal Armour"'
BASE_TYPE_SHIELD_NON_SPELL = '"Tower Shield" "Buckler" "Round Shield" "Spiked Shield"'
# Rarity condition values; N2M / N2R mean "Normal up to Magic / Rare".
RARITY_NORMAL = 'Normal'
RARITY_MAGIC = 'Magic'
RARITY_RARE = 'Rare'
RARITY_UNIQUE = 'Unique'
RARITY_N2M = '<= Magic'
RARITY_N2R = '<= Rare'
# ItemLevel thresholds for the regal (75+) and chaos (60+) vendor recipes.
ITEM_LEVEL_REGAL = '>= 75'
ITEM_LEVEL_CHAOS = '>= 60'
FONT_SIZE_MAX = 45
FONT_SIZE_MIN = 17  # https://dd.reddit.com/r/pathofexile/comments/991bym/to_filter_creators_setfontsize_the_truth/
# Colours are "R G B [A]" strings, as the filter file format expects.
COLOR_WHITE = '255 255 255'
COLOR_SILVER = '200 200 200'
COLOR_GRAY = '150 150 150'
COLOR_BLACK = '0 0 0'
COLOR_RED = '255 0 0'
COLOR_YELLOW = '255 255 0'
COLOR_TANGERINE = '213 159 0'
COLOR_ORANGE = '255 125 0'
COLOR_UNIQUE = '175 96 37'  # colour used for unique-tier items (see STYLE_TOP_UNIQUE)
COLOR_OLIVE = '100 75 0'
COLOR_LIME = '0 255 0'
COLOR_LIME_LIGHT = '0 210 0 210'
COLOR_SPRING_BUD = '180 255 0'
COLOR_AQUA = '0 255 255'
COLOR_BLUE = '0 0 255'
COLOR_MAGENTA = '255 0 255'
COLOR_MAGENTA_DARK = '129 15 213 200'
# Built-in alert sounds as "<id> <volume>"; the inline notes describe how
# each sound effect actually sounds in-game.
_SOUND_1 = '1 300'  # heavy gong strike
_SOUND_2 = '2 300'
_SOUND_3 = '3 300'  # muffled gong
_SOUND_4 = '4 300'
_SOUND_5 = '5 300'  # motorbike passing quickly
_SOUND_6 = '6 300'
_SOUND_7 = '7 300'  # similar to sound 8, unused
_SOUND_8 = '8 300'
_SOUND_9 = '9 300'  # similar to sound 8, unused
_SOUND_10 = '10 300'  # heavy hammer on an iron plate
_SOUND_11 = '11 300'  # heavy hammer on a stone slab
_SOUND_12 = '12 300'
_SOUND_13 = '13 300'  # gong strike
_SOUND_14 = '14 300'  # similar to sound 8, unused
_SOUND_15 = '15 300'  # similar to sound 8, unused
_SOUND_16 = '16 300'  # hammer on a stone slab
# Semantic aliases: which raw sound plays for each drop category.
SOUND_TOP_VALUE = _SOUND_8
SOUND_MID_VALUE = _SOUND_1
SOUND_LOW_VALUE = _SOUND_2
SOUND_SHAPER_ELDER_T2 = _SOUND_3
SOUND_SHAPER_ELDER = _SOUND_10
SOUND_MAP = _SOUND_4
SOUND_UNIQUE = _SOUND_6
SOUND_LEVELING = _SOUND_12
SOUND_MAGIC_MOD = _SOUND_3
# Reusable style dicts: filter action keyword -> value.
STYLE_TOP = {'SetFontSize': FONT_SIZE_MAX,
             'SetTextColor': COLOR_RED, 'SetBorderColor': COLOR_RED, 'SetBackgroundColor': COLOR_WHITE}
STYLE_TOP_RARE = {'SetFontSize': FONT_SIZE_MAX, 'SetBorderColor': COLOR_ORANGE, 'SetBackgroundColor': COLOR_OLIVE}
STYLE_T1_RARE = {'SetBorderColor': COLOR_ORANGE, 'SetBackgroundColor': COLOR_OLIVE + ' 225'}
STYLE_TOP_UNIQUE = {'SetTextColor': COLOR_UNIQUE, 'SetBorderColor': COLOR_UNIQUE, 'SetBackgroundColor': COLOR_WHITE}
_STYLE_MAP_BASE = {'SetFontSize': FONT_SIZE_MAX, 'SetTextColor': COLOR_BLACK, 'SetBackgroundColor': COLOR_SILVER}
# Map styles per tier band; only the border colour varies with the band.
STYLE_MAP_HIGH_11_14 = {**_STYLE_MAP_BASE, 'SetBorderColor': COLOR_RED}
STYLE_MAP_MID_9_10 = {**_STYLE_MAP_BASE, 'SetBorderColor': COLOR_YELLOW}
STYLE_MAP_MID_6_8 = {**_STYLE_MAP_BASE, 'SetBorderColor': COLOR_SPRING_BUD}
STYLE_MAP_LOW_3_5 = {**_STYLE_MAP_BASE, 'SetBorderColor': COLOR_BLUE}
STYLE_MAP_LOW_1_2 = {**_STYLE_MAP_BASE, 'SetBorderColor': COLOR_WHITE}
STYLE_LINKS = {'SetBorderColor': COLOR_AQUA}
STYLE_NONE = {'SetFontSize': None, 'SetTextColor': None, 'SetBorderColor': None, 'SetBackgroundColor': None}
# Section ids used to look up blocks in the upstream filter file.
BLOCK_5L = 702  # after 6 LINKS
BLOCK_HIDE_RARES_65 = 1300  # endgame and leveling
BLOCK_ACT_1 = 3006  # my own bases and weapon_template
BLOCK_HIDE_REMAINING = 3200  # SetFontSize=FONT_SIZE_MIN
def modify_endgame_mix(filter_manager):
    """Customise the endgame sections of the upstream loot filter.

    Walks the filter's commented sections by numeric id, retunes alert
    sounds, styles and visibility according to ``settings``, and
    re-registers the resulting blocks on ``filter_manager``.  Bare
    numbers in the margin comments are the ``_SOUND_*`` ids assigned to
    the blocks of that section.
    """
    filter_manager.add_comment(100, 'OVERRIDE AREA 1 - Override ALL rules here', ignored=True)
    # never override 6L
    # sound 8 (SOUND_TOP_VALUE)
    blocks = filter_manager.add_comment(200, '6 LINKS')
    for block in blocks:
        block.PlayAlertSound = SOUND_TOP_VALUE
    filter_manager.extend_blocks(blocks)
    # BLOCK_5L: sound 8 (SOUND_TOP_VALUE)
    blocks_5l = filter_manager.get_blocks(BLOCK_5L)
    for block in blocks_5l:
        block.PlayAlertSound = SOUND_TOP_VALUE
    filter_manager.extend_blocks(blocks_5l)
    # special rules here
    # if settings.DARKNESS:
    #     _global = {'ElderItem': False, 'ShaperItem': False}
    #     filter_manager.append_block(FilterBlock(status=DEBUG, Rarity=RARITY_MAGIC, **_global))
    #     filter_manager.append_block(FilterBlock(status=DEBUG, Rarity=RARITY_RARE, **_global))
    filter_manager.add_comment(300, 'SHAPER ITEMS', ignored=True)
    filter_manager.add_comment(301, 'Exception Handling - Rings, Amulets, Belts', ignored=True)
    # sound 8; plus an extra shaper_alerts block for accessories
    blocks = filter_manager.add_comment(302, 'Shaper Item Layers - T1')
    for block in blocks:
        block.PlayAlertSound = SOUND_TOP_VALUE
    shaper_alerts = blocks[0].copy_modify(Class='"Amulets" "Rings" "Belts" "Gloves"', BaseType=None)  # gloves for trap
    filter_manager.append_block(shaper_alerts)
    filter_manager.extend_blocks(blocks)
    # sound 10 (SOUND_SHAPER_ELDER)
    blocks = filter_manager.add_comment(303, 'Shaper Item Layers - T2')
    for block in blocks:
        block.PlayAlertSound = SOUND_SHAPER_ELDER
    filter_manager.extend_blocks(blocks)
    blocks = filter_manager.add_comment(304, 'Shaper Item Layers - T3')
    filter_manager.extend_blocks(blocks)
    filter_manager.add_comment(400, 'ELDER ITEMS', ignored=True)
    filter_manager.add_comment(401, 'Exception Handling - Rings, Amulets, Belts', ignored=True)
    # sound 8; plus an extra elder_alerts block for accessories
    blocks = filter_manager.add_comment(402, 'Elder Item Layers - T1')
    for block in blocks:
        block.PlayAlertSound = SOUND_TOP_VALUE
    elder_alerts = blocks[0].copy_modify(Class='"Amulets" "Rings" "Belts"', BaseType=None)
    filter_manager.append_block(elder_alerts)
    filter_manager.extend_blocks(blocks)
    # sound 10 (SOUND_SHAPER_ELDER)
    blocks = filter_manager.add_comment(403, 'Elder Item Layers - T2')
    for block in blocks:
        block.PlayAlertSound = SOUND_SHAPER_ELDER
    filter_manager.extend_blocks(blocks)
    blocks = filter_manager.add_comment(404, 'Elder Item Layers - T3')
    filter_manager.extend_blocks(blocks)
    filter_manager.add_comment(500, 'Explicit Mod filtering', ignored=True)
    blocks = filter_manager.add_comment(501, 'League-Specific Magic Items')
    filter_manager.extend_blocks(blocks)
    # sound 3 (SOUND_MAGIC_MOD)
    blocks = filter_manager.add_comment(502, 'Magic Mod Permutations')
    blocks[-2].PlayAlertSound = SOUND_MAGIC_MOD
    filter_manager.extend_blocks(blocks)
    # sound 8 (SOUND_TOP_VALUE)
    blocks = filter_manager.add_comment(503, 'Rare Item Permutations')
    blocks[0].PlayAlertSound = SOUND_TOP_VALUE
    filter_manager.extend_blocks(blocks)
    filter_manager.add_comment(600, 'Explicit Mod filtering - EXPERIMENTAL', ignored=True)
    blocks = filter_manager.add_comment(601, 'Rare Item Permutations')
    filter_manager.extend_blocks(blocks)
    blocks = filter_manager.add_comment(602, 'Weapons-Physical (Key: IPD)')
    filter_manager.extend_blocks(blocks)
    blocks = filter_manager.add_comment(603, 'The Suffix Abomination')
    filter_manager.extend_blocks(blocks)
    blocks = filter_manager.add_comment(604, 'Casters')
    filter_manager.extend_blocks(blocks)
    filter_manager.add_comment(700, 'Recipes, Magic and Normal items (endgame!)', ignored=True)
    blocks = filter_manager.add_comment(701, 'Overqualitied Items')
    filter_manager.extend_blocks(blocks)
    # moved to run after the 6-link section (see BLOCK_5L above)
    filter_manager.add_comment(BLOCK_5L, '5-Linked items', ignored=True)
    # sound 8; restyle these blocks
    blocks = filter_manager.add_comment(703, '6-Socket Items')
    blocks[0].modify(PlayAlertSound=SOUND_TOP_VALUE, **STYLE_TOP)
    blocks[1].modify(PlayAlertSound=SOUND_TOP_VALUE, **STYLE_TOP)
    blocks[-1].modify(SetBorderColor=COLOR_BLACK, SetBackgroundColor=COLOR_TANGERINE)
    filter_manager.extend_blocks(blocks)
    # sounds: 8, 1, 1
    blocks = filter_manager.add_comment(704, 'Exclusive bases: Stygian Vise')
    blocks[0].PlayAlertSound = SOUND_TOP_VALUE
    blocks[1].PlayAlertSound = SOUND_MID_VALUE
    blocks[2].PlayAlertSound = SOUND_MID_VALUE
    filter_manager.extend_blocks(blocks)
    # sounds: 8, 8, 1, 2, 2
    blocks = filter_manager.add_comment(705, 'Abyss Jewels (Rare and Magic)')
    blocks[0].PlayAlertSound = SOUND_TOP_VALUE
    blocks[1].PlayAlertSound = SOUND_TOP_VALUE
    blocks[2].PlayAlertSound = SOUND_MID_VALUE
    blocks[3].PlayAlertSound = SOUND_LOW_VALUE
    blocks[-1].PlayAlertSound = SOUND_LOW_VALUE
    filter_manager.extend_blocks(blocks)
    # sound 8 (SOUND_TOP_VALUE)
    blocks = filter_manager.add_comment(706, 'Exclusive bases: Top Value')
    for block in blocks:
        block.BaseType += ' "Marble Amulet" '
        block.modify(PlayAlertSound=SOUND_TOP_VALUE, **STYLE_TOP)
    filter_manager.extend_blocks(blocks)
    # sounds 8, 1; one block demoted to DEBUG (hidden unless debugging)
    blocks = filter_manager.add_comment(707, 'Exclusive bases: Trinkets')
    blocks[0].PlayAlertSound = SOUND_TOP_VALUE
    blocks[1].PlayAlertSound = SOUND_TOP_VALUE
    blocks[2].modify(PlayAlertSound=SOUND_MID_VALUE, **STYLE_TOP_RARE)
    blocks[3].modify(PlayAlertSound=SOUND_MID_VALUE, **STYLE_TOP_RARE)
    blocks[-2].status = DEBUG
    filter_manager.extend_blocks(blocks)
    # sounds 8, 1; last block driven by ALERT_ATLAS_NORMAL_BASE_TYPE (sound 2)
    blocks = filter_manager.add_comment(708, 'Exclusive bases: Others')
    blocks[0].PlayAlertSound = SOUND_TOP_VALUE
    blocks[1].PlayAlertSound = SOUND_TOP_VALUE
    blocks[2].modify(PlayAlertSound=SOUND_MID_VALUE, **STYLE_TOP_RARE)
    blocks[3].modify(PlayAlertSound=SOUND_MID_VALUE, **STYLE_TOP_RARE)
    if settings.ALERT_ATLAS_NORMAL_BASE_TYPE == '':
        blocks[-1].status = DEBUG
    else:
        blocks[-1].modify(BaseType=settings.ALERT_ATLAS_NORMAL_BASE_TYPE, Rarity=RARITY_NORMAL,
                          PlayAlertSound=SOUND_LOW_VALUE)
    filter_manager.extend_blocks(blocks)
    # RARITY_MAGIC
    # Amulets: +1 curse / +1 charge / max-res corruptions; Belts: +1 charge
    # Gloves: curse on hit; Boots: +1 charge; Quivers: +1 arrow
    filter_manager.add_comment(709, 'Corrupted Amulets', ignored=True)
    accessories = FilterBlock(Corrupted=True, Class='"Amulets" "Belts"', Rarity=RARITY_MAGIC,
                              SetFontSize=36, SetBorderColor=COLOR_ORANGE)
    others = accessories.copy_modify(Class='"Gloves" "Boots" "Quivers"')
    filter_manager.extend_blocks([accessories, others])
    # CHANCING_BASE_TYPE
    filter_manager.add_comment(710, 'Chancing items', ignored=True)
    if settings.CHANCING_BASE_TYPE != '':
        block = FilterBlock(Corrupted=False, BaseType=settings.CHANCING_BASE_TYPE, Rarity=RARITY_NORMAL,
                            SetFontSize=42, SetTextColor=COLOR_WHITE, SetBorderColor=COLOR_LIME_LIGHT)
        filter_manager.append_block(block)
    # ALERT_UTILITY_FLASK_BASE_TYPE (sound 12) and SHOW_FLASK_MANA
    blocks = filter_manager.add_comment(711, 'FLASKS (Endgame rules)')
    if settings.ALERT_UTILITY_FLASK_BASE_TYPE != '':
        utility_flasks = blocks[-1].copy_modify(BaseType=settings.ALERT_UTILITY_FLASK_BASE_TYPE,
                                                SetFontSize=FONT_SIZE_MAX, PlayAlertSound=SOUND_LEVELING)
        filter_manager.append_block(utility_flasks)
    filter_manager.extend_blocks(blocks[:2])
    if settings.SHOW_FLASK_MANA:
        blocks[-2].BaseType = '"Eternal"'
        filter_manager.append_block(blocks[-2])
    filter_manager.add_comment(712, 'Add your own crafting rules here', ignored=True)
    # sound 8 (SOUND_TOP_VALUE)
    blocks = filter_manager.add_comment(713, '86+ Endgame crafting rules')
    for block in blocks:
        block.PlayAlertSound = SOUND_TOP_VALUE
    filter_manager.extend_blocks(blocks)
    # Hide
    filter_manager.add_comment(714, '83/84+ Endgame crafting rules', ignored=True)
    # MAGIC_JEWEL_BASE_TYPE
    blocks = filter_manager.add_comment(715, '60+ Crafting rules for 60++ trinkets')
    if settings.MAGIC_JEWEL_BASE_TYPE != '':
        blocks[0].BaseType = settings.MAGIC_JEWEL_BASE_TYPE
        filter_manager.append_block(blocks[0])
    # ALERT_NORMAL_BASE_TYPE, SSF_CRAFT_AMULETS_BASE_TYPE, SSF_CRAFT_RINGS_BASE_TYPE, SSF_CRAFT_BELTS_BASE_TYPE
    # ALERT_MAGIC_BASE_TYPE
    filter_manager.add_comment(716, 'Remaining crafting rules - add your own bases here!', ignored=True)
    if settings.ALERT_NORMAL_BASE_TYPE != '':
        normals = filter_manager.get_blocks(BLOCK_ACT_1)[0]
        normals.modify(BaseType=settings.ALERT_NORMAL_BASE_TYPE, ItemLevel=None,
                       SetFontSize=40, PlayAlertSound=SOUND_LEVELING)
        # str_n = normals.copy_modify(BaseType='"Amber Amulet" "Heavy Belt"', ItemLevel='<= 12')
        filter_manager.extend_blocks([normals])
    if settings.SSF_CRAFT_BELTS_BASE_TYPE != '':
        if not settings.SPELL:
            hide_normals = filter_manager.get_blocks(BLOCK_HIDE_REMAINING)[0]
            hide_normals.modify(Class=None, Rarity=RARITY_NORMAL, SetFontSize=FONT_SIZE_MIN)
            hide_n_leather_belt = hide_normals.copy_modify(BaseType='"Leather Belt"', ItemLevel='<= 29')
            hide_n_rustic_sash = hide_normals.copy_modify(BaseType='"Rustic Sash"', ItemLevel='>= 30')
            filter_manager.extend_blocks([hide_n_leather_belt, hide_n_rustic_sash])  # start stacking life around Act 4 (T5 life rolls from ilvl 30)
        filter_manager.append_block(FilterBlock(
            Class='Belts', BaseType=settings.SSF_CRAFT_BELTS_BASE_TYPE, Rarity=RARITY_NORMAL, ItemLevel='>= 13',
            SetTextColor=COLOR_WHITE))
    if settings.SSF_CRAFT_AMULETS_BASE_TYPE != '':
        filter_manager.append_block(FilterBlock(
            Class='Amulets', BaseType=settings.SSF_CRAFT_AMULETS_BASE_TYPE, Rarity=RARITY_NORMAL, ItemLevel='>= 13',
            SetTextColor=COLOR_WHITE))
    if settings.SSF_CRAFT_RINGS_BASE_TYPE != '':
        filter_manager.append_block(FilterBlock(
            Class='Rings', BaseType=settings.SSF_CRAFT_RINGS_BASE_TYPE, Rarity=RARITY_NORMAL, ItemLevel='>= 13',
            SetTextColor=COLOR_WHITE))
    if settings.ALERT_MAGIC_BASE_TYPE != '':
        alert_magics = filter_manager.get_blocks(BLOCK_ACT_1)[1]
        alert_magics.modify(ItemLevel=None, BaseType=None, PlayAlertSound=SOUND_LEVELING)
        if not settings.SPELL:
            alert_m_iron_ring = alert_magics.copy_modify(BaseType='"Iron Ring"', ItemLevel='<= 20')
            alert_magic_gloves = alert_magics.copy_modify(Class='"Gloves"', ItemLevel='<= 24')
            filter_manager.extend_blocks([alert_m_iron_ring, alert_magic_gloves])
        alert_magics.BaseType = settings.ALERT_MAGIC_BASE_TYPE
        filter_manager.append_block(alert_magics)
    # NEED_CHISEL
    blocks = filter_manager.add_comment(717, 'Chisel recipe items')
    if settings.NEED_CHISEL:
        for block in blocks:
            block.modify(SetFontSize=FONT_SIZE_MAX, PlayAlertSound=SOUND_MID_VALUE)
        blocks[1].Quality = '>= 12'
        blocks[2].Quality = None
        filter_manager.extend_blocks(blocks)
    # sound 8 (SOUND_TOP_VALUE)
    blocks = filter_manager.add_comment(718, 'Fishing Rod')
    blocks[0].PlayAlertSound = SOUND_TOP_VALUE
    filter_manager.extend_blocks(blocks)
    filter_manager.add_comment(719, 'SRS Crude Bow', ignored=True)
    # NEED_RGB
    blocks = filter_manager.add_comment(720, 'Chromatic recipe items ("RGB Recipe")')
    if settings.NEED_RGB:
        filter_manager.extend_blocks(blocks)
    blocks = filter_manager.add_comment(721, 'Endgame-start 4-links')
    if settings.DARKNESS:
        blocks[0].modify(ItemLevel=None, DropLevel='>= 50')
    filter_manager.extend_blocks(blocks)
    filter_manager.add_comment(722, 'Animate Weapon script - deactivated by default', ignored=True)
    if settings.AW:
        aw = FilterBlock(Class='"One Hand" "Two Hand" "Staves" "Daggers" "Thrusting" "Sceptres" "Claws"',
                         Rarity=RARITY_NORMAL,
                         SetBackgroundColor='0 0 0 255', SetTextColor='150 0 0 255', SetBorderColor='150 0 0 255',
                         SetFontSize=FONT_SIZE_MAX)
        if settings.AW_RANGE:
            aw.Class += ' "Bows" "Wands"'
        filter_manager.append_block(aw)
    # sound 8 (SOUND_TOP_VALUE)
    blocks = filter_manager.add_comment(723, 'W-soc offhand weapons')
    for block in blocks:
        block.Class = '"Wands" "Daggers" "Sceptres" "Claws" "One Hand" "Shields"'
    blocks[0].modify(SetFontSize=FONT_SIZE_MAX, PlayAlertSound=SOUND_TOP_VALUE)
    filter_manager.append_block(blocks[0])
    blocks = filter_manager.add_comment(724, 'Sacrificial Garb')
    filter_manager.extend_blocks(blocks)
    # other rules here
def modify_endgame_rare(filter_manager, show_rare_class=''):
filter_manager.add_comment(1100, 'RARE ITEMS - TRINKETS (ENDGAME)', ignored=True)
# 8
blocks = filter_manager.add_comment(1101, 'Rare trinkets 86+')
for block in blocks:
block.PlayAlertSound = SOUND_TOP_VALUE
filter_manager.extend_blocks(blocks)
# 8
blocks = filter_manager.add_comment(1102, 'Rare trinkets 84+')
for block in blocks:
block.PlayAlertSound = SOUND_TOP_VALUE
filter_manager.extend_blocks(blocks)
blocks = filter_manager.add_comment(1103, 'Breach Rings')
filter_manager.extend_blocks(blocks)
# 移除前两个 1 ITEM_LEVEL_CHAOS
blocks = filter_manager.add_comment(1104, | |
# repo: mesoscope/cellpack
## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
#
# $Header: /opt/cvs/python/packages/share1.5/mglutil/math/transformation.py,v 1.45 2007/07/24 17:30:40 vareille Exp $
#
import numpy as np
from . import rotax
# from mglutil.math.VectorModule import Vector
Vector = None  # placeholder for mglutil.math.VectorModule.Vector (import above is commented out)
class Quaternion:
    """Base quaternion class.

    The value is stored as a float real part (``self.real``) and a
    3-element float32 numpy array pure/vector part (``self.pure``).
    """

    def __init__(self, data=(1.0, np.array((0.0, 0.0, 0.0), "f"))):
        """``data`` is ``(c, (x, y, z))`` where ``c`` is the real part
        (float) and ``(x, y, z)`` is the pure part (three floats).

        Raises:
            ValueError: if ``data`` cannot be unpacked into that form.
        """
        try:
            self.real = float(data[0])
            # Copy into a fresh float32 array (also guards the mutable default).
            self.pure = np.array((data[1][0], data[1][1], data[1][2]), "f")
        except Exception:
            raise ValueError("1Arguments must be (c,(x,y,z))")
        if len(self.pure) != 3:
            raise ValueError("2Arguments must be (c,(x,y,z))")

    def __repr__(self):
        """Show as (real, (pure)); not every quaternion is a rotation,
        so the axis/angle form is left to UnitQuaternion."""
        return "Quaternion (%g (%g %g %g))" % (
            self.real,
            self.pure[0],
            self.pure[1],
            self.pure[2],
        )

    def __add__(self, other):
        """Return the component-wise sum of two quaternions."""
        return Quaternion((self.real + other.real, self.pure + other.pure))

    def __mul__(self, other):
        """Return the Hamilton product of two quaternions.

        For unit quaternions this is equivalent to concatenating rotations.
        """
        real = self.real * other.real - np.inner(self.pure, other.pure)
        # Cross product of the two pure parts, written out long-hand.
        v1, v2 = self.pure, other.pure
        cross = np.array([
            v1[1] * v2[2] - v1[2] * v2[1],
            v1[2] * v2[0] - v1[0] * v2[2],
            v1[0] * v2[1] - v1[1] * v2[0],
        ])
        pure = cross + self.real * other.pure + other.real * self.pure
        return Quaternion((real, pure))

    def conjugate(self):
        """Return the conjugate: (c, (x, y, z)) -> (c, (-x, -y, -z))."""
        return Quaternion((self.real, -self.pure))

    def magnitude(self):
        """Return the SQUARED magnitude ``real**2 + |pure|**2``.

        Note: despite the name this is ``|q|**2`` (the product of the
        quaternion with its conjugate).  ``inverse`` and ``normal``
        rely on the squared value, so the semantics are kept.
        """
        return self.real ** 2 + np.inner(self.pure, self.pure)

    def inverse(self):
        """Return the multiplicative inverse, ``conjugate(q) / |q|**2``."""
        mag2 = self.magnitude()  # hoisted: previously computed twice
        return Quaternion((self.real / mag2, -self.pure / mag2))

    def normal(self):
        """Normalise IN PLACE to unit magnitude; returns None."""
        m = np.sqrt(self.magnitude())
        self.pure = self.pure / m
        self.real = self.real / m
class UnitQuaternion(Quaternion):
"""Special subclass of Quaternions with magnitude 1.0
Can be used to represent rotations, in which case real =
cos(theta/2) and pure = sin(theta/2)*(unit direction vector)
Input can also be given in the form (x,y,z,theta), where (x,y,z)
is the rotation axis (not necessarily normalized) and theta is the
rotation angle in degrees.
"""
def __init__(self, data=(1.0, np.array((0.0, 0.0, 0.0), "f"))):
    """(real, (pure x, pure y, pure z)) or (x, y, z, theta) (theta in degrees).

    Raises:
        ValueError: if the real part is outside [-1, 1], the pure part
            is not a 3-sequence, or ``data`` has neither 2 nor 4 items.
    """
    if len(data) == 2:
        self.real = data[0]
        # np.arccos does NOT raise for out-of-range input -- it returns
        # nan with a RuntimeWarning -- so the old try/except around it
        # could never fire.  Validate the real part explicitly.
        if not -1.0 <= self.real <= 1.0:
            raise ValueError("The real part must be between -1.0 and 1.0")
        try:
            self.pure = np.array((data[1][0], data[1][1], data[1][2]), "f")
        except Exception:
            raise ValueError("The pure part must be a 3-sequence (x, y, z)")
    elif len(data) == 4:
        # Axis-angle form: theta/2 in radians, real = cos, pure = sin * axis.
        theta = np.pi * data[3] / 360.0
        self.real = np.cos(theta)
        self.pure = np.sin(theta) * np.array((data[0], data[1], data[2]), "f")
    else:
        raise ValueError("Args must be (x,y,z,theta) or (real,pure)")
    self.normal()
def normal(self):
if self.real != 1.0:
theta = np.arccos(self.real)
vector = self.pure / np.sin(theta)
vector = vector / np.sqrt(np.inner(vector, vector))
self.pure = np.sin(theta) * vector
else:
self.pure = np.zeros(3, "f")
def __repr__(self):
"""Representation of a unit quaternion is as rx,ry,rz,theta,
so we can see what it does
"""
if self.real != 1.0:
# if it is not the identity
theta = np.arccos(self.real)
angle = 360 * theta / np.pi
xyz = self.pure / np.sin(theta)
else:
# if it is the identity
angle = 0.0
xyz = self.pure
return "Unit Quaternion %7.4f %7.4f %7.4f %7.3f" % (
xyz[0],
xyz[1],
xyz[2],
angle,
)
def __mul__(self, other):
# same as Quaternion, except return another UnitQuaternion
result = Quaternion.__mul__(self, other)
return UnitQuaternion((result.real, result.pure))
def conjugate(self):
result = Quaternion.conjugate(self)
return UnitQuaternion((result.real, result.pure))
def inverse(self):
return self.conjugate()
def getAxisAndAngleDegres(self):
"""Given a quaternion, compute axis and angle."""
theta = np.arccos(self.real)
angle = 360 * theta / np.pi
xyz = self.pure / np.sin(theta)
return xyz, angle
def getRotMatrix(self, shape=(4, 4), transpose=None):
"""return the rotation matrix as a Numeric array of shape shape."""
try:
assert shape in [(3, 3), (4, 4), (9,), (16,)]
except Exception:
raise ValueError("shape must be (3,3), (4,4), (9,) or (16,)")
# get the inverse 4x4 from rotax
mtx = rotax.rotax(
np.array([0.0, 0.0, 0.0], "f"), self.pure, 2 * np.arccos(self.real)
)
# strip if necessary
if shape in ((3, 3), (9,)):
mtx = [x[:3] for x in mtx]
mtx = mtx[:3]
if not transpose:
return np.reshape(np.transpose(mtx), shape)
else:
return np.reshape(mtx, shape)
def apply(self, points):
# apply the rotational part alone to a point or list of points
# can be homogeneous coordinates or not.
pshape = np.shape(points)
homogeneous = 1
if len(pshape) == 1:
if pshape[0] == 3:
points = np.array(np.concatenate((points, np.ones(1, "f")), 1))
homogeneous = 0
elif len(pshape) == 2:
if pshape[1] == 3:
points = np.array(
np.concatenate((np.array(points), np.ones((pshape[0], 1), "f")), 1)
)
homogeneous = 0
mtx = self.getRotMatrix((4, 4), transpose=1)
newpoints = np.dot(points, mtx)
if homogeneous:
return newpoints
else:
# strip the final zero off the coordinates
if len(pshape) == 1:
return newpoints[:3]
else:
newpoints = [x[:3] for x in newpoints]
return newpoints
class Transformation(UnitQuaternion):
    """Rigid transformation: a unit-quaternion rotation plus a translation.

    The rotation lives in the UnitQuaternion base (real/pure); the
    translation is kept as a homogeneous 4-vector in self.trans.
    """

    def __init__(
        self,
        trans=np.array([0.0, 0.0, 0.0, 1.0], "f"),
        quaternion=np.array([0.0, 0.0, 0.0, 0.0], "f"),
        scale=np.array([1.0, 1.0, 1.0, 1.0], "f"),
    ):
        """trans is a 3- or 4-vector; quaternion is any form UnitQuaternion accepts.

        NOTE(review): `scale` is accepted but never stored or applied in
        this class -- confirm whether subclasses rely on the parameter.
        """
        UnitQuaternion.__init__(self, quaternion)
        # make the translation homogeneous if it isn't
        if len(trans) == 3:
            trans = list(trans)
            trans.append(1.0)
        self.trans = np.array((trans[0], trans[1], trans[2], trans[3]), "f")

    def __repr__(self):
        """Representation is of the form tx,ty,tz,qx,qy,qz,theta."""
        # check for the identity quaternion first to avoid nan from sin(0)
        if self.real != 1:
            theta = np.arccos(self.real)
            angle = 360 * theta / np.pi
            xyz = self.pure / np.sin(theta)
        else:
            angle = 0.0
            xyz = self.pure
        result = "Transformation: tx ty tz rx ry rz angle\n %g %g %g %g %g %g %g" % (
            self.trans[0],
            self.trans[1],
            self.trans[2],
            xyz[0],
            xyz[1],
            xyz[2],
            angle,
        )
        return result

    def output(self):
        """As __repr__ but without the explanation. For getting the numbers only."""
        if self.real != 1:
            theta = np.arccos(self.real)
            angle = 360 * theta / np.pi
            xyz = self.pure / np.sin(theta)
        else:
            angle = 0.0
            xyz = self.pure
        result = "%g %g %g %g %g %g %g" % (
            self.trans[0],
            self.trans[1],
            self.trans[2],
            xyz[0],
            xyz[1],
            xyz[2],
            angle,
        )
        return result

    def __mul__(self, other):
        """Concatenate two transformations: self*other (other performed first)."""
        # combined rotation is the quaternion product Rself*Rother
        v1 = self.pure
        v2 = other.pure
        real = self.real * other.real - np.inner(v1, v2)
        cofactor1 = v1[1] * v2[2] - v1[2] * v2[1]
        cofactor2 = v1[2] * v2[0] - v1[0] * v2[2]
        cofactor3 = v1[0] * v2[1] - v1[1] * v2[0]
        pure = (
            np.array([cofactor1, cofactor2, cofactor3])
            + self.real * other.pure
            + other.real * self.pure
        )
        # combined translation: rotate other's translation, then add ours
        trans = self.getQuaternion().apply(other.trans) + self.trans
        trans[3] = 1.0  # keep the homogeneous coordinate exact
        return Transformation(trans=trans, quaternion=(real, pure))

    def reset(self):
        """Reset to the identity transformation (no rotation, no translation)."""
        self.real = 1.0
        self.pure = np.array((0.0, 0.0, 0.0))
        self.trans = np.array([0.0, 0.0, 0.0, 1.0])

    def getQuaternion(self):
        """Return the rotational part as a UnitQuaternion."""
        return UnitQuaternion((self.real, self.pure))

    def getTranslation(self, shape=(4,)):
        """Return the translation vector with shape (3,) or (4,).

        (default is (4,)); raises ValueError for any other shape.
        """
        if shape == (3,):
            return self.trans[:3]
        elif shape == (4,):
            return self.trans
        else:
            raise ValueError("Shape must be (3,) or (4,)")

    def getMatrix(self, shape=(4, 4), transpose=None):
        """Return the full transformation matrix (rotation + translation)."""
        mtx = self.getRotMatrix((4, 4), transpose=transpose)  # from UnitQuaternion
        mtx[3] = self.getTranslation()
        if transpose:
            return np.reshape(mtx, shape)
        else:
            return np.reshape(np.transpose(mtx), shape)

    def getDejaVuMatrix(self):
        """Return a 4x4 matrix usable as an instance matrix
        (translation moved into the last column)."""
        mtx = self.getRotMatrix((4, 4), transpose=None)  # from UnitQuaternion
        mtx[3] = self.getTranslation()
        # move the translation from the last row to the last column
        mtx[:3, 3] = mtx[3, :3]
        mtx[3, :3] = [0, 0, 0]
        return mtx

    def apply(self, points):
        """Apply the entire transformation to a point or a list of points."""
        pshape = np.shape(points)
        homogeneous = 1
        if len(pshape) == 1:
            if pshape[0] == 3:
                # BUGFIX: 1-D arrays must be concatenated along axis 0; the
                # original passed axis=1, which numpy rejects for 1-D input.
                points = np.array(np.concatenate((points, np.ones(1, "f"))))
                homogeneous = 0
        elif len(pshape) == 2:
            if pshape[1] == 3:
                points = np.array(
                    np.concatenate((np.array(points), np.ones((pshape[0], 1), "f")), 1)
                )
                homogeneous = 0
        mtx = self.getMatrix((4, 4), transpose=1)
        newpoints = np.dot(points, mtx)
        if homogeneous:
            return newpoints
        else:
            # strip the homogeneous coordinate back off
            if len(pshape) == 1:
                return newpoints[:3]
            else:
                newpoints = [x[:3] for x in newpoints]
                return newpoints
# (stray "| |" chunk-separator artifact commented out so the file parses)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class CatalogOperations(object):
"""CatalogOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
    def __init__(self, client, config, serializer, deserializer):
        # Service client used to build and send the HTTP requests.
        self._client = client
        # msrest serializer/deserializer pair for request and response bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        # Shared client configuration (DNS suffix, api-version, language, ...).
        self.config = config
    def create_secret(
            self, account_name, database_name, secret_name, password, uri=None, custom_headers=None, raw=False, **operation_config):
        """Creates the specified secret for use with external data sources in
        the specified database.

        :param account_name: The Azure Data Lake Analytics account to execute
         catalog operations on.
        :type account_name: str
        :param database_name: The name of the database in which to create the
         secret.
        :type database_name: str
        :param secret_name: The name of the secret.
        :type secret_name: str
        :param password: The password for the secret to pass in.
        :type password: str
        :param uri: the URI identifier for the secret in the format
         <hostname>:<port>
        :type uri: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`USqlSecret
         <azure.mgmt.datalake.analytics.catalog.models.USqlSecret>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        parameters = models.DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters(password=password, uri=uri)
        # Construct URL
        url = '/catalog/usql/databases/{databaseName}/secrets/{secretName}'
        path_format_arguments = {
            'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
            'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'secretName': self._serialize.url("secret_name", secret_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # correlation id so the request can be traced service-side
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters')
        # Construct and send request (PUT = create)
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('USqlSecret', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def update_secret(
            self, account_name, database_name, secret_name, password, uri=None, custom_headers=None, raw=False, **operation_config):
        """Modifies the specified secret for use with external data sources in
        the specified database.

        :param account_name: The Azure Data Lake Analytics account to execute
         catalog operations on.
        :type account_name: str
        :param database_name: The name of the database containing the secret.
        :type database_name: str
        :param secret_name: The name of the secret.
        :type secret_name: str
        :param password: The new password for the secret to pass in.
        :type password: str
        :param uri: the URI identifier for the secret in the format
         <hostname>:<port>
        :type uri: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`USqlSecret
         <azure.mgmt.datalake.analytics.catalog.models.USqlSecret>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        parameters = models.DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters(password=password, uri=uri)
        # Construct URL
        url = '/catalog/usql/databases/{databaseName}/secrets/{secretName}'
        path_format_arguments = {
            'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
            'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'secretName': self._serialize.url("secret_name", secret_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # correlation id so the request can be traced service-side
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters')
        # Construct and send request (PATCH = modify existing)
        request = self._client.patch(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('USqlSecret', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def get_secret(
            self, account_name, database_name, secret_name, custom_headers=None, raw=False, **operation_config):
        """Gets the specified secret in the specified database.

        :param account_name: The Azure Data Lake Analytics account to execute
         catalog operations on.
        :type account_name: str
        :param database_name: The name of the database containing the secret.
        :type database_name: str
        :param secret_name: The name of the secret to get
        :type secret_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`USqlSecret
         <azure.mgmt.datalake.analytics.catalog.models.USqlSecret>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/catalog/usql/databases/{databaseName}/secrets/{secretName}'
        path_format_arguments = {
            'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
            'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'secretName': self._serialize.url("secret_name", secret_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # correlation id so the request can be traced service-side
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request (GET has no body)
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('USqlSecret', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def delete_secret(
            self, account_name, database_name, secret_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified secret in the specified database.

        :param account_name: The Azure Data Lake Analytics account to execute
         catalog operations on.
        :type account_name: str
        :param database_name: The name of the database containing the secret.
        :type database_name: str
        :param secret_name: The name of the secret to delete
        :type secret_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/catalog/usql/databases/{databaseName}/secrets/{secretName}'
        path_format_arguments = {
            'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
            'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True),
            'databaseName': self._serialize.url("database_name", database_name, 'str'),
            'secretName': self._serialize.url("secret_name", secret_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # correlation id so the request can be traced service-side
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # delete has no response body: returns None unless raw was requested
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete_all_secrets(
            self, account_name, database_name, custom_headers=None, raw=False, **operation_config):
        """Deletes all secrets in the specified database.

        :param account_name: The Azure Data Lake Analytics account to execute
         catalog operations on.
        :type account_name: str
        :param database_name: The name of the database containing the secret.
        :type database_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL (note: the whole secrets collection, no secretName)
        url = '/catalog/usql/databases/{databaseName}/secrets'
        path_format_arguments = {
            'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
            'adlaCatalogDnsSuffix': self._serialize.url("self.config.adla_catalog_dns_suffix", self.config.adla_catalog_dns_suffix, 'str', skip_quote=True),
            'databaseName': self._serialize.url("database_name", database_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # correlation id so the request can be traced service-side
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # delete has no response body: returns None unless raw was requested
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
def get_external_data_source(
self, account_name, database_name, external_data_source_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves the specified external data source from the Data Lake
Analytics catalog.
:param account_name: The Azure Data Lake Analytics account to execute
catalog operations on.
:type account_name: str
:param database_name: The name of the database containing the
external data source.
:type database_name: str
:param external_data_source_name: The name of the external data
source.
:type external_data_source_name: str
:param dict custom_headers: headers that will be | |
"_histo_Y.png", bins=10)
self.histo(pz, basename + ingr.name + "_histo_Z.png", bins=10)
# do it for all ingredient cumulate?
def occurence_distribution(self, ingr):
basename = self.env.basename
occ = self.env.occurences[ingr.name]
self.simpleplot(range(len(occ)), occ, basename + ingr.name + "_occurence.png")
def correlation(self, ingr):
basename = self.env.basename
posxyz = numpy.array(self.env.ingrpositions[ingr.name]).transpose()
g_average, radii, x, y, z = self.PairCorrelationFunction_3D(
posxyz, 1000, 900, 100
)
self.plot(g_average, radii, basename + ingr.name + "_corr.png")
def PairCorrelationFunction_3D(self, data, S, rMax, dr):
"""Compute the three-dimensional pair correlation function for a set of
spherical particles contained in a cube with side length S. This simple
function finds reference particles such that a sphere of radius rMax drawn
around the particle will fit entirely within the cube, eliminating the need
to compensate for edge effects. If no such particles exist, an error is
returned. Try a smaller rMax...or write some code to handle edge effects! ;)
Arguments:
x an array of x positions of centers of particles
y an array of y positions of centers of particles
z an array of z positions of centers of particles
S length of each side of the cube in space
rMax outer diameter of largest spherical shell
dr increment for increasing radius of spherical shell
Returns a tuple: (g, radii, interior_x, interior_y, interior_z)
g(r) a numpy array containing the correlation function g(r)
radii a numpy array containing the radii of the
spherical shells used to compute g(r)
interior_x x coordinates of reference particles
interior_y y coordinates of reference particles
interior_z z coordinates of reference particles
"""
from numpy import zeros, sqrt, where, pi, average, arange, histogram
x = data[0]
y = data[1]
z = data[2]
# Find particles which are close enough to the cube center that a sphere of radius
# rMax will not cross any face of the cube
bools1 = x > rMax
bools2 = x < (S - rMax)
bools3 = y > rMax
bools4 = y < (S - rMax)
bools5 = z > rMax
bools6 = z < (S - rMax)
(interior_indices,) = where(bools1 * bools2 * bools3 * bools4 * bools5 * bools6)
num_interior_particles = len(interior_indices)
if num_interior_particles < 1:
raise RuntimeError(
"No particles found for which a sphere of radius rMax\
will lie entirely within a cube of side length S. Decrease rMax\
or increase the size of the cube."
)
edges = arange(0.0, rMax + 1.1 * dr, dr)
num_increments = len(edges) - 1
g = zeros([num_interior_particles, num_increments])
radii = zeros(num_increments)
numberDensity = len(x) / S ** 3
# Compute pairwise correlation for each interior particle
for p in range(num_interior_particles):
index = interior_indices[p]
d = sqrt((x[index] - x) ** 2 + (y[index] - y) ** 2 + (z[index] - z) ** 2)
d[index] = 2 * rMax
(result, bins) = histogram(d, bins=edges, normed=False)
g[p, :] = result / numberDensity
# Average g(r) for all interior particles and compute radii
g_average = zeros(num_increments)
for i in range(num_increments):
radii[i] = (edges[i] + edges[i + 1]) / 2.0
rOuter = edges[i + 1]
rInner = edges[i]
g_average[i] = average(g[:, i]) / (
4.0 / 3.0 * pi * (rOuter ** 3 - rInner ** 3)
)
return (
g_average,
radii,
x[interior_indices],
y[interior_indices],
z[interior_indices],
)
# Number of particles in shell/total number of particles/volume of shell/number density
# shell volume = 4/3*pi(r_outer**3-r_inner**3)
def PairCorrelationFunction_2D(self, x, y, S, rMax, dr):
"""Compute the two-dimensional pair correlation function, also known
as the radial distribution function, for a set of circular particles
contained in a square region of a plane. This simple function finds
reference particles such that a circle of radius rMax drawn around the
particle will fit entirely within the square, eliminating the need to
compensate for edge effects. If no such particles exist, an error is
returned. Try a smaller rMax...or write some code to handle edge effects! ;)
Arguments:
x an array of x positions of centers of particles
y an array of y positions of centers of particles
S length of each side of the square region of the plane
rMax outer diameter of largest annulus
dr increment for increasing radius of annulus
Returns a tuple: (g, radii, interior_x, interior_y)
g(r) a numpy array containing the correlation function g(r)
radii a numpy array containing the radii of the
annuli used to compute g(r)
interior_x x coordinates of reference particles
interior_y y coordinates of reference particles
"""
from numpy import zeros, sqrt, where, pi, average, arange, histogram
# Number of particles in ring/area of ring/number of reference particles/number density
# area of ring = pi*(r_outer**2 - r_inner**2)
# Find particles which are close enough to the box center that a circle of radius
# rMax will not cross any edge of the box
bools1 = x > 1.1 * rMax
bools2 = x < (S - 1.1 * rMax)
bools3 = y > rMax * 1.1
bools4 = y < (S - rMax * 1.1)
(interior_indices,) = where(bools1 * bools2 * bools3 * bools4)
num_interior_particles = len(interior_indices)
if num_interior_particles < 1:
raise RuntimeError(
"No particles found for which a circle of radius rMax\
will lie entirely within a square of side length S. Decrease rMax\
or increase the size of the square."
)
edges = arange(0.0, rMax + 1.1 * dr, dr)
num_increments = len(edges) - 1
g = zeros([num_interior_particles, num_increments])
radii = zeros(num_increments)
numberDensity = len(x) / S ** 2
# Compute pairwise correlation for each interior particle
for p in range(num_interior_particles):
index = interior_indices[p]
d = sqrt((x[index] - x) ** 2 + (y[index] - y) ** 2)
d[index] = 2 * rMax
(result, bins) = histogram(d, bins=edges, normed=False)
g[p, :] = result / numberDensity
# Average g(r) for all interior particles and compute radii
g_average = zeros(num_increments)
for i in range(num_increments):
radii[i] = (edges[i] + edges[i + 1]) / 2.0
rOuter = edges[i + 1]
rInner = edges[i]
# divide by the area of sphere cut by sqyare
g_average[i] = average(g[:, i]) / (pi * (rOuter ** 2 - rInner ** 2))
return (g_average, radii, interior_indices)
def histo(self, distances, filename, bins=100, size=1000.0):
pylab.clf()
numpy.mean(distances), numpy.std(distances)
# the histogram of the data
# b=numpy.arange(distances.min(), distances.max(), 2)
# n, bins, patches = pyplot.hist(distances, bins=bins, normed=1, facecolor='green')#, alpha=0.75)
y, binEdges = numpy.histogram(distances, bins=bins)
bincenters = 0.5 * (binEdges[1:] + binEdges[:-1])
menStd = numpy.sqrt(y) # or sigma?
width = bins
pyplot.bar(bincenters, y, width=width, color="r", yerr=menStd)
# add a 'best fit' line?
# y = mlab.normpdf( bins, mu, sigma)#should be the excepted distribution
# l = pyplot.plot(bins, y, 'r--', linewidth=3)
pyplot.savefig(filename)
# pylab.close() # closes the current figure
def plot(self, rdf, radii, file_name):
pylab.clf()
matplotlib.rc("font", size=14)
matplotlib.rc("figure", figsize=(5, 4))
# pylab.clf()
pylab.plot(radii, rdf, linewidth=3)
pylab.xlabel(r"distance $r$ in $\AA$")
pylab.ylabel(r"radial distribution function $g(r)$")
pylab.savefig(file_name)
def simpleplot(self, X, Y, filenameme, w=3):
pylab.clf()
pylab.plot(X, Y, linewidth=w)
pylab.savefig(filenameme)
def build_grid(
self,
bb,
forceBuild=True,
):
t1 = time()
gridFileIn = None
gridFileOut = None
self.env.buildGrid(
boundingBox=bb,
gridFileIn=gridFileIn,
rebuild=forceBuild,
gridFileOut=gridFileOut,
previousFill=False,
)
t2 = time()
gridTime = t2 - t1
print("time to Build Grid", gridTime)
def pack(
self, seed=20, vTestid=3, vAnalysis=0, fbox_bb=None, show_plotly_plot=True
):
if show_plotly_plot:
self.plotly.update_title(self.env.placeMethod)
t1 = time()
self.env.pack_grid(
seedNum=seed, vTestid=vTestid, vAnalysis=vAnalysis, fbox=fbox_bb
)
t2 = time()
print("time to run pack_grid", self.env.placeMethod, t2 - t1)
print("num placed", len(self.env.molecules))
if show_plotly_plot:
self.plotly.update_title(
f"{self.env.placeMethod} took {str(round(t2 - t1, 2))}s, packed {len(self.env.molecules)}"
)
self.plotly.make_grid_heatmap(self.env)
self.plotly.add_ingredient_positions(self.env)
self.plotly.show()
def calcDistanceMatrixFastEuclidean2(self, nDimPoints):
nDimPoints = numpy.array(nDimPoints)
n, m = nDimPoints.shape
delta = numpy.zeros((n, n), "d")
for d in range(m):
data = nDimPoints[:, d]
delta += (data - data[:, numpy.newaxis]) ** 2
return numpy.sqrt(delta)
def flush(self):
import gc
import pprint
for i in range(2):
print("Collecting %d ..." % i)
n = gc.collect()
print("Unreachable objects:", n)
print("Remaining Garbage:")
pprint.pprint(gc.garbage)
del gc.garbage[:]
print
def merge(self, d1, d2, merge=lambda x, y: y):
result = dict(d1)
for k, v in d2.items():
if k in result:
result[k].extend(v)
else:
result[k] = v
return result
def plotNResult2D(self, n, | |
# <gh_stars>0  (stray scaffold marker commented out so the file parses)
from copy import deepcopy
from ray import tune
import numpy as np
import os
from softlearning.misc.utils import get_git_rev, deep_update
# Sentinel key used in the per-universe/domain/task lookup tables below
# to mark the fallback value when a more specific entry is absent.
DEFAULT_KEY = "__DEFAULT_KEY__"
# M = number of hidden units per layer
# N = number of hidden layers
# M = 512
# N = 2
M = 256
N = 2
# Use the reparameterization trick in the Gaussian policy.
REPARAMETERIZE = True
# Number of coupling layers (for flow-based policies, if used).
NUM_COUPLING_LAYERS = 2
# Base kwargs for the Gaussian policy; hidden layout is N layers of M units.
GAUSSIAN_POLICY_PARAMS_BASE = {
    'type': 'GaussianPolicy',
    'kwargs': {
        'hidden_layer_sizes': (M, ) * N,
        'squash': True,  # tanh-squash actions into the valid range
        'observation_keys': None,
        'goal_keys': None,
        'observation_preprocessors_params': {}
    }
}
# Algorithm kwargs shared by every algorithm variant; per-algorithm
# overrides live in ALGORITHM_PARAMS_ADDITIONAL.
ALGORITHM_PARAMS_BASE = {
    'kwargs': {
        'epoch_length': 1000,
        'train_every_n_steps': 1,
        'n_train_repeat': 1,
        'eval_n_episodes': 3,
        'eval_deterministic': False,
        'discount': 0.99,
        'tau': 5e-3,  # target-network soft-update coefficient
        'reward_scale': 1.0,
        'save_training_video_frequency': 5,
        'eval_render_kwargs': {
            'width': 480,
            'height': 480,
            'mode': 'rgb_array',
        },
    }
}
# Per-algorithm parameter overrides, deep-merged on top of
# ALGORITHM_PARAMS_BASE. tune.grid_search entries fan out into one
# experiment variant per listed value.
ALGORITHM_PARAMS_ADDITIONAL = {
    'SAC': {
        'type': 'SAC',
        'kwargs': {
            'reparameterize': REPARAMETERIZE,
            'lr': 3e-4,
            'target_update_interval': 1,
            'tau': 5e-3,
            'n_initial_exploration_steps': int(1e3),
            'target_entropy': 'auto',
            'action_prior': 'uniform',
            'verbose': True,
            'eval_n_episodes': 3,
            'ext_reward_coeff': 1,
            # sweep over RND intrinsic-reward weight on/off
            'rnd_int_rew_coeff': tune.grid_search([0, 1]),
            'normalize_ext_reward_gamma': 0.99,
        },
        # Random Network Distillation network architecture
        'rnd_params': {
            'convnet_params': {
                'conv_filters': (16, 32, 64),
                'conv_kernel_sizes': (3, 3, 3),
                'conv_strides': (2, 2, 2),
                'normalization_type': None,
            },
            'fc_params': {
                'hidden_layer_sizes': (256, 256),
                'output_size': 512,
            },
        }
    },
    'MultiSAC': {
        'type': 'MultiSAC',
        'kwargs': {
            'reparameterize': REPARAMETERIZE,
            'lr': 3e-4,
            'target_update_interval': 1,
            'tau': 5e-3,
            'target_entropy': 'auto',
            # 'n_initial_exploration_steps': int(1e4),
            'n_initial_exploration_steps': int(5e3),
            'action_prior': 'uniform',
            'her_iters': tune.grid_search([0]),
            'rnd_int_rew_coeffs': [0, 0],  # [1, 1],
            'ext_reward_coeffs': [1, 1],  # 0 corresponds to reset policy
            'normalize_ext_reward_gamma': 0.99,
            'share_pool': False,
        },
        'rnd_params': {
            'convnet_params': {
                'conv_filters': (16, 32, 64),
                'conv_kernel_sizes': (3, 3, 3),
                'conv_strides': (2, 2, 2),
                'normalization_type': None,
            },
            'fc_params': {
                'hidden_layer_sizes': (256, 256),
                'output_size': 512,
            },
        },
    },
    'HERQLearning': {
        'type': 'HERQLearning',
        'kwargs': {
            'reparameterize': REPARAMETERIZE,
            'lr': 3e-4,
            'target_update_interval': 1,
            'tau': 5e-3,
            'n_initial_exploration_steps': int(5e3),
            'target_entropy': 'auto',
            'action_prior': 'uniform',
            'ext_reward_coeff': 1,
            'eval_n_episodes': 3,
            'rnd_int_rew_coeff': tune.grid_search([0]),
            # 'normalize_ext_reward_gamma': 0.99,
            'verbose': True,
            'replace_original_reward': tune.grid_search([True, False]),  # True,
        },
    },
}
# Max episode length per universe -> domain -> task; DEFAULT_KEY entries
# are the fallback when a task is not listed explicitly.
MAX_PATH_LENGTH_PER_UNIVERSE_DOMAIN_TASK = {
    DEFAULT_KEY: 100,
    'gym': {
        DEFAULT_KEY: 100,
        'Point2D': {
            DEFAULT_KEY: 200,
        },
        'Pusher2D': {
            DEFAULT_KEY: 100,
            'Simple-v0': 150,
            'Test-v0': 150,
        },
        'MiniGrid': {
            DEFAULT_KEY: 50,
        },
        'DClaw': {
            DEFAULT_KEY: 50,
            'TurnFixed-v0': 50,
            # 'TurnResetFree-v0': 100,
            'TurnResetFree-v0': 50,
            'TurnResetFreeSwapGoal-v0': tune.grid_search([100]),
            'TurnResetFreeRandomGoal-v0': 100,
            'TurnFreeValve3Fixed-v0': tune.grid_search([50]),
            # 'TurnFreeValve3RandomReset-v0': 50,
            'TurnFreeValve3ResetFree-v0': tune.grid_search([100]),
            'TurnFreeValve3ResetFreeSwapGoal-v0': tune.grid_search([50]),
            'TurnFreeValve3ResetFreeSwapGoalEval-v0': tune.grid_search([50]),
            'TurnFreeValve3ResetFreeComposedGoals-v0': tune.grid_search([150]),
            # Translating Tasks
            'TranslatePuckFixed-v0': 50,
            'TranslateMultiPuckFixed-v0': 100,
            'TranslatePuckResetFree-v0': 50,
            # Lifting Tasks
            'LiftDDFixed-v0': tune.grid_search([50]),
            'LiftDDResetFree-v0': tune.grid_search([50]),
            # Flipping Tasks
            'FlipEraserFixed-v0': tune.grid_search([50]),
            'FlipEraserResetFree-v0': tune.grid_search([50]),
            'FlipEraserResetFreeSwapGoal-v0': tune.grid_search([50]),
            # Sliding Tasks
            'SlideBeadsFixed-v0': tune.grid_search([25]),
            'SlideBeadsResetFree-v0': tune.grid_search([25]),
            'SlideBeadsResetFreeEval-v0': tune.grid_search([25]),
        },
    },
}
# Training epochs per universe -> domain -> task, same fallback scheme
# as MAX_PATH_LENGTH_PER_UNIVERSE_DOMAIN_TASK above.
NUM_EPOCHS_PER_UNIVERSE_DOMAIN_TASK = {
    DEFAULT_KEY: 200,
    'gym': {
        DEFAULT_KEY: 200,
        'Point2D': {
            DEFAULT_KEY: int(300),
        },
        'Pusher2D': {
            DEFAULT_KEY: int(100),
        },
        'MiniGrid': {
            DEFAULT_KEY: 100,
        },
        'DClaw': {
            DEFAULT_KEY: int(250),
            'TurnFreeValve3Fixed-v0': 750,
            'TranslateMultiPuckFixed-v0': 500,
        },
    },
}
ENVIRONMENT_PARAMS_PER_UNIVERSE_DOMAIN_TASK_STATE = {
'gym': {
'Point2D': {
# === Point Mass ===
'Fixed-v0': {
# 'boundary_distance': tune.grid_search([8, 16]),
# 'action_scale': tune.grid_search([0.5, 0.25]),
'action_scale': 0.5,
'images_are_rgb': True,
'init_pos_range': None, # Random reset
'target_pos_range': None, # Random target
'render_onscreen': False,
# 'reward_type': tune.grid_search(['dense', 'sparse']),
'reward_type': tune.grid_search(['sparse']),
'observation_keys': ('state_achieved_goal', 'state_desired_goal'),
# 'goal_keys': ('state_desired_goal', ),
},
'SingleWall-v0': {
# 'boundary_distance': tune.grid_search([4, 8]),
'action_scale': tune.grid_search([1.0, 0.5]),
'images_are_rgb': True,
'init_pos_range': None, # Random reset
'target_pos_range': None, # Random target
'render_onscreen': False,
'reward_type': tune.grid_search(['dense', 'sparse']),
'observation_keys': ('state_observation', 'state_desired_goal'),
# 'goal_keys': ('state_desired_goal', ),
},
'BoxWall-v1': {
'action_scale': tune.grid_search([0.5]),
'images_are_rgb': True,
'reward_type': tune.grid_search(['sparse']),
'init_pos_range': ((-3, -3), (-3, -3)),
# 'init_pos_range': None, # Random reset
'target_pos_range': ((3, 3), (3, 3)),
'render_onscreen': False,
'observation_keys': ('state_achieved_goal', 'state_desired_goal'),
},
'Maze-v0': {
'action_scale': tune.grid_search([0.5]),
'images_are_rgb': True,
'reward_type': tune.grid_search(['sparse']),
'render_onscreen': False,
'observation_keys': ('state_achieved_goal', 'state_desired_goal'),
'use_count_reward': tune.grid_search([True, False]),
'n_bins': 10,
# === EASY ===
# 'wall_shape': 'easy-maze',
# 'init_pos_range': ((-2.5, -3), (-2.5, -3)),
# 'target_pos_range': ((2.5, -3), (2.5, -3)),
# === MEDIUM ===
'wall_shape': 'medium-maze',
'init_pos_range': ((-3, -3), (-3, -3)),
'target_pos_range': ((3, 3), (3, 3)),
# === HARD ===
# 'wall_shape': 'hard-maze',
# 'init_pos_range': ((-3, -3), (-3, -3)),
# 'target_pos_range': ((-0.5, 1.25), (-0.5, 1.25)),
},
# 'Fixed-v1': {
# 'ball_radius': 0.5,
# 'target_radius': 0.5,
# 'boundary_distance': 4,
# 'images_are_rgb': True,
# 'init_pos_range': None,
# 'target_pos_range': None,
# 'render_onscreen': False,
# 'reward_type': 'sparse',
# 'observation_keys': ('state_observation', ),
# 'goal_keys': ('state_desired_goal', ),
# },
},
'Pusher2D': {
'Simple-v0': {
'init_qpos_range': ((0, 0, 0), (0, 0, 0)),
'init_object_pos_range': ((1, 0), (1, 0)),
'target_pos_range': ((2, 2), (2, 2)),
'reset_gripper': True,
'reset_object': True,
'observation_keys': (
# 'observation',
'gripper_qpos',
'gripper_qvel',
'object_pos',
'object_vel',
'target_pos',
),
},
'Test-v0': {
'do_reset': True,
'multi_reset': False,
'multi_reset_block': False,
'reset_block': True,
'reset_gripper': True,
}
},
'DClaw': {
# === Fixed Screw ===
'TurnFixed-v0': {
'reward_keys_and_weights': {
# 'object_to_target_angle_distance_reward': 1,
'sparse_reward': 1,
},
'init_pos_range': (0, 0),
'target_pos_range': (np.pi, np.pi),
'observation_keys': (
'object_angle_cos',
'object_angle_sin',
'<KEY>',
'last_action'
),
},
'PoseStatic-v0': {},
'PoseDynamic-v0': {},
'TurnRandom-v0': {},
'TurnResetFree-v0': {
'reward_keys_and_weights': {
'object_to_target_angle_distance_reward': 1,
},
'reset_fingers': True,
'init_pos_range': (0, 0),
'target_pos_range': (np.pi, np.pi),
},
'TurnResetFreeSwapGoal-v0': {
'reward_keys': (
'object_to_target_angle_dist_cost',
),
'reset_fingers': True,
},
'TurnResetFreeRandomGoal-v0': {
'reward_keys': (
'object_to_target_angle_dist_cost',
),
'reset_fingers': True,
},
'TurnRandomDynamics-v0': {},
'TurnFreeValve3Fixed-v0': {
'reward_keys_and_weights': {
# 'object_to_target_position_distance_reward': tune.grid_search([2]),
# 'object_to_target_orientation_distance_reward': 1,
'sparse_reward': 1,
},
'observation_keys': (
'<KEY>',
'last_action',
'object_xy_position',
'object_z_orientation_cos',
'object_z_orientation_sin',
),
'init_qpos_range': ((0, 0, 0, 0, 0, 0), ) * 2,
'target_qpos_range': ((0, 0, 0, 0, 0, np.pi), ) * 2,
# 'target_qpos_range': [
# (0.01, 0.01, 0, 0, 0, -np.pi / 2),
# (-0.01, -0.01, 0, 0, 0, np.pi / 2)
# ],
# 'init_qpos_range': (
# (-0.08, -0.08, 0, 0, 0, -np.pi),
# (0.08, 0.08, 0, 0, 0, np.pi)
# ),
},
'TurnFreeValve3ResetFree-v0': {
'observation_keys': (
'<KEY>',
'last_action',
'object_xy_position',
'object_z_orientation_cos',
'object_z_orientation_sin',
),
'reward_keys_and_weights': {
'object_to_target_position_distance_reward': 2,
'object_to_target_orientation_distance_reward': 1,
},
'reset_fingers': True,
'reset_frequency': 0,
'target_qpos_range': [
(0.01, 0.01, 0, 0, 0, -np.pi / 2),
(-0.01, -0.01, 0, 0, 0, np.pi / 2)
],
'init_qpos_range': [
(0, 0, 0, 0, 0, 0)
],
# === BELOW IS FOR SAVING INTO THE REPLAY POOL. ===
# MAKE SURE TO SET `no_pixel_information = True` below in order
# to remove the pixels from the policy inputs/Q inputs.
# 'pixel_wrapper_kwargs': {
# 'observation_key': 'pixels',
# 'pixels_only': False,
# 'render_kwargs': {
# 'width': 32,
# 'height': 32,
# },
# },
# 'camera_settings': {
# 'azimuth': 180,
# 'distance': 0.38,
# 'elevation': -36,
# 'lookat': (0.04, 0.008, 0.025),
# },
},
'TurnFreeValve3RandomReset-v0': {
'reward_keys': (
'object_to_target_position_distance_cost',
'object_to_target_orientation_distance_cost',
),
'initial_distribution_path': '',
'reset_from_corners': True,
},
'TurnFreeValve3ResetFreeRandomGoal-v0': {
'observation_keys': (
'<KEY>',
'object_position',
'object_orientation_cos',
'object_orientation_sin',
'last_action',
'target_orientation',
'object_to_target_relative_position',
),
'reward_keys': (
'object_to_target_position_distance_cost',
'object_to_target_orientation_distance_cost',
),
'reset_fingers': True,
},
'TurnFreeValve3ResetFreeSwapGoal-v0': {
'reward_keys_and_weights': {
'object_to_target_position_distance_reward': tune.grid_search([1, 2]),
'object_to_target_orientation_distance_reward': 1,
# 'object_to_target_position_distance_reward': tune.grid_search([1]),
# 'object_to_target_orientation_distance_reward': 0,
},
'reset_fingers': True,
'observation_keys': (
'<KEY>',
'last_action',
'object_xy_position',
'object_z_orientation_cos',
'object_z_orientation_sin',
'target_xy_position',
'target_z_orientation_cos',
'target_z_orientation_sin',
),
'goals': tune.grid_search([
[(0, 0, 0, 0, 0, np.pi / 2), (-0.05, -0.06, 0, 0, 0, 0)],
# [(0.05, 0.06, 0, 0, 0, 0), (-0.05, -0.06, 0, 0, 0, 0)],
# [(0, 0, 0, 0, 0, 0), (-0.05, -0.06, 0, 0, 0, 0)],
]),
},
'TurnFreeValve3ResetFreeSwapGoalEval-v0': {
'reward_keys_and_weights': {
# 'object_to_target_position_distance_reward': tune.grid_search([1, 2]),
'object_to_target_position_distance_reward': tune.grid_search([2]),
'object_to_target_orientation_distance_reward': 1,
},
'observation_keys': (
'<KEY>',
'last_action',
'object_xy_position',
'object_z_orientation_cos',
'object_z_orientation_sin',
'target_z_orientation_cos',
'target_z_orientation_sin',
'target_xy_position',
),
# 'goals': tune.grid_search([
# [(0, 0, 0, 0, 0, np.pi / 2), (-0.05, -0.06, 0, 0, 0, 0)],
# [(0.05, 0.06, 0, 0, 0, 0), (-0.05, -0.06, 0, 0, 0, 0)],
# [(0, 0, 0, 0, 0, 0), (-0.05, -0.06, 0, 0, 0, 0)],
# ]),
},
'TurnFreeValve3ResetFreeCurriculum-v0': {
'reward_keys': (
'object_to_target_position_distance_cost',
'object_to_target_orientation_distance_cost',
),
'reset_fingers': False,
},
'XYTurnValve3Fixed-v0': {
'reward_keys': (
'object_to_target_position_distance_cost',
'object_to_target_orientation_distance_cost',
'eef_to_object_xy_distance_cost',
),
},
'XYTurnValve3RandomReset-v0': {
'reward_keys': (
'object_to_target_position_distance_cost',
'object_to_target_orientation_distance_cost',
'eef_to_object_xy_distance_cost',
),
'num_goals': 1,
},
'XYTurnValve3Random-v0': {
'reward_keys': (
'object_to_target_position_distance_cost',
'object_to_target_orientation_distance_cost',
'eef_to_object_xy_distance_cost',
),
},
'XYTurnValve3ResetFree-v0': {
'reward_keys': (
'object_to_target_position_distance_cost',
'object_to_target_orientation_distance_cost',
'eef_to_object_xy_distance_cost',
),
'reset_fingers': tune.grid_search([True, False]),
'reset_arm': False,
},
# Lifting Tasks
'LiftDDFixed-v0': {
'reward_keys_and_weights': {
'object_to_target_z_position_distance_reward': 10,
'object_to_target_xy_position_distance_reward': 0,
'object_to_target_orientation_distance_reward': 0, #5,
},
'init_qpos_range': (
(-0.05, -0.05, 0.041, -np.pi, -np.pi, -np.pi),
(0.05, 0.05, 0.041, np.pi, np.pi, np.pi)
),
'target_qpos_range': (
(-0.05, -0.05, 0, 0, 0, 0),
(0.05, 0.05, 0, 0, 0, 0)
),
'use_bowl_arena': False,
},
'LiftDDResetFree-v0': {
'reward_keys_and_weights': {
'object_to_target_z_position_distance_reward': 0,
'object_to_target_xy_position_distance_reward': 1,
'object_to_target_orientation_distance_reward': 0,
},
'init_qpos_range': (
(0, 0, 0.041, -np.pi, -np.pi, -np.pi),
(0, 0, 0.041, np.pi, np.pi, np.pi),
),
'target_qpos_range': (
(-0.05, -0.05, 0, 0, 0, 0),
(0.05, 0.05, 0, 0, 0, 0)
),
'use_bowl_arena': False,
},
# Flipping Tasks
'FlipEraserFixed-v0': {
'reward_keys_and_weights': {
'object_to_target_position_distance_reward': 1,
'object_to_target_orientation_distance_reward': 20,
},
'init_qpos_range': [(0, 0, 0, 0, 0, 0)],
| |
# -*- coding: utf-8 -*-
"""
Module to compute least cost xmission paths, distances, and costs for a clipped
area.
"""
import geopandas as gpd
import logging
import numpy as np
import pandas as pd
import rasterio
from shapely.geometry import Polygon
from shapely.geometry.linestring import LineString
from shapely.ops import nearest_points
from skimage.graph import MCP_Geometric
import time
from warnings import warn
from reV.handlers.exclusions import ExclusionLayers
from reVX.least_cost_xmission.config import (XmissionConfig, TRANS_LINE_CAT,
SINK_CAT, SUBSTATION_CAT,
LOAD_CENTER_CAT)
from reVX.utilities.exceptions import (InvalidMCPStartValueError,
LeastCostPathNotFoundError)
logger = logging.getLogger(__name__)
class TieLineCosts:
"""
Compute Least Cost Tie-line cost from start location to desired end
locations
"""
def __init__(self, cost_fpath, start_idx, capacity_class, row_slice,
             col_slice, xmission_config=None, barrier_mult=100):
    """
    Parameters
    ----------
    cost_fpath : str
        Full path of .h5 file with cost arrays
    start_idx : tuple
        (row_idx, col_idx) within the clipped raster to compute least
        costs from.
    capacity_class : int | str
        Transmission feature capacity class
    row_slice : slice
        Rows of the full cost raster to clip to
    col_slice : slice
        Columns of the full cost raster to clip to
    xmission_config : str | dict | XmissionConfig, optional
        Path to Xmission config .json, dictionary of Xmission config
        .jsons, or preloaded XmissionConfig objects, by default None
    barrier_mult : int, optional
        Multiplier on transmission barrier costs, by default 100
    """
    self._cost_fpath = cost_fpath
    self._config = self._parse_config(xmission_config=xmission_config)
    self._start_idx = start_idx
    self._row_slice = row_slice
    self._col_slice = col_slice
    self._capacity_class = self._config._parse_cap_class(capacity_class)
    # cost layer name is keyed by the line capacity (MW) for this class
    line_cap = self._config['power_classes'][self.capacity_class]
    cost_layer = 'tie_line_costs_{}MW'.format(line_cap)
    self._cost, self._mcp_cost = self._clip_costs(
        cost_fpath, cost_layer, row_slice, col_slice,
        barrier_mult=barrier_mult)
    # MCP solver and clip shape are built lazily (see `mcp`, `clip_shape`)
    self._mcp = None
    self._clip_shape = None
    with ExclusionLayers(self._cost_fpath) as f:
        self.transform = rasterio.Affine(*f.profile['transform'])
        self._full_shape = f.shape
def __repr__(self):
msg = "{} starting at {}".format(self.__class__.__name__,
self._start_idx)
return msg
@property
def row_offset(self):
"""
Offset to apply to row indices to move into clipped array
Returns
-------
int
"""
offset = self._row_slice.start
if offset is None:
offset = 0
return offset
@property
def col_offset(self):
"""
Offset to apply to column indices to move into clipped array
Returns
-------
int
"""
offset = self._col_slice.start
if offset is None:
offset = 0
return offset
@property
def row(self):
"""
Row index inside clipped array
Returns
-------
int
"""
return self._start_idx[0]
@property
def col(self):
"""
Column index inside clipped array
Returns
-------
int
"""
return self._start_idx[1]
@property
def cost(self):
"""
Tie line costs array
Returns
-------
ndarray
"""
return self._cost
@property
def mcp_cost(self):
"""
Tie line costs array with barrier costs applied for MCP analysis
Returns
-------
ndarray
"""
return self._mcp_cost
@property
def mcp(self):
    """
    MCP_Geometric instance initialized on the mcp_cost array, with
    accumulated costs solved from the start point (lazy, built on first
    access and cached).

    Returns
    -------
    MCP_Geometric

    Raises
    ------
    InvalidMCPStartValueError
        If the start index maps to a negative (invalid) mcp cost.
    """
    if self._mcp is None:
        # a negative mcp cost marks an invalid/impassable start cell
        check = self.mcp_cost[self.row, self.col]
        if check < 0:
            msg = ("Start idx {} does not have a valid cost!"
                   .format((self.row, self.col)))
            raise InvalidMCPStartValueError(msg)
        self._mcp = MCP_Geometric(self.mcp_cost)
        # solve accumulated costs once; traceback() reuses this solution
        self._mcp.find_costs(starts=[(self.row, self.col)])
    return self._mcp
@property
def capacity_class(self):
"""
SC point capacity class
Returns
-------
str
"""
return self._capacity_class
@property
def clip_shape(self):
"""
Shaped of clipped cost raster
Returns
-------
tuple
"""
if self._clip_shape is None:
if self._row_slice == slice(None):
row_shape = self._full_shape[0]
else:
row_max = (self._row_slice.stop if self._row_slice.stop
else self._full_shape[0])
row_min = (self._row_slice.start if self._row_slice.start
else 0)
row_shape = row_max - row_min
if self._col_slice == slice(None):
col_shape = self._full_shape[1]
else:
col_max = (self._col_slice.stop if self._col_slice.stop
else self._full_shape[1])
col_min = (self._col_slice.start if self._col_slice.start
else 0)
col_shape = col_max - col_min
self._clip_shape = (row_shape, col_shape)
return self._clip_shape
@staticmethod
def _parse_config(xmission_config=None):
    """Coerce the input into an XmissionConfig instance.

    Parameters
    ----------
    xmission_config : str | dict | XmissionConfig, optional
        Path to Xmission config .json, dictionary of Xmission config
        .jsons, or a preloaded XmissionConfig object, by default None

    Returns
    -------
    XmissionConfig
    """
    if isinstance(xmission_config, XmissionConfig):
        return xmission_config
    return XmissionConfig(config=xmission_config)
@staticmethod
def _clip_costs(cost_fpath, cost_layer, row_slice, col_slice,
                barrier_mult=100):
    """
    Extract clipped cost arrays from exclusion .h5 files

    Parameters
    ----------
    cost_fpath : str
        Full path of .h5 file with cost arrays
    cost_layer : str
        Name of cost layer to extract
    row_slice : slice
        slice along axis 0 (rows) to clip costs too
    col_slice : slice
        slice along axis 1 (columns) to clip costs too
    barrier_mult : int, optional
        Multiplier on transmission barrier costs, by default 100

    Returns
    -------
    cost : ndarray
        2d clipped array of raw tie-line costs
    mcp_cost : ndarray
        2d clipped array of mcp cost = cost * barrier * barrier_mult
    """
    with ExclusionLayers(cost_fpath) as f:
        cost = f[cost_layer, row_slice, col_slice]
        barrier = f['transmission_barrier', row_slice, col_slice]
    mcp_cost = cost + cost * barrier * barrier_mult
    # NOTE(review): negative results are clamped to -1, the sentinel the
    # rest of this class treats as "invalid/impassable" — confirm this
    # matches MCP_Geometric's handling of negative costs
    mcp_cost = np.where(mcp_cost < 0, -1, mcp_cost)
    return cost, mcp_cost
@staticmethod
def _compute_path_length(indices):
"""
Compute the total length and cell by cell length of the lease cost path
defined by 'indices'
Parameters
----------
indices : ndarray
n x 2 array of MCP traceback of least cost path
Returns
-------
length : float
Total length of path in km
lens : ndarray
Vector of the distance of the least cost path accross each cell
"""
# Use Pythagorean theorem to calculate lengths between cells (km)
# Use c**2 = a**2 + b**2 to determine length of individual paths
lens = np.sqrt(np.sum(np.diff(indices, axis=0)**2, axis=1))
length = np.sum(lens) * 90 / 1000
# Need to determine distance coming into and out of any cell. Assume
# paths start and end at the center of a cell. Therefore, distance
# traveled in the cell is half the distance entering it and half the
# distance exiting it. Duplicate all lengths, pad 0s on ends for start
# and end cells, and divide all distance by half.
lens = np.repeat(lens, 2)
lens = np.insert(np.append(lens, 0), 0, 0)
lens = lens / 2
# Group entrance and exits distance together, and add them
lens = lens.reshape((int(lens.shape[0] / 2), 2))
lens = np.sum(lens, axis=1)
return length, lens
def least_cost_path(self, end_idx, save_path=False):
    """
    Find least cost path, its length, and its total un-barriered cost

    Parameters
    ----------
    end_idx : tuple
        (row, col) index of end point to connect and compute least cost
        path to
    save_path : bool
        Flag to save path as a multi-line geometry

    Returns
    -------
    length : float
        Length of path (km)
    cost : float
        Cost of path including terrain and land use multipliers
    path : shapely.geometry.linestring, optional
        Path as a LineString, only when save_path is True

    Raises
    ------
    ValueError
        If the end point lies outside the clipped cost raster
    LeastCostPathNotFoundError
        If the end point has an invalid cost or no path can be traced
    """
    row, col = end_idx
    # bounds check against the clipped raster
    check = (row < 0 or col < 0 or row >= self.clip_shape[0]
             or col >= self.clip_shape[1])
    if check:
        msg = ('End point ({}, {}) is out side of clipped cost raster '
               'with shape {}'.format(row, col, self.clip_shape))
        logger.exception(msg)
        raise ValueError(msg)
    # negative mcp cost marks an invalid/impassable end cell
    check = self.mcp_cost[row, col]
    if check < 0:
        msg = ("End idx {} does not have a valid cost!"
               .format(end_idx))
        raise LeastCostPathNotFoundError(msg)
    try:
        # accessing self.mcp triggers the (cached) find_costs solve
        indices = np.array(self.mcp.traceback((row, col)))
    except ValueError as ex:
        msg = ('Unable to find path from start {} to {}: {}'
               .format((self.row, self.col), end_idx, ex))
        raise LeastCostPathNotFoundError(msg) from ex
    # Extract costs of cells
    # pylint: disable=unsubscriptable-object
    cell_costs = self.cost[indices[:, 0], indices[:, 1]]
    length, lens = self._compute_path_length(indices)
    # Multiply distance traveled through each cell by its cost and sum
    cost = np.sum(cell_costs * lens)
    if save_path:
        # shift back into full-raster coordinates before georeferencing
        row = indices[:, 0] + self.row_offset
        col = indices[:, 1] + self.col_offset
        x, y = rasterio.transform.xy(self.transform, row, col)
        path = LineString(list(zip(x, y)))
        out = length, cost, path
    else:
        out = length, cost
    return out
def compute(self, end_indices, save_paths=False):
"""
Compute least cost paths to given end indicies
Parameters
----------
end_idices : tuple | list
(row, col) index or list of (row, col) indices of end point(s) to
connect and compute least cost path to
save_paths : bool, optional
Flag to save least cost path as a multi-line geometry,
by default False
Returns
-------
tie_lines : pandas.DataFrame | gpd.GeoDataFrame
DataFrame of lenghts and costs for each path or GeoDataFrame of
lenght, cost, and geometry for each path
"""
if isinstance(end_indices, tuple):
end_indices = [end_indices]
lengths = []
costs = []
paths = []
for end_idx in end_indices:
out = self.least_cost_path(end_idx, save_path=save_paths)
lengths.append(out[0])
costs.append(out[1])
if save_paths:
paths.append(out[2])
tie_lines = pd.DataFrame({'length_km': lengths, 'cost': costs})
if save_paths:
with ExclusionLayers(self._cost_fpath) as f:
crs = f.crs
tie_lines = gpd.GeoDataFrame(tie_lines, geometry=paths, crs=crs)
return tie_lines
@classmethod
def run(cls, cost_fpath, start_idx, end_indices, capacity_class,
row_slice, col_slice, xmission_config=None, barrier_mult=100,
save_paths=False):
"""
Compute least cost tie-line path to all features | |
### First draft of a Quantum Circuit object
import numpy as np
def kron(*args):
    """Kronecker product of all arguments, folded left to right.

    With no arguments, returns the 1x1 identity [[1.0]].
    """
    product = np.array([[1.0]])
    for factor in args:
        product = np.kron(product, factor)
    return product
def n_kron(n, vector):
    """n-fold Kronecker product of `vector` with itself.

    n == 0 yields the 1x1 identity [[1.0]].
    """
    power = np.array([[1.0]])
    for _ in range(n):
        power = np.kron(power, vector)
    return power
def dot(*args):
    """Chain matrix products left to right; returns 1 for no arguments."""
    product = 1
    for matrix in args:
        product = np.dot(product, matrix)
    return product
def gate_operator(O, i, n):
    """Lift gate matrix O acting at 0-based slot i to an n-slot operator.

    Slots other than i receive the 2x2 identity.
    """
    I = np.eye(2)
    lifted = np.array([[1.0]])
    for slot in range(n):
        lifted = np.kron(lifted, O if slot == i else I)
    return lifted
def gate_multiple_operator(O, args, n):
    """Apply single-qubit gate O to every (1-based) qubit listed in `args`.

    All other qubits of the n-qubit register receive the identity.
    """
    I = np.eye(2)
    operator = np.array([[1.0]])
    for qubit in range(1, n + 1):  # qubits are numbered from 1
        operator = np.kron(operator, O if qubit in args else I)
    return operator
def prepare_projector(P, i, n):
    """Lift projector P (typically [[1,0],[0,0]]) onto qubit i of n qubits.

    Note the reversed ordering: identities for the (n-i-1) higher slots
    come first, then P, then i trailing identities.
    """
    I = np.eye(2)
    operator = np.array([[1.0]])
    for _ in range(n - i - 1):
        operator = np.kron(operator, I)
    operator = np.kron(operator, P)
    for _ in range(i):
        operator = np.kron(operator, I)
    return operator
class QCircuit:
    """Minimal dense state-vector quantum-circuit simulator.

    The state vector is updated immediately after every gate call.

    Fixes over the previous revision:
    * gate methods no longer store their matrices as ``self.X`` /
      ``self.H`` / ``self.CNOT`` / ``self.P`` attributes — those
      assignments shadowed the methods of the same name, so any second
      call crashed with "'numpy.ndarray' object is not callable";
    * ``measure`` now normalizes the |1> outcome by sqrt(1 - prob)
      instead of sqrt(prob);
    * ``dirac`` no longer drops negative amplitudes from its nonzero
      rendering (uses abs()).
    """

    def __init__(self, number_of_qubits=1):
        self.number_of_qubits = number_of_qubits
        self.state_zero = np.array([[1.0], [0.0]])
        self.initial_state = self._n_kron(self.number_of_qubits,
                                          self.state_zero)
        self.state = self.initial_state
        # tracker 1 records only wires a gate touched; tracker 2 pads the
        # untouched wires with '-' so drawn columns stay aligned
        self.track_gates1 = [[] for _ in range(self.number_of_qubits)]
        self.track_gates2 = [[] for _ in range(self.number_of_qubits)]
        # computational basis labels, e.g. ['00', '01', '10', '11']
        self.basis = [('{:0' + str(self.number_of_qubits) + 'b}').format(i)
                      for i in range(2 ** self.number_of_qubits)]

    # -- internal helpers (kept on the class so the simulator is
    # self-contained and never clobbers its own public methods) --

    @staticmethod
    def _n_kron(n, vector):
        # n-fold Kronecker power of `vector`
        out = np.array([[1.0]])
        for _ in range(n):
            out = np.kron(out, vector)
        return out

    @staticmethod
    def _operator_at(O, i, n):
        # lift gate O acting at 0-based slot i to an n-slot operator
        I = np.eye(2)
        out = np.array([[1.0]])
        for slot in range(n):
            out = np.kron(out, O if slot == i else I)
        return out

    @staticmethod
    def _operator_on(O, qubits, n):
        # apply single-qubit gate O to every (1-based) qubit in `qubits`
        I = np.eye(2)
        out = np.array([[1.0]])
        for slot in range(n):
            out = np.kron(out, O if slot + 1 in qubits else I)
        return out

    def _track(self, label, qubits, pad):
        # record `label` on the given 1-based qubits; pad the others
        for j in range(self.number_of_qubits):
            if j + 1 in qubits:
                self.track_gates1[j].append(label)
                self.track_gates2[j].append(label)
            else:
                self.track_gates2[j].append(pad)

    def one_X(self, i=1):  # first qubit per default
        """Apply an X (NOT) gate to the i-th qubit (1-based)."""
        gate = np.array([[0.0, 1.0], [1.0, 0.0]])
        op = self._operator_at(gate, i - 1, self.number_of_qubits)
        self.state = np.dot(op, self.state)
        self._track("X", [i], "-")

    def X(self, *args):
        """Apply X gates to the given 1-based qubits (default: qubit 1)."""
        if len(args) == 0:
            args = [1]
        gate = np.array([[0.0, 1.0], [1.0, 0.0]])
        op = self._operator_on(gate, args, self.number_of_qubits)
        self.state = np.dot(op, self.state)
        self._track("X", args, "-")

    def H(self, *args):
        """Apply Hadamard gates to the given 1-based qubits (default: 1)."""
        if len(args) == 0:
            args = [1]
        gate = 1.0 / 2 ** .5 * np.array([[1, 1], [1, -1]])
        op = self._operator_on(gate, args, self.number_of_qubits)
        self.state = np.dot(op, self.state)
        self._track("H", args, "-")

    def CNOT(self, control=1, target=2):
        """Apply a CNOT; control and target must be adjacent qubits."""
        if abs(control - target) > 1:
            print("Warning, the control and target should be next to "
                  "eachother, nothing added")
            return
        if control == target:
            print("Warning, the control and target should be different, nothing added")
            return
        if control < target:
            gate = np.array([[1.0, 0.0, 0.0, 0.0],
                             [0.0, 1.0, 0.0, 0.0],
                             [0.0, 0.0, 0.0, 1.0],
                             [0.0, 0.0, 1.0, 0.0], ])
            anchor = control - 1
        else:
            gate = np.array([[0.0, 1.0, 0.0, 0.0],
                             [1.0, 0.0, 0.0, 0.0],
                             [0.0, 0.0, 1.0, 0.0],
                             [0.0, 0.0, 0.0, 1.0], ])
            anchor = target - 1
        # the 4x4 gate occupies one slot of an (n-1)-slot operator
        op = self._operator_at(gate, anchor, self.number_of_qubits - 1)
        self.state = np.dot(op, self.state)
        for j in range(self.number_of_qubits):
            if j + 1 == control:
                self.track_gates1[j].append("ctrl")
                self.track_gates2[j].append("ctrl")
            elif j + 1 == target:
                self.track_gates1[j].append("CNOT")
                self.track_gates2[j].append("CNOT")
            else:
                self.track_gates2[j].append("----")

    def measure(self, i=1):
        """Projectively measure qubit i (1-based), collapsing the state."""
        i -= 1
        proj_zero = np.dot(self.state_zero, self.state_zero.T)
        op_zero = self._operator_at(proj_zero, i, self.number_of_qubits)
        # probability of reading |0> on qubit i
        prob = np.dot(np.dot(np.conjugate(self.state).T, op_zero), self.state)
        if np.random.rand() < prob:
            self.state = np.dot(op_zero, self.state) / np.sqrt(prob)
        elif prob < 1.0:
            state_one = np.array([[0.0], [1.0]])
            proj_one = np.dot(state_one, state_one.T)
            op_one = self._operator_at(proj_one, i, self.number_of_qubits)
            # BUG FIX: the |1> outcome has probability (1 - prob)
            self.state = np.dot(op_one, self.state) / np.sqrt(1.0 - prob)
        self._track("M", [i + 1], "-")

    def draw(self):
        """Print two ASCII renderings of the circuit."""
        print("First Drawing")
        for wire in self.track_gates1:
            print("|0> --- " + " --- ".join(wire))
        print("Second Drawing")
        for wire in self.track_gates2:
            print("|0> --- " + " --- ".join(wire))

    def reinitialize(self):
        """Reset to |0...0> and clear both trackers (history is lost)."""
        self.state = self.initial_state
        self.track_gates1 = [[] for _ in range(self.number_of_qubits)]
        self.track_gates2 = [[] for _ in range(self.number_of_qubits)]

    def dirac(self):
        """Print the state in Dirac notation: all terms, then nonzero ones."""
        equation = "|Psi> = "
        for i, amp in enumerate(self.state):
            equation += str(amp[0]) + '|' + self.basis[i] + '> + '
        print(equation[:-2])
        equation = "|Psi> = "
        for i, amp in enumerate(self.state):
            # BUG FIX: use abs() so negative amplitudes are not dropped
            if abs(amp[0]) > 0.0:
                equation += str(amp[0]) + '|' + self.basis[i] + '> + '
        print(equation[:-2])
## Introducing QCChain
class QCChain:
### states are updated after every simulation call, allows for more flexibility, and introduces a "running time"
def __init__(self, number_of_qubits = 1):
    """Set up an n-qubit circuit description in the all-|0> state.

    Unlike QCircuit, gates are only recorded here; the state vector is
    only updated when ``simulate`` is called.
    """
    self.number_of_qubits = number_of_qubits
    # single-qubit |0> column vector
    self.state_zero = np.array([[1.0],[0.0]])
    # self.state_one = np.array([[0.0],[1.0]])
    self.initial_state = n_kron(self.number_of_qubits, self.state_zero)
    self.state = self.initial_state
    # tracker 1 records only wires a gate touched; tracker 2 pads the
    # untouched wires with '-' so drawn columns stay aligned
    self.track_gates1 = [[] for _ in range(self.number_of_qubits)]
    self.track_gates2 = [[] for _ in range(self.number_of_qubits)]
    # computational basis labels, e.g. ['00', '01', '10', '11']
    self.basis = [('{:0'+str(self.number_of_qubits)+'b}').format(i) for i in range(2**self.number_of_qubits)]
    # self.I = np.eye(2)
    # histogram of measurement outcomes accumulated across simulations
    self.count = {basis:0 for basis in self.basis}
def X(self, *args):
# add X gate to multiple qubits, default to first qubit
if len(args) == 0:
args = [1]
for j in range(self.number_of_qubits):
if j+1 in args:
self.track_gates1[j].append("X")
self.track_gates2[j].append("X")
else:
self.track_gates2[j].append("-")
def H(self, *args):
# add H gate to multiple qubits
if len(args) == 0:
args = [1]
for j in range(self.number_of_qubits):
if j+1 in args:
self.track_gates1[j].append("H")
self.track_gates2[j].append("H")
else:
self.track_gates2[j].append("-")
def CNOT(self, control=1, target=2):
# add CNOT gate w.r.t. control and target (both should be valid qubits)
# for now, the control and the target have to be next to each other
if abs(control - target) > 1:
print("Warning, the control and target should be next to eachother, nothing added")
elif control == target:
print("Warning, the control and target should be different, nothing added")
else:
for j in range(self.number_of_qubits):
if j+1 == control:
self.track_gates1[j].append("ctrl")
self.track_gates2[j].append("ctrl")
elif j+1 == target:
self.track_gates1[j].append("CNOT")
self.track_gates2[j].append("CNOT")
else:
self.track_gates2[j].append("----")
def measure(self, *args):
# add measurement gate at i-th qubit
if len(args) == 0:
args = [1]
for j in range(self.number_of_qubits):
if j+1 in args:
self.track_gates1[j].append("M")
self.track_gates2[j].append("M")
else:
self.track_gates2[j].append("-")
def draw(self):
    """Print both ASCII renderings of the queued circuit."""
    for title, tracker in (("First Drawing", self.track_gates1),
                           ("Second Drawing", self.track_gates2)):
        print(title)
        for wire in tracker:
            print("|0> --- " + " --- ".join(wire))
def reinitialize(self):
# carefull for the previous states will be lost
self.state = self.initial_state
self.track_gates1 = [[] for _ in range(self.number_of_qubits)]
self.track_gates2 = [[] for _ in range(self.number_of_qubits)]
def add(self, gates=[['X']], qubit=[1], place=[0]):
# special method that adds a gate or several gate to specified place.
# Example: q.add([['X','H'],['X'],['H']],[1,5,6],[-1,0,1])
# this will add two gates X and H to the first qubit before the last gate,
# X to the fifth qubit after all the other added gates,
# H to the sixth qubit at first place (so before all the other).
for j in range(self.number_of_qubits):
if j+1 in qubit:
i = qubit.index(j+1)
if place[i] == 0:
for gate in gates[i]:
self.track_gates1[j].append(gate)
self.track_gates2[j].append(gate)
if place[i] > 0:
for gate in gates[i]:
self.track_gates1[j].insert(place[i]-1,gate)
self.track_gates2[j].insert(place[i]-1,gate)
if place[i] < 0:
for gate in gates[i]:
self.track_gates1[j].insert(place[i],gate)
self.track_gates2[j].insert(place[i],gate)
else:
self.track_gates2[j].append("-")
def delq(self, qubit=None, place=None):
    """Delete one gate from each listed qubit.

    Parameters:
        qubit: 1-based qubit indices to edit (defaults to [1]).
        place: per-qubit position code: 0 deletes the last gate, otherwise
            gate number `place` (1-based) is deleted (defaults to [0]).
    """
    # Fix: the original used mutable default arguments ([1], [0]);
    # fresh defaults are now created on every call.
    if qubit is None:
        qubit = [1]
    if place is None:
        place = [0]
    for j in range(self.number_of_qubits):
        if j+1 in qubit:
            i = qubit.index(j+1)
            if place[i] == 0:
                del self.track_gates1[j][-1]
                del self.track_gates2[j][-1]
            else:
                del self.track_gates1[j][place[i]-1]
                del self.track_gates2[j][place[i]-1]
def simulate(self):
# simulate the circuit! uses the second tracker
self.state = self.initial_state
for j,_ in enumerate(self.track_gates2[0]):
queue = [self.track_gates2[e][j] for e in range(self.number_of_qubits)]
app = []
for i,g in enumerate(queue):
if g not in ['-','ctrl','CNOT','----']:
app.append(i+1)
c = g
elif g == 'ctrl':
control = i
elif g == 'CNOT':
target = i
c = g
if c == 'X':
self.X = np.array([[0.0, 1.0], [1.0, 0.0]])
self.state = np.dot(gate_multiple_operator(self.X, app, self.number_of_qubits), self.state)
elif c == 'H':
self.H = 1.0 / 2**.5 * np.array([[1, 1], [1, -1]])
self.state = np.dot(gate_multiple_operator(self.H, app, self.number_of_qubits), self.state)
elif c == 'M':
for i in app:
self.P = np.dot(self.state_zero, self.state_zero.T)
prob = dot(np.conjugate(self.state).T,gate_operator(self.P,i-1,self.number_of_qubits),self.state)
if np.random.rand() < prob:
self.state = np.dot(gate_operator(self.P,i-1,self.number_of_qubits),self.state) / np.sqrt(prob)
elif prob > 0.0:
| |
# Graph ensemble for AMR
from random import shuffle
import argparse
import penman
from amrlib.evaluate.smatch_enhanced import compute_smatch
from ensemble.utils import match_pair, align, get_entries
import re
from penman.model import Model
import time
import warnings
model = Model()
warnings.filterwarnings("ignore")
def get_node_maps(best_mapping):
    """Build the node-name maps between two aligned AMR graphs.

    best_mapping[i] == j means node 'a{i}' of the first graph is aligned
    with node 'b{j}' of the second graph.

    Returns:
        (mab, mba): dicts mapping 'aN' -> 'bM' and 'bM' -> 'aN'.
    """
    mab = {}
    mba = {}
    # enumerate replaces the original hand-rolled `i` counter
    for i, j in enumerate(best_mapping):
        mab['a' + str(i)] = 'b' + str(j)
        mba['b' + str(j)] = 'a' + str(i)
    return mab, mba
def get_attribute(best_mapping, attributes1, attributes2):
    """Split two attribute-triple lists by whether they match across graphs.

    A triple (rel, node, value) from one list matches when, after mapping
    its node through the alignment, the identical triple exists in the
    other list.

    Returns:
        r1: attributes only in the first list
        r2: attributes only in the second list
        i1: attributes of the first list also present in the second
        i2: attributes of the second list also present in the first
    """
    mab, mba = get_node_maps(best_mapping)
    # Sets give O(1) membership tests instead of scanning the lists for
    # every triple; the triples are tuples and therefore hashable (the
    # original already used them as dict keys).  The original also built
    # two dicts (a1/a2) that were never read - removed.
    set1 = set(attributes1)
    set2 = set(attributes2)
    i1 = []  # attributes in the first list that are in the second as well
    r1 = []  # attributes in the first but not in the second list
    for a in attributes1:
        node = a[1]
        if node in mab and (a[0], mab[node], a[2]) in set2:
            i1.append(a)
        else:
            r1.append(a)
    i2 = []  # attributes in the second list that are in the first as well
    r2 = []  # attributes in the second but not in the first list
    for a in attributes2:
        node = a[1]
        if node in mba and (a[0], mba[node], a[2]) in set1:
            i2.append(a)
        else:
            r2.append(a)
    return r1, r2, i1, i2
def get_instance(best_mapping, instance1, instance2):
    """Split two instance-triple lists by whether they match across graphs.

    Same matching logic as get_attribute, applied to instance triples:
    a triple matches when its node maps through the alignment onto an
    identical triple in the other list.

    Returns:
        r1: instances only in the first list
        r2: instances only in the second list
        i1: instances of the first list also present in the second
        i2: instances of the second list also present in the first
    """
    mab, mba = get_node_maps(best_mapping)
    # set membership instead of repeated list scans; unused a1/a2 dicts
    # from the original removed
    set1 = set(instance1)
    set2 = set(instance2)
    i1 = []  # instances in the first list that are in the second as well
    r1 = []  # instances in the first but not in the second list
    for a in instance1:
        node = a[1]
        if node in mab and (a[0], mab[node], a[2]) in set2:
            i1.append(a)
        else:
            r1.append(a)
    i2 = []  # instances in the second list that are in the first as well
    r2 = []  # instances in the second but not in the first list
    for a in instance2:
        node = a[1]
        if node in mba and (a[0], mba[node], a[2]) in set1:
            i2.append(a)
        else:
            r2.append(a)
    return r1, r2, i1, i2
def get_relation(best_mapping, relation1, relation2):
    """Split two relation-triple lists by whether they match across graphs.

    A relation (rel, src, dst) matches when both endpoints map through the
    alignment and the mapped triple exists in the other list.

    Returns:
        r1: relations only in the first list
        r2: relations only in the second list
        i1: relations of the first list also present in the second
        i2: relations of the second list also present in the first
    """
    mab, mba = get_node_maps(best_mapping)
    # set membership instead of repeated list scans; unused a1/a2 dicts
    # from the original removed
    set1 = set(relation1)
    set2 = set(relation2)
    i1 = []  # relations in the first list that are in the second as well
    r1 = []  # relations in the first but not in the second list
    for a in relation1:
        if a[1] in mab and a[2] in mab and (a[0], mab[a[1]], mab[a[2]]) in set2:
            i1.append(a)
        else:
            r1.append(a)
    i2 = []  # relations in the second list that are in the first as well
    r2 = []  # relations in the second but not in the first list
    for a in relation2:
        if a[1] in mba and a[2] in mba and (a[0], mba[a[1]], mba[a[2]]) in set1:
            i2.append(a)
        else:
            r2.append(a)
    return r1, r2, i1, i2
def get_map(best_mapping, instance1, attributes1, relation1, instance2, attributes2, relation2):
    """Compare all three triple categories of two aligned AMRs in one call.

    Returns twelve lists, in order: unmatched attributes, matched
    attributes, unmatched instances, matched instances, unmatched
    relations, matched relations for graph 1, then the same six for
    graph 2.
    """
    attr_only1, attr_only2, attr_both1, attr_both2 = get_attribute(best_mapping, attributes1, attributes2)
    inst_only1, inst_only2, inst_both1, inst_both2 = get_instance(best_mapping, instance1, instance2)
    rel_only1, rel_only2, rel_both1, rel_both2 = get_relation(best_mapping, relation1, relation2)
    return (attr_only1, attr_both1, inst_only1, inst_both1, rel_only1, rel_both1,
            attr_only2, attr_both2, inst_only2, inst_both2, rel_only2, rel_both2)
def get_variables(g):
    """Return a dict keyed by every variable of penman graph `g`.

    A node counts as a variable iff it is the source of an ':instance'
    triple; values are always 1 (the dict is used as a set).
    """
    # dict comprehension replaces the original build-by-assignment loop
    return {t[0]: 1 for t in g.triples if t[1] == ':instance'}
def get_triples(amr_str):
    """Decode an AMR string and bucket its triples into three dicts.

    Returns (instances, attributes, relations), each mapping triple -> 1:
      - instances: ':instance' triples
      - relations: triples whose both endpoints are variables
      - attributes: everything else
    """
    g = penman.decode(amr_str)
    variables = get_variables(g)
    instances_dict = {}
    attributes_dict = {}
    relations_dict = {}
    for triple in g.triples:
        source, role, target = triple
        if role == ':instance':
            instances_dict[triple] = 1
        elif source in variables and target in variables:
            relations_dict[triple] = 1
        else:
            attributes_dict[triple] = 1
    return instances_dict, attributes_dict, relations_dict
def match_count(amr1, amr2):
    """Align two AMR strings and collect the triples they agree on.

    Uses the Smatch alignment returned by match_pair to map amr2's nodes
    onto amr1's, then re-expresses every matched (and mappable unmatched)
    triple in amr1's original node names and converts the result back to
    penman-style triples.

    Returns:
        (convert_instances, convert_attributes, convert_relations):
        dicts mapping penman-style triples -> 1.
    """
    instances = {}
    attributes = {}
    relations = {}
    # alignment plus the triple inventories of both graphs;
    # node_maps_1/node_maps_2 translate internal node ids ('aN'/'bN')
    # back to the original variable names
    best_mapping, instance1, attributes1, relation1, instance2, attributes2, relation2, node_maps_1, node_maps_2 = match_pair(
        (amr1, amr2))
    a1, ai1, i1, ii1, r1, ri1, a2, ai2, i2, ii2, r2, ri2 = get_map(best_mapping, instance1, attributes1, relation1,
                                                                   instance2, attributes2, relation2)
    mab, mba = get_node_maps(best_mapping)
    # common in both amr: record using amr1's original node names
    for a in ai1:
        attributes[(a[0], node_maps_1[a[1]], a[2])] = 1
    for a in ii1:
        instances[(a[0], node_maps_1[a[1]], a[2])] = 1
    for a in ri1:
        relations[(a[0], node_maps_1[a[1]], node_maps_1[a[2]])] = 1
    # exist in the second but not in the first amr: only keep triples whose
    # node(s) can be mapped back onto amr1's node set
    for a in a2:
        n = a[1]
        if n in mba:
            attributes[(a[0], node_maps_1[mba[n]], a[2])] = 1
    for a in i2:
        n = a[1]
        if n in mba:
            instances[(a[0], node_maps_1[mba[n]], a[2])] = 1
    for a in r2:
        n1 = a[1]
        n2 = a[2]
        if n1 in mba and n2 in mba:
            relations[(a[0], node_maps_1[mba[n1]], node_maps_1[mba[n2]])] = 1
    # convert from smatch-style (rel, node, value) to penman-style triples
    convert_instances = {}
    for k, v in instances.items():
        convert_instances[convert_instance(k)] = v
    convert_attributes = {}
    for k, v in attributes.items():
        convert_attributes[convert_attribute(k)] = v
    convert_relations = {}
    for k, v in relations.items():
        convert_relations[convert_relation(k)] = v
    return convert_instances, convert_attributes, convert_relations
def convert_instance(i):
    """Convert a smatch-style instance triple (rel, node, value) to
    penman order (node, ':rel', value)."""
    rel, node, value = i
    return (node, ':' + rel, value)
def convert_relation(i):
    """Convert a smatch-style relation triple (rel, src, dst) to penman
    order and de-invert it via the module-level penman Model (e.g. an
    ':arg0-of' edge becomes ':arg0' with swapped endpoints)."""
    rel, source, target = i
    return model.deinvert((source, ':' + rel, target))
def convert_attribute(i):
    """Convert a smatch-style attribute triple (rel, node, value) to
    penman order, stripping a trailing '_' sentinel and double-quoting
    the value.

    Fix: uses str.endswith instead of indexing a[-1], so an empty
    attribute value no longer raises IndexError.
    """
    a = i[2]
    if a.endswith('_'):
        a = a[:-1]
    a = '"' + a + '"'
    t = (i[1], ':' + i[0], a)
    return t
def ensemble(amrs, threshold):
    """Merge a list of candidate AMR strings into one graph by voting.

    The first AMR is the backbone; every other candidate is aligned to it
    and each of its triples votes for the matching backbone triple.
    Backbone triples whose vote share falls below `threshold` are pruned
    (except where that would disconnect the graph).

    Returns:
        (g, ensemble_support, original_support): the ensembled graph plus
        the total vote support of the pruned and of the unpruned backbone.
    """
    amr1 = amrs[0]
    instances = {}
    attributes = {}
    relations = {}
    try:
        instances, attributes, relations = get_triples(amr1)
        for amr2 in amrs[1:]:
            i, a, r = match_count(amr1, amr2)
            # dict.get replaces the original if-in/else counting blocks
            for k in i:
                instances[k] = instances.get(k, 0) + 1
            for k in a:
                attributes[k] = attributes.get(k, 0) + 1
            for k in r:
                relations[k] = relations.get(k, 0) + 1
        n = len(amrs)
        original_g = penman.decode(amr1)
        g = penman.decode(amr1)
        # update the triples by majority vote
        update_instances(instances, threshold, n, g)
        update_attributes(attributes, threshold, n, g)
        update_relations(relations, threshold, n, g)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Fallback: keep the last parseable candidate,
        # or a dummy graph when none parses.
        for amr in amrs:
            try:
                g = penman.decode(amr)
                original_g = penman.decode(amr)
            except Exception:
                g = None
                original_g = None
            if g is None:
                g = penman.decode('(z / end-01)')
            if original_g is None:
                original_g = penman.decode('(z / end-01)')
    deduplicate_triples(g)
    freq = threshold * len(amrs)
    ensemble_support = get_total_support(instances, attributes, relations, g, freq)
    original_support = get_total_support(instances, attributes, relations, original_g, freq)
    return g, ensemble_support, original_support
def deduplicate_triples(g):
    """Remove duplicate triples from graph `g` in place.

    A triple is a duplicate when it, or its inverse (via `inverse`), has
    already been seen; first-occurrence order is preserved.
    """
    seen = {}
    clean_triples = []
    for t in g.triples:
        rt = inverse(t)
        # keep only the first of {t, inverse(t)}
        # (replaces the original `if ...: pass / else:` inversion)
        if t not in seen and rt not in seen:
            seen[t] = 1
            clean_triples.append(t)
    g.triples = clean_triples
def get_total_support(instances, attributes, relations, g, freq):
    """Sum the ensemble vote support of the triples present in graph `g`.

    Each triple of `g` that reached at least `freq` votes contributes
    (votes - 1), i.e. the number of *other* candidates that agreed on it.

    NOTE(review): `n` counts the contributing triples but is unused
    because the normalised `return r/n` was commented out — confirm
    whether the raw sum is the intended return value.
    """
    r = 0.0
    n = 0
    for t in g.triples:
        if t in instances:
            if instances[t] >= freq:
                r += instances[t] - 1
                n += 1
        elif t in attributes:
            if attributes[t] >= freq:
                r += attributes[t] - 1
                n += 1
        else:
            # presumably counts votes for t including its inverted form,
            # hence the helper instead of a direct lookup — TODO confirm
            # against count_relation_freq's definition
            c = count_relation_freq(relations, t)
            if t in relations and c >= freq:
                r += c - 1
                n += 1
    if n == 0:
        n = 1
    return r
    # return r/n
def update_instances(instances, threshold, n, g):
    """Replace each instance triple of `g` with the majority-voted one.

    Among candidate instance triples sharing the same (variable, role)
    pair, the one with the most votes — provided its vote share reaches
    `threshold` — wins.  Triples without a qualified winner are kept
    unchanged so the graph stays connected; non-instance triples are
    always kept.
    """
    # winners: (source, role) -> (best triple, votes); only candidates
    # whose vote share reaches the threshold may compete, and ties keep
    # the first candidate encountered
    winners = {}
    for triple, votes in instances.items():
        if votes / (n + 0.0) >= threshold:
            key = (triple[0], triple[1])
            if key not in winners or winners[key][1] < votes:
                winners[key] = (triple, votes)
    # rewrite the graph: substitute the winning instance where one exists,
    # otherwise keep the original triple as-is
    g.triples = [winners.get((t[0], t[1]), (t,))[0] for t in g.triples]
def update_attributes(attributes, threshold, n, g):
# only keep the attributes with votes greater than the threshold
qualified_attributes = {}
for t, v in attributes.items():
if v / (n + 0.0) >= threshold:
qualified_attributes[t] = v
# keep attributes with the major votes if there exist | |
<filename>BackmanAlgorithm/SmoothPlannerClass.py
import numpy as np
from numpy import sin, cos, tan
import matplotlib.pyplot as plt
from PathSegmentClass import SpiralSegment, CCSegment, LineSegment, C2ArcSegment, C2LineSegment, FullPath
class SmoothPathPlanner:
"""
    Class for implementation of the smooth-curvature path generating algorithm as described in Backman et al.'s 2015 paper "Smooth Turning Path Generation for Agricultural Vehicles in Headlands".
Note that variable names are chosen either to conform to standard conventions or in some cases to reflect the naming conventions in the paper.
"""
def __init__(self, dT):
if dT > 0.05:
print("Warning: dT is large, path may be discontinuous")
self.dT = dT
def setConstraints(self, kConstraints, vConstraints, headlandSpeed, headlandSpeedReverse):
"""
Set constraints on K, Kdot, Kddot, V, Vdot, Vddot, speed in headland and reverse speed in headland.
Input:
kConstraints: constraints on curvature and derivatives [kMax, kMin, kDotMax, kDotMin, kDDotMax, kDDotMin]
vConstraints: constraints on velocity and derivatives [vMax, vMin, vDotMax, vDotMin, vDDotMax, vDDotMin]
headlandSpeed: Desired speed in headland
headlandSpeedReverse: Desired reverse speed in headland
"""
if headlandSpeed > vConstraints[0]:
raise ValueError("Headland Speed should not be larger than V Max")
if headlandSpeedReverse < vConstraints[1]:
raise ValueError("Headland Speed in reverse should not be smaller than V Min")
if not (len(kConstraints) == 6 and len(vConstraints) == 6):
raise TypeError('kConstraintsand/or vConstraints not of correct dimension.')
if not (kConstraints[5] < 0):
raise ValueError('kDDotMin must be less than zero.')
if not (vConstraints[5] < 0):
raise ValueError('vDDotMin must be less than zero.')
self.kMax = kConstraints[0]
self.kMin = kConstraints[1]
self.kDotMax = kConstraints[2]
self.kDotMin = kConstraints[3]
self.kDDotMax = kConstraints[4]
self.kDDotMin = kConstraints[5]
self.vMax = vConstraints[0]
self.vMin = vConstraints[1]
self.vDotMax = vConstraints[2]
self.vDotMin = vConstraints[3]
self.vDDotMax = vConstraints[4]
self.vDDotMin = vConstraints[5]
self.headlandSpeed = headlandSpeed
self.headlandSpeedReverse = headlandSpeedReverse
self.kRange = self.kMax - self.kMin
def setStartAndGoal(self, initialState, finalState):
"""
Set Start and Goal States.
Input:
initialState: start state of vehicle [x, y, theta, v, k]
finalState: end or goal state of vehicle [x, y, theta, v, k]
"""
if not (len(initialState) == 5 and len(finalState) == 5):
raise TypeError('Start and/or Goal not of correct dimension: [x, y, th, v, k]')
else:
self.initialState = initialState
self.finalState = finalState
self.k_C0 = initialState[4]
self.k_C4 = finalState[4]
def setNominalCurvatures(self, kStart, kCenter, kEnd, reverse):
"""
Set the curvature for each constant curvature segment.
Input:
kStart: Curvature of first constant curvature segment
kCenter: Curvature of middle constant curvature segment
kEnd: Curvature of final constant curvature segment
reverse: Whether the middle segment is driven with reverse speed
"""
self.k_C1 = kStart
self.k_C2 = kCenter
self.k_C3 = kEnd
self.reverse = reverse
def generateCurvatureTrajectory(self, kInit, kFinal):
"""
Generates a curvature trajectory which has starting curvature equal to kInit and final curvature equal to kFinal. Time values start form tInit and increment by self.dT
Uses Eqn. 4 from paper.
Input:
kInit: starting curvature of trajectory
kFinal: final curvature of trajectory
Output:
kTrajectory: np array of curvature values for ever timestep in trajectory
"""
kTolerance = self.dT*max(np.abs(self.kDotMax),np.abs(self.kDotMin))
k = kInit
kTrajectory = np.array([k])
while np.abs(k - kFinal) > kTolerance:
if k < kFinal:
k = k + self.dT*self.kDotMax
else:
k = k + self.dT*self.kDotMin
kTrajectory = np.append(kTrajectory, np.array([k]), axis=0)
return kTrajectory
def generateSpeedTrajectory(self, vInit, vFinal):
"""
Generates a velocity trajectory which has starting velocity equal to vInit and final velocity equal to vFinal. Time values start form tInit and increment by self.dT
Eqn. 7 from paper
Input:
vInit: starting velocity of trajectory
vFinal: final velocity of trajectory
Output:
vTrajectory: np array of velocity values for ever timestep in trajectory
"""
vTolerance = self.dT*max(np.abs(self.vDotMax), np.abs(self.vDotMin))
v = vInit
vTrajectory = np.array([v])
while np.abs(v - vFinal) > vTolerance:
if v < vFinal:
v = v + self.dT*self.vDotMax
else:
v = v + self.dT*self.vDotMin
vTrajectory = np.append(vTrajectory, np.array([v]), axis=0)
return vTrajectory
def generateOptimalTrajectory(self, x0, xFinal, xDotMax, xDotMin, xDDotMax, xDDotMin):
    """
    Analytically solves the optimal trajectory problem to find the x trajectory which moves from x0 to xFinal in minimum time subject to arbitrary boundary constraints on the first and second derivative of x. Returns optimal trajectory as a 1xN numpy array of floats.
    Input:
        x0: Initial state
        xFinal: Final state
        xDotMax: Upper bound on first derivative of x
        xDotMin: Lower bound on first derivative of x
        xDDotMax: Upper bound on second derivative of x
        xDDotMin: Lower bound on second derivative of x (This value must be negative)
    Output:
        trajectory: Nx1 np array of x values for every timestep.
    """
    dT = self.dT
    if x0 < xFinal: #increase x to xf
        # time to ramp xDot from 0 up to xDotMax, and to brake back to 0
        riseTime = (xDotMax)/(xDDotMax)
        fallTime = (-1*xDotMax)/(xDDotMin)
        # x reached after the acceleration phase (xRT) and after
        # acceleration followed immediately by braking (xFT)
        xRT = x0 + (xDDotMax/2.0)*riseTime**2
        xFT = xRT + (xDDotMax*riseTime)*fallTime + (xDDotMin/2.0)*fallTime**2
        if xFT < xFinal:
            # trapezoidal profile: a cruise segment at xDotMax covers the
            # remaining distance between ramp-up and ramp-down
            diff = xFinal - xFT
            tTop = diff/xDotMax
            # acceleration segment
            t = np.linspace(0,riseTime,int(np.ceil(riseTime/dT)))
            x0toRT = x0 + (xDDotMax/2.0)*t**2
            # cruise segment at constant xDotMax
            t = np.linspace(dT, tTop, int(np.ceil(tTop/dT)))
            xRTtoDiff = x0toRT[-1] + xDDotMax*riseTime*t
            # braking segment
            t = np.linspace(dT,fallTime,int(np.ceil(fallTime/dT)))
            xDifftoFT = xRTtoDiff[-1] + xDDotMax*riseTime*t + 0.5*xDDotMin*t**2
            xTrajectory = np.append(x0toRT,np.append(xRTtoDiff,xDifftoFT,axis=0),axis=0)
        else:
            # triangular profile: switch from max acceleration to max
            # braking at t1 (xDotMax is never reached)
            t1 = np.sqrt((xFinal - x0)/((xDDotMax/2.0)*(1-xDDotMax/xDDotMin)))
            t2 = -1*(xDDotMax/xDDotMin)*t1
            t = np.linspace(0,t1,int(np.ceil(t1/dT)))
            x0tot1 = x0 + xDDotMax/2.0*t**2
            t = np.linspace(dT,t2,int(np.ceil(t2/dT)))
            xt1tot2 = x0tot1[-1] + (xDDotMax*t1)*t + (xDDotMin/2.0)*t**2
            xTrajectory = np.append(x0tot1,xt1tot2,axis=0)
    elif x0 > xFinal: #decrease x to xf
        # mirror image of the increasing case, driven by the minimum bounds
        fallTime = (xDotMin)/(xDDotMin)
        riseTime = (-1*xDotMin)/(xDDotMax)
        xFT = x0 + (xDDotMin/2.0)*fallTime**2
        xRT = xFT + (xDDotMin*fallTime)*riseTime + (xDDotMax/2.0)*riseTime**2
        if xRT > xFinal:
            # trapezoidal profile with a cruise segment at xDotMin
            diff = xFinal - xRT
            tBottom = diff/xDotMin
            t = np.linspace(0,fallTime,int(np.ceil(fallTime/dT)))
            x0toFT = x0 + (xDDotMin/2.0)*t**2
            t = np.linspace(dT, tBottom, int(np.ceil(tBottom/dT)))
            xFTtoDiff = x0toFT[-1] + xDDotMin*fallTime*t
            t = np.linspace(dT,riseTime,int(np.ceil(riseTime/dT)))
            xDifftoRT = xFTtoDiff[-1] + xDDotMin*fallTime*t + 0.5*xDDotMax*t**2
            xTrajectory = np.append(x0toFT,np.append(xFTtoDiff,xDifftoRT,axis=0),axis=0)
        else:
            # triangular profile in the decreasing direction
            t1 = np.sqrt((xFinal - x0)/((xDDotMin/2.0)*(1-xDDotMin/xDDotMax)))
            t2 = -1*(xDDotMin/xDDotMax)*t1
            t = np.linspace(0,t1,int(np.ceil(t1/dT)))
            x0tot1 = x0 + xDDotMin/2.0*t**2
            t = np.linspace(dT,t2,int(np.ceil(t2/dT)))
            xt1tot2 = x0tot1[-1] + (xDDotMin*t1)*t + (xDDotMax/2.0)*t**2
            xTrajectory = np.append(x0tot1,xt1tot2,axis=0)
    else: #x0 = xFinal
        # nothing to do: return a two-sample constant trajectory
        xTrajectory = np.array([x0, xFinal])
    return xTrajectory
def generateCurvatureTrajectoryDDot(self, k0, kFinal):
"""
Helper function to call generateOptimalTrajectory and return a trajectory with curvature equal to kInit and final curvature equal to kFinal.
This function differs from generateCurvatureTrajectoryDDot in that it produces a transition which is both continuous and differentiable, and respects kDDot constraints.
Input:
k0: Initial curvature
kFinal: Final curvature
Output:
kTrajectory: 1xN numpy array of curvature values for each timestep
"""
xDotMax = self.kDotMax
xDotMin = self.kDotMin
xDDotMax = self.kDDotMax
xDDotMin = self.kDDotMin
return self.generateOptimalTrajectory(k0, kFinal, xDotMax, xDotMin, xDDotMax, xDDotMin)
def generateSpeedTrajectoryDDot(self, v0, vFinal):
"""
Helper function to call generateOptimalTrajectory and return a speed trajectory which has starting velocity equal to vInit and final speed equal to vFinal.
Input:
v0: Initial speed
vFinal: Final speed
Output:
vTrajectory: 1xN numpy array of velocity values
"""
xDotMax = self.vDotMax
xDotMin = self.vDotMin
xDDotMax = self.vDDotMax
xDDotMin = self.vDDotMin
return self.generateOptimalTrajectory(v0, vFinal, xDotMax, xDotMin, xDDotMax, xDDotMin)
def makeTrajectoriesEqualLength(self, kTrajIn, vTrajIn, fromStart=False):
"""
Takes curvature and velocity trajectories and makes them the same length. Either cutting vTraj from the start or end, or by adding values to the start or end of vTraj.
Input:
kTrajIn: Input curvature trajectory
vTrajIn: input velocity trajectory
fromStart: set to true if values will be cut or added to the front of the array, False if values are cut or added to end of the array
Output:
Output Dict:
kTraj: return curvature trajectory
vTraj: return velocity trajectory
cutV: bool value, true if the velocity trajectory was shortened by the operation
leftover: array of velocity values that was cut from input array, if cutV is False then this is a 2x1 array of either the first or last value of the input velocity array
"""
cutV = False
if (len(kTrajIn) < len(vTrajIn)) and not fromStart: # cut from end of vTraj
vTraj = vTrajIn[0:len(kTrajIn)]
leftover = vTrajIn[len(kTrajIn):len(vTrajIn)]
cutV = True
elif (len(kTrajIn) < len(vTrajIn)) and fromStart: # cut from start of vTraj
vTraj = vTrajIn[len(vTrajIn) - len(kTrajIn):len(vTrajIn)]
leftover = vTrajIn[0:len(vTrajIn) - len(kTrajIn)]
cutV = True
elif (len(kTrajIn) > len(vTrajIn)) and not fromStart: # add to end of vTraj
extension = vTrajIn[-1]*np.ones(len(kTrajIn) - len(vTrajIn))
vTraj = np.append(vTrajIn, extension, | |
trains on each day of the week that start, end or make an intermediate stop at a TIPLOC
Input parameters:
-----------------
flow_dataframe: Pandas dataframe with details of the weekly train timetable information
path_criteria: String column name that contains the TIPLOC path information of each train
Result:
-------
station_train_df: Pandas dataframe containing:
- tiploc - TIPLOC code IDs
- origin_trains_day (mon-sun) - Numbers of trains orginiating from TIPLOC on a day of a week
- destination_trains_day (mon-sun) - Numbers of trains terminating at TIPLOC on a day of a week
- intermediate_trains_day (mon-sun) - Numbers of trains passing through TIPLOC on a day of a week
"""
station_train_dict = []
for v in flow_dataframe.itertuples():
path = getattr(v,path_criteria)
start = {
'tiploc': path[0],
'origin_trains_mon':v.mon,
'origin_trains_tue':v.tue,
'origin_trains_wed':v.wed,
'origin_trains_thu':v.thu,
'origin_trains_fri':v.fri,
'origin_trains_sat':v.sat,
'origin_trains_sun':v.sun
}
station_train_dict.append(start)
end = {
'tiploc':path[-1],
'destination_trains_mon':v.mon,
'destination_trains_tue':v.tue,
'destination_trains_wed':v.wed,
'destination_trains_thu':v.thu,
'destination_trains_fri':v.fri,
'destination_trains_sat':v.sat,
'destination_trains_sun':v.sun
}
station_train_dict.append(end)
for p in path[1:-1]:
intermediate = {
'tiploc':p,
'intermediate_trains_mon':v.mon,
'intermediate_trains_tue':v.tue,
'intermediate_trains_wed':v.wed,
'intermediate_trains_thu':v.thu,
'intermediate_trains_fri':v.fri,
'intermediate_trains_sat':v.sat,
'intermediate_trains_sun':v.sun
}
station_train_dict.append(intermediate)
station_train_df = pd.DataFrame(station_train_dict).fillna(0)
train_cols = [c for c in station_train_df.columns.values.tolist() if c != 'tiploc']
return station_train_df.groupby(['tiploc'])[train_cols].sum().reset_index()
def create_networkx_from_dataframe(graph_dataframe, directed=False, simple=False):
    """Create a networkx graph object from a pandas dataframe

    Input parameters:
    -----------------
    graph_dataframe: Pandas dataframe with graph topology ('from_node',
        'to_node', then edge-attribute columns)
    directed: Boolean True or False for creating a directed graph object
    simple: Boolean, build a simple graph (Graph/DiGraph) rather than a
        multigraph (MultiGraph/MultiDiGraph)

    Result:
    -------
    graph: Networkx graph object
    """
    # choose the graph flavour from the two flags
    if directed:
        create_using = nx.DiGraph() if simple else nx.MultiDiGraph()
    else:
        create_using = nx.Graph() if simple else nx.MultiGraph()
    graph = nx.from_pandas_edgelist(
        graph_dataframe,
        'from_node',
        'to_node',
        edge_attr=list(graph_dataframe.columns)[2:],
        create_using=create_using
    )
    es, vs, simple = graph.edges, graph.nodes, not graph.is_multigraph()
    d = "directed" if directed else "undirected"
    s = "simple" if simple else "multi"
    print(
        "Created {}, {} {}: {} edges, {} nodes.".format(
            s, d, "nxgraph", len(es), len(vs)))
    return graph
def path_attributes_networkx(graph, path, attribute):
    """Extract the attributes from a networkx graph object, given a node path

    Input parameters:
    -----------------
    graph: Networkx graph object
    path: List of nodes that form a path on the graph
    attribute: String name of the attribute stored in the networkx edge
        attribute dictionary

    Result:
    -------
    attr: List of the attribute values of the edges along the node path,
        matched in either edge orientation
    """
    attr = []
    # walk consecutive node pairs along the path
    for od in zip(path[:-1], path[1:]):
        for u, v, data in graph.edges(data=True):
            if (u, v) == od or (v, u) == od:
                attr.append(data[attribute])
    return attr
def path_to_route(path_dataframe,edge_dataframe,tiploc_mapping_nodes):
    """Map the TIPLOC route onto the network route by mapping TIPLOC to network nodes and
    Finding the equivalent route on a network for given node-node paths that might not be adjacent to each other
    By estimating the shortest path between consecutive nodes on the network

    Input parameters:
    -----------------
    path_dataframe: Pandas dataframe with details of the node-node path information
    edge_dataframe: Pandas dataframe with details of the network topology
    tiploc_mapping_nodes: Pandas dataframe with the details of the matches between TIPLOC IDs and node IDs

    Result:
    -------
    path_dataframe: Pandas dataframe with details of the network routes
    with node IDs, edge IDs and distances along routes
    """
    node_paths = []
    edge_paths = []
    distance_paths = []
    # build an undirected multigraph of the rail network once, up front
    net = create_networkx_from_dataframe(edge_dataframe,directed=False)
    for path_data in path_dataframe.itertuples():
        node_path = []
        path = path_data.path_stops
        for p in range(len(path)-1):
            # translate consecutive TIPLOC stops to network node IDs
            source = tiploc_mapping_nodes.loc[tiploc_mapping_nodes['tiploc']==path[p],'node_id'].values[0]
            target = tiploc_mapping_nodes.loc[tiploc_mapping_nodes['tiploc']==path[p+1],'node_id'].values[0]
            if source != target:
                # stops need not be adjacent on the network: bridge them
                # with the shortest path by edge length
                pth = nx.shortest_path(net,source,target, weight = 'length')
            else:
                pth = [source]
            if len(node_path) == 0:
                # first leg: take the whole sub-path
                for item in range(len(pth)):
                    node_path.append(pth[item])
            else:
                # subsequent legs: splice, skipping nodes that would
                # duplicate the current end of the path
                for item in range(len(pth)-1):
                    if pth[item+1] != node_path[-1]:
                        node_path.append(pth[item+1])
        # recover edge IDs and (rounded) edge lengths along the node path
        edge_path = path_attributes_networkx(net,node_path,'edge_id')
        distance_path = [round(dist,3) for dist in path_attributes_networkx(net,node_path,'length')]
        node_paths.append(node_path)
        edge_paths.append(edge_path)
        distance_paths.append(distance_path)
        print ('Done with path {} out of {}'.format(path_data.Index,len(path_dataframe.index)))
    path_dataframe['node_path']=node_paths
    path_dataframe['edge_path']=edge_paths
    path_dataframe['distances']=distance_paths
    return path_dataframe
def main():
###################################################################
# Specify all the input files and column names needed for the model
###################################################################
timetable_data = os.path.join(TIMETABLE_PATH,'ttisf459.mca') # The .mca is text file as released by ATOC
time_id_column = 'tiploc' # Name of the column assgined to station codes in the timetable data
rail_nodes = os.path.join(NETWORK_PATH,'rail_nodes.shp') # The rail node shapefile
node_id_column = 'node_id' # Name of ID column in nodes shapefile
rail_edges = os.path.join(NETWORK_PATH,'rail_edges.shp') # The rail edge shapefile
# The rail edges file should have the following columns: ['from_node','to_node','edge_id']
usage_data = os.path.join(USAGE_PATH,'estimates-of-station-usage-2017-18.xlsx') # ORR releases data in xlsx format
usage_sheet = 'Estimates of Station Usage' # Name of the excel sheet in the ORR data, which contains station annual usage data
usage_id_column = 'TLC' # Name of ID column which should be in usage column. LAter renamed to tlc
usage_entry = usage_exits = '1718 Entries & Exits' # Name of column(s) in ORR excel sheet contains statistics of annual entries and exits at stations
usage_interchanges = '1718 Interchanges' # Name of column in ORR excel sheet contains statistics of annual interchanges at stations
network_id_matches = os.path.join(ID_MATCH_PATH,'timetable_tiploc_crs_node_matching_final.csv') # To match network node IDs with timetable and station usage IDs
###################################################
# Specify some input data parameters and conditions
###################################################
cargo_type = ['OrdP','ExpP'] # Indicates that we only extract passenger train timetables from ATOC data
start_date = 190519 # We can set a start date to extract a weekly schedule of trains from the ATOC data
end_date = 190525 # We can set an end date to extract a weekly schedule of trains from the ATOC data
days = ['mon','tue','wed','thu','fri','sat','sun'] # Days in the week
selected_day = 'wed' # Select a typical day of the working week for which we will estimate the OD matrix
###############################################
# Create output folder and specify output files
###############################################
outputs = os.path.join(BASE_PATH, 'outputs')
if os.path.exists(outputs) == False:
os.mkdir(outputs)
full_timetable = os.path.join(outputs,'train_ttis_info_2019.csv')
store_full_timetable = False # Set to true if you want to store the output csv file
combined_timetable = os.path.join(outputs,'train_ttis_paths_combined_2019.csv')
store_combined_timetable = False # Set to true if you want to store the output csv file
station_daily_usage = os.path.join(outputs,'station_daily_entry_exits.csv')
store_station_daily_usage = False # Set to true if you want to store the output csv file
rail_routes = os.path.join(outputs,'rail_paths.csv')
store_rail_routes = False # Set to true if you want to store the output csv file
od_matrix_output = os.path.join(outputs,'od_matrix.csv')
store_od_matrix = True
######################################################################
# Step 1:
# Assign the annual station usage statistics to the rail network nodes
# Convert the station usage to weekly estimates
######################################################################
id_file = pd.read_csv(network_id_matches) # Contains mapping between network IDs, timetable IDs and suage IDs
print ('* Add station usage numbers of the network station nodes')
nodes = gpd.read_file(rail_nodes)
nodes = pd.merge(nodes[[node_id_column]],id_file,how='left',on=[node_id_column])
station_usage = pd.read_excel(usage_data,sheet_name=usage_sheet,thousands=",",na_values=[":"," :"]).fillna(0)
station_usage = station_usage[station_usage[usage_id_column] != 0]
station_usage.rename(columns = {usage_id_column:'tlc'},inplace=True)
usage_id_column = 'tlc'
station_usage['entries'] = 0.5*station_usage[usage_entry]
station_usage['exits'] = 0.5*station_usage[usage_exits]
station_usage['interchanges'] = station_usage[usage_interchanges]
nodes = pd.merge(nodes,
station_usage[[usage_id_column,'entries','exits','interchanges']],
on=[usage_id_column],how='left')
nodes['weekly_entries'] = 1.0*(nodes['entries'] + nodes['interchanges'])/52.0
nodes['weekly_entries'] = nodes['weekly_entries'].fillna(value=0)
nodes['weekly_exits'] = 1.0*(nodes['exits'] + nodes['interchanges'])/52.0
nodes['weekly_exits'] = nodes['weekly_exits'].fillna(value=0)
del station_usage
print ('* Process timetable data and extract the station stops')
#########################################
# Step 2:
# Extract the train timetable information
#########################################
timetable_df = process_timetable_data(timetable_data,
csv_file_write=store_full_timetable,
timetable_output=full_timetable)
##############################################################################################
# Step 3:
# Extract the train timetable for the specific date range and passenger types
# Truncate the timetable routes to only station stops
# Group all the unique train paths over a day and add up the numbers of trains along each path
##############################################################################################
station_stops = list(set(nodes[nodes['weekly_entries']>0][time_id_column].values.tolist()))
# Decided not to set a start and end date because not all routes were being represented
timetable_df = extract_timetable_slice(timetable_df,cargo_type,station_stops,
start_date=None,end_date=None,
csv_file_write=store_combined_timetable,
timetable_output=combined_timetable)
##############################################################################################
# Step 4:
# Find the number of trains that start, end and make an intermeidate stop at each TIPLOC
# Match this to the node IDs and the station usage numbers
# Find the total entries and exits along stations on a particular day
##############################################################################################
print ('* Find station starts and intermediate and final stops')
station_stops |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.