# -*- coding: utf-8 -*-
"""
gaema.auth
~~~~~~~~~~
Implementations of various third-party authentication schemes.
All the classes in this file are mixins designed to be used with
web.py RequestHandler classes. The primary methods for each service are
authenticate_redirect(), authorize_redirect(), and get_authenticated_user().
The redirect methods should be called to send the user to, e.g., the OpenID
authentication page on the third-party service, and get_authenticated_user()
should be called upon return to extract the user data from the response
returned by the third-party service.
They all take slightly different arguments because these services implement
authentication and authorization slightly differently.
See the individual service classes below for complete documentation.
Example usage for Google OpenID:
class GoogleHandler(tornado.web.RequestHandler, tornado.auth.GoogleMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Google auth failed")
# Save the user with, e.g., set_secure_cookie()
:copyright: 2009 by Facebook.
:license: Apache License Version 2.0. See gaema-LICENSE for more details.
"""
import base64
import binascii
import cgi
import hashlib
import hmac
import httpclient
import escape
import logging
import time
import urllib
import urlparse
import uuid
from werkzeug.exceptions import InternalServerError
def make_full_url(base, args):
def normalize(s):
if isinstance(s, unicode):
return s.encode('utf-8')
return s
encoded = dict([(normalize(k), normalize(v)) for k, v in args.items()])
if "?" in base:
delimiter = "&"
else:
delimiter = "?"
return base + delimiter + urllib.urlencode(encoded)
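# For example (hypothetical values): make_full_url("http://example.com/auth",
# {"a": "b c"}) returns "http://example.com/auth?a=b+c"; if the base URL
# already contains a query string, "&" is used as the delimiter instead of "?".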
class OpenIdMixin(object):
"""Abstract implementation of OpenID and Attribute Exchange.
See GoogleMixin below for example implementations.
"""
def authenticate_redirect(self, callback_uri=None,
ax_attrs=["name","email","language","username"]):
"""Returns the authentication URL for this service.
After authentication, the service will redirect back to the given
callback URI.
We request the given attributes for the authenticated user by
default (name, email, language, and username). If you don't need
all those attributes for your app, you can request fewer with
the ax_attrs keyword argument.
"""
callback_uri = callback_uri or self.request.path
args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
self.redirect(make_full_url(self._OPENID_ENDPOINT, args))
def get_authenticated_user(self, callback):
"""Fetches the authenticated user data upon redirect.
This method should be called by the handler that receives the
redirect from the authenticate_redirect() or authorize_redirect()
methods.
"""
# Verify the OpenID response via direct request to the OP
args = dict((k, v[-1]) for k, v in self.request.arguments.iteritems())
args["openid.mode"] = u"check_authentication"
url = make_full_url(self._OPENID_ENDPOINT, args)
http = httpclient.AsyncHTTPClient()
http.fetch(url, self.async_callback(
self._on_authentication_verified, callback))
def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
url = urlparse.urljoin(self.request.full_url(), callback_uri)
if hasattr(self, "openid_realm"):
openid_realm = self.openid_realm
else:
openid_realm = self.request.url_root
args = {
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.claimed_id":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.identity":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.return_to": url,
"openid.realm": openid_realm,
"openid.mode": "checkid_setup",
}
if ax_attrs:
args.update({
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ax.mode": "fetch_request",
})
ax_attrs = set(ax_attrs)
required = []
if "name" in ax_attrs:
ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
required += ["firstname", "fullname", "lastname"]
args.update({
"openid.ax.type.firstname":
"http://axschema.org/namePerson/first",
"openid.ax.type.fullname":
"http://axschema.org/namePerson",
"openid.ax.type.lastname":
"http://axschema.org/namePerson/last",
})
known_attrs = {
"email": "http://axschema.org/contact/email",
"language": "http://axschema.org/pref/language",
"username": "http://axschema.org/namePerson/friendly",
}
for name in ax_attrs:
args["openid.ax.type." + name] = known_attrs[name]
required.append(name)
args["openid.ax.required"] = ",".join(required)
if oauth_scope:
args.update({
"openid.ns.oauth":
"http://specs.openid.net/extensions/oauth/1.0",
"openid.oauth.consumer": self.request.host.split(":")[0],
"openid.oauth.scope": oauth_scope,
})
return args
def _on_authentication_verified(self, callback, response):
if response.error or u"is_valid:true" not in response.body:
logging.warning("Invalid OpenID response: %s", response.error or
response.body)
callback(None)
return
# Make sure we got back at least an email from attribute exchange
ax_ns = None
for name, values in self.request.arguments.iteritems():
if name.startswith("openid.ns.") and \
values[-1] == u"http://openid.net/srv/ax/1.0":
ax_ns = name[10:]
break
openid_signed_params = self.get_argument("openid.signed","").split(',')
def get_ax_arg(uri):
if not ax_ns: return u""
prefix = "openid." + ax_ns + ".type."
ax_name = None
for name, values in self.request.arguments.iteritems():
if not name[len("openid."):] in openid_signed_params:
continue
if values[-1] == uri and name.startswith(prefix):
part = name[len(prefix):]
ax_name = "openid." + ax_ns + ".value." + part
break
if not ax_name: return u""
if not ax_name[len("openid."):] in openid_signed_params: return u""
return self.get_argument(ax_name, u"")
claimed_id = self.get_argument("openid.claimed_id", None)
email = get_ax_arg("http://axschema.org/contact/email")
name = get_ax_arg("http://axschema.org/namePerson")
first_name = get_ax_arg("http://axschema.org/namePerson/first")
last_name = get_ax_arg("http://axschema.org/namePerson/last")
username = get_ax_arg("http://axschema.org/namePerson/friendly")
locale = get_ax_arg("http://axschema.org/pref/language").lower()
user = dict()
name_parts = []
if first_name:
user["first_name"] = first_name
name_parts.append(first_name)
if last_name:
user["last_name"] = last_name
name_parts.append(last_name)
if name:
user["name"] = name
elif name_parts:
user["name"] = u" ".join(name_parts)
elif email:
user["name"] = email.split("@")[0]
if email: user["email"] = email
if locale: user["locale"] = locale
if username: user["username"] = username
if claimed_id: user["claimed_id"] = claimed_id
callback(user)
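# A sketch of the user dict that _on_authentication_verified passes to the
# callback (all values are hypothetical and depend on what the OpenID provider
# returns via Attribute Exchange):
#
#     {"name": u"Jane Doe", "first_name": u"Jane", "last_name": u"Doe",
#      "email": u"jane@example.com", "locale": u"en-us",
#      "claimed_id": u"https://example.com/openid?id=12345"}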
class OAuthMixin(object):
"""Abstract implementation of OAuth.
See TwitterMixin and FriendFeedMixin below for example implementations.
"""
def authorize_redirect(self, callback_uri=None):
"""Redirects the user to obtain OAuth authorization for this service.
Twitter and FriendFeed both require that you register a Callback
URL with your application. You should call this method to log the
user in, and then call get_authenticated_user() in the handler
you registered as your Callback URL to complete the authorization
process.
This method sets a cookie called _oauth_request_token which is
subsequently used (and cleared) in get_authenticated_user for
security purposes.
"""
if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
raise Exception("This service does not support oauth_callback")
http = httpclient.AsyncHTTPClient()
http.fetch(self._oauth_request_token_url(), self.async_callback(
self._on_request_token, self._OAUTH_AUTHORIZE_URL, callback_uri))
def get_authenticated_user(self, callback):
"""Gets the OAuth authorized user and access token on callback.
This method should be called from the handler for your registered
OAuth Callback URL to complete the registration process. We call
        callback with the authenticated user, which in addition to standard
        attributes like 'name' includes the 'access_token' attribute, which
        contains the OAuth access token you can use to make authorized requests
        to this service on behalf of the user.
"""
request_key = self.get_argument("oauth_token")
oauth_verifier = self.get_argument("oauth_verifier")
request_cookie = self.get_cookie("_oauth_request_token")
if not request_cookie:
logging.warning("Missing OAuth request token cookie")
callback(None)
return
self.clear_cookie("_oauth_request_token")
cookie_key, cookie_secret = request_cookie.split("|")
if cookie_key != request_key:
logging.warning("Request token does not match cookie")
callback(None)
return
token = dict(key=cookie_key, secret=cookie_secret, verifier=oauth_verifier)
http = httpclient.AsyncHTTPClient()
http.fetch(self._oauth_access_token_url(token), self.async_callback(
self._on_access_token, callback))
def _oauth_request_token_url(self):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_REQUEST_TOKEN_URL
args = dict(
oauth_consumer_key=consumer_token["key"],
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes),
oauth_version="1.0a",
oauth_callback=self.request.full_url(),
)
signature = _oauth_signature(consumer_token, "GET", url, args)
args["oauth_signature"] = signature
return make_full_url(url, args)
def _on_request_token(self, authorize_url, callback_uri, response):
if response.error:
raise InternalServerError("Could not get request token")
request_token = _oauth_parse_response(response.body)
data = "|".join([request_token["key"], request_token["secret"]])
self.set_cookie("_oauth_request_token", data)
args = dict(oauth_token=request_token["key"])
if callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
self.redirect(make_full_url(authorize_url, args))
def _oauth_access_token_url(self, request_token):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
oauth_consumer_key=consumer_token["key"],
oauth_token=request_token["key"],
oauth_verifier=request_token["verifier"],
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes),
oauth_version="1.0a",
)
signature = _oauth_signature(consumer_token, "GET", url, args,
request_token)
args["oauth_signature"] = signature
return make_full_url(url, args)
def _on_access_token(self, callback, response):
if response.error:
logging.warning("Could not fetch access token")
callback(None)
return
access_token = _oauth_parse_response(response.body)
        self._oauth_get_user(access_token, self.async_callback(
self._on_oauth_get_user, access_token, callback))
def _oauth_get_user(self, access_token, callback):
raise NotImplementedError()
def _on_oauth_get_user(self, access_token, callback, user):
if not user:
callback(None)
return
user["access_token"] = access_token
callback(user)
def _oauth_request_parameters(self, url, access_token, parameters={},
method="GET"):
"""Returns the OAuth parameters as a dict for the given request.
parameters should include all POST arguments and query string arguments
that will be sent with the request.
"""
consumer_token = self._oauth_consumer_token()
base_args = dict(
oauth_consumer_key=consumer_token["key"],
oauth_token=access_token["key"],
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes),
oauth_version="1.0",
)
args = {}
args.update(base_args)
args.update(parameters)
signature = _oauth_signature(consumer_token, method, url, args,
access_token)
base_args["oauth_signature"] = signature
return base_args
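# A sketch of signing an arbitrary API request with _oauth_request_parameters
# (the URL, parameter, and token values below are hypothetical):
#
#     url = "http://api.example.com/resource"
#     token = dict(key="access-token-key", secret="access-token-secret")
#     oauth_args = self._oauth_request_parameters(url, token, {"count": "10"})
#     signed_url = make_full_url(url, dict({"count": "10"}, **oauth_args))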
class TwitterMixin(OAuthMixin):
"""Twitter OAuth authentication.
To authenticate with Twitter, register your application with
Twitter at http://twitter.com/apps. Then copy your Consumer Key and
Consumer Secret to the application settings 'twitter_consumer_key' and
'twitter_consumer_secret'. Use this Mixin on the handler for the URL
you registered as your application's Callback URL.
When your application is set up, you can use this Mixin like this
to authenticate the user with Twitter and get access to their stream:
class TwitterHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("oauth_token", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authorize_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Twitter auth failed")
# Save the user using, e.g., set_secure_cookie()
The user object returned by get_authenticated_user() includes the
attributes 'username', 'name', and all of the custom Twitter user
    attributes described at
http://apiwiki.twitter.com/Twitter-REST-API-Method%3A-users%C2%A0show
in addition to 'access_token'. You should save the access token with
the user; it is required to make requests on behalf of the user later
with twitter_request().
"""
_OAUTH_REQUEST_TOKEN_URL = "http://twitter.com/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "http://twitter.com/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "http://twitter.com/oauth/authorize"
_OAUTH_AUTHENTICATE_URL = "http://twitter.com/oauth/authenticate"
_OAUTH_NO_CALLBACKS = True
def authenticate_redirect(self):
"""Just like authorize_redirect(), but auto-redirects if authorized.
This is generally the right interface to use if you are using
Twitter for single-sign on.
"""
http = httpclient.AsyncHTTPClient()
http.fetch(self._oauth_request_token_url(), self.async_callback(
self._on_request_token, self._OAUTH_AUTHENTICATE_URL, None))
def twitter_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given API path, e.g., "/statuses/user_timeline/btaylor"
The path should not include the format (we automatically append
".json" and parse the JSON output).
If the request is a POST, post_args should be provided. Query
string arguments should be given as keyword arguments.
All the Twitter methods are documented at
http://apiwiki.twitter.com/Twitter-API-Documentation.
Many methods require an OAuth access token which you can obtain
through authorize_redirect() and get_authenticated_user(). The
user returned through that process includes an 'access_token'
attribute that can be used to make authenticated requests via
this method. Example usage:
class MainHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.twitter_request(
"/statuses/update",
post_args={"status": "Testing Tornado Web Server"},
                        access_token=self.current_user["access_token"],
callback=self.async_callback(self._on_post))
def _on_post(self, new_entry):
if not new_entry:
# Call failed; perhaps missing permission?
self.authorize_redirect()
return
self.finish("Posted a message!")
"""
# Add the OAuth resource request signature if we have credentials
url = "http://twitter.com" + path + ".json"
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
consumer_token = self._oauth_consumer_token()
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args: url = make_full_url(url, args)
callback = self.async_callback(self._on_twitter_request, callback)
http = httpclient.AsyncHTTPClient()
if post_args is not None:
http.fetch(url, method="POST", body=urllib.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_twitter_request(self, callback, response):
if response.error:
logging.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
callback(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("twitter_consumer_key", "Twitter OAuth")
self.require_setting("twitter_consumer_secret", "Twitter OAuth")
return dict(
key=self.settings["twitter_consumer_key"],
secret=self.settings["twitter_consumer_secret"])
def _oauth_get_user(self, access_token, callback):
callback = self.async_callback(self._parse_user_response, callback)
self.twitter_request(
"/users/show/" + access_token["screen_name"],
access_token=access_token, callback=callback)
def _parse_user_response(self, callback, user):
if user:
user["username"] = user["screen_name"]
callback(user)
class FriendFeedMixin(OAuthMixin):
"""FriendFeed OAuth authentication.
To authenticate with FriendFeed, register your application with
FriendFeed at http://friendfeed.com/api/applications. Then
copy your Consumer Key and Consumer Secret to the application settings
'friendfeed_consumer_key' and 'friendfeed_consumer_secret'. Use
this Mixin on the handler for the URL you registered as your
application's Callback URL.
When your application is set up, you can use this Mixin like this
to authenticate the user with FriendFeed and get access to their feed:
class FriendFeedHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("oauth_token", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authorize_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "FriendFeed auth failed")
# Save the user using, e.g., set_secure_cookie()
The user object returned by get_authenticated_user() includes the
attributes 'username', 'name', and 'description' in addition to
'access_token'. You should save the access token with the user;
it is required to make requests on behalf of the user later with
friendfeed_request().
"""
_OAUTH_REQUEST_TOKEN_URL = "https://friendfeed.com/account/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "https://friendfeed.com/account/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "https://friendfeed.com/account/oauth/authorize"
_OAUTH_NO_CALLBACKS = True
def friendfeed_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/bret/friends"
If the request is a POST, post_args should be provided. Query
string arguments should be given as keyword arguments.
All the FriendFeed methods are documented at
http://friendfeed.com/api/documentation.
Many methods require an OAuth access token which you can obtain
through authorize_redirect() and get_authenticated_user(). The
user returned through that process includes an 'access_token'
attribute that can be used to make authenticated requests via
this method. Example usage:
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.friendfeed_request(
"/entry",
post_args={"body": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"],
callback=self.async_callback(self._on_post))
def _on_post(self, new_entry):
if not new_entry:
# Call failed; perhaps missing permission?
self.authorize_redirect()
return
self.finish("Posted a message!")
"""
# Add the OAuth resource request signature if we have credentials
url = "http://friendfeed-api.com/v2" + path
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
consumer_token = self._oauth_consumer_token()
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args: url = make_full_url(url, args)
callback = self.async_callback(self._on_friendfeed_request, callback)
http = httpclient.AsyncHTTPClient()
if post_args is not None:
http.fetch(url, method="POST", body=urllib.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_friendfeed_request(self, callback, response):
if response.error:
logging.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
callback(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("friendfeed_consumer_key", "FriendFeed OAuth")
self.require_setting("friendfeed_consumer_secret", "FriendFeed OAuth")
return dict(
key=self.settings["friendfeed_consumer_key"],
secret=self.settings["friendfeed_consumer_secret"])
def _oauth_get_user(self, access_token, callback):
callback = self.async_callback(self._parse_user_response, callback)
self.friendfeed_request(
"/feedinfo/" + access_token["username"],
include="id,name,description", access_token=access_token,
callback=callback)
def _parse_user_response(self, callback, user):
if user:
user["username"] = user["id"]
callback(user)
class GoogleMixin(OpenIdMixin, OAuthMixin):
"""Google Open ID / OAuth authentication.
No application registration is necessary to use Google for authentication
or to access Google resources on behalf of a user. To authenticate with
Google, redirect with authenticate_redirect(). On return, parse the
response with get_authenticated_user(). We send a dict containing the
values for the user, including 'email', 'name', and 'locale'.
Example usage:
class GoogleHandler(tornado.web.RequestHandler, tornado.auth.GoogleMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Google auth failed")
# Save the user with, e.g., set_secure_cookie()
"""
_OPENID_ENDPOINT = "https://www.google.com/accounts/o8/ud"
_OAUTH_ACCESS_TOKEN_URL = "https://www.google.com/accounts/OAuthGetAccessToken"
def authorize_redirect(self, oauth_scope, callback_uri=None,
ax_attrs=["name","email","language","username"]):
"""Authenticates and authorizes for the given Google resource.
Some of the available resources are:
Gmail Contacts - http://www.google.com/m8/feeds/
Calendar - http://www.google.com/calendar/feeds/
Finance - http://finance.google.com/finance/feeds/
You can authorize multiple resources by separating the resource
URLs with a space.
"""
callback_uri = callback_uri or self.request.path
args = self._openid_args(callback_uri, ax_attrs=ax_attrs,
oauth_scope=oauth_scope)
self.redirect(make_full_url(self._OPENID_ENDPOINT, args))
def get_authenticated_user(self, callback):
"""Fetches the authenticated user data upon redirect."""
# Look to see if we are doing combined OpenID/OAuth
oauth_ns = ""
for name, values in self.request.arguments.iteritems():
if name.startswith("openid.ns.") and \
values[-1] == u"http://specs.openid.net/extensions/oauth/1.0":
oauth_ns = name[10:]
break
token = self.get_argument("openid." + oauth_ns + ".request_token", "")
if token:
http = httpclient.AsyncHTTPClient()
token = dict(key=token, secret="", verifier="")
http.fetch(self._oauth_access_token_url(token),
self.async_callback(self._on_access_token, callback))
else:
OpenIdMixin.get_authenticated_user(self, callback)
def _oauth_consumer_token(self):
self.require_setting("google_consumer_key", "Google OAuth")
self.require_setting("google_consumer_secret", "Google OAuth")
return dict(
key=self.settings["google_consumer_key"],
secret=self.settings["google_consumer_secret"])
def _oauth_get_user(self, access_token, callback):
OpenIdMixin.get_authenticated_user(self, callback)
class FacebookMixin(object):
"""Facebook Connect authentication.
To authenticate with Facebook, register your application with
Facebook at http://www.facebook.com/developers/apps.php. Then
copy your API Key and Application Secret to the application settings
'facebook_api_key' and 'facebook_secret'.
When your application is set up, you can use this Mixin like this
to authenticate the user with Facebook:
class FacebookHandler(tornado.web.RequestHandler,
tornado.auth.FacebookMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("auth_token", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Facebook auth failed")
# Save the user using, e.g., set_secure_cookie()
The user object returned by get_authenticated_user() includes the
attributes 'facebook_uid' and 'name' in addition to session attributes
like 'session_key'. You should save the session key with the user; it is
required to make requests on behalf of the user later with
facebook_request().
"""
def authenticate_redirect(self, callback_uri=None, cancel_uri=None,
extended_permissions=None):
"""Authenticates/installs this app for the current user."""
self.require_setting("facebook_api_key", "Facebook Connect")
callback_uri = callback_uri or self.request.path
args = {
"api_key": self.settings["facebook_api_key"],
"v": "1.0",
"fbconnect": "true",
"display": "page",
"next": urlparse.urljoin(self.request.full_url(), callback_uri),
"return_session": "true",
}
if cancel_uri:
args["cancel_url"] = urlparse.urljoin(
self.request.full_url(), cancel_uri)
if extended_permissions:
if isinstance(extended_permissions, basestring):
extended_permissions = [extended_permissions]
args["req_perms"] = ",".join(extended_permissions)
self.redirect("http://www.facebook.com/login.php?" +
urllib.urlencode(args))
def authorize_redirect(self, extended_permissions, callback_uri=None,
cancel_uri=None):
"""Redirects to an authorization request for the given FB resource.
The available resource names are listed at
http://wiki.developers.facebook.com/index.php/Extended_permission.
The most common resource types include:
publish_stream
read_stream
email
sms
extended_permissions can be a single permission name or a list of
names. To get the session secret and session key, call
get_authenticated_user() just as you would with
authenticate_redirect().
"""
self.authenticate_redirect(callback_uri, cancel_uri,
extended_permissions)
def get_authenticated_user(self, callback):
"""Fetches the authenticated Facebook user.
The authenticated user includes the special Facebook attributes
'session_key' and 'facebook_uid' in addition to the standard
user attributes like 'name'.
"""
self.require_setting("facebook_api_key", "Facebook Connect")
session = escape.json_decode(self.get_argument("session"))
self.facebook_request(
method="facebook.users.getInfo",
callback=self.async_callback(
self._on_get_user_info, callback, session),
session_key=session["session_key"],
uids=session["uid"],
fields="uid,first_name,last_name,name,locale,pic_square")
def facebook_request(self, method, callback, **args):
"""Makes a Facebook API REST request.
We automatically include the Facebook API key and signature, but
        it is the caller's responsibility to include 'session_key' and any
other required arguments to the method.
The available Facebook methods are documented here:
http://wiki.developers.facebook.com/index.php/API
Here is an example for the stream.get() method:
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.facebook_request(
method="stream.get",
callback=self.async_callback(self._on_stream),
session_key=self.current_user["session_key"])
def _on_stream(self, stream):
if stream is None:
# Not authorized to read the stream yet?
                        self.authorize_redirect("read_stream")
return
self.render("stream.html", stream=stream)
"""
self.require_setting("facebook_api_key", "Facebook Connect")
self.require_setting("facebook_secret", "Facebook Connect")
if not method.startswith("facebook."):
method = "facebook." + method
args["api_key"] = self.settings["facebook_api_key"]
args["v"] = "1.0"
args["method"] = method
args["call_id"] = str(long(time.time() * 1e6))
args["format"] = "json"
args["sig"] = self._signature(args)
url = "http://api.facebook.com/restserver.php?" + \
urllib.urlencode(args)
http = httpclient.AsyncHTTPClient()
http.fetch(url, callback=self.async_callback(
self._parse_response, callback))
def _on_get_user_info(self, callback, session, users):
if users is None:
callback(None)
return
callback({
"name": users[0]["name"],
"first_name": users[0]["first_name"],
"last_name": users[0]["last_name"],
"uid": users[0]["uid"],
"locale": users[0]["locale"],
"session_key": session["session_key"],
"session_expires": session["expires"],
})
def _parse_response(self, callback, response):
if response.error:
logging.warning("HTTP error from Facebook: %s", response.error)
callback(None)
return
try:
json = escape.json_decode(response.body)
except:
logging.warning("Invalid JSON from Facebook: %r", response.body)
callback(None)
return
if isinstance(json, dict) and json.get("error_code"):
logging.warning("Facebook error: %d: %r", json["error_code"],
json.get("error_msg"))
callback(None)
return
callback(json)
def _signature(self, args):
parts = ["%s=%s" % (n, args[n]) for n in sorted(args.keys())]
body = "".join(parts) + self.settings["facebook_secret"]
if isinstance(body, unicode): body = body.encode("utf-8")
return hashlib.md5(body).hexdigest()
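# For example, the Facebook _signature above with the hypothetical args
# {"method": "facebook.users.getInfo", "v": "1.0"} and a facebook_secret of
# "s3cret" sorts the keys, concatenates each "name=value" pair with no
# separator, and appends the secret, so the MD5 input is
# "method=facebook.users.getInfov=1.0s3cret".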
def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth signature for the given request.
See http://oauth.net/core/1.0/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [_oauth_escape(consumer_token["secret"])]
key_elems.append(_oauth_escape(token["secret"] if token else ""))
key = "&".join(key_elems)
hash = hmac.new(key, base_string, hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
def _oauth_escape(val):
if isinstance(val, unicode):
val = val.encode("utf-8")
return urllib.quote(val, safe="~")
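# For example: _oauth_escape("a b/c~") returns "a%20b%2Fc~"; only "~" and the
# alphanumeric/unreserved characters survive unescaped, as OAuth 1.0 signing
# requires.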
def _oauth_parse_response(body):
p = cgi.parse_qs(body, keep_blank_values=False)
token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0])
# Add the extra parameters the Provider included to the token
special = ("oauth_token", "oauth_token_secret")
token.update((k, p[k][0]) for k in p if k not in special)
return token
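# For example (hypothetical values):
# _oauth_parse_response("oauth_token=abc&oauth_token_secret=def&screen_name=bob")
# returns {"key": "abc", "secret": "def", "screen_name": "bob"}.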
class YahooMixin(OpenIdMixin):
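    """Yahoo OpenID authentication.

    This mixin only overrides the OpenID endpoint; the flow is the same as in
    the OpenIdMixin/GoogleMixin examples above. A minimal sketch (assuming the
    same tornado-style RequestHandler integration as the other mixins):

        class YahooHandler(tornado.web.RequestHandler, YahooMixin):
            @tornado.web.asynchronous
            def get(self):
                if self.get_argument("openid.mode", None):
                    self.get_authenticated_user(self.async_callback(self._on_auth))
                    return
                self.authenticate_redirect()
    """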
_OPENID_ENDPOINT = 'https://open.login.yahooapis.com/openid/op/auth'
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import copy
import sys
import time
from webkitpy.layout_tests.port import DeviceFailure, Driver, DriverOutput, Port
from webkitpy.layout_tests.port.base import VirtualTestSuite
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.models import test_run_results
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.crashlogs import CrashLogs
# This sets basic expectations for a test. Each individual expectation
# can be overridden by a keyword argument in TestList.add().
class TestInstance(object):
def __init__(self, name):
self.name = name
self.base = name[(name.rfind("/") + 1):name.rfind(".")]
self.crash = False
self.web_process_crash = False
self.exception = False
self.keyboard = False
self.error = ''
self.timeout = False
self.is_reftest = False
self.device_failure = False
self.leak = False
# The values of each field are treated as raw byte strings. They
# will be converted to unicode strings where appropriate using
# FileSystem.read_text_file().
self.actual_text = self.base + '-txt'
self.actual_checksum = self.base + '-checksum'
# We add the '\x8a' for the image file to prevent the value from
# being treated as UTF-8 (the character is invalid)
self.actual_image = self.base + '\x8a' + '-png' + 'tEXtchecksum\x00' + self.actual_checksum
self.expected_text = self.actual_text
self.expected_image = self.actual_image
self.actual_audio = None
self.expected_audio = None
# This is an in-memory list of tests, what we want them to produce, and
# what we want to claim are the expected results.
class TestList(object):
def __init__(self):
self.tests = {}
def add(self, name, **kwargs):
test = TestInstance(name)
for key, value in kwargs.items():
test.__dict__[key] = value
self.tests[name] = test
def add_reftest(self, name, reference_name, same_image, crash=False):
self.add(name, actual_checksum='xxx', actual_image='XXX', is_reftest=True, crash=crash)
if same_image:
self.add(reference_name, actual_checksum='xxx', actual_image='XXX', is_reftest=True)
else:
self.add(reference_name, actual_checksum='yyy', actual_image='YYY', is_reftest=True)
def keys(self):
return self.tests.keys()
def __contains__(self, item):
return item in self.tests
def __getitem__(self, item):
return self.tests[item]
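# A minimal sketch of using TestList (the test name and output below are
# hypothetical):
#
#     tests = TestList()
#     tests.add('passes/example.html', actual_text='example-txt')
#     assert 'passes/example.html' in tests
#     assert tests['passes/example.html'].actual_text == 'example-txt'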
#
# These numbers may need to be updated whenever we add or delete tests. This includes virtual tests.
#
TOTAL_TESTS = 113
TOTAL_SKIPS = 29
UNEXPECTED_PASSES = 1
UNEXPECTED_FAILURES = 26
def unit_test_list():
tests = TestList()
tests.add('failures/expected/crash.html', crash=True)
tests.add('failures/expected/exception.html', exception=True)
tests.add('failures/expected/device_failure.html', device_failure=True)
tests.add('failures/expected/timeout.html', timeout=True)
tests.add('failures/expected/leak.html', leak=True)
tests.add('failures/expected/missing_text.html', expected_text=None)
tests.add('failures/expected/needsrebaseline.html', actual_text='needsrebaseline text')
tests.add('failures/expected/needsmanualrebaseline.html', actual_text='needsmanualrebaseline text')
tests.add('failures/expected/image.html',
actual_image='image_fail-pngtEXtchecksum\x00checksum_fail',
expected_image='image-pngtEXtchecksum\x00checksum-png')
tests.add('failures/expected/image_checksum.html',
actual_checksum='image_checksum_fail-checksum',
actual_image='image_checksum_fail-png')
tests.add('failures/expected/audio.html',
actual_audio=base64.b64encode('audio_fail-wav'), expected_audio='audio-wav',
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/expected/keyboard.html', keyboard=True)
tests.add('failures/expected/missing_check.html',
expected_image='missing_check-png')
tests.add('failures/expected/missing_image.html', expected_image=None)
tests.add('failures/expected/missing_audio.html', expected_audio=None,
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/expected/missing_text.html', expected_text=None)
tests.add('failures/expected/newlines_leading.html',
expected_text="\nfoo\n", actual_text="foo\n")
tests.add('failures/expected/newlines_trailing.html',
expected_text="foo\n\n", actual_text="foo\n")
tests.add('failures/expected/newlines_with_excess_CR.html',
expected_text="foo\r\r\r\n", actual_text="foo\n")
tests.add('failures/expected/text.html', actual_text='text_fail-png')
tests.add('failures/expected/crash_then_text.html')
tests.add('failures/expected/skip_text.html', actual_text='text diff')
tests.add('failures/flaky/text.html')
tests.add('failures/unexpected/missing_text.html', expected_text=None)
tests.add('failures/unexpected/missing_check.html', expected_image='missing-check-png')
tests.add('failures/unexpected/missing_image.html', expected_image=None)
tests.add('failures/unexpected/missing_render_tree_dump.html', actual_text="""layer at (0,0) size 800x600
RenderView at (0,0) size 800x600
layer at (0,0) size 800x34
RenderBlock {HTML} at (0,0) size 800x34
RenderBody {BODY} at (8,8) size 784x18
RenderText {#text} at (0,0) size 133x18
text run at (0,0) width 133: "This is an image test!"
""", expected_text=None)
tests.add('failures/unexpected/crash.html', crash=True)
tests.add('failures/unexpected/crash-with-stderr.html', crash=True,
error="mock-std-error-output")
tests.add('failures/unexpected/web-process-crash-with-stderr.html', web_process_crash=True,
error="mock-std-error-output")
tests.add('failures/unexpected/pass.html')
tests.add('failures/unexpected/text-checksum.html',
actual_text='text-checksum_fail-txt',
actual_checksum='text-checksum_fail-checksum')
tests.add('failures/unexpected/text-image-checksum.html',
actual_text='text-image-checksum_fail-txt',
actual_image='text-image-checksum_fail-pngtEXtchecksum\x00checksum_fail',
actual_checksum='text-image-checksum_fail-checksum')
tests.add('failures/unexpected/checksum-with-matching-image.html',
actual_checksum='text-image-checksum_fail-checksum')
tests.add('failures/unexpected/skip_pass.html')
tests.add('failures/unexpected/text.html', actual_text='text_fail-txt')
tests.add('failures/unexpected/text_then_crash.html')
tests.add('failures/unexpected/timeout.html', timeout=True)
tests.add('failures/unexpected/leak.html', leak=True)
tests.add('http/tests/passes/text.html')
tests.add('http/tests/passes/image.html')
tests.add('http/tests/ssl/text.html')
tests.add('passes/args.html')
tests.add('passes/error.html', error='stuff going to stderr')
tests.add('passes/image.html')
tests.add('passes/audio.html',
actual_audio=base64.b64encode('audio-wav'), expected_audio='audio-wav',
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('passes/platform_image.html')
tests.add('passes/checksum_in_image.html',
expected_image='tEXtchecksum\x00checksum_in_image-checksum')
tests.add('passes/skipped/skip.html')
# Note that here the checksums don't match but the images do, so this test passes "unexpectedly".
# See https://bugs.webkit.org/show_bug.cgi?id=69444 .
tests.add('failures/unexpected/checksum.html', actual_checksum='checksum_fail-checksum')
# Text output files contain "\r\n" on Windows. This may be
# helpfully filtered to "\r\r\n" by our Python/Cygwin tooling.
tests.add('passes/text.html',
expected_text='\nfoo\n\n', actual_text='\nfoo\r\n\r\r\n')
# For reftests.
tests.add_reftest('passes/reftest.html', 'passes/reftest-expected.sky', same_image=True)
# This adds a different virtual reference to ensure that that also works.
tests.add('virtual/passes/reftest-expected.sky', actual_checksum='xxx', actual_image='XXX', is_reftest=True)
tests.add_reftest('passes/mismatch.html', 'passes/mismatch-expected-mismatch.sky', same_image=False)
tests.add_reftest('passes/svgreftest.svg', 'passes/svgreftest-expected.svg', same_image=True)
tests.add_reftest('passes/xhtreftest.xht', 'passes/xhtreftest-expected.sky', same_image=True)
tests.add_reftest('passes/phpreftest.php', 'passes/phpreftest-expected-mismatch.svg', same_image=False)
tests.add_reftest('failures/expected/reftest.html', 'failures/expected/reftest-expected.sky', same_image=False)
tests.add_reftest('failures/expected/mismatch.html', 'failures/expected/mismatch-expected-mismatch.sky', same_image=True)
tests.add_reftest('failures/unexpected/crash-reftest.html', 'failures/unexpected/crash-reftest-expected.sky', same_image=True, crash=True)
tests.add_reftest('failures/unexpected/reftest.html', 'failures/unexpected/reftest-expected.sky', same_image=False)
tests.add_reftest('failures/unexpected/mismatch.html', 'failures/unexpected/mismatch-expected-mismatch.sky', same_image=True)
tests.add('failures/unexpected/reftest-nopixel.html', actual_checksum=None, actual_image=None, is_reftest=True)
tests.add('failures/unexpected/reftest-nopixel-expected.sky', actual_checksum=None, actual_image=None, is_reftest=True)
tests.add('reftests/foo/test.html')
tests.add('reftests/foo/test-ref.html')
tests.add('reftests/foo/multiple-match-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-match-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-mismatch-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-mismatch-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-both-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-both-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/matching-ref.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/mismatching-ref.html', actual_checksum='def', actual_image='def')
tests.add('reftests/foo/second-mismatching-ref.html', actual_checksum='ghi', actual_image='ghi')
# The following files shouldn't be treated as reftests
tests.add_reftest('reftests/foo/unlistedtest.html', 'reftests/foo/unlistedtest-expected.sky', same_image=True)
tests.add('reftests/foo/reference/bar/common.html')
tests.add('reftests/foo/reftest/bar/shared.html')
tests.add('websocket/tests/passes/text.html')
# For testing that we don't run tests under platform/. Note that these don't contribute to TOTAL_TESTS.
tests.add('platform/test-mac-leopard/http/test.html')
tests.add('platform/test-win-win7/http/test.html')
# For testing if perf tests are running in a locked shard.
tests.add('perf/foo/test.html')
tests.add('perf/foo/test-ref.html')
# For testing --pixel-test-directories.
tests.add('failures/unexpected/pixeldir/image_in_pixeldir.html',
actual_image='image_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
expected_image='image_in_pixeldir-pngtEXtchecksum\x00checksum-png')
tests.add('failures/unexpected/image_not_in_pixeldir.html',
actual_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
expected_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum-png')
# For testing that virtual test suites don't expand names containing themselves
# See webkit.org/b/97925 and base_unittest.PortTest.test_tests().
tests.add('passes/test-virtual-passes.html')
tests.add('passes/passes/test-virtual-passes.html')
return tests
# Here we use a non-standard location for the layout tests, to ensure that
# this works. The path contains a '.' in the name because we've seen bugs
# related to this before.
LAYOUT_TEST_DIR = '/test.checkout/tests'
PERF_TEST_DIR = '/test.checkout/PerformanceTests'
# Here we synthesize an in-memory filesystem from the test list
# in order to fully control the test output and to demonstrate that
# we don't need a real filesystem to run the tests.
def add_unit_tests_to_mock_filesystem(filesystem):
# Add the test_expectations file.
filesystem.maybe_make_directory('/mock-checkout/tests')
if not filesystem.exists('/mock-checkout/tests/TestExpectations'):
filesystem.write_text_file('/mock-checkout/tests/TestExpectations', """
Bug(test) failures/expected/crash.html [ Crash ]
Bug(test) failures/expected/crash_then_text.html [ Failure ]
Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
Bug(test) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
Bug(test) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
Bug(test) failures/expected/audio.html [ Failure ]
Bug(test) failures/expected/image_checksum.html [ ImageOnlyFailure ]
Bug(test) failures/expected/mismatch.html [ ImageOnlyFailure ]
Bug(test) failures/expected/missing_check.html [ Missing Pass ]
Bug(test) failures/expected/missing_image.html [ Missing Pass ]
Bug(test) failures/expected/missing_audio.html [ Missing Pass ]
Bug(test) failures/expected/missing_text.html [ Missing Pass ]
Bug(test) failures/expected/newlines_leading.html [ Failure ]
Bug(test) failures/expected/newlines_trailing.html [ Failure ]
Bug(test) failures/expected/newlines_with_excess_CR.html [ Failure ]
Bug(test) failures/expected/reftest.html [ ImageOnlyFailure ]
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/timeout.html [ Timeout ]
Bug(test) failures/expected/keyboard.html [ WontFix ]
Bug(test) failures/expected/exception.html [ WontFix ]
Bug(test) failures/expected/device_failure.html [ WontFix ]
Bug(test) failures/expected/leak.html [ Leak ]
Bug(test) failures/unexpected/pass.html [ Failure ]
Bug(test) passes/skipped/skip.html [ Skip ]
Bug(test) passes/text.html [ Pass ]
""")
filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/reftests/foo')
filesystem.write_text_file(LAYOUT_TEST_DIR + '/reftests/foo/reftest.list', """
== test.html test-ref.html
== multiple-match-success.html mismatching-ref.html
== multiple-match-success.html matching-ref.html
== multiple-match-failure.html mismatching-ref.html
== multiple-match-failure.html second-mismatching-ref.html
!= multiple-mismatch-success.html mismatching-ref.html
!= multiple-mismatch-success.html second-mismatching-ref.html
!= multiple-mismatch-failure.html mismatching-ref.html
!= multiple-mismatch-failure.html matching-ref.html
== multiple-both-success.html matching-ref.html
== multiple-both-success.html mismatching-ref.html
!= multiple-both-success.html second-mismatching-ref.html
== multiple-both-failure.html matching-ref.html
!= multiple-both-failure.html second-mismatching-ref.html
!= multiple-both-failure.html matching-ref.html
""")
# FIXME: This test was only being ignored because of missing a leading '/'.
# Fixing the typo causes several tests to assert, so disabling the test entirely.
    # Add in a file that should be ignored by port.find_test_files().
#files[LAYOUT_TEST_DIR + '/userscripts/resources/iframe.html'] = 'iframe'
def add_file(test, suffix, contents):
dirname = filesystem.join(LAYOUT_TEST_DIR, test.name[0:test.name.rfind('/')])
base = test.base
filesystem.maybe_make_directory(dirname)
filesystem.write_binary_file(filesystem.join(dirname, base + suffix), contents)
# Add each test and the expected output, if any.
test_list = unit_test_list()
for test in test_list.tests.values():
add_file(test, test.name[test.name.rfind('.'):], '')
if test.is_reftest:
continue
if test.actual_audio:
add_file(test, '-expected.wav', test.expected_audio)
continue
add_file(test, '-expected.txt', test.expected_text)
add_file(test, '-expected.png', test.expected_image)
filesystem.write_text_file(filesystem.join(LAYOUT_TEST_DIR, 'virtual', 'passes', 'args-expected.txt'), 'args-txt --virtual-arg')
# Clear the list of written files so that we can watch what happens during testing.
filesystem.clear_written_files()
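# A sketch of wiring the unit-test files into a mock filesystem (MockFileSystem
# is imported above; the assertion just illustrates the resulting layout):
#
#     filesystem = MockFileSystem()
#     add_unit_tests_to_mock_filesystem(filesystem)
#     assert filesystem.exists(LAYOUT_TEST_DIR + '/passes/text-expected.txt')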
class TestPort(Port):
    """Test implementation of the Port interface."""
    port_name = 'test'
    default_port_name = 'test-mac-leopard'
ALL_BASELINE_VARIANTS = (
'test-linux-x86_64',
'test-mac-snowleopard', 'test-mac-leopard',
'test-win-win7', 'test-win-xp',
)
FALLBACK_PATHS = {
'xp': ['test-win-win7', 'test-win-xp'],
'win7': ['test-win-win7'],
'leopard': ['test-mac-leopard', 'test-mac-snowleopard'],
'snowleopard': ['test-mac-snowleopard'],
'lucid': ['test-linux-x86_64', 'test-win-win7'],
}
@classmethod
def determine_full_port_name(cls, host, options, port_name):
if port_name == 'test':
return TestPort.default_port_name
return port_name
def __init__(self, host, port_name=None, **kwargs):
Port.__init__(self, host, port_name or TestPort.default_port_name, **kwargs)
self._tests = unit_test_list()
self._flakes = set()
# FIXME: crbug.com/279494. This needs to be in the "real layout tests
# dir" in a mock filesystem, rather than outside of the checkout, so
# that tests that want to write to a TestExpectations file can share
# this between "test" ports and "real" ports. This is the result of
# rebaseline_unittest.py having tests that refer to "real" port names
# and real builders instead of fake builders that point back to the
# test ports. rebaseline_unittest.py needs to not mix both "real" ports
# and "test" ports
self._generic_expectations_path = '/mock-checkout/tests/TestExpectations'
self._results_directory = None
self._operating_system = 'mac'
if self._name.startswith('test-win'):
self._operating_system = 'win'
elif self._name.startswith('test-linux'):
self._operating_system = 'linux'
version_map = {
'test-win-xp': 'xp',
'test-win-win7': 'win7',
'test-mac-leopard': 'leopard',
'test-mac-snowleopard': 'snowleopard',
'test-linux-x86_64': 'lucid',
}
self._version = version_map[self._name]
def repository_paths(self):
"""Returns a list of (repository_name, repository_path) tuples of its depending code base."""
# FIXME: We override this just to keep the perf tests happy.
return [('blink', self.layout_tests_dir())]
def buildbot_archives_baselines(self):
return self._name != 'test-win-xp'
def default_pixel_tests(self):
return True
def _path_to_driver(self):
# This routine shouldn't normally be called, but it is called by
# the mock_drt Driver. We return something, but make sure it's useless.
return 'MOCK _path_to_driver'
def default_child_processes(self):
return 1
def check_build(self, needs_http, printer):
return test_run_results.OK_EXIT_STATUS
def check_sys_deps(self, needs_http):
return test_run_results.OK_EXIT_STATUS
def default_configuration(self):
return 'Release'
def diff_image(self, expected_contents, actual_contents):
diffed = actual_contents != expected_contents
if not actual_contents and not expected_contents:
return (None, None)
if not actual_contents or not expected_contents:
return (True, None)
if diffed:
return ("< %s\n---\n> %s\n" % (expected_contents, actual_contents), None)
return (None, None)
def layout_tests_dir(self):
return LAYOUT_TEST_DIR
def perf_tests_dir(self):
return PERF_TEST_DIR
def webkit_base(self):
return '/test.checkout'
def _skipped_tests_for_unsupported_features(self, test_list):
return set(['failures/expected/skip_text.html',
'failures/unexpected/skip_pass.html',
'virtual/skipped'])
def name(self):
return self._name
def operating_system(self):
return self._operating_system
def _path_to_wdiff(self):
return None
def default_results_directory(self):
return '/tmp/layout-test-results'
def setup_test_run(self):
pass
def _driver_class(self):
return TestDriver
def start_sky_server(self, additional_dirs, number_of_drivers):
pass
def start_websocket_server(self):
pass
def acquire_http_lock(self):
pass
def stop_sky_server(self):
pass
def stop_websocket_server(self):
pass
def release_http_lock(self):
pass
def path_to_apache(self):
return "/usr/sbin/httpd"
def path_to_apache_config_file(self):
return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'httpd.conf')
def path_to_generic_test_expectations_file(self):
return self._generic_expectations_path
def _port_specific_expectations_files(self):
return [self._filesystem.join(self._webkit_baseline_path(d), 'TestExpectations') for d in ['test', 'test-win-xp']]
def all_test_configurations(self):
"""Returns a sequence of the TestConfigurations the port supports."""
# By default, we assume we want to test every graphics type in
# every configuration on every system.
test_configurations = []
for version, architecture in self._all_systems():
for build_type in self._all_build_types():
test_configurations.append(TestConfiguration(
version=version,
architecture=architecture,
build_type=build_type))
return test_configurations
def _all_systems(self):
return (('leopard', 'x86'),
('snowleopard', 'x86'),
('xp', 'x86'),
('win7', 'x86'),
('lucid', 'x86'),
('lucid', 'x86_64'))
def _all_build_types(self):
return ('debug', 'release')
def configuration_specifier_macros(self):
"""To avoid surprises when introducing new macros, these are intentionally fixed in time."""
return {'mac': ['leopard', 'snowleopard'], 'win': ['xp', 'win7'], 'linux': ['lucid']}
def all_baseline_variants(self):
return self.ALL_BASELINE_VARIANTS
def virtual_test_suites(self):
return [
VirtualTestSuite('passes', 'passes', ['--virtual-arg'], use_legacy_naming=True),
VirtualTestSuite('skipped', 'failures/expected', ['--virtual-arg2'], use_legacy_naming=True),
]
class TestDriver(Driver):
"""Test/Dummy implementation of the driver interface."""
next_pid = 1
def __init__(self, *args, **kwargs):
super(TestDriver, self).__init__(*args, **kwargs)
self.started = False
self.pid = 0
def cmd_line(self, pixel_tests, per_test_args):
pixel_tests_flag = '-p' if pixel_tests else ''
return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_drt_flag', []) + per_test_args
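    # For example, with pixel_tests=True and no additional flags or per-test
    # args, cmd_line() returns ['MOCK _path_to_driver', '-p'] (the mock driver
    # path defined by TestPort._path_to_driver above).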
def run_test(self, driver_input, stop_when_done):
if not self.started:
self.started = True
self.pid = TestDriver.next_pid
TestDriver.next_pid += 1
start_time = time.time()
test_name = driver_input.test_name
test_args = driver_input.args or []
test = self._port._tests[test_name]
if test.keyboard:
raise KeyboardInterrupt
if test.exception:
raise ValueError('exception from ' + test_name)
if test.device_failure:
raise DeviceFailure('device failure in ' + test_name)
audio = None
actual_text = test.actual_text
crash = test.crash
web_process_crash = test.web_process_crash
if 'flaky/text.html' in test_name and not test_name in self._port._flakes:
self._port._flakes.add(test_name)
actual_text = 'flaky text failure'
if 'crash_then_text.html' in test_name:
if test_name in self._port._flakes:
actual_text = 'text failure'
else:
self._port._flakes.add(test_name)
crashed_process_name = self._port.driver_name()
crashed_pid = 1
crash = True
if 'text_then_crash.html' in test_name:
if test_name in self._port._flakes:
crashed_process_name = self._port.driver_name()
crashed_pid = 1
crash = True
else:
self._port._flakes.add(test_name)
actual_text = 'text failure'
if actual_text and test_args and test_name == 'passes/args.html':
actual_text = actual_text + ' ' + ' '.join(test_args)
if test.actual_audio:
audio = base64.b64decode(test.actual_audio)
crashed_process_name = None
crashed_pid = None
if crash:
crashed_process_name = self._port.driver_name()
crashed_pid = 1
elif web_process_crash:
crashed_process_name = 'WebProcess'
crashed_pid = 2
crash_log = ''
if crashed_process_name:
crash_logs = CrashLogs(self._port.host)
crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''
if stop_when_done:
self.stop()
if test.actual_checksum == driver_input.image_hash:
image = None
else:
image = test.actual_image
return DriverOutput(actual_text, image, test.actual_checksum, audio,
crash=(crash or web_process_crash), crashed_process_name=crashed_process_name,
crashed_pid=crashed_pid, crash_log=crash_log,
test_time=time.time() - start_time, timeout=test.timeout, error=test.error, pid=self.pid,
leak=test.leak)
def stop(self):
self.started = False
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
return [None,
nn_ops.conv2d_backprop_filter(
grad, array_ops.shape(op.inputs[1]), op.inputs[2],
op.get_attr("strides"), op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"), op.get_attr("data_format")),
nn_ops.conv2d(
grad, op.inputs[1], op.get_attr("strides"),
op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))]
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
"""The derivative of the softmax nonlinearity.
We assume that probs is of shape [batch_size, dim].
The formula for dsoftmax / dx is (diag(softmax) - softmax * softmax').
This matrix is diagonal minus a rank-one matrix, so it is easy to implement
as follows:
  grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
Args:
op: the Softmax op.
grad_softmax: the tensor representing the gradient w.r.t. the
softmax output.
Returns:
gradient w.r.t the input to the softmax
"""
# TODO(ilyasu): assert that the tensor has two dimensions at
# graph-construction time? Alternatively: do different things
# depending on the dimensionality of the input tensors.
softmax = op.outputs[0]
grad_x = ((grad_softmax -
array_ops.reshape(math_ops.reduce_sum(grad_softmax * softmax, [1]),
[-1, 1]))
* softmax)
return grad_x
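# A minimal NumPy sanity check of the formula above (an illustrative sketch,
# not part of this module's API; the helper name is hypothetical). For one
# row, the Jacobian-vector product (diag(s) - s * s') g equals
# (g - sum(g * s)) * s, which is exactly what _SoftmaxGrad computes per row.
def _softmax_grad_reference_check():
    import numpy as np
    rng = np.random.RandomState(0)
    s = np.exp(rng.randn(4))
    s /= s.sum()                              # a softmax output row
    g = rng.randn(4)                          # upstream gradient
    jacobian = np.diag(s) - np.outer(s, s)    # diag(s) - s * s'
    assert np.allclose(jacobian.dot(g), (g - (g * s).sum()) * s)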
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(unused_bias_op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
received gradient Summed on the batch dimension, which is the first dimension.
Args:
unused_bias_op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
return (received_grad, math_ops.reduce_sum(received_grad, reduction_dim_tensor))
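# Illustrative sketch (NumPy only; helper name hypothetical): the bias
# gradient is the upstream gradient summed over every axis except the last,
# matching reduce_sum(received_grad, range(rank - 1)) above.
def _bias_add_grad_reference_check():
    import numpy as np
    g = np.arange(24.).reshape(2, 3, 4)       # upstream gradient, rank 3
    db = g.sum(axis=(0, 1))                   # reduce all axes but the last
    assert db.shape == (4,)                   # one entry per bias element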
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
return gen_nn_ops._relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
return gen_nn_ops._relu6_grad(grad, op.inputs[0])
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
return gen_nn_ops._elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
return gen_nn_ops._softplus_grad(grad, op.inputs[0])
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
return gen_nn_ops._softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops._relu_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
"""Multiply after broadcasting vec to match dimensions of mat.
Args:
vec: A 1-D tensor of dimension [D0]
mat: A 2-D tensor of dimension [D0, D1]
Returns:
A tensor of dimension [D0, D1], the result of vec * mat
"""
# Reshape vec to [D0, 1]
vec = array_ops.expand_dims(vec, -1)
return vec * mat
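# Illustrative sketch (NumPy only; helper name hypothetical): expand_dims
# turns the [D0] vector into [D0, 1], so the product broadcasts across the
# columns of the matrix, scaling each row i of mat by vec[i].
def _broadcast_mul_reference_check():
    import numpy as np
    vec = np.array([1., 2.])
    mat = np.arange(6.).reshape(2, 3)
    out = vec[:, None] * mat                  # same shape trick as above
    assert out.shape == (2, 3)
    assert (out[1] == 2. * mat[1]).all()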
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
return _BroadcastMul(grad_0, op.outputs[1]), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
return [nn_ops.conv2d_backprop_input(array_ops.shape(op.inputs[0]),
op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format")),
nn_ops.conv2d_backprop_filter(op.inputs[0],
array_ops.shape(op.inputs[1]), grad,
op.get_attr("strides"),
op.get_attr("padding"),
op.get_attr("use_cudnn_on_gpu"),
op.get_attr("data_format"))]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
depth_radius = op.get_attr("depth_radius")
bias = op.get_attr("bias")
alpha = op.get_attr("alpha")
beta = op.get_attr("beta")
return [gen_nn_ops._lrn_grad(grad, op.inputs[0], op.outputs[0],
depth_radius, bias, alpha, beta)]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
return gen_nn_ops._avg_pool_grad(array_ops.shape(op.inputs[0]), grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"))
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
return gen_nn_ops._max_pool_grad(op.inputs[0], op.outputs[0], grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
"""Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.
We intentionally do not backprop into the mean and var, as they are
not trained with backprop in this operation.
Args:
op: The BatchNormOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the BatchNormOp.
Returns:
dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
dm: Backprop for mean, which is
sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
dv: Backprop for variance, which is
sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
db: Backprop for beta, which is grad reduced in all except the
last dimension.
dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
"""
dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
return dx, dm, dv, db, dg
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
"""Return the gradients for L2Loss.
Args:
op: The L2LossOp for which we need to generate gradients.
grad: Tensor containing a single number.
Returns:
The gradient, which is (x * grad).
"""
return op.inputs[0] * grad
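# Illustrative sketch (NumPy only; helper name hypothetical): L2Loss(t) is
# sum(t ** 2) / 2, so d(loss)/dt = t, and chaining the upstream scalar
# gradient g gives t * g, as returned above. A finite-difference check:
def _l2_loss_grad_reference_check():
    import numpy as np
    t = np.array([1., -2., 3.])
    g = 0.5                                    # upstream scalar gradient
    loss = lambda v: (v ** 2).sum() / 2.
    eps = 1e-6
    bumped = t.copy()
    bumped[0] += eps
    numeric = (loss(bumped) - loss(t)) / eps * g
    assert np.isclose(numeric, (t * g)[0], atol=1e-4)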
|
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
from oslo.serialization import jsonutils
from neutronclient.common import exceptions
from neutronclient.common import utils
from neutronclient.i18n import _
from neutronclient.neutron import v2_0 as neutronV20
def _format_allocation_pools(subnet):
try:
return '\n'.join([jsonutils.dumps(pool) for pool in
subnet['allocation_pools']])
except (TypeError, KeyError):
return ''
def _format_dns_nameservers(subnet):
try:
return '\n'.join([jsonutils.dumps(server) for server in
subnet['dns_nameservers']])
except (TypeError, KeyError):
return ''
def _format_host_routes(subnet):
try:
return '\n'.join([jsonutils.dumps(route) for route in
subnet['host_routes']])
except (TypeError, KeyError):
return ''
def add_updatable_arguments(parser):
parser.add_argument(
'--name',
help=_('Name of this subnet.'))
parser.add_argument(
'--gateway', metavar='GATEWAY_IP',
help=_('Gateway IP of this subnet.'))
parser.add_argument(
'--no-gateway',
action='store_true',
help=_('No distribution of gateway.'))
parser.add_argument(
'--allocation-pool', metavar='start=IP_ADDR,end=IP_ADDR',
action='append', dest='allocation_pools', type=utils.str2dict,
help=_('Allocation pool IP addresses for this subnet '
'(This option can be repeated).'))
parser.add_argument(
'--allocation_pool',
action='append', dest='allocation_pools', type=utils.str2dict,
help=argparse.SUPPRESS)
parser.add_argument(
'--host-route', metavar='destination=CIDR,nexthop=IP_ADDR',
action='append', dest='host_routes', type=utils.str2dict,
help=_('Additional route (This option can be repeated).'))
parser.add_argument(
'--dns-nameserver', metavar='DNS_NAMESERVER',
action='append', dest='dns_nameservers',
help=_('DNS name server for this subnet '
'(This option can be repeated).'))
parser.add_argument(
'--disable-dhcp',
action='store_true',
help=_('Disable DHCP for this subnet.'))
parser.add_argument(
'--enable-dhcp',
action='store_true',
help=_('Enable DHCP for this subnet.'))
def updatable_args2body(parsed_args, body, for_create=True):
if parsed_args.gateway and parsed_args.no_gateway:
    raise exceptions.CommandError(_("--gateway and --no-gateway options "
                                    "cannot be used at the same time"))
if parsed_args.disable_dhcp and parsed_args.enable_dhcp:
    raise exceptions.CommandError(_("--enable-dhcp and --disable-dhcp cannot "
                                    "be used in the same command."))
if parsed_args.no_gateway:
body['subnet'].update({'gateway_ip': None})
if parsed_args.gateway:
body['subnet'].update({'gateway_ip': parsed_args.gateway})
if parsed_args.name:
body['subnet'].update({'name': parsed_args.name})
if parsed_args.disable_dhcp:
body['subnet'].update({'enable_dhcp': False})
if parsed_args.enable_dhcp:
body['subnet'].update({'enable_dhcp': True})
if parsed_args.allocation_pools:
body['subnet']['allocation_pools'] = parsed_args.allocation_pools
if parsed_args.host_routes:
body['subnet']['host_routes'] = parsed_args.host_routes
if parsed_args.dns_nameservers:
body['subnet']['dns_nameservers'] = parsed_args.dns_nameservers
if for_create and parsed_args.ipv6_ra_mode:
if parsed_args.ip_version == 4:
raise exceptions.CommandError(_("--ipv6-ra-mode is invalid "
"when --ip-version is 4"))
body['subnet']['ipv6_ra_mode'] = parsed_args.ipv6_ra_mode
if for_create and parsed_args.ipv6_address_mode:
if parsed_args.ip_version == 4:
raise exceptions.CommandError(_("--ipv6-address-mode is "
"invalid when --ip-version "
"is 4"))
body['subnet']['ipv6_address_mode'] = parsed_args.ipv6_address_mode
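# Illustrative sketch (hypothetical argparse namespace, not part of the CLI):
# given --gateway 10.0.0.1 --disable-dhcp, updatable_args2body fills the
# request body roughly as shown below.
def _updatable_args2body_example():
    ns = argparse.Namespace(
        name=None, gateway='10.0.0.1', no_gateway=False,
        disable_dhcp=True, enable_dhcp=False,
        allocation_pools=None, host_routes=None, dns_nameservers=None)
    body = {'subnet': {}}
    updatable_args2body(ns, body, for_create=False)
    assert body == {'subnet': {'gateway_ip': '10.0.0.1',
                               'enable_dhcp': False}}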
class ListSubnet(neutronV20.ListCommand):
"""List subnets that belong to a given tenant."""
resource = 'subnet'
_formatters = {'allocation_pools': _format_allocation_pools,
'dns_nameservers': _format_dns_nameservers,
'host_routes': _format_host_routes, }
list_columns = ['id', 'name', 'cidr', 'allocation_pools']
pagination_support = True
sorting_support = True
class ShowSubnet(neutronV20.ShowCommand):
"""Show information of a given subnet."""
resource = 'subnet'
class CreateSubnet(neutronV20.CreateCommand):
"""Create a subnet for a given tenant."""
resource = 'subnet'
def add_known_arguments(self, parser):
add_updatable_arguments(parser)
parser.add_argument(
'--ip-version',
type=int,
default=4, choices=[4, 6],
help=_('IP version to use, default is 4.'))
parser.add_argument(
'--ip_version',
type=int,
choices=[4, 6],
help=argparse.SUPPRESS)
parser.add_argument(
'network_id', metavar='NETWORK',
help=_('Network ID or name this subnet belongs to.'))
parser.add_argument(
'cidr', metavar='CIDR',
help=_('CIDR of subnet to create.'))
parser.add_argument(
'--ipv6-ra-mode',
choices=['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'],
help=_('IPv6 RA (Router Advertisement) mode.'))
parser.add_argument(
'--ipv6-address-mode',
choices=['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'],
help=_('IPv6 address mode.'))
def args2body(self, parsed_args):
if parsed_args.ip_version == 4 and parsed_args.cidr.endswith('/32'):
self.log.warning(_("An IPv4 subnet with a /32 CIDR will have "
"only one usable IP address so the device "
"attached to it will not have any IP "
"connectivity."))
_network_id = neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'network', parsed_args.network_id)
body = {'subnet': {'cidr': parsed_args.cidr,
'network_id': _network_id,
'ip_version': parsed_args.ip_version, }, }
updatable_args2body(parsed_args, body)
if parsed_args.tenant_id:
body['subnet'].update({'tenant_id': parsed_args.tenant_id})
return body
class DeleteSubnet(neutronV20.DeleteCommand):
"""Delete a given subnet."""
resource = 'subnet'
class UpdateSubnet(neutronV20.UpdateCommand):
"""Update subnet's information."""
resource = 'subnet'
def add_known_arguments(self, parser):
add_updatable_arguments(parser)
def args2body(self, parsed_args):
body = {'subnet': {}}
updatable_args2body(parsed_args, body, for_create=False)
return body
|
|
import logging
import sys
import os
import boto3
import click
from botocore.exceptions import ClientError, BotoCoreError, ProfileNotFound
from cfn_sphere import StackActionHandler
from cfn_sphere import __version__
from cfn_sphere.aws.cfn import CloudFormation
from cfn_sphere.aws.kms import KMS
from cfn_sphere.exceptions import CfnSphereException
from cfn_sphere.file_generator import FileGenerator
from cfn_sphere.file_loader import FileLoader
from cfn_sphere.stack_configuration import Config
from cfn_sphere.template.transformer import CloudFormationTemplateTransformer
from cfn_sphere.util import convert_file, get_logger, get_latest_version, kv_list_to_dict, get_resources_dir
LOGGER = get_logger(root=True)
def get_first_account_alias_or_account_id():
try:
return boto3.client('iam').list_account_aliases()["AccountAliases"][0]
except IndexError:
return boto3.client('sts').get_caller_identity()["Arn"].split(":")[4]
except ProfileNotFound:
LOGGER.error(
"The AWS_PROFILE env var is set to '{0}' but this profile is not found in your ~/.aws/config".format(
os.environ.get("AWS_PROFILE")))
sys.exit(1)
except (BotoCoreError, ClientError) as e:
LOGGER.error(e)
sys.exit(1)
except Exception as e:
LOGGER.error("Unknown error occurred loading users account alias")
LOGGER.exception(e)
LOGGER.info("Please report at https://github.com/cfn-sphere/cfn-sphere/issues!")
sys.exit(1)
def check_update_available():
latest_version = get_latest_version()
if latest_version and __version__ != latest_version:
click.confirm(
"There is an update available (v: {0}).\n"
"Changelog: https://github.com/cfn-sphere/cfn-sphere/issues?q=milestone%3A{0}+\n"
"Do you want to continue?".format(latest_version), abort=True)
@click.group(help="This tool manages AWS CloudFormation templates "
"and stacks by providing an application scope and useful tooling.")
@click.version_option(version=__version__)
def cli(name=None):
pass
@cli.command(help="Sync AWS resources with definition file")
@click.argument('config', type=click.Path(exists=True))
@click.option('--parameter', '-p', default=None, envvar='CFN_SPHERE_PARAMETERS', type=click.STRING, multiple=True,
help="Stack parameter to overwrite, eg: --parameter stack1.p1=v1")
@click.option('--suffix', '-s', default=None, envvar='CFN_SPHERE_SUFFIX', type=click.STRING,
help="Append a suffix to all stacks within a stack config file e.g. --suffix '-dev'")
@click.option('--debug', '-d', is_flag=True, default=False, envvar='CFN_SPHERE_DEBUG', help="Debug output")
@click.option('--confirm', '-c', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes")
@click.option('--yes', '-y', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes (alias for -c/--confirm")
def sync(config, parameter, suffix, debug, confirm, yes):
confirm = confirm or yes
if debug:
LOGGER.setLevel(logging.DEBUG)
boto3.set_stream_logger(name='boto3', level=logging.DEBUG)
boto3.set_stream_logger(name='botocore', level=logging.DEBUG)
else:
LOGGER.setLevel(logging.INFO)
if confirm:
LOGGER.info("This action will modify AWS infrastructure in account: {0}".format(
get_first_account_alias_or_account_id()))
else:
check_update_available()
click.confirm('This action will modify AWS infrastructure in account: {0}\nAre you sure?'.format(
get_first_account_alias_or_account_id()), abort=True)
try:
config = Config(config_file=config, cli_params=parameter, stack_name_suffix=suffix)
StackActionHandler(config).create_or_update_stacks()
except CfnSphereException as e:
LOGGER.error(e)
if debug:
LOGGER.exception(e)
sys.exit(1)
except Exception as e:
LOGGER.error("Failed with unexpected error")
LOGGER.exception(e)
LOGGER.info("Please report at https://github.com/cfn-sphere/cfn-sphere/issues!")
sys.exit(1)
@cli.command(help="Delete all stacks in a stack configuration")
@click.argument('config', type=click.Path(exists=True))
@click.option('--suffix', '-s', default=None, envvar='CFN_SPHERE_SUFFIX', type=click.STRING,
help="Append a suffix to all stacks within a stack config file e.g. --suffix '-dev'")
@click.option('--debug', '-d', is_flag=True, default=False, envvar='CFN_SPHERE_DEBUG', help="Debug output")
@click.option('--confirm', '-c', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes")
@click.option('--yes', '-y', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes (alias for -c/--confirm")
def delete(config, suffix, debug, confirm, yes):
confirm = confirm or yes
if debug:
LOGGER.setLevel(logging.DEBUG)
else:
LOGGER.setLevel(logging.INFO)
if not confirm:
check_update_available()
click.confirm('This action will delete all stacks in {0} from account: {1}\nAre you sure?'.format(
config, get_first_account_alias_or_account_id()), abort=True)
try:
config = Config(config, stack_name_suffix=suffix)
StackActionHandler(config).delete_stacks()
except CfnSphereException as e:
LOGGER.error(e)
if debug:
LOGGER.exception(e)
sys.exit(1)
except Exception as e:
LOGGER.error("Failed with unexpected error")
LOGGER.exception(e)
LOGGER.info("Please report at https://github.com/cfn-sphere/cfn-sphere/issues!")
sys.exit(1)
@cli.command(help="Convert JSON to YAML or vice versa")
@click.argument('template_file', type=click.Path(exists=True))
@click.option('--debug', '-d', is_flag=True, default=False, envvar='CFN_SPHERE_DEBUG', help="Debug output")
@click.option('--confirm', '-c', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes")
@click.option('--yes', '-y', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes (alias for -c/--confirm")
def convert(template_file, debug, confirm, yes):
confirm = confirm or yes
if not confirm:
check_update_available()
if debug:
LOGGER.setLevel(logging.DEBUG)
try:
click.echo(convert_file(template_file))
except Exception as e:
LOGGER.error("Error converting {0}:".format(template_file))
LOGGER.exception(e)
sys.exit(1)
@cli.command(name='render_template', help="Render template as it would be used to create/update a stack")
@click.argument('template_file', type=click.Path(exists=True))
@click.option('--confirm', '-c', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes")
@click.option('--yes', '-y', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes (alias for -c/--confirm")
def render_template(template_file, confirm, yes):
confirm = confirm or yes
if not confirm:
check_update_available()
loader = FileLoader()
template = loader.get_cloudformation_template(template_file, None)
template = CloudFormationTemplateTransformer.transform_template(template)
click.echo(template.get_pretty_template_json())
@cli.command(name='validate_template', help="Validate template with CloudFormation API", )
@click.argument('template_file', type=click.Path(exists=True))
@click.option('--confirm', '-c', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes")
@click.option('--yes', '-y', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes (alias for -c/--confirm")
def validate_template(template_file, confirm, yes):
confirm = confirm or yes
if not confirm:
check_update_available()
try:
loader = FileLoader()
template = loader.get_cloudformation_template(template_file, None)
template = CloudFormationTemplateTransformer.transform_template(template)
CloudFormation().validate_template(template)
click.echo("Template is valid")
except CfnSphereException as e:
LOGGER.error(e)
sys.exit(1)
except Exception as e:
LOGGER.error("Failed with unexpected error")
LOGGER.exception(e)
LOGGER.info("Please report at https://github.com/cfn-sphere/cfn-sphere/issues!")
sys.exit(1)
@cli.command(name='create_template', help="Create a basic YAML template skeleton")
@click.argument('path', type=click.Path(exists=False))
@click.option('--confirm', '-c', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes")
@click.option('--yes', '-y', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes (alias for -c/--confirm")
def create_template(path, confirm, yes):
confirm = confirm or yes
if not confirm:
check_update_available()
try:
working_dir = os.getcwd()
resources_dir = get_resources_dir()
if str(path).lower().endswith("json"):
template_source_path = os.path.join(resources_dir, "template-sceleton.json")
else:
template_source_path = os.path.join(resources_dir, "template-sceleton.yml")
description = click.prompt('Stack description to be used in the template', type=str)
FileGenerator(working_dir).render_file(template_source_path, path, {"description": description})
click.echo("Template created at {0}".format(path))
except CfnSphereException as e:
LOGGER.error(e)
sys.exit(1)
except Exception as e:
LOGGER.error("Failed with unexpected error")
LOGGER.exception(e)
LOGGER.info("Please report at https://github.com/cfn-sphere/cfn-sphere/issues!")
sys.exit(1)
@cli.command('start_project', help="Start a new project with simple config and an example template")
@click.option('--confirm', '-c', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes")
@click.option('--yes', '-y', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes (alias for -c/--confirm")
def start_project(confirm, yes):
confirm = confirm or yes
if not confirm:
check_update_available()
try:
region = click.prompt('AWS Region?', type=str, default="eu-west-1")
subdir = click.prompt('Project dir? (leave empty to use current dir)', type=str, default=".")
working_dir = os.getcwd()
resources_dir = get_resources_dir()
config_source_path = os.path.join(resources_dir, "stack_config.yml.jinja2")
config_dest_path = os.path.join(subdir, "stacks.yml")
template_source_path = os.path.join(resources_dir, "queue.yml")
template_dest_path = os.path.join(subdir, "templates", "queue.yml")
context = {
"region": region,
"template_url": "templates/queue.yml"
}
FileGenerator(working_dir).render_file(config_source_path, config_dest_path, context)
FileGenerator(working_dir).render_file(template_source_path, template_dest_path, {})
click.echo(
"I created a simple stack config ({0}) and a template ({1}).".format(config_dest_path, template_dest_path))
click.echo("Modify it to match your requirements and run 'cf sync {0}' to create the stack(s)".format(
config_dest_path))
except CfnSphereException as e:
LOGGER.error(e)
sys.exit(1)
except Exception as e:
LOGGER.error("Failed with unexpected error")
LOGGER.exception(e)
LOGGER.info("Please report at https://github.com/cfn-sphere/cfn-sphere/issues!")
sys.exit(1)
@cli.command(help="Encrypt a given string with AWS Key Management Service")
@click.argument('region', type=str)
@click.argument('keyid', type=str)
@click.argument('cleartext', type=str)
@click.option('--confirm', '-c', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes")
@click.option('--context', default=None, envvar='CFN_SPHERE_CONTEXT', type=click.STRING, multiple=True,
              help="Context for encryption, passed as kv pairs, e.g. --context key=value")
@click.option('--yes', '-y', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes (alias for -c/--confirm")
def encrypt(region, keyid, cleartext, context, confirm, yes):
confirm = confirm or yes
if not confirm:
check_update_available()
try:
ciphertext = KMS(region).encrypt(keyid, cleartext, kv_list_to_dict(context))
click.echo("Ciphertext: {0}".format(ciphertext))
except CfnSphereException as e:
LOGGER.error(e)
sys.exit(1)
except Exception as e:
LOGGER.error("Failed with unexpected error")
LOGGER.exception(e)
LOGGER.info("Please report at https://github.com/cfn-sphere/cfn-sphere/issues!")
sys.exit(1)
@cli.command(help="Decrypt a given ciphertext with AWS Key Management Service")
@click.argument('region', type=str)
@click.argument('ciphertext', type=str)
@click.option('--context', default=None, envvar='CFN_SPHERE_CONTEXT', type=click.STRING, multiple=True,
              help="Context for decryption, passed as kv pairs, e.g. --context key=value")
@click.option('--confirm', '-c', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes")
@click.option('--yes', '-y', is_flag=True, default=False, envvar='CFN_SPHERE_CONFIRM',
help="Override user confirm dialog with yes (alias for -c/--confirm")
def decrypt(region, ciphertext, context, confirm, yes):
confirm = confirm or yes
if not confirm:
check_update_available()
try:
cleartext = KMS(region).decrypt(ciphertext, kv_list_to_dict(context))
click.echo("Cleartext: {0}".format(cleartext))
except CfnSphereException as e:
LOGGER.error(e)
sys.exit(1)
except Exception as e:
LOGGER.error("Failed with unexpected error")
LOGGER.exception(e)
LOGGER.info("Please report at https://github.com/cfn-sphere/cfn-sphere/issues!")
sys.exit(1)
def main():
cli()
|
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The entry point for the Shelf methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from protorpc import message_types
from google.appengine.api import datastore_errors
import endpoints
from loaner.web_app.backend.api import auth
from loaner.web_app.backend.api import permissions
from loaner.web_app.backend.api import root_api
from loaner.web_app.backend.api.messages import shelf_messages
from loaner.web_app.backend.lib import api_utils
from loaner.web_app.backend.lib import search_utils
from loaner.web_app.backend.lib import user
from loaner.web_app.backend.models import device_model
from loaner.web_app.backend.models import shelf_model
_SHELF_DOES_NOT_EXIST_MSG = (
'The shelf with location: %s does not exist. Please double '
'check the location.')
_DEVICE_DOES_NOT_EXIST_MSG = (
'The device_identifier: %s is either not enrolled or an invalid serial '
'number has been entered.')
@root_api.ROOT_API.api_class(resource_name='shelf', path='shelf')
class ShelfApi(root_api.Service):
"""This class is for the Shelf API."""
@auth.method(
shelf_messages.EnrollShelfRequest,
message_types.VoidMessage,
name='enroll',
path='enroll',
http_method='POST',
permission=permissions.Permissions.MODIFY_SHELF)
def enroll(self, request):
"""Enrolls a shelf in the program."""
user_email = user.get_user_email()
self.check_xsrf_token(self.request_state)
try:
shelf_model.Shelf.enroll(
user_email=user_email,
friendly_name=request.friendly_name,
location=request.location,
latitude=request.latitude,
longitude=request.longitude,
altitude=request.altitude,
capacity=request.capacity,
audit_notification_enabled=request.audit_notification_enabled,
responsible_for_audit=request.responsible_for_audit,
audit_interval_override=request.audit_interval_override,
)
except (shelf_model.EnrollmentError, datastore_errors.BadValueError) as err:
raise endpoints.BadRequestException(str(err))
return message_types.VoidMessage()
@auth.method(
shelf_messages.ShelfRequest,
shelf_messages.Shelf,
name='get',
path='get',
http_method='POST',
permission=permissions.Permissions.READ_SHELVES)
def get(self, request):
"""Gets a shelf based on location."""
self.check_xsrf_token(self.request_state)
return api_utils.build_shelf_message_from_model(get_shelf(request))
@auth.method(
shelf_messages.ShelfRequest,
message_types.VoidMessage,
name='disable',
path='disable',
http_method='POST',
permission=permissions.Permissions.MODIFY_SHELF)
def disable(self, request):
"""Disables a shelf by its location."""
self.check_xsrf_token(self.request_state)
user_email = user.get_user_email()
shelf = get_shelf(request)
shelf.disable(user_email)
return message_types.VoidMessage()
@auth.method(
shelf_messages.UpdateShelfRequest,
message_types.VoidMessage,
name='update',
path='update',
http_method='POST',
permission=permissions.Permissions.MODIFY_SHELF)
def update(self, request):
"""Gets a shelf using location to update its properties."""
self.check_xsrf_token(self.request_state)
user_email = user.get_user_email()
shelf = get_shelf(request.shelf_request)
kwargs = api_utils.to_dict(request, shelf_model.Shelf)
shelf.edit(user_email=user_email, **kwargs)
return message_types.VoidMessage()
@auth.method(
shelf_messages.Shelf,
shelf_messages.ListShelfResponse,
name='list',
path='list',
http_method='POST',
permission=permissions.Permissions.READ_SHELVES)
def list_shelves(self, request):
"""Lists enabled or all shelves based on any shelf attribute."""
self.check_xsrf_token(self.request_state)
query, sort_options, returned_fields = (
search_utils.set_search_query_options(request.query))
if not query:
query = search_utils.to_query(request, shelf_model.Shelf)
cursor = search_utils.get_search_cursor(request.page_token)
search_results = shelf_model.Shelf.search(
query_string=query, query_limit=request.page_size,
cursor=cursor, sort_options=sort_options,
returned_fields=returned_fields)
new_search_cursor = None
if search_results.cursor:
new_search_cursor = search_results.cursor.web_safe_string
shelves_messages = []
for document in search_results.results:
message = search_utils.document_to_message(
document, shelf_messages.Shelf())
message.shelf_request = shelf_messages.ShelfRequest()
message.shelf_request.urlsafe_key = document.doc_id
message.shelf_request.location = message.location
shelves_messages.append(message)
return shelf_messages.ListShelfResponse(
shelves=shelves_messages,
has_additional_results=bool(new_search_cursor),
page_token=new_search_cursor)
@auth.method(
shelf_messages.ShelfAuditRequest,
message_types.VoidMessage,
name='audit',
path='audit',
http_method='POST',
permission=permissions.Permissions.AUDIT_SHELF)
def audit(self, request):
"""Performs an audit on a shelf based on location."""
self.check_xsrf_token(self.request_state)
shelf = get_shelf(request.shelf_request)
user_email = user.get_user_email()
devices_on_shelf = []
shelf_string_query = 'shelf: {}'.format(shelf.key.urlsafe())
devices_retrieved_on_shelf = device_model.Device.search(shelf_string_query)
for device_identifier in request.device_identifiers:
device = device_model.Device.get(identifier=device_identifier)
if not device:
raise endpoints.NotFoundException(
_DEVICE_DOES_NOT_EXIST_MSG % device_identifier)
if device.shelf:
if device.shelf == shelf.key:
devices_on_shelf.append(device.key.urlsafe())
logging.info('Device %s is already on shelf.', device.identifier)
continue
try:
device.move_to_shelf(shelf=shelf, user_email=user_email)
devices_on_shelf.append(device.key.urlsafe())  # keep urlsafe strings so the doc_id check below matches
except device_model.UnableToMoveToShelfError as err:
raise endpoints.BadRequestException(str(err))
for device in devices_retrieved_on_shelf.results:
if device.doc_id not in devices_on_shelf:
api_utils.get_ndb_key(device.doc_id).get().remove_from_shelf(
shelf=shelf, user_email=user_email)
shelf.audit(user_email=user_email, num_of_devices=len(devices_on_shelf))
return message_types.VoidMessage()
def get_shelf(request):
"""Gets a shelf using the location.
Args:
request: shelf_messages.ShelfRequest, the request message for a shelf.
Returns:
Shelf object.
Raises:
endpoints.NotFoundException: when a shelf cannot be found.
"""
if request.urlsafe_key:
shelf = api_utils.get_ndb_key(request.urlsafe_key).get()
else:
shelf = shelf_model.Shelf.get(location=request.location)
if not shelf:
raise endpoints.NotFoundException(
_SHELF_DOES_NOT_EXIST_MSG % request.location)
return shelf
|
|
from toolz.functoolz import (thread_first, thread_last, memoize, curry,
compose, pipe, complement, do, juxt)
from toolz.functoolz import _num_required_args
from operator import add, mul, itemgetter
from toolz.utils import raises
from functools import partial
from toolz.compatibility import reduce
def iseven(x):
return x % 2 == 0
def isodd(x):
return x % 2 == 1
def inc(x):
return x + 1
def double(x):
return 2 * x
def test_thread_first():
assert thread_first(2) == 2
assert thread_first(2, inc) == 3
assert thread_first(2, inc, inc) == 4
assert thread_first(2, double, inc) == 5
assert thread_first(2, (add, 5), double) == 14
def test_thread_last():
assert list(thread_last([1, 2, 3], (map, inc), (filter, iseven))) == [2, 4]
assert list(thread_last([1, 2, 3], (map, inc), (filter, isodd))) == [3]
assert thread_last(2, (add, 5), double) == 14
def test_memoize():
fn_calls = [0] # Storage for side effects
def f(x, y):
""" A docstring """
fn_calls[0] += 1
return x + y
mf = memoize(f)
assert mf(2, 3) == mf(2, 3)
assert fn_calls == [1] # function was only called once
assert mf.__doc__ == f.__doc__
assert raises(TypeError, lambda: mf(1, {}))
def test_memoize_kwargs():
fn_calls = [0] # Storage for side effects
def f(x, y=0):
return x + y
mf = memoize(f)
assert mf(1) == f(1)
assert mf(1, 2) == f(1, 2)
assert mf(1, y=2) == f(1, y=2)
assert mf(1, y=3) == f(1, y=3)
def test_memoize_curried():
@curry
def f(x, y=0):
return x + y
f2 = f(y=1)
fm2 = memoize(f2)
assert fm2(3) == f2(3)
assert fm2(3) == f2(3)
def test_memoize_partial():
def f(x, y=0):
return x + y
f2 = partial(f, y=1)
fm2 = memoize(f2)
assert fm2(3) == f2(3)
assert fm2(3) == f2(3)
def test_memoize_key_signature():
# Single argument should not be tupled as a key. No keywords.
mf = memoize(lambda x: False, cache={1: True})
assert mf(1) is True
assert mf(2) is False
# Single argument must be tupled if signature has varargs. No keywords.
mf = memoize(lambda x, *args: False, cache={(1,): True, (1, 2): 2})
assert mf(1) is True
assert mf(2) is False
assert mf(1, 1) is False
assert mf(1, 2) == 2
assert mf((1, 2)) is False
# More than one argument is always tupled. No keywords.
mf = memoize(lambda x, y: False, cache={(1, 2): True})
assert mf(1, 2) is True
assert mf(1, 3) is False
assert raises(TypeError, lambda: mf((1, 2)))
# Nullary function (no inputs) uses empty tuple as the key
mf = memoize(lambda: False, cache={(): True})
assert mf() is True
# Single argument must be tupled if there are keyword arguments, because
# keyword arguments may be passed as unnamed args.
mf = memoize(lambda x, y=0: False,
cache={((1,), frozenset((('y', 2),))): 2,
((1, 2), None): 3})
assert mf(1, y=2) == 2
assert mf(1, 2) == 3
assert mf(2, y=2) is False
assert mf(2, 2) is False
assert mf(1) is False
assert mf((1, 2)) is False
# Keyword-only signatures must still have an "args" tuple.
mf = memoize(lambda x=0: False, cache={(None, frozenset((('x', 1),))): 1,
((1,), None): 2})
assert mf() is False
assert mf(x=1) == 1
assert mf(1) == 2
def test_memoize_curry_cache():
@memoize(cache={1: True})
def f(x):
return False
assert f(1) is True
assert f(2) is False
def test_memoize_key():
@memoize(key=lambda args, kwargs: args[0])
def f(x, y, *args, **kwargs):
return x + y
assert f(1, 2) == 3
assert f(1, 3) == 3
def test_curry_simple():
cmul = curry(mul)
double = cmul(2)
assert callable(double)
assert double(10) == 20
assert repr(cmul) == repr(mul)
cmap = curry(map)
assert list(cmap(inc)([1, 2, 3])) == [2, 3, 4]
assert raises(TypeError, lambda: curry({1: 2}))
def test_curry_kwargs():
def f(a, b, c=10):
return (a + b) * c
f = curry(f)
assert f(1, 2, 3) == 9
assert f(1)(2, 3) == 9
assert f(1, 2) == 30
assert f(1, c=3)(2) == 9
assert f(c=3)(1, 2) == 9
def g(a=1, b=10, c=0):
return a + b + c
cg = curry(g, b=2)
assert cg() == 3
assert cg(b=3) == 4
assert cg(a=0) == 2
assert cg(a=0, b=1) == 1
assert cg(0) == 2 # pass "a" as arg, not kwarg
assert raises(TypeError, lambda: cg(1, 2)) # pass "b" as arg AND kwarg
def test_curry_passes_errors():
@curry
def f(a, b):
if not isinstance(a, int):
raise TypeError()
return a + b
assert f(1, 2) == 3
assert raises(TypeError, lambda: f('1', 2))
assert raises(TypeError, lambda: f('1')(2))
assert raises(TypeError, lambda: f(1, 2, 3))
def test_curry_docstring():
def f(x, y):
""" A docstring """
return x
g = curry(f)
assert g.__doc__ == f.__doc__
assert str(g) == str(f)
assert f(1, 2) == g(1, 2)
def test_curry_is_like_partial():
def foo(a, b, c=1):
return a + b + c
p, c = partial(foo, 1, c=2), curry(foo)(1, c=2)
assert p.keywords == c.keywords
assert p.args == c.args
assert p(3) == c(3)
p, c = partial(foo, 1), curry(foo)(1)
assert p.keywords == c.keywords
assert p.args == c.args
assert p(3) == c(3)
assert p(3, c=2) == c(3, c=2)
p, c = partial(foo, c=1), curry(foo)(c=1)
assert p.keywords == c.keywords
assert p.args == c.args
assert p(1, 2) == c(1, 2)
def test_curry_is_idempotent():
def foo(a, b, c=1):
return a + b + c
f = curry(foo, 1, c=2)
g = curry(f)
assert isinstance(f, curry)
assert isinstance(g, curry)
assert not isinstance(g.func, curry)
assert not hasattr(g.func, 'func')
assert f.func == g.func
assert f.args == g.args
assert f.keywords == g.keywords
def test_curry_attributes_readonly():
def foo(a, b, c=1):
return a + b + c
f = curry(foo, 1, c=2)
assert raises(AttributeError, lambda: setattr(f, 'args', (2,)))
assert raises(AttributeError, lambda: setattr(f, 'keywords', {'c': 3}))
assert raises(AttributeError, lambda: setattr(f, 'func', f))
def test_curry_attributes_writable():
def foo(a, b, c=1):
return a + b + c
f = curry(foo, 1, c=2)
f.__name__ = 'newname'
f.__doc__ = 'newdoc'
assert f.__name__ == 'newname'
assert f.__doc__ == 'newdoc'
if hasattr(f, 'func_name'):
assert f.__name__ == f.func_name
def test_curry_comparable():
def foo(a, b, c=1):
return a + b + c
f1 = curry(foo, 1, c=2)
f2 = curry(foo, 1, c=2)
g1 = curry(foo, 1, c=3)
h1 = curry(foo, c=2)
h2 = h1(c=2)
h3 = h1()
assert f1 == f2
assert not (f1 != f2)
assert f1 != g1
assert not (f1 == g1)
assert f1 != h1
assert h1 == h2
assert h1 == h3
# test function comparison works
def bar(a, b, c=1):
return a + b + c
b1 = curry(bar, 1, c=2)
assert b1 != f1
assert set([f1, f2, g1, h1, h2, h3, b1, b1()]) == set([f1, g1, h1, b1])
# test unhashable input
unhash1 = curry(foo, [])
assert raises(TypeError, lambda: hash(unhash1))
unhash2 = curry(foo, c=[])
assert raises(TypeError, lambda: hash(unhash2))
def test__num_required_args():
assert _num_required_args(map) is None
assert _num_required_args(lambda x: x) == 1
assert _num_required_args(lambda x, y: x) == 2
def foo(x, y, z=2):
pass
assert _num_required_args(foo) == 2
def test_compose():
assert compose()(0) == 0
assert compose(inc)(0) == 1
assert compose(double, inc)(0) == 2
assert compose(str, iseven, inc, double)(3) == "False"
assert compose(str, add)(1, 2) == '3'
def f(a, b, c=10):
return (a + b) * c
assert compose(str, inc, f)(1, 2, c=3) == '10'
def test_pipe():
assert pipe(1, inc) == 2
assert pipe(1, inc, inc) == 3
assert pipe(1, double, inc, iseven) is False
def test_complement():
# No args:
assert complement(lambda: False)()
assert not complement(lambda: True)()
# Single arity:
assert complement(iseven)(1)
assert not complement(iseven)(2)
assert complement(complement(iseven))(2)
assert not complement(complement(isodd))(2)
# Multiple arities:
both_even = lambda a, b: iseven(a) and iseven(b)
assert complement(both_even)(1, 2)
assert not complement(both_even)(2, 2)
# Generic truthiness:
assert complement(lambda: "")()
assert complement(lambda: 0)()
assert complement(lambda: None)()
assert complement(lambda: [])()
assert not complement(lambda: "x")()
assert not complement(lambda: 1)()
assert not complement(lambda: [1])()
def test_do():
inc = lambda x: x + 1
assert do(inc, 1) == 1
log = []
assert do(log.append, 1) == 1
assert log == [1]
def test_juxt_generator_input():
data = list(range(10))
juxtfunc = juxt(itemgetter(2*i) for i in range(5))
assert tuple(juxtfunc(data)) == (0, 2, 4, 6, 8)
assert tuple(juxtfunc(data)) == (0, 2, 4, 6, 8)
|
|
import sys
from local_config import config
locals().update(config)
sys.path.append("../../")
import os
from cv_bridge import CvBridge, CvBridgeError
import std_msgs.msg
from sensor_msgs.msg import Image
import rospy
import threading
import signal
import cv2
import numpy as np
import configparser
import csv
import shutil
import time
from PIL import Image as myImage
import scipy.misc
from std_msgs.msg import Float32MultiArray
from ros_teleoperate.msg import al5d_state
global_config = configparser.ConfigParser()
global_config.read('../../conf.ini')
def signal_handler(sig, frame):  # avoid shadowing the signal module
    global TNR
    TNR.end_thread = True
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
class TestNetworkRobot(object):
def __init__(self, image_size, path, robot_command_file, cameras_topic, cache_size=40, channel_num=3,
             cameras_switch=(False, True, False), camera_num=3):  # tuple default avoids a shared mutable default
rospy.init_node('test_network')
self.image_size = image_size
self.task = config['task']
self.record_path = path
self.robot_command_file = robot_command_file
self.last_cameras_time = [rospy.get_time()] * camera_num
self.last_cameras_printed_ts = [0] * camera_num
self.last_robot_printed_ts = 0
self.robot_execution_delay = 3
self.last_robot_time = rospy.get_time()
self.cache_size = cache_size
self.end_thread = False
self.channel_num = channel_num
self.cameras_topic = cameras_topic
self.cameras_switch = cameras_switch
self.camera_num = camera_num
self.last_command = []
self.command_msg = Float32MultiArray()
self.leap_al5d_msg = [None] * self.cache_size
self.last_cameras_ts = np.empty((camera_num, self.cache_size))
self.al5d_cache_index = 0
self.new_image_captured = False
self.cameras_images = np.empty(
(self.camera_num, self.cache_size, self.image_size, self.image_size, self.channel_num))
self.joints = np.empty((self.cache_size, 8))
for i in range(self.cache_size):
self.joints[i] = [0.0, 1.0, 0.7670000195503235, 0.7925000190734863, 0.6265000104904175, 0.4659999907016754, 0.6579999923706055, 0.0]
self.last_robot_ts = [0] * self.cache_size
self.images_cache_index = [0] * self.camera_num
self.all_caches_filled = [False] * self.camera_num
self.last_command_file_modified = 0
self.bridge = CvBridge()
self.pause = False
for i, isEnable in enumerate(self.cameras_switch):
if isEnable:
self.create_folders(os.path.join(self.record_path, 'camera-' + str(i) + '/'))
rospy.Subscriber(self.cameras_topic[i], Image, self.cameras_callback, callback_args=i)
self.command_publisher = rospy.Publisher('/robot_command', Float32MultiArray, queue_size=100)
rospy.Subscriber("/leap_al5d_info", al5d_state, self.leap_al5d_callback)
# self.thread_images = threading.Thread(target=self.update_images)
# self.thread_images.start()
self.thread_commands = threading.Thread(target=self.update_robot_command)
self.thread_commands.start()
time.sleep(3)
def get_next_batch(self, batch_size, camera_id, flip=True):
if self.all_caches_filled[camera_id]:
images_t = self.cameras_images[camera_id][::-1]
camera_ts = [str(int(x)) for x in self.last_cameras_ts[camera_id][::-1]]
joint_ts = [str(int(x)) for x in self.last_robot_ts[::-1]]
# print joint_ts
# print camera_ts
next_batch_images = np.empty((batch_size, self.image_size, self.image_size, self.channel_num))
next_batch_joints = np.empty((batch_size, 7))
next_batch_images[:] = self.cameras_images[camera_id][-batch_size:]
# print self.joints[-4:]
next_batch_joints[:] = self.joints[-batch_size:, 1:]
# batch_index = 0
# image_index = 0
# print joint_ts
# print camera_ts
# while batch_index < batch_size:
# index = self.find_element(joint_ts, camera_ts[image_index])
# if index != None:
# # print camera_ts[image_index], 'found'
# # print index
# next_batch_images[batch_index] = images_t[image_index]
#
# js = self.joints[::-1]
# next_batch_joints[batch_index] = js[index][1:]
#
# batch_index += 1
# image_index += 1
# else:
# # print camera_ts[image_index], 'not found'
# continue
# for i, el in enumerate(self.cameras_images[camera_id]):
# # moving axis to use plt: i.e [4,100,100] to [100,100,4]
# img = self.cameras_images[camera_id][i]
# img = img.astype(np.uint8)
# print img.dtype, np.max(img), np.min(img), np.shape(img)
# img = myImage.fromarray(img, "RGB")
# img.show()
# raw_input()
next_batch_images = np.asarray(next_batch_images, dtype=np.float32)
if flip:
next_batch_images = np.flip(next_batch_images, axis=2)
next_batch_images = next_batch_images / 127.5 - 1
return next_batch_images, next_batch_joints
else:
return None, None
def find_element(self, arr, element):
    # Compare numerically throughout; the original mixed int and string
    # comparisons, so equal timestamps stored as strings never matched.
    for i, a in enumerate(arr):
        if int(a) > int(element):
            continue
        elif int(a) == int(element):
            return i
        else:
            return None
    return None
def cameras_callback(self, msg, i):
if not (rospy.get_time() - self.last_cameras_time[i] < 0.08) and not self.pause:
self.last_cameras_time[i] = rospy.get_time()
img = self.bridge.imgmsg_to_cv2(msg, "bgr8")
img = np.array(img, dtype=np.float64)  # np.float was removed in NumPy 1.24
img = cv2.resize(img, (self.image_size, self.image_size))
if self.images_cache_index[i] < self.cache_size:
self.cameras_images[i][self.images_cache_index[i]] = img
self.last_cameras_ts[i][self.images_cache_index[i]] = str(int(str(msg.header.stamp)[0:-8]))
self.images_cache_index[i] += 1
else:
self.all_caches_filled[i] = True
self.cameras_images[i][0] = img
self.cameras_images = np.roll(self.cameras_images, -1, axis=1)
self.last_cameras_ts[i][0] = str(int(str(msg.header.stamp)[0:-8]))
self.last_cameras_ts = np.roll(self.last_cameras_ts, -1, axis=1)
self.new_image_captured = True
# self.update_images()
if self.all_caches_filled[i]:
self.pause = True
# np.save(os.path.join(self.record_path, 'camera-' + str(i) + '/'), img)
def update_images(self):
# while not self.end_thread:
if self.new_image_captured and not self.pause:
# np.save(os.path.join(self.record_path, 'joints'), self.joints)
for i, isEnable in enumerate(self.cameras_switch):
# if isEnable:
# np.save(os.path.join(self.record_path, 'camera-' + str(i) + '/images'), self.cameras_images[i])
# np.save(os.path.join(self.record_path, 'camera-' + str(i) + '/ts'), self.last_cameras_ts[i])
# self.write_to_file()
for j in range(self.cache_size):
self.save_image(self.cameras_images[i][j], i, j)
if np.all(self.all_caches_filled):
self.new_image_captured = False
def leap_al5d_callback(self, msg):
if not (rospy.get_time() - self.last_robot_time < 0.08):
self.last_robot_time = rospy.get_time()
if self.al5d_cache_index < self.cache_size:
ts = str(int(str(msg.header.stamp)[:-8]) + self.robot_execution_delay)
self.last_robot_ts[self.al5d_cache_index] = ts
self.leap_al5d_msg[self.al5d_cache_index] = msg.data
self.joints[self.al5d_cache_index] = [ts] + [x for x in msg.data.data]
self.al5d_cache_index += 1
else:
ts = str(int(str(msg.header.stamp)[:-8]) + self.robot_execution_delay)
self.leap_al5d_msg = np.roll(self.leap_al5d_msg, -1, axis=0)
self.leap_al5d_msg[-1] = msg.data
self.last_robot_ts = np.roll(self.last_robot_ts, -1, axis=0)
self.last_robot_ts[-1] = ts
self.joints = np.roll(self.joints, -1, axis=0)
self.joints[-1] = [ts] + [x for x in msg.data.data]
# raw_input()
# print [str(x) for x in self.joints[:, 0]], ts
# raw_input()
def save_image(self, img_arr, camera_id, image_name):
cv2.imwrite(os.path.join(self.record_path, 'camera-' + str(camera_id) + '/' + str(image_name) + '.jpg'),
img_arr, [int(cv2.IMWRITE_JPEG_QUALITY), 80])
def write_to_file(self):
if not (self.al5d_cache_index < self.cache_size):
with open(os.path.join(self.record_path, 'joints.txt'), 'w+') as f:
str_to_append = ""
for i in range(self.cache_size):
str_to_append += str(self.last_robot_ts[i]) + ',' + str(self.task) + ',' + str(2) + ','
data = [x for x in self.leap_al5d_msg[i].data]
self.last_robot_printed_ts = self.last_robot_ts[i]
sys.stdout.write('\rTimestep: ' + str(self.last_robot_ts[i]))
sys.stdout.flush()
str_to_append = str_to_append + ','.join(str(e) for e in data) + '\n'
f.write(str_to_append)
def update_robot_command(self):
while not self.end_thread:
if os.path.exists(self.robot_command_file):
last_modified = os.stat(self.robot_command_file)[8]
self.read_command()
if last_modified > self.last_command_file_modified:
time.sleep(1)
self.pause = False
self.last_command_file_modified = os.stat(self.robot_command_file)[8]
def read_command(self):
with open(self.robot_command_file, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in reader:
# print row
com = np.asarray(row[:-1], dtype=np.float32)
com = np.roll(com, -1, axis=0)
self.command_msg.data = com
self.command_publisher.publish(self.command_msg)
self.last_command = row
# print self.command_msg
break
def create_folders(self, foldername):
if not os.path.exists(foldername):
os.makedirs(foldername)
def delete_folder_content(self, folder):
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
# elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception as e:
print(e)
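# Illustrative sketch (NumPy only; function name hypothetical): the caches
# above are FIFOs kept with np.roll -- overwrite slot 0, then roll left so
# the newest entry ends up last and the oldest is dropped on the next write.
def _roll_cache_example():
    cache = np.array([1., 2., 3.])
    cache[0] = 99.                    # overwrite the slot about to rotate out
    cache = np.roll(cache, -1)        # newest value moves to the end
    assert list(cache) == [2., 3., 99.]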
if __name__ == '__main__':
global TNR
TNR = TestNetworkRobot(config['image_size'], config['record_path'], config['robot_command_file'], config['camera_topics'], cache_size=10, cameras_switch=[False, True, False])
# time.sleep(4)
# images, joints = TNR.get_next_batch(4, 1)
# print(np.shape(images), np.shape(joints))
rospy.spin()
|
|
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_almost_equal,
assert_array_equal, assert_allclose,
assert_array_less)
import pytest
from scipy.signal import resample as sp_resample, butter, freqz, sosfreqz
from mne import create_info
from mne.fixes import fft, fftfreq, nullcontext
from mne.io import RawArray, read_raw_fif
from mne.io.pick import _DATA_CH_TYPES_SPLIT
from mne.filter import (filter_data, resample, _resample_stim_channels,
construct_iir_filter, notch_filter, detrend,
_overlap_add_filter, _smart_pad, design_mne_c_filter,
estimate_ringing_samples, create_filter,
_length_factors)
from mne.utils import (sum_squared, run_tests_if_main,
catch_logging, requires_mne, run_subprocess)
def test_filter_array():
"""Test filtering an array."""
for data in (np.zeros((11, 1, 10)), np.zeros((9, 1, 10))):
filter_data(data, 512., 8, 12, method='iir',
iir_params=dict(ftype='butterworth', order=2))
@requires_mne
def test_mne_c_design(tmpdir):
"""Test MNE-C filter design."""
tempdir = str(tmpdir)
temp_fname = op.join(tempdir, 'test_raw.fif')
out_fname = op.join(tempdir, 'test_c_raw.fif')
x = np.zeros((1, 10001))
x[0, 5000] = 1.
time_sl = slice(5000 - 4096, 5000 + 4097)
sfreq = 1000.
RawArray(x, create_info(1, sfreq, 'eeg')).save(temp_fname)
tols = dict(rtol=1e-4, atol=1e-4)
cmd = ('mne_process_raw', '--projoff', '--raw', temp_fname,
'--save', out_fname)
run_subprocess(cmd)
h = design_mne_c_filter(sfreq, None, 40)
h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
assert_allclose(h, h_c, **tols)
run_subprocess(cmd + ('--highpass', '5', '--highpassw', '2.5'))
h = design_mne_c_filter(sfreq, 5, 40, 2.5)
h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
assert_allclose(h, h_c, **tols)
run_subprocess(cmd + ('--lowpass', '1000', '--highpass', '10'))
h = design_mne_c_filter(sfreq, 10, None, verbose=True)
h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
assert_allclose(h, h_c, **tols)
def test_estimate_ringing():
"""Test our ringing estimation function."""
# Actual values might differ based on system, so let's be approximate
for kind in ('ba', 'sos'):
for thresh, lims in ((0.1, (30, 60)), # 47
(0.01, (300, 600)), # 475
(0.001, (3000, 6000)), # 4758
(0.0001, (30000, 60000))): # 37993
n_ring = estimate_ringing_samples(butter(3, thresh, output=kind))
assert lims[0] <= n_ring <= lims[1], (
'%s %s: %s <= %s <= %s'
% (kind, thresh, lims[0], n_ring, lims[1]))
with pytest.warns(RuntimeWarning, match='properly estimate'):
assert estimate_ringing_samples(butter(4, 0.00001)) == 100000
def test_1d_filter():
"""Test our private overlap-add filtering function."""
# make some random signals and filters
rng = np.random.RandomState(0)
for n_signal in (1, 2, 3, 5, 10, 20, 40):
x = rng.randn(n_signal)
for n_filter in (1, 2, 3, 5, 10, 11, 20, 21, 40, 41, 100, 101):
for filter_type in ('identity', 'random'):
if filter_type == 'random':
h = rng.randn(n_filter)
else: # filter_type == 'identity'
h = np.concatenate([[1.], np.zeros(n_filter - 1)])
# ensure we pad the signal the same way for both filters
n_pad = n_filter - 1
x_pad = _smart_pad(x, (n_pad, n_pad))
for phase in ('zero', 'linear', 'zero-double'):
# compute our expected result the slow way
if phase == 'zero':
# only allow zero-phase for odd-length filters
if n_filter % 2 == 0:
pytest.raises(RuntimeError, _overlap_add_filter,
x[np.newaxis], h, phase=phase)
continue
shift = (len(h) - 1) // 2
x_expected = np.convolve(x_pad, h)
x_expected = x_expected[shift:len(x_expected) - shift]
elif phase == 'zero-double':
shift = len(h) - 1
x_expected = np.convolve(x_pad, h)
x_expected = np.convolve(x_expected[::-1], h)[::-1]
x_expected = x_expected[shift:len(x_expected) - shift]
shift = 0
else:
shift = 0
x_expected = np.convolve(x_pad, h)
x_expected = x_expected[:len(x_expected) - len(h) + 1]
# remove padding
if n_pad > 0:
x_expected = x_expected[n_pad:len(x_expected) - n_pad]
assert len(x_expected) == len(x)
# make sure we actually set things up reasonably
if filter_type == 'identity':
out = x_pad.copy()
out = out[shift + n_pad:]
out = out[:len(x)]
out = np.concatenate((out, np.zeros(max(len(x) -
len(out), 0))))
assert len(out) == len(x)
assert_allclose(out, x_expected)
assert len(x_expected) == len(x)
# compute our version
for n_fft in (None, 32, 128, 129, 1023, 1024, 1025, 2048):
# need to use .copy() b/c signal gets modified inplace
x_copy = x[np.newaxis, :].copy()
min_fft = 2 * n_filter - 1
if phase == 'zero-double':
min_fft = 2 * min_fft - 1
if n_fft is not None and n_fft < min_fft:
pytest.raises(ValueError, _overlap_add_filter,
x_copy, h, n_fft, phase=phase)
else:
x_filtered = _overlap_add_filter(
x_copy, h, n_fft, phase=phase)[0]
assert_allclose(x_filtered, x_expected, atol=1e-13)
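# Illustrative sketch (NumPy only; function name hypothetical): for an
# odd-length filter, zero-phase "same" filtering trims (len(h) - 1) // 2
# samples from each end of the full convolution, exactly as the expected
# result is computed in test_1d_filter above.
def _zero_phase_trim_example():
    x = np.arange(8.)
    h = np.array([0.25, 0.5, 0.25])    # odd-length, symmetric filter
    full = np.convolve(x, h)
    shift = (len(h) - 1) // 2
    same = full[shift:len(full) - shift]
    assert len(same) == len(x)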
def test_iir_stability():
"""Test IIR filter stability check."""
sig = np.random.RandomState(0).rand(1000)
sfreq = 1000
# This will make an unstable filter, should throw RuntimeError
pytest.raises(RuntimeError, filter_data, sig, sfreq, 0.6, None,
method='iir', iir_params=dict(ftype='butter', order=8,
output='ba'))
# This one should work just fine
filter_data(sig, sfreq, 0.6, None, method='iir',
iir_params=dict(ftype='butter', order=8, output='sos'))
# bad system type
pytest.raises(ValueError, filter_data, sig, sfreq, 0.6, None, method='iir',
iir_params=dict(ftype='butter', order=8, output='foo'))
# missing ftype
pytest.raises(RuntimeError, filter_data, sig, sfreq, 0.6, None,
method='iir', iir_params=dict(order=8, output='sos'))
# bad ftype
pytest.raises(RuntimeError, filter_data, sig, sfreq, 0.6, None,
method='iir',
iir_params=dict(order=8, ftype='foo', output='sos'))
# missing gstop
pytest.raises(RuntimeError, filter_data, sig, sfreq, 0.6, None,
method='iir', iir_params=dict(gpass=0.5, output='sos'))
# can't pass iir_params if method='fft'
pytest.raises(ValueError, filter_data, sig, sfreq, 0.1, None,
method='fft', iir_params=dict(ftype='butter', order=2,
output='sos'))
# method must be string
pytest.raises(TypeError, filter_data, sig, sfreq, 0.1, None,
method=1)
# unknown method
pytest.raises(ValueError, filter_data, sig, sfreq, 0.1, None,
method='blah')
# bad iir_params
pytest.raises(TypeError, filter_data, sig, sfreq, 0.1, None,
method='iir', iir_params='blah')
pytest.raises(ValueError, filter_data, sig, sfreq, 0.1, None,
method='fir', iir_params=dict())
# should pass because default trans_bandwidth is not relevant
iir_params = dict(ftype='butter', order=2, output='sos')
x_sos = filter_data(sig, 250, 0.5, None, method='iir',
iir_params=iir_params)
iir_params_sos = construct_iir_filter(iir_params, f_pass=0.5, sfreq=250,
btype='highpass')
x_sos_2 = filter_data(sig, 250, 0.5, None, method='iir',
iir_params=iir_params_sos)
assert_allclose(x_sos[100:-100], x_sos_2[100:-100])
x_ba = filter_data(sig, 250, 0.5, None, method='iir',
iir_params=dict(ftype='butter', order=2, output='ba'))
# Note that this will fail for higher orders (e.g., 6) showing the
# hopefully decreased numerical error of SOS
assert_allclose(x_sos[100:-100], x_ba[100:-100])
line_freqs = tuple(range(60, 241, 60))
@pytest.mark.parametrize('method, filter_length, line_freq, tol', [
('spectrum_fit', 'auto', None, 2), # 'auto' same as None on 0.21
('spectrum_fit', None, None, 2),
('spectrum_fit', '10s', None, 2),
('spectrum_fit', 'auto', line_freqs, 1),
('fft', 'auto', line_freqs, 1),
('fft', 8192, line_freqs, 1),
])
def test_notch_filters(method, filter_length, line_freq, tol):
"""Test notch filters."""
# let's use an ugly, prime sfreq for fun
rng = np.random.RandomState(0)
sfreq = 487
sig_len_secs = 21
t = np.arange(0, int(round(sig_len_secs * sfreq))) / sfreq
# make a "signal"
a = rng.randn(int(sig_len_secs * sfreq))
orig_power = np.sqrt(np.mean(a ** 2))
# make line noise
a += np.sum([np.sin(2 * np.pi * f * t) for f in line_freqs], axis=0)
# only allow None line_freqs with 'spectrum_fit' mode
pytest.raises(ValueError, notch_filter, a, sfreq, None, 'fft')
pytest.raises(ValueError, notch_filter, a, sfreq, None, 'iir')
if method == 'spectrum_fit' and filter_length == 'auto':
ctx = pytest.deprecated_call(match='will change to 10.')
else:
ctx = nullcontext()
with catch_logging() as log_file:
with ctx:
b = notch_filter(a, sfreq, line_freq, filter_length,
method=method, verbose=True)
if line_freq is None:
out = [line.strip().split(':')[0]
for line in log_file.getvalue().split('\n')
if line.startswith(' ')]
assert len(out) == 4, 'Detected frequencies not logged properly'
out = np.array(out, float)
assert_array_almost_equal(out, line_freqs)
new_power = np.sqrt(sum_squared(b) / b.size)
assert_almost_equal(new_power, orig_power, tol)
def test_resample():
"""Test resampling."""
rng = np.random.RandomState(0)
x = rng.normal(0, 1, (10, 10, 10))
x_rs = resample(x, 1, 2, 10)
assert x.shape == (10, 10, 10)
assert x_rs.shape == (10, 10, 5)
x_2 = x.swapaxes(0, 1)
x_2_rs = resample(x_2, 1, 2, 10)
assert_array_equal(x_2_rs.swapaxes(0, 1), x_rs)
x_3 = x.swapaxes(0, 2)
x_3_rs = resample(x_3, 1, 2, 10, 0)
assert_array_equal(x_3_rs.swapaxes(0, 2), x_rs)
# make sure we cast to array if necessary
assert_array_equal(resample([0., 0.], 2, 1), [0., 0., 0., 0.])
def test_resample_scipy():
"""Test resampling against SciPy."""
n_jobs_test = (1, 'cuda')
for window in ('boxcar', 'hann'):
for N in (100, 101, 102, 103):
x = np.arange(N).astype(float)
err_msg = '%s: %s' % (N, window)
x_2_sp = sp_resample(x, 2 * N, window=window)
for n_jobs in n_jobs_test:
x_2 = resample(x, 2, 1, 0, window=window, n_jobs=n_jobs)
assert_allclose(x_2, x_2_sp, atol=1e-12, err_msg=err_msg)
new_len = int(round(len(x) * (1. / 2.)))
x_p5_sp = sp_resample(x, new_len, window=window)
for n_jobs in n_jobs_test:
x_p5 = resample(x, 1, 2, 0, window=window, n_jobs=n_jobs)
assert_allclose(x_p5, x_p5_sp, atol=1e-12, err_msg=err_msg)
@pytest.mark.parametrize('n_jobs', (2, 'cuda'))
def test_n_jobs(n_jobs):
"""Test resampling against SciPy."""
x = np.random.RandomState(0).randn(4, 100)
y1 = resample(x, 2, 1, n_jobs=1)
y2 = resample(x, 2, 1, n_jobs=n_jobs)
assert_allclose(y1, y2)
y1 = filter_data(x, 100., 0, 40, n_jobs=1)
y2 = filter_data(x, 100., 0, 40, n_jobs=n_jobs)
assert_allclose(y1, y2)
def test_resamp_stim_channel():
"""Test resampling of stim channels."""
# Downsampling
assert_array_equal(
_resample_stim_channels([1, 0, 0, 0, 2, 0, 0, 0], 1, 2),
[[1, 0, 2, 0]])
assert_array_equal(
_resample_stim_channels([1, 0, 0, 0, 2, 0, 0, 0], 1, 1.5),
[[1, 0, 0, 2, 0]])
assert_array_equal(
_resample_stim_channels([1, 0, 0, 1, 2, 0, 0, 1], 1, 2),
[[1, 1, 2, 1]])
# Upsampling
assert_array_equal(
_resample_stim_channels([1, 2, 3], 2, 1), [[1, 1, 2, 2, 3, 3]])
assert_array_equal(
_resample_stim_channels([1, 2, 3], 2.5, 1), [[1, 1, 1, 2, 2, 3, 3, 3]])
# Proper number of samples in stim channel resampling from io/base.py
data_chunk = np.zeros((1, 315600))
for new_data_len in (52598, 52599, 52600, 52601, 315599, 315600):
new_data = _resample_stim_channels(data_chunk, new_data_len,
data_chunk.shape[1])
assert new_data.shape[1] == new_data_len
def test_resample_raw():
"""Test resampling using RawArray."""
x = np.zeros((1, 1001))
sfreq = 2048.
raw = RawArray(x, create_info(1, sfreq, 'eeg'))
raw.resample(128, npad=10)
data = raw.get_data()
assert data.shape == (1, 63)
@pytest.mark.slowtest
def test_filters():
"""Test low-, band-, high-pass, and band-stop filters plus resampling."""
rng = np.random.RandomState(0)
sfreq = 100
sig_len_secs = 15
a = rng.randn(2, sig_len_secs * sfreq)
# let's test our catchers
for fl in ['blah', [0, 1], 1000.5, '10ss', '10']:
pytest.raises((ValueError, TypeError),
filter_data, a, sfreq, 4, 8, None, fl,
1.0, 1.0, fir_design='firwin')
for nj in ['blah', 0.5]:
pytest.raises(ValueError, filter_data, a, sfreq, 4, 8, None, 1000,
1.0, 1.0, n_jobs=nj, phase='zero', fir_design='firwin')
pytest.raises(ValueError, filter_data, a, sfreq, 4, 8, None, 100,
1., 1., fir_window='foo')
pytest.raises(ValueError, filter_data, a, sfreq, 4, 8, None, 10,
1., 1., fir_design='firwin') # too short
# > Nyq/2
pytest.raises(ValueError, filter_data, a, sfreq, 4, sfreq / 2., None,
100, 1.0, 1.0, fir_design='firwin')
pytest.raises(ValueError, filter_data, a, sfreq, -1, None, None,
100, 1.0, 1.0, fir_design='firwin')
# these should work
create_filter(None, sfreq, None, None)
create_filter(a, sfreq, None, None, fir_design='firwin')
create_filter(a, sfreq, None, None, method='iir')
# check our short-filter warning:
with pytest.warns(RuntimeWarning, match='attenuation'):
# Warning for low attenuation
filter_data(a, sfreq, 1, 8, filter_length=256, fir_design='firwin2')
with pytest.warns(RuntimeWarning, match='Increase filter_length'):
# Warning for too short a filter
filter_data(a, sfreq, 1, 8, filter_length='0.5s', fir_design='firwin2')
# try new default and old default
freqs = fftfreq(a.shape[-1], 1. / sfreq)
A = np.abs(fft(a))
kwargs = dict(fir_design='firwin')
for fl in ['auto', '10s', '5000ms', 1024, 1023]:
bp = filter_data(a, sfreq, 4, 8, None, fl, 1.0, 1.0, **kwargs)
bs = filter_data(a, sfreq, 8 + 1.0, 4 - 1.0, None, fl, 1.0, 1.0,
**kwargs)
lp = filter_data(a, sfreq, None, 8, None, fl, 10, 1.0, n_jobs=2,
**kwargs)
hp = filter_data(lp, sfreq, 4, None, None, fl, 1.0, 10, **kwargs)
assert_allclose(hp, bp, rtol=1e-3, atol=2e-3)
assert_allclose(bp + bs, a, rtol=1e-3, atol=1e-3)
        # Sanity check attenuation
mask = (freqs > 5.5) & (freqs < 6.5)
assert_allclose(np.mean(np.abs(fft(bp)[:, mask]) / A[:, mask]),
1., atol=0.02)
assert_allclose(np.mean(np.abs(fft(bs)[:, mask]) / A[:, mask]),
0., atol=0.2)
# now the minimum-phase versions
bp = filter_data(a, sfreq, 4, 8, None, fl, 1.0, 1.0,
phase='minimum', **kwargs)
bs = filter_data(a, sfreq, 8 + 1.0, 4 - 1.0, None, fl, 1.0, 1.0,
phase='minimum', **kwargs)
assert_allclose(np.mean(np.abs(fft(bp)[:, mask]) / A[:, mask]),
1., atol=0.11)
assert_allclose(np.mean(np.abs(fft(bs)[:, mask]) / A[:, mask]),
0., atol=0.3)
# and since these are low-passed, downsampling/upsampling should be close
n_resamp_ignore = 10
bp_up_dn = resample(resample(bp, 2, 1, n_jobs=2), 1, 2, n_jobs=2)
assert_array_almost_equal(bp[n_resamp_ignore:-n_resamp_ignore],
bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
# note that on systems without CUDA, this line serves as a test for a
# graceful fallback to n_jobs=1
bp_up_dn = resample(resample(bp, 2, 1, n_jobs='cuda'), 1, 2, n_jobs='cuda')
assert_array_almost_equal(bp[n_resamp_ignore:-n_resamp_ignore],
bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
    # test to make sure our resampling matches scipy's
bp_up_dn = sp_resample(sp_resample(bp, 2 * bp.shape[-1], axis=-1,
window='boxcar'),
bp.shape[-1], window='boxcar', axis=-1)
assert_array_almost_equal(bp[n_resamp_ignore:-n_resamp_ignore],
bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
# make sure we don't alias
t = np.array(list(range(sfreq * sig_len_secs))) / float(sfreq)
# make sinusoid close to the Nyquist frequency
sig = np.sin(2 * np.pi * sfreq / 2.2 * t)
# signal should disappear with 2x downsampling
sig_gone = resample(sig, 1, 2)[n_resamp_ignore:-n_resamp_ignore]
assert_array_almost_equal(np.zeros_like(sig_gone), sig_gone, 2)
# let's construct some filters
iir_params = dict(ftype='cheby1', gpass=1, gstop=20, output='ba')
iir_params = construct_iir_filter(iir_params, 40, 80, 1000, 'low')
# this should be a third order filter
assert iir_params['a'].size - 1 == 3
assert iir_params['b'].size - 1 == 3
iir_params = dict(ftype='butter', order=4, output='ba')
iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low')
assert iir_params['a'].size - 1 == 4
assert iir_params['b'].size - 1 == 4
iir_params = dict(ftype='cheby1', gpass=1, gstop=20)
iir_params = construct_iir_filter(iir_params, 40, 80, 1000, 'low')
# this should be a third order filter, which requires 2 SOS ((2, 6))
assert iir_params['sos'].shape == (2, 6)
iir_params = dict(ftype='butter', order=4, output='sos')
iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low')
assert iir_params['sos'].shape == (2, 6)
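    # worked sanity check on the shapes above: an order-3 filter needs
    # ceil(3 / 2) = 2 second-order sections and an order-4 filter needs
    # 4 / 2 = 2, and each section stores 6 coefficients, hence (2, 6)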
# check that picks work for 3d array with one channel and picks=[0]
a = rng.randn(5 * sfreq, 5 * sfreq)
b = a[:, None, :]
a_filt = filter_data(a, sfreq, 4, 8, None, 400, 2.0, 2.0,
fir_design='firwin')
b_filt = filter_data(b, sfreq, 4, 8, [0], 400, 2.0, 2.0,
fir_design='firwin')
assert_array_equal(a_filt[:, None, :], b_filt)
# check for n-dimensional case
a = rng.randn(2, 2, 2, 2)
with pytest.warns(RuntimeWarning, match='longer'):
pytest.raises(ValueError, filter_data, a, sfreq, 4, 8,
np.array([0, 1]), 100, 1.0, 1.0)
# check corner case (#4693)
want_length = int(round(_length_factors['hamming'] * 1000. / 0.5))
want_length += (want_length % 2 == 0)
assert want_length == 6601
h = create_filter(
np.empty(10000), 1000., l_freq=None, h_freq=55.,
h_trans_bandwidth=0.5, method='fir', phase='zero-double',
fir_design='firwin', verbose=True)
assert len(h) == 6601
h = create_filter(
np.empty(10000), 1000., l_freq=None, h_freq=55.,
h_trans_bandwidth=0.5, method='fir', phase='zero',
fir_design='firwin', filter_length='7s', verbose=True)
assert len(h) == 7001
h = create_filter(
np.empty(10000), 1000., l_freq=None, h_freq=55.,
h_trans_bandwidth=0.5, method='fir', phase='zero-double',
fir_design='firwin', filter_length='7s', verbose=True)
assert len(h) == 8193 # next power of two
def test_filter_auto():
"""Test filter auto parameters."""
# test that our overlap-add filtering doesn't introduce strange
# artifacts (from mne_analyze mailing list 2015/06/25)
N = 300
sfreq = 100.
lp = 10.
sine_freq = 1.
x = np.ones(N)
t = np.arange(N) / sfreq
x += np.sin(2 * np.pi * sine_freq * t)
x_orig = x.copy()
for pad in ('reflect_limited', 'reflect', 'edge'):
for fir_design in ('firwin2', 'firwin'):
kwargs = dict(fir_design=fir_design, pad=pad)
x = x_orig.copy()
x_filt = filter_data(x, sfreq, None, lp, **kwargs)
assert_array_equal(x, x_orig)
n_edge = 10
assert_allclose(x[n_edge:-n_edge], x_filt[n_edge:-n_edge],
atol=1e-2)
assert_array_equal(x_filt, filter_data(x, sfreq, None, lp, None,
**kwargs))
assert_array_equal(x, x_orig)
assert_array_equal(x_filt, filter_data(x, sfreq, None, lp,
**kwargs))
assert_array_equal(x, x_orig)
assert_array_equal(x_filt, filter_data(x, sfreq, None, lp,
copy=False, **kwargs))
assert_array_equal(x, x_filt)
# degenerate conditions
pytest.raises(ValueError, filter_data, x, -sfreq, 1, 10)
pytest.raises(ValueError, filter_data, x, sfreq, 1, sfreq * 0.75)
with pytest.raises(ValueError, match='Data to be filtered must be real'):
filter_data(x.astype(np.float32), sfreq, None, 10)
with pytest.raises(ValueError, match='Data to be filtered must be real'):
filter_data(1j, 1000., None, 40.)
def test_cuda_fir():
"""Test CUDA-based filtering."""
# Using `n_jobs='cuda'` on a non-CUDA system should be fine,
# as it should fall back to using n_jobs=1.
rng = np.random.RandomState(0)
sfreq = 500
sig_len_secs = 20
a = rng.randn(sig_len_secs * sfreq)
kwargs = dict(fir_design='firwin')
with catch_logging() as log_file:
for fl in ['auto', '10s', 2048]:
args = [a, sfreq, 4, 8, None, fl, 1.0, 1.0]
bp = filter_data(*args, **kwargs)
bp_c = filter_data(*args, n_jobs='cuda', verbose='info', **kwargs)
assert_array_almost_equal(bp, bp_c, 12)
args = [a, sfreq, 8 + 1.0, 4 - 1.0, None, fl, 1.0, 1.0]
bs = filter_data(*args, **kwargs)
bs_c = filter_data(*args, n_jobs='cuda', verbose='info', **kwargs)
assert_array_almost_equal(bs, bs_c, 12)
args = [a, sfreq, None, 8, None, fl, 1.0]
lp = filter_data(*args, **kwargs)
lp_c = filter_data(*args, n_jobs='cuda', verbose='info', **kwargs)
assert_array_almost_equal(lp, lp_c, 12)
args = [lp, sfreq, 4, None, None, fl, 1.0]
hp = filter_data(*args, **kwargs)
hp_c = filter_data(*args, n_jobs='cuda', verbose='info', **kwargs)
assert_array_almost_equal(hp, hp_c, 12)
# check to make sure we actually used CUDA
out = log_file.getvalue().split('\n')[:-1]
# triage based on whether or not we actually expected to use CUDA
from mne.cuda import _cuda_capable # allow above funs to set it
tot = 12 if _cuda_capable else 0
assert sum(['Using CUDA for FFT FIR filtering' in o for o in out]) == tot
if not _cuda_capable:
pytest.skip('CUDA not enabled')
def test_cuda_resampling():
"""Test CUDA resampling."""
rng = np.random.RandomState(0)
for window in ('boxcar', 'triang'):
for N in (997, 1000): # one prime, one even
a = rng.randn(2, N)
for fro, to in ((1, 2), (2, 1), (1, 3), (3, 1)):
a1 = resample(a, fro, to, n_jobs=1, npad='auto',
window=window)
a2 = resample(a, fro, to, n_jobs='cuda', npad='auto',
window=window)
assert_allclose(a1, a2, rtol=1e-7, atol=1e-14)
assert_array_almost_equal(a1, a2, 14)
assert_array_equal(resample(np.zeros(2), 2, 1, n_jobs='cuda'), np.zeros(4))
def test_detrend():
"""Test zeroth and first order detrending."""
x = np.arange(10)
assert_array_almost_equal(detrend(x, 1), np.zeros_like(x))
x = np.ones(10)
assert_array_almost_equal(detrend(x, 0), np.zeros_like(x))
@pytest.mark.parametrize('output', ('ba', 'sos'))
@pytest.mark.parametrize('ftype', ('butter', 'bessel', 'ellip'))
@pytest.mark.parametrize('btype', ('lowpass', 'bandpass'))
@pytest.mark.parametrize('order', (1, 4))
def test_reporting_iir(ftype, btype, order, output):
"""Test IIR filter reporting."""
fs = 1000.
l_freq = 1. if btype == 'bandpass' else None
iir_params = dict(ftype=ftype, order=order, output=output)
rs = 20 if order == 1 else 80
if ftype == 'ellip':
iir_params['rp'] = 3 # dB
iir_params['rs'] = rs # attenuation
pass_tol = np.log10(iir_params['rp']) + 0.01
else:
pass_tol = 0.2
with catch_logging() as log:
x = create_filter(None, fs, l_freq, 40., method='iir',
iir_params=iir_params, verbose=True)
order_eff = order * (1 + (btype == 'bandpass'))
if output == 'ba':
assert len(x['b']) == order_eff + 1
log = log.getvalue()
keys = [
'IIR',
'zero-phase',
'two-pass forward and reverse',
'non-causal',
btype,
ftype,
'Filter order %d' % (order_eff * 2,),
'Cutoff ' if btype == 'lowpass' else 'Cutoffs ',
]
dB_decade = -27.74
if ftype == 'ellip':
dB_cutoff = -6.0
elif order == 1 or ftype == 'butter':
dB_cutoff = -6.02
else:
assert ftype == 'bessel'
assert order == 4
dB_cutoff = -15.16
if btype == 'lowpass':
keys += ['%0.2f dB' % (dB_cutoff,)]
for key in keys:
assert key.lower() in log.lower()
# Verify some of the filter properties
if output == 'ba':
w, h = freqz(x['b'], x['a'], worN=10000)
else:
w, h = sosfreqz(x['sos'], worN=10000)
w *= fs / (2 * np.pi)
h = np.abs(h)
# passband
passes = [np.argmin(np.abs(w - 20))]
# stopband
decades = [np.argmin(np.abs(w - 400.))] # one decade
# transition
edges = [np.argmin(np.abs(w - 40.))]
# put these where they belong based on filter type
assert w[0] == 0.
idx_0p1 = np.argmin(np.abs(w - 0.1))
idx_1 = np.argmin(np.abs(w - 1.))
if btype == 'bandpass':
edges += [idx_1]
decades += [idx_0p1]
else:
passes += [idx_0p1, idx_1]
edge_val = 10 ** (dB_cutoff / 40.)
assert_allclose(h[edges], edge_val, atol=0.01)
assert_allclose(h[passes], 1., atol=pass_tol)
if ftype == 'butter' and btype == 'lowpass':
attenuation = dB_decade * order
assert_allclose(h[decades], 10 ** (attenuation / 20.), rtol=0.01)
elif ftype == 'ellip':
assert_array_less(h[decades], 10 ** (-rs / 20))
@pytest.mark.parametrize('phase', ('zero', 'zero-double', 'minimum'))
@pytest.mark.parametrize('fir_window', ('hamming', 'blackman'))
@pytest.mark.parametrize('btype', ('lowpass', 'bandpass'))
def test_reporting_fir(phase, fir_window, btype):
"""Test FIR filter reporting."""
l_freq = 1. if btype == 'bandpass' else None
fs = 1000.
with catch_logging() as log:
x = create_filter(None, fs, l_freq, 40, method='fir',
phase=phase, fir_window=fir_window, verbose=True)
n_taps = len(x)
log = log.getvalue()
keys = ['FIR',
btype,
fir_window.capitalize(),
'Filter length: %d samples' % (n_taps,),
'passband ripple',
'stopband attenuation',
]
if phase == 'minimum':
keys += [' causal ']
else:
keys += [' non-causal ', ' dB cutoff frequency: 45.00 Hz']
if btype == 'bandpass':
keys += [' dB cutoff frequency: 0.50 Hz']
for key in keys:
assert key in log
if phase == 'zero':
assert '-6 dB cutoff' in log
elif phase == 'zero-double':
assert '-12 dB cutoff' in log
else:
# XXX Eventually we should figure out where the resulting point is,
# since the minimum-phase process will change it. For now we don't
# report it.
assert phase == 'minimum'
# Verify some of the filter properties
if phase == 'zero-double':
x = np.convolve(x, x) # effectively what happens
w, h = freqz(x, worN=10000)
w *= fs / (2 * np.pi)
h = np.abs(h)
# passband
passes = [np.argmin(np.abs(w - f)) for f in (1, 20, 40)]
# stopband
stops = [np.argmin(np.abs(w - 50.))]
# transition
mids = [np.argmin(np.abs(w - 45.))]
# put these where they belong based on filter type
assert w[0] == 0.
idx_0 = 0
idx_0p5 = np.argmin(np.abs(w - 0.5))
if btype == 'bandpass':
stops += [idx_0]
mids += [idx_0p5]
else:
passes += [idx_0, idx_0p5]
assert_allclose(h[passes], 1., atol=0.01)
attenuation = -20 if phase == 'minimum' else -50
assert_allclose(h[stops], 0., atol=10 ** (attenuation / 20.))
if phase != 'minimum': # haven't worked out the math for this yet
expected = 0.25 if phase == 'zero-double' else 0.5
assert_allclose(h[mids], expected, atol=0.01)
def test_filter_picks():
"""Test filter picking."""
data = np.random.RandomState(0).randn(3, 1000)
fs = 1000.
kwargs = dict(l_freq=None, h_freq=40.)
filt = filter_data(data, fs, **kwargs)
    # don't include seeg or stim in this list because they are used in the
    # info created below, to ensure default cases are treated properly
for kind in ('eeg', 'grad', 'emg', 'misc'):
for picks in (None, [-2], kind, 'k'):
# With always at least one data channel
info = create_info(['s', 'k', 't'], fs, ['seeg', kind, 'stim'])
raw = RawArray(data.copy(), info)
raw.filter(picks=picks, **kwargs)
if picks is None:
if kind in _DATA_CH_TYPES_SPLIT: # should be included
want = np.concatenate((filt[:2], data[2:]))
else: # shouldn't
want = np.concatenate((filt[:1], data[1:]))
            else:  # just the kind of interest ([-2], kind, and 'k' should be equivalent)
want = np.concatenate((data[:1], filt[1:2], data[2:]))
assert_allclose(raw.get_data(), want)
# Now with sometimes no data channels
info = create_info(['k', 't'], fs, [kind, 'stim'])
raw = RawArray(data[1:].copy(), info.copy())
if picks is None and kind not in _DATA_CH_TYPES_SPLIT:
with pytest.raises(ValueError, match='yielded no channels'):
raw.filter(picks=picks, **kwargs)
else:
raw.filter(picks=picks, **kwargs)
want = want[1:]
assert_allclose(raw.get_data(), want)
run_tests_if_main()
|
|
import os
import subprocess
from StringIO import StringIO
import pycurl
from bs4 import BeautifulSoup
from RequestHandler import RequestHandler
class MotifAuthorizationManager(object):
URL_AUTH_STEP1 = "https://auth.motifinvesting.com/authenticate"
URL_AUTH_STEP2 = "https://trader.motifinvesting.com/two-factor-auth"
URL_AUTH_STEP3 = "https://trader.motifinvesting.com/two-factor-auth/send"
URL_AUTH_STEP4 = "https://trader.motifinvesting.com/two-factor-auth/confirm"
URL_SETTINGS = "https://trader.motifinvesting.com/account/settings"
def __init__(self, username=None, password=None, phone=None, cookieJar="cookies.txt"):
if username != None:
if isinstance(username, str):
self.username = username
else:
raise ValueError("MotifAuthorizationManager.__init__: Username must be a string.")
else:
self.username = ""
if password != None:
if isinstance(password, str):
self.password = password
else:
raise ValueError("MotifAuthorizationManager.__init__: Password must be a string.")
else:
self.password = ""
if phone != None:
if isinstance(phone, str):
self.phone = phone
else:
raise ValueError("MotifAuthorizationManager.__init__: Phone must be a string.")
else:
self.phone = ""
if cookieJar != "cookies.txt":
if isinstance(cookieJar, str):
self.__ensurePathExists(cookieJar)
self.cookieJar = cookieJar
else:
raise ValueError("MotifAuthorizationManager.__init__: cookieJar must be a string.")
else:
self.cookieJar = cookieJar
self.rh = RequestHandler(self.cookieJar)
def setUsername(self, username=None):
if username != None:
if isinstance(username, str):
self.username = username
else:
raise ValueError("MotifAuthorizationManager.setUsername: Username must be a string.")
else:
raise ValueError("MotifAuthorizationManager.setUsername: Username not optional.")
def getUsername(self):
return self.username
def setPassword(self, password=None):
if password != None:
if isinstance(password, str):
self.password = password
else:
raise ValueError("MotifAuthorizationManager.setPassword: Password must be a string.")
else:
raise ValueError("MotifAuthorizationManager.setPassword: Password not optional.")
def setPhone(self, phone=None):
if phone != None:
if isinstance(phone, str):
self.phone = phone
else:
raise ValueError("MotifAuthorizationManager.setPhone: Phone must be a string.")
else:
raise ValueError("MotifAuthorizationManager.setPhone: Phone not optional.")
def getPhone(self):
return self.phone
def setCookieJar(self, cookieJar=None):
if cookieJar != None:
if isinstance(cookieJar, str):
self.__ensurePathExists(cookieJar)
self.cookieJar = cookieJar
self.rh.setCookieJar(self.cookieJar)
return True
else:
raise ValueError("MotifAuthorizationManager.setCookieJar: cookieJar must be a string.")
else:
raise ValueError("MotifAuthorizationManager.setCookieJar: cookieJar not optional.")
def getCookieJar(self):
return self.cookieJar
    def __ensurePathExists(self, path):
        truePath = os.path.dirname(path)
        # guard against bare filenames, whose dirname is the empty string
        if truePath and not os.path.exists(truePath):
            os.makedirs(truePath)
def __getNonceFromSoup(self, soup):
scriptResults = soup('script',{'type' : 'text/javascript'})
for line in scriptResults:
for result in line.stripped_strings:
result = result.split('\n')
for tag in result:
if "nonce" in tag:
return tag.split("\"")[1].strip()
def authorizeUser(self):
########### STEP 1 ############
try:
curlOut = subprocess.check_output(["curl",
"-c", self.cookieJar,
"-d", "email=" + self.username + "&password=" + self.password,
self.URL_AUTH_STEP1])
except Exception, e:
raise ValueError("MotifAuthorizationManager.authorizeUser: Step 1 Failed With \"" + str(e) + "\"" )
########### STEP 2 ###########
nonce = None
try:
curlOut = subprocess.check_output(["curl",
"-b", self.cookieJar,
self.URL_AUTH_STEP2])
soup = BeautifulSoup(curlOut, "html.parser")
nonce = self.__getNonceFromSoup(soup)
except Exception, e:
raise ValueError("MotifAuthorizationManager.authorizeUser: Step 2 Failed With \"" + str(e) + "\"" )
if nonce == None:
raise ValueError("MotifAuthorizationManager.authorizeUser: Failed to get Nonce" )
############ STEP 3 ############
try:
curlOut = subprocess.check_output(["curl",
"-b", self.cookieJar,
"-H", "X-Motif-Page: TWO_FACTOR_AUTH",
"-H", "Origin: https://trader.motifinvesting.com",
"-H", "X-Requested-With: XMLHttpRequest",
"-H", "X-Motif-Nonce: " + nonce + "",
"-H", "X-FirePHP-Version: 0.0.6",
"-A", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36",
"-e", "https://trader.motifinvesting.com/two-factor-auth",
"--data-raw", "phoneNumber=" + self.phone + "&authType=text&Nonce=" + nonce + "&Page=TWO_FACTOR_AUTH",
self.URL_AUTH_STEP3])
except Exception, e:
raise ValueError("MotifAuthorizationManager.authorizeUser: Step 3 Failed With \"" + str(e) + "\"" )
        ########### STEP 4 ############
        # use raw_input (not input) so the pin is read as a string on Python 2
        pin = str(raw_input("Enter Authorization Pin: "))
try:
curlOut = subprocess.check_output(["curl",
"-b", self.cookieJar, "-c", self.cookieJar,
"-H", "X-Motif-Page: TWO_FACTOR_AUTH",
"-H", "Origin: https://trader.motifinvesting.com",
"-H", "X-Requested-With: XMLHttpRequest",
"-H", "X-Motif-Nonce: " + nonce + "",
"-H", "X-FirePHP-Version: 0.0.6",
"-A", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36",
"-e", "https://trader.motifinvesting.com/two-factor-auth",
"--data-raw", "confirmCode=" + pin + "&Nonce=" + nonce + "&Page=TWO_FACTOR_AUTH",
self.URL_AUTH_STEP4])
except Exception, e:
raise ValueError("MotifAuthorizationManager.authorizeUser: Step 3 Failed With \"" + str(e) + "\"" )
    def isUserAuthorized(self):
        statusCode = self.rh.gethtml(self.URL_SETTINGS)[0]
        return 200 <= statusCode < 300
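# Illustrative usage sketch (credentials, phone number, and cookie path are
# placeholders; the two-factor step prompts for the texted pin):
#
#   manager = MotifAuthorizationManager("user@example.com", "hunter2",
#                                       "5551234567", "cookies/motif.txt")
#   if not manager.isUserAuthorized():
#       manager.authorizeUser()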
|
|
"""Tests for cam module."""
import socket
from collections import OrderedDict
from unittest.mock import MagicMock, patch
import pytest
from leicacam.cam import CAM, bytes_as_dict, tuples_as_bytes, tuples_as_dict
# pylint: disable=redefined-outer-name, unnecessary-pass
def flush():
"""Flush the socket."""
pass
@pytest.fixture
def mock_socket():
"""Return a mock echo socket."""
echo_socket = EchoSocket()
echo_socket.close = MagicMock()
return echo_socket
@pytest.fixture
def cam(mock_socket):
"""Yield a CAM instance with a mock socket."""
with patch("socket.socket") as mock_socket_class:
mock_socket_class.return_value = mock_socket
mock_cam = CAM()
mock_cam.flush = flush
yield mock_cam
class EchoSocket:
"""Dummy echo socket for mocking."""
msg = ""
def send(self, msg):
"""Send a message."""
self.msg = msg
return len(msg)
def recv(self, buffer_size):
"""Receive a message."""
return self.msg[0:buffer_size]
def connect(self, where):
"""Connect to the socket."""
pass
def settimeout(self, timeout):
"""Set a timeout."""
pass
def close(self):
"""Close the socket."""
pass
# TESTS
# - a key (here "cli") is overridden if defined several times
# - the prefix is added automatically
# - non-string types (integer, float) are converted to strings
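# Roughly, a command like [("cmd", "enableall"), ("integer", 1234)] is
# serialized to b"/cmd:enableall /integer:1234" on the wire (see
# test_send_bytes below for the bytes form this sketch assumes).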
def test_echo(cam):
"""Prefix + command sent should be same as echoed socket message."""
cmd = [
("cli", "custom"),
("cmd", "enableall"),
("value", "true"),
("integer", 1234),
("float", 0.00234),
]
cam.send(cmd)
response = cam.receive()[0]
sent = tuples_as_dict(cam.prefix + cmd)
assert sent == response
def test_send_bytes(cam):
"""Test send a bytes string."""
cmd = b"/cmd:enableall /value:true"
cam.send(cmd)
response = cam.receive()[0]
sent = bytes_as_dict(cam.prefix_bytes + cmd)
assert sent == response
def test_flush():
"""Test flush method."""
cmd = b"/cmd:startscan\n"
mock_recv = MagicMock()
mock_recv.side_effect = [cmd, socket.error()]
with patch("socket.socket") as mock_socket_class:
mock_socket = MagicMock()
mock_socket_class.return_value = mock_socket
cam = CAM()
cam.socket.recv = mock_recv
cam.flush()
assert len(mock_recv.mock_calls) == 2
_, args, _ = mock_recv.mock_calls[0]
assert args == (1024,)
def test_receive_error(cam):
"""Test receive method when a socket error happens."""
cam.socket.recv = MagicMock()
cam.socket.recv.side_effect = socket.error()
response = cam.receive()
assert isinstance(response, list)
assert not response
def test_commands(cam):
"""Short hand commands should work as intended."""
# get_information
cmd = cam.prefix + [("cmd", "getinfo"), ("dev", "stage")]
information = cam.get_information()
should_be = tuples_as_dict(cmd)
assert information == should_be
# start_scan
cmd = cam.prefix + [("cmd", "startscan")]
response = cam.start_scan()
should_be = tuples_as_dict(cmd)
assert response == should_be
# stop_scan
cmd = cam.prefix + [("cmd", "stopscan")]
response = cam.stop_scan()
should_be = tuples_as_dict(cmd)
assert response == should_be
# autofocus_scan
cmd = cam.prefix + [("cmd", "autofocusscan")]
response = cam.autofocus_scan()
should_be = tuples_as_dict(cmd)
assert response == should_be
# pause_scan
cmd = cam.prefix + [("cmd", "pausescan")]
response = cam.pause_scan()
should_be = tuples_as_dict(cmd)
assert response == should_be
# enable
cmd = [
("cmd", "enable"),
("slide", str(0)),
("wellx", str(1)),
("welly", str(1)),
("fieldx", str(1)),
("fieldy", str(1)),
("value", "true"),
]
cmd = cam.prefix + cmd
response = cam.enable()
should_be = tuples_as_dict(cmd)
assert response == should_be
# disable
cmd = [
("cmd", "enable"),
("slide", str(0)),
("wellx", str(1)),
("welly", str(1)),
("fieldx", str(1)),
("fieldy", str(1)),
("value", "false"),
]
cmd = cam.prefix + cmd
response = cam.disable()
should_be = tuples_as_dict(cmd)
assert response == should_be
# enable_all
cmd = [("cmd", "enableall"), ("value", "true")]
cmd = cam.prefix + cmd
response = cam.enable_all()
should_be = tuples_as_dict(cmd)
assert response == should_be
# disable_all
cmd = [("cmd", "enableall"), ("value", "false")]
cmd = cam.prefix + cmd
response = cam.disable_all()
should_be = tuples_as_dict(cmd)
assert response == should_be
# save_template
cmd = [
("sys", "0"),
("cmd", "save"),
("fil", "{ScanningTemplate}leicacam.xml"),
]
cmd = cam.prefix + cmd
response = cam.save_template()
should_be = tuples_as_dict(cmd)
assert response == should_be
def test_load(cam):
"""load_template should strip path and .xml from filename."""
response = cam.load_template("test")
assert response["fil"] == "{ScanningTemplate}test"
response = cam.load_template("test.xml")
assert response["fil"] == "{ScanningTemplate}test"
response = cam.load_template("/path/to/{ScanningTemplate}test.xml")
assert response["fil"] == "{ScanningTemplate}test"
def test_wait_for_timeout(cam):
"""Test wait_for when timeout expires."""
cmd = "cmd"
value = "stopscan"
response = cam.wait_for(cmd, value, 0)
assert response == OrderedDict()
def test_wait_for_long_timeout(cam, mock_socket):
"""Test wait_for when timeout expires."""
cmd = "cmd"
value = "stopscan"
timeout = 1
mock_socket.recv = MagicMock()
mock_socket.recv.return_value = b""
time_patch = patch("leicacam.cam.time", side_effect=[0, 0, 120])
sleep_patch = patch("leicacam.cam.sleep")
with sleep_patch, time_patch:
response = cam.wait_for(cmd, value, timeout)
assert response == OrderedDict()
def test_wait_for_any_value(cam):
"""Test wait_for a command and any value."""
cmd = [("cmd", "startscan")]
cam.send(cmd)
response = cam.wait_for("cmd", None)
cmd = cam.prefix + cmd
should_be = tuples_as_dict(cmd)
assert response == should_be
def test_close(cam, mock_socket):
"""Test closing the socket."""
cam.close()
assert mock_socket.close.call_count == 1
def test_receive_colon_string(cam):
"""Test bytes_as_dict function receiving a string with colon."""
cmd = [("relpath", "C:\\image.ome.tif")]
cam.socket.recv = MagicMock()
cam.socket.recv.return_value = tuples_as_bytes(cmd)
response = cam.receive()
assert isinstance(response, list)
for msg in response:
assert msg == OrderedDict(cmd)
def test_receive_bad_string(cam):
"""Test bytes_as_dict function receiving an incomplete command."""
cmd = [("cmd", "enableall")]
cmd_string = "/cmd:enableall /value"
cam.socket.recv = MagicMock()
cam.socket.recv.return_value = cmd_string.encode()
response = cam.receive()
assert isinstance(response, list)
for msg in response:
assert msg == OrderedDict(cmd)
def test_receive_terminate_null_byte(cam):
"""Test _parse_receive function parsing a message with null byte."""
start_cmd = [("cmd", "startscan")]
stop_cmd = [("cmd", "stopscan")]
all_cmds = [OrderedDict(start_cmd), OrderedDict(stop_cmd)]
cmd_byte = b"/cmd:startscan\x00/cmd:stopscan\r\n"
cam.socket.recv = MagicMock()
cam.socket.recv.return_value = cmd_byte
response = cam.receive()
assert isinstance(response, list)
assert response == all_cmds
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Heuristic guessing of statistical types based on data.
The heuristics implemented here are ad-hoc, and do not implement any
sort of Bayesian model selection. They are based on crude attempts to
parse data as numbers, and on fixed parameters for distinguishing
nominal and numerical data. No columns are ever guessed to be
cyclic.
"""
import collections
import math
import bayeslite.core as core
from bayeslite.sqlite3_util import sqlite3_quote_name
from bayeslite.util import casefold
from bayeslite.util import unique
def bayesdb_guess_population(bdb, population, table,
ifnotexists=None, **kwargs):
"""Heuristically guess a population schema for `table`.
Based on the data in `table`, create a population named
`population`.
    :param bool ifnotexists: if true and `population` already exists,
        do nothing; defaults to false.
:param dict kwargs: options to pass through to bayesdb_guess_stattypes.
In addition to statistical types, the overrides may specify
``key`` or ``ignore``, in which case those columns will not be
modeled at all.
"""
# Fill in default arguments.
if ifnotexists is None:
ifnotexists = False
with bdb.savepoint():
if core.bayesdb_has_population(bdb, population):
if ifnotexists:
return
else:
raise ValueError('Population exists: %r' % (population,))
qt = sqlite3_quote_name(table)
cursor = bdb.sql_execute('SELECT * FROM %s' % (qt,))
column_names = [d[0] for d in cursor.description]
rows = cursor.fetchall()
stattypes = [st[0] for st in
bayesdb_guess_stattypes(column_names, rows, **kwargs)]
# Convert the `key` column to an `ignore`.
replace = lambda s: 'ignore' if s == 'key' else s
column_names, stattypes = unzip([
(cn, replace(st)) for cn, st in zip(
column_names, stattypes)
])
if len([s for s in stattypes if s != 'ignore']) == 0:
raise ValueError(
'Table has no modeled columns: %s'
% (repr(table),))
qp = sqlite3_quote_name(population)
qcns = map(sqlite3_quote_name, column_names)
qsts = map(sqlite3_quote_name, stattypes)
qs = ';'.join(qcn + ' ' + qst for qcn, qst in zip(qcns, qsts))
bdb.execute('CREATE POPULATION %s FOR %s(%s)' % (qp, qt, qs))
def unzip(l):  # transpose a list of pairs into a pair of lists
xs = []
ys = []
for x, y in l:
xs.append(x)
ys.append(y)
return xs, ys
def bayesdb_guess_stattypes(column_names, rows, null_values=None,
numcat_count=None, numcat_ratio=None, distinct_ratio=None,
nullify_ratio=None, overrides=None):
"""Heuristically guess statistical types for the data in `rows`.
Return a list of (statistical type, reason) corresponding to the columns
named in the list `column_names`.
:param set null_values: values to nullify.
:param int numcat_count: number of distinct values below which
columns whose values can all be parsed as numbers will be
considered nominal anyway
:param real numcat_ratio: ratio of distinct values to total values
below which columns whose values can all be parsed as numbers
will be considered nominal anyway
:param real distinct_ratio: ratio of distinct values to total values
above which a column will be ignored as a pseudo-key
(only if count > numcat_count).
:param real nullify_ratio: ratio of count of the most numerous value to
total number of values above which the most numerous value should be
nullified (set to 1 to turn off).
:param list overrides: list of ``(name, stattype)``, overriding
any guessed statistical type for columns by those names
In addition to statistical types, the overrides may specify
``key`` or ``ignore``.
"""
# Fill in default arguments.
if null_values is None:
null_values = set(("", "N/A", "none", "None"))
if numcat_count is None:
numcat_count = 20
if numcat_ratio is None:
numcat_ratio = 0.02
if distinct_ratio is None:
distinct_ratio = 0.9
if nullify_ratio is None:
nullify_ratio = 0.9
if overrides is None:
overrides = []
# Build a set of the column names.
column_name_set = set()
duplicates = set()
for name in column_names:
if casefold(name) in column_name_set:
duplicates.add(name)
column_name_set.add(casefold(name))
if 0 < len(duplicates):
raise ValueError(
'Duplicate column names: %s'
            % (repr(list(duplicates)),))
# Build a map for the overrides.
#
# XXX Support more than just stattype: allow arbitrary column
# descriptions.
override_map = {}
unknown = set()
duplicates = set()
for name, stattype in overrides:
if casefold(name) not in column_name_set:
unknown.add(name)
continue
if casefold(name) in override_map:
duplicates.add(name)
continue
override_map[casefold(name)] = casefold(stattype)
if 0 < len(unknown):
raise ValueError(
'Unknown columns overridden: %s'
% (repr(list(unknown)),))
if 0 < len(duplicates):
raise ValueError(
'Duplicate columns overridden: %s'
% (repr(list(duplicates)),))
# Sanity-check the inputs.
ncols = len(column_names)
assert ncols == len(unique(map(casefold, column_names)))
for ri, row in enumerate(rows):
if len(row) < ncols:
raise ValueError(
'Row %d: Too few columns: %d < %d'
% (ri, len(row), ncols))
if len(row) > ncols:
raise ValueError(
'Row %d: Too many columns: %d > %d'
% (ri, len(row), ncols))
# Find a key first, if it has been specified as an override.
key = None
duplicate_keys = set()
for ci, column_name in enumerate(column_names):
if casefold(column_name) in override_map:
if override_map[casefold(column_name)] == 'key':
if key is not None:
duplicate_keys.add(column_name)
continue
column = [row[ci] for row in rows]
ints = integerify(column)
if ints:
column = ints
if not keyable_p(column):
raise ValueError(
'Column non-unique but specified as key: %s'
% (repr(column_name),))
key = column_name
if 0 < len(duplicate_keys):
raise ValueError(
'Multiple columns overridden as keys: %s'
% (repr(list(duplicate_keys)),))
# Now go through and guess the other column stattypes or use the override.
stattypes = []
for ci, column_name in enumerate(column_names):
if casefold(column_name) in override_map:
stattype = override_map[casefold(column_name)]
reason = 'User override.'
else:
column = nullify(null_values, rows, ci)
[stattype, reason] = guess_column_stattype(
column,
distinct_ratio=distinct_ratio,
nullify_ratio=nullify_ratio,
numcat_count=numcat_count,
numcat_ratio=numcat_ratio,
have_key=(key is not None)
)
if stattype == 'key':
key = column_name
stattypes.append([stattype, reason])
return stattypes
def guess_column_stattype(column, reason='', **kwargs):
counts = count_values(column)
if None in counts:
del counts[None]
if len(counts) < 2:
return [
'ignore',
'%s There is only one unique value.' % (reason,)
]
(most_numerous_key, most_numerous_count) = sorted(
counts.items(), key=lambda item: item[1], reverse=True)[0]
if most_numerous_count / float(len(column)) > kwargs['nullify_ratio']:
column = [None if v == most_numerous_key else v for v in column]
return guess_column_stattype(
column,
'%s More than %d percent of the values are the same, so the '
'statistical type was guessed based on the remainder of the '
'values.' % (reason, int(100 * kwargs['nullify_ratio']),),
**kwargs
)
numericable = True
ints = integerify(column)
if ints:
column = ints
else:
floats = floatify(column)
if floats:
column = floats
else:
numericable = False
if not kwargs['have_key'] and keyable_p(column):
return [
'key',
'%s This was the first column in the table with all distinct '
'integers or strings.' % (reason,)
]
elif numericable and \
numerical_p(column, kwargs['numcat_count'], kwargs['numcat_ratio']):
return [
'numerical',
            '%s There are more than %d unique numerical values, '
            'and they account for more than %d percent of all '
            'values in the column.'
% (reason, kwargs['numcat_count'],
int(100 * kwargs['numcat_ratio']))
]
elif (len(counts) > kwargs['numcat_count'] and
len(counts) / float(len(column)) > kwargs['distinct_ratio']):
return [
'ignore',
'%s There are more than %d distinct values and they account '
'for more than %d percent of the values in the column, so the '
'column is ignored as a pseudo-key.'
% (reason, kwargs['numcat_count'],
int(100 * kwargs['distinct_ratio']))
]
else:
if numericable:
return [
'nominal',
                '%s There are at most %d distinct numerical '
                'values, or the ratio of distinct values to total values '
                'is at most %d percent.'
% (reason, kwargs['numcat_count'],
int(100 * kwargs['numcat_ratio']),)
]
else:
return [
'nominal',
'%s The values are nonnumerical.' % (reason,)
]
def nullify(null_values, rows, ci):
return [row[ci] if row[ci] not in null_values else None for row in rows]
def integerify(column):
result = []
if float in [v.__class__ for v in column ]:
return None
try:
result = [int(v) for v in column]
except (ValueError, TypeError):
return None
return result
def floatify(column):
result = []
try:
result = [float(v) if v is not None else float('NaN') for v in column]
except (ValueError, TypeError):
return None
return result
def keyable_p(column):
# `unique' can't cope with NaNs, so reject them early.
if any(v is None or (isinstance(v, float) and math.isnan(v))
for v in column):
return False
try:
column_floats = [float(v) for v in column]
if not all(v.is_integer() for v in column_floats):
return False
return len(column) == len(unique(column))
except ValueError:
return len(column) == len(unique(column))
def numerical_p(column, count_cutoff, ratio_cutoff):
nu = len(unique([v for v in column if not math.isnan(v)]))
if nu <= count_cutoff:
return False
if float(nu) / float(len(column)) <= ratio_cutoff:
return False
return True
def count_values(column):
counts = collections.defaultdict(int)
for v in column:
counts[v] += 1
return counts
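# Illustrative sketch of calling the guesser directly (column names and rows
# are made up; the result is one [stattype, reason] pair per column):
#
#   names = ['id', 'age', 'city']
#   rows = [(1, 34, 'Boston'), (2, 29, 'Cambridge'), (3, 41, 'Somerville')]
#   guesses = bayesdb_guess_stattypes(names, rows)
#   # 'id' is all-distinct integers, so it is guessed as the key; with this
#   # few rows the other columns fall below numcat_count and come out nominal.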
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for API tests."""
# NOTE: Ported from ceilometer/tests/api.py (subsequently moved to
# ceilometer/tests/api/__init__.py). This should be oslo'ified:
# https://bugs.launchpad.net/ironic/+bug/1255115.
# NOTE(deva): import auth_token so we can override a config option
from keystonemiddleware import auth_token # noqa
import mock
from oslo_config import cfg
import pecan
import pecan.testing
from six.moves.urllib import parse as urlparse
from magnum.api import hooks
from magnum.tests.unit.db import base
PATH_PREFIX = '/v1'
class FunctionalTest(base.DbTestCase):
"""Used for functional tests of Pecan controllers where you need to
test your literal application and its integration with the
framework.
"""
def setUp(self):
super(FunctionalTest, self).setUp()
cfg.CONF.set_override("auth_version", "v2.0",
group='keystone_authtoken')
cfg.CONF.set_override("admin_user", "admin",
group='keystone_authtoken')
self.app = self._make_app()
def reset_pecan():
pecan.set_config({}, overwrite=True)
self.addCleanup(reset_pecan)
p = mock.patch('magnum.api.controllers.v1.Controller._check_version')
self._check_version = p.start()
self.addCleanup(p.stop)
def _make_app(self, enable_acl=False):
# Determine where we are so we can set up paths in the config
root_dir = self.get_path()
self.config = {
'app': {
'root': 'magnum.api.controllers.root.RootController',
'modules': ['magnum.api'],
'static_root': '%s/public' % root_dir,
'template_path': '%s/api/templates' % root_dir,
'enable_acl': enable_acl,
'acl_public_routes': ['/', '/v1'],
'hooks': [
hooks.ContextHook(),
hooks.RPCHook(),
hooks.NoExceptionTracebackHook(),
],
},
}
return pecan.testing.load_test_app(self.config)
def _request_json(self, path, params, expect_errors=False, headers=None,
method="post", extra_environ=None, status=None,
path_prefix=PATH_PREFIX):
"""Sends simulated HTTP request to Pecan test app.
:param path: url path of target service
:param params: content for wsgi.input of request
:param expect_errors: Boolean value; whether an error is expected based
on request
:param headers: a dictionary of headers to send along with the request
:param method: Request method type. Appropriate method function call
should be used rather than passing attribute in.
:param extra_environ: a dictionary of environ variables to send along
with the request
:param status: expected status code of response
:param path_prefix: prefix of the url path
"""
full_path = path_prefix + path
print('%s: %s %s' % (method.upper(), full_path, params))
response = getattr(self.app, "%s_json" % method)(
str(full_path),
params=params,
headers=headers,
status=status,
extra_environ=extra_environ,
expect_errors=expect_errors
)
print('GOT:%s' % response)
return response
def put_json(self, path, params, expect_errors=False, headers=None,
extra_environ=None, status=None):
"""Sends simulated HTTP PUT request to Pecan test app.
:param path: url path of target service
:param params: content for wsgi.input of request
:param expect_errors: Boolean value; whether an error is expected based
on request
:param headers: a dictionary of headers to send along with the request
:param extra_environ: a dictionary of environ variables to send along
with the request
:param status: expected status code of response
"""
return self._request_json(path=path, params=params,
expect_errors=expect_errors,
headers=headers, extra_environ=extra_environ,
status=status, method="put")
def post_json(self, path, params, expect_errors=False, headers=None,
extra_environ=None, status=None):
"""Sends simulated HTTP POST request to Pecan test app.
:param path: url path of target service
:param params: content for wsgi.input of request
:param expect_errors: Boolean value; whether an error is expected based
on request
:param headers: a dictionary of headers to send along with the request
:param extra_environ: a dictionary of environ variables to send along
with the request
:param status: expected status code of response
"""
return self._request_json(path=path, params=params,
expect_errors=expect_errors,
headers=headers, extra_environ=extra_environ,
status=status, method="post")
def patch_json(self, path, params, expect_errors=False, headers=None,
extra_environ=None, status=None):
"""Sends simulated HTTP PATCH request to Pecan test app.
:param path: url path of target service
:param params: content for wsgi.input of request
:param expect_errors: Boolean value; whether an error is expected based
on request
:param headers: a dictionary of headers to send along with the request
:param extra_environ: a dictionary of environ variables to send along
with the request
:param status: expected status code of response
"""
return self._request_json(path=path, params=params,
expect_errors=expect_errors,
headers=headers, extra_environ=extra_environ,
status=status, method="patch")
def delete(self, path, expect_errors=False, headers=None,
extra_environ=None, status=None, path_prefix=PATH_PREFIX):
"""Sends simulated HTTP DELETE request to Pecan test app.
:param path: url path of target service
:param expect_errors: Boolean value; whether an error is expected based
on request
:param headers: a dictionary of headers to send along with the request
:param extra_environ: a dictionary of environ variables to send along
with the request
:param status: expected status code of response
:param path_prefix: prefix of the url path
"""
full_path = path_prefix + path
print('DELETE: %s' % (full_path))
response = self.app.delete(str(full_path),
headers=headers,
status=status,
extra_environ=extra_environ,
expect_errors=expect_errors)
print('GOT:%s' % response)
return response
def get_json(self, path, expect_errors=False, headers=None,
extra_environ=None, q=None, path_prefix=PATH_PREFIX,
**params):
"""Sends simulated HTTP GET request to Pecan test app.
:param path: url path of target service
        :param expect_errors: Boolean value; whether an error is expected based
on request
:param headers: a dictionary of headers to send along with the request
:param extra_environ: a dictionary of environ variables to send along
with the request
:param q: list of queries consisting of: field, value, op, and type
keys
:param path_prefix: prefix of the url path
:param params: content for wsgi.input of request
"""
if q is None:
q = []
full_path = path_prefix + path
query_params = {'q.field': [],
'q.value': [],
'q.op': [],
}
for query in q:
for name in ['field', 'op', 'value']:
query_params['q.%s' % name].append(query.get(name, ''))
all_params = {}
all_params.update(params)
if q:
all_params.update(query_params)
print('GET: %s %r' % (full_path, all_params))
response = self.app.get(full_path,
params=all_params,
headers=headers,
extra_environ=extra_environ,
expect_errors=expect_errors)
if not expect_errors:
response = response.json
print('GOT:%s' % response)
return response
def validate_link(self, link, bookmark=False):
"""Checks if the given link can get correct data."""
# removes the scheme and net location parts of the link
url_parts = list(urlparse.urlparse(link))
url_parts[0] = url_parts[1] = ''
# bookmark link should not have the version in the URL
if bookmark and url_parts[2].startswith(PATH_PREFIX):
return False
full_path = urlparse.urlunparse(url_parts)
try:
self.get_json(full_path, path_prefix='')
return True
except Exception:
return False
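# Illustrative use of the ``q`` query format accepted by get_json (resource
# path and field names here are hypothetical):
#
#   self.get_json('/bays', q=[{'field': 'name', 'op': 'eq', 'value': 'bay1'}])
#
# which is sent as q.field=name, q.op=eq, q.value=bay1 in the query string.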
|
|
# -*- coding: utf-8 -*-
import re
import copy
import lxml.cssselect
import lxml.etree
import parslepy.funcs
class Selector(object):
"""
Class of objects returned by :class:`.SelectorHandler` instances'
(and subclasses) :meth:`~.SelectorHandler.make` method.
"""
def __init__(self, selector):
self.selector = selector
def __repr__(self):
return "<Selector: inner=%s>" % self.selector
class SelectorHandler(object):
"""
    Called when building abstract Parsley trees
    and when extracting object values during the actual parsing
    of documents.
This should be subclassed to implement the selector processing logic
you need for your Parsley handling.
All 3 methods, :meth:`~.SelectorHandler.make`, :meth:`~.SelectorHandler.select`
and :meth:`~.SelectorHandler.extract` MUST be overridden
"""
DEBUG = False
def __init__(self, debug=False):
if debug:
self.DEBUG = True
def make(self, selection_string):
"""
Interpret a selection_string as a selector
for elements or element attributes in a (semi-)structured document.
In case of XPath selectors, this can also be a function call.
:param selection_string: a string representing a selector
:rtype: :class:`.Selector`
"""
raise NotImplementedError
def select(self, document, selector):
"""
Apply the selector on the document
:param document: lxml-parsed document
:param selector: input :class:`.Selector` to apply on the document
:rtype: lxml.etree.Element list
"""
raise NotImplementedError
def extract(self, document, selector):
"""
Apply the selector on the document
and return a value for the matching elements (text content or
element attributes)
:param document: lxml-parsed document
:param selector: input :class:`.Selector` to apply on the document
:rtype: depends on the selector (string, boolean value, ...)
Return value can be single- or multi-valued.
"""
raise NotImplementedError
class XPathSelectorHandler(SelectorHandler):
"""
This selector only accepts XPath selectors.
It understands what lxml.etree.XPath understands, that is XPath 1.0
expressions
"""
EXPECTED_NON_ELEMENT_TYPES = [
bool,
int,
float,
str,
]
try:
unicode # Python 2.x
EXPECTED_NON_ELEMENT_TYPES.append(unicode)
except NameError:
pass
LOCAL_NAMESPACE = 'local-parslepy'
LOCAL_XPATH_EXTENSIONS = {
(LOCAL_NAMESPACE, 'text') : parslepy.funcs.xpathtostring,
(LOCAL_NAMESPACE, 'textnl') : parslepy.funcs.xpathtostringnl,
# aliases
(LOCAL_NAMESPACE, 'str') : parslepy.funcs.xpathtostring,
(LOCAL_NAMESPACE, 'strnl') : parslepy.funcs.xpathtostringnl,
(LOCAL_NAMESPACE, 'nl') : parslepy.funcs.xpathtostringnl,
(LOCAL_NAMESPACE, 'html') : parslepy.funcs.xpathtohtml,
(LOCAL_NAMESPACE, 'xml') : parslepy.funcs.xpathtoxml,
(LOCAL_NAMESPACE, 'strip') : parslepy.funcs.xpathstrip,
(LOCAL_NAMESPACE, 'attrname') : parslepy.funcs.xpathattrname,
(LOCAL_NAMESPACE, 'attrnames') : parslepy.funcs.xpathattrname, # alias that's probably a better fit
}
EXSLT_NAMESPACES={
'date': 'http://exslt.org/dates-and-times',
'math': 'http://exslt.org/math',
're': 'http://exslt.org/regular-expressions',
'set': 'http://exslt.org/sets',
'str': 'http://exslt.org/strings',
}
_extension_router = {}
SMART_STRINGS = False
SMART_STRINGS_FUNCTIONS = [
(LOCAL_NAMESPACE, 'attrname'),
(LOCAL_NAMESPACE, 'attrnames'),
]
_selector_cache = {}
def __init__(self, namespaces=None, extensions=None, context=None, debug=False):
"""
:param namespaces: namespace mapping as :class:`dict`
:param extensions: extension :class:`dict`
:param context: user-context passed to XPath extension functions
`namespaces` and `extensions` dicts should have the same format
as for `lxml`_:
see http://lxml.de/xpathxslt.html#namespaces-and-prefixes
and `<http://lxml.de/extensions.html#xpath-extension-functions>`_
Extension functions have a slightly different signature than
pure-lxml extension functions: they must expect a user-context
as first argument; all other arguments are the same as for
`lxml` extensions.
`context` will be passed as first argument to extension functions
registered through `extensions`.
Alternative: user-context can also be passed to :meth:`parslepy.base.Parselet.parse`
"""
super(XPathSelectorHandler, self).__init__(debug=debug)
# support EXSLT extensions
self.namespaces = copy.copy(self.EXSLT_NAMESPACES)
# add local XPath extension functions
self._add_parsley_ns(self.namespaces)
self.extensions = copy.copy(self.LOCAL_XPATH_EXTENSIONS)
# add user-defined extensions
self._user_extensions = None
self.context = context
if namespaces:
self.namespaces.update(namespaces)
if extensions:
self._user_extensions = extensions
self._process_extensions(extensions)
# some functions need smart_strings=True
self._set_smart_strings_regexps()
def _test_smart_strings_needed(self, selector):
return any([r.search(selector)
for r in self.smart_strings_regexps])
def _get_smart_strings_regexps(self, ns, fname):
# find out what prefixes match the supplied namespace
prefix_matches = []
for prefix, namespace in self.namespaces.items():
if namespace == ns:
prefix_matches.append(prefix)
        return [re.compile(r"%s:%s\(" % (p, fname)) for p in prefix_matches]
def _set_smart_strings_regexps(self):
self.smart_strings_regexps = []
# smart_strings for built-in extensions
for (ns, fname) in self.SMART_STRINGS_FUNCTIONS:
self.smart_strings_regexps.extend(
self._get_smart_strings_regexps(ns, fname))
# smart_strings for user_defined extensions
if self._user_extensions:
for (ns, fname) in self._user_extensions:
self.smart_strings_regexps.extend(
self._get_smart_strings_regexps(ns, fname))
def _make_xpathextension(self, ns, fname):
def xpath_ext(*args):
return self._extension_router[(ns, fname)](self.context, *args)
extension_name = str("xpext_%s_%d" % (fname, hash(ns)))
xpath_ext.__doc__ = "docstring for %s" % extension_name
xpath_ext.__name__ = extension_name
setattr(self, xpath_ext.__name__, xpath_ext)
return xpath_ext
def _process_extensions(self, extensions):
for (ns, fname), func in extensions.items():
self._extension_router[(ns, fname)] = func
self.extensions[(ns, fname)] = self._make_xpathextension(ns=ns, fname=fname)
@classmethod
def _add_parsley_ns(cls, namespace_dict):
"""
Extend XPath evaluation with Parsley extensions' namespace
"""
namespace_dict.update({
'parslepy' : cls.LOCAL_NAMESPACE,
'parsley' : cls.LOCAL_NAMESPACE,
})
return namespace_dict
def make(self, selection):
"""
XPath expression can also use EXSLT functions (as long as they are
understood by libxslt)
"""
cached = self._selector_cache.get(selection)
if cached:
return cached
try:
selector = lxml.etree.XPath(selection,
namespaces = self.namespaces,
extensions = self.extensions,
smart_strings=(self.SMART_STRINGS
or self._test_smart_strings_needed(selection)),
)
except lxml.etree.XPathSyntaxError as syntax_error:
syntax_error.msg += ": %s" % selection
raise syntax_error
except Exception as e:
if self.DEBUG:
print(repr(e), selection)
raise
# wrap it/cache it
self._selector_cache[selection] = Selector(selector)
return self._selector_cache[selection]
@classmethod
def select(cls, document, selector):
try:
return selector.selector(document)
except Exception as e:
if cls.DEBUG:
print(str(e))
return
def extract(self, document, selector, debug_offset=''):
"""
Try and convert matching Elements to unicode strings.
If this fails, the selector evaluation probably already
returned some string(s) of some sort, or boolean value,
or int/float, so return that instead.
"""
selected = self.select(document, selector)
if selected is not None:
if isinstance(selected, (list, tuple)):
# FIXME: return None or return empty list?
if not len(selected):
return
return [self._extract_single(m) for m in selected]
else:
return self._extract_single(selected)
# selector did not match anything
else:
if self.DEBUG:
print(debug_offset, "selector did not match anything; return None")
return None
def _default_element_extract(self, element):
"""
Overridable method to change how matching Elements
are represented in output
"""
return parslepy.funcs.extract_text(element)
def _extract_single(self, retval):
# XPath compiled expressions (and CSSSelect translations)
# can return different types
# See http://lxml.de/xpathxslt.html#xpath-return-values
# - True or False, when the XPath expression
# has a boolean result
# - a float, when the XPath expression has a numeric result
# (integer or float)
# - a 'smart' string (as described below),
# when the XPath expression has a string result.
# - a list of items, when the XPath expression has a list as result.
# The items may include Elements
# (also comments and processing instructions),
# strings and tuples.
#
# Note that in the default implementation,
# smart strings are disabled
if type(retval) == lxml.etree._Element:
return self._default_element_extract(retval)
elif type(retval) == lxml.etree._Comment:
return self._default_element_extract(retval)
elif isinstance(retval, tuple(self.EXPECTED_NON_ELEMENT_TYPES)):
return retval
else:
            raise Warning("unusual type %s" % type(retval))
try:
    from cssselect import HTMLTranslator, ExpressionError
    from cssselect.xpath import _unicode_safe_getattr, XPathExpr
class CssTranslator(HTMLTranslator):
def xpath_pseudo_element(self, xpath, pseudo_element):
try:
from cssselect.parser import FunctionalPseudoElement
if isinstance(pseudo_element, FunctionalPseudoElement):
method = 'xpath_%s_functional_pseudo_element' % (
pseudo_element.name.replace('-', '_'))
method = _unicode_safe_getattr(self, method, None)
if not method:
raise ExpressionError(
"The functional pseudo-element ::%s() is unknown"
% pseudo_element.name)
xpath = method(xpath, pseudo_element.arguments)
else:
method = 'xpath_%s_simple_pseudo_element' % (
pseudo_element.replace('-', '_'))
method = _unicode_safe_getattr(self, method, None)
if not method:
raise ExpressionError(
"The pseudo-element ::%s is unknown"
% pseudo_element)
xpath = method(xpath)
except ImportError:
pass
return xpath
# functional pseudo-element:
# element's attribute by name
def xpath_attr_functional_pseudo_element(self, xpath, arguments):
attribute_name = arguments[0].value
other = XPathExpr('@%s' % attribute_name, '', )
return xpath.join('/', other)
# pseudo-element:
# element's text() nodes
def xpath_text_simple_pseudo_element(self, xpath):
other = XPathExpr('text()', '', )
return xpath.join('/', other)
# pseudo-element:
# element's comment() nodes
def xpath_comment_simple_pseudo_element(self, xpath):
other = XPathExpr('comment()', '', )
return xpath.join('/', other)
css_translator = CssTranslator()
def css_to_xpath(css):
return css_translator.css_to_xpath(css)
except ImportError:
def css_to_xpath(css):
return lxml.cssselect.css_to_xpath(css)
class DefaultSelectorHandler(XPathSelectorHandler):
"""
Default selector logic, loosely based on the original
`Parsley` implementation.
    This handler understands what cssselect and lxml.etree.XPath understand,
    that is (roughly) XPath 1.0 and CSS3 for things that don't need browser context
"""
# newer lxml version (>3) raise SelectorSyntaxError (directly from cssselect)
# for invalid CSS selectors
# but older lxml (2.3.8 for example) have cssselect included
# and for some selectors raise AssertionError and TypeError instead
CSSSELECT_SYNTAXERROR_EXCEPTIONS = set([
# we could use lxml.cssselect.SelectorError (parent class for both),
# but for lxml<3, they're not related
lxml.cssselect.SelectorSyntaxError,
# for unsupported pseudo-class or XPath namespaces prefix syntax
lxml.cssselect.ExpressionError,
])
# this is to add AssertionError and TypeError if lxml < 3.0.0
for s in ('#a.', '//h1'):
try:
lxml.cssselect.CSSSelector(s)
except Exception as e:
CSSSELECT_SYNTAXERROR_EXCEPTIONS.add(type(e))
# example: "a img @src" (fetch the 'src' attribute of an IMG tag)
# other example: "im|img @im|src" when using namespace prefixes
REGEX_ENDING_ATTRIBUTE = re.compile(r'^(?P<expr>.+)\s+(?P<attr>@[\:|\w_\d-]+)$')
def make(self, selection):
"""
Scopes and selectors are tested in this order:
* is this a CSS selector with an appended @something attribute?
* is this a regular CSS selector?
* is this an XPath expression?
        XPath expressions can also use EXSLT functions (as long as they are
        understood by libxslt)
"""
cached = self._selector_cache.get(selection)
if cached:
return cached
        # self.namespaces (built in __init__) already carries the EXSLT and
        # Parsley namespaces used by the compiled selectors below
try:
# CSS with attribute? (non-standard but convenient)
# CSS selector cannot select attributes
# this "<css selector> @<attr>" syntax is a Parsley extension
# construct CSS selector and append attribute to XPath expression
m = self.REGEX_ENDING_ATTRIBUTE.match(selection)
if m:
# the selector should be a regular CSS selector
cssxpath = css_to_xpath(m.group("expr"))
# if "|" is used for namespace prefix reference,
# convert it to XPath prefix syntax
attribute = m.group("attr").replace('|', ':')
cssxpath = "%s/%s" % (cssxpath, attribute)
else:
cssxpath = css_to_xpath(selection)
selector = lxml.etree.XPath(
cssxpath,
namespaces = self.namespaces,
extensions = self.extensions,
smart_strings=(self.SMART_STRINGS
or self._test_smart_strings_needed(selection)),
)
except tuple(self.CSSSELECT_SYNTAXERROR_EXCEPTIONS) as syntax_error:
if self.DEBUG:
print(repr(syntax_error), selection)
print("Try interpreting as XPath selector")
try:
selector = lxml.etree.XPath(selection,
namespaces = self.namespaces,
extensions = self.extensions,
smart_strings=(self.SMART_STRINGS
or self._test_smart_strings_needed(selection)),
)
except lxml.etree.XPathSyntaxError as syntax_error:
syntax_error.msg += ": %s" % selection
raise syntax_error
except Exception as e:
if self.DEBUG:
print(repr(e), selection)
raise
# for exception when trying to convert <cssselector> @<attribute> syntax
except lxml.etree.XPathSyntaxError as syntax_error:
syntax_error.msg += ": %s" % selection
raise syntax_error
except Exception as e:
if self.DEBUG:
print(repr(e), selection)
raise
# wrap it/cache it
self._selector_cache[selection] = Selector(selector)
return self._selector_cache[selection]
|
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package utils
# Module caffe2.python.utils
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from future.utils import viewitems
from google.protobuf.message import DecodeError, Message
from google.protobuf import text_format
import sys
import collections
import functools
import numpy as np
from six import integer_types, binary_type, text_type
def CaffeBlobToNumpyArray(blob):
if (blob.num != 0):
# old style caffe blob.
return (np.asarray(blob.data, dtype=np.float32)
.reshape(blob.num, blob.channels, blob.height, blob.width))
else:
# new style caffe blob.
return (np.asarray(blob.data, dtype=np.float32)
.reshape(blob.shape.dim))
def Caffe2TensorToNumpyArray(tensor):
if tensor.data_type == caffe2_pb2.TensorProto.FLOAT:
return np.asarray(
tensor.float_data, dtype=np.float32).reshape(tensor.dims)
elif tensor.data_type == caffe2_pb2.TensorProto.DOUBLE:
return np.asarray(
tensor.double_data, dtype=np.float64).reshape(tensor.dims)
elif tensor.data_type == caffe2_pb2.TensorProto.INT32:
return np.asarray(
tensor.int32_data, dtype=np.int).reshape(tensor.dims) # pb.INT32=>np.int use int32_data
elif tensor.data_type == caffe2_pb2.TensorProto.INT16:
return np.asarray(
tensor.int32_data, dtype=np.int16).reshape(tensor.dims) # pb.INT16=>np.int16 use int32_data
elif tensor.data_type == caffe2_pb2.TensorProto.UINT16:
return np.asarray(
tensor.int32_data, dtype=np.uint16).reshape(tensor.dims) # pb.UINT16=>np.uint16 use int32_data
elif tensor.data_type == caffe2_pb2.TensorProto.INT8:
return np.asarray(
tensor.int32_data, dtype=np.int8).reshape(tensor.dims) # pb.INT8=>np.int8 use int32_data
elif tensor.data_type == caffe2_pb2.TensorProto.UINT8:
return np.asarray(
tensor.int32_data, dtype=np.uint8).reshape(tensor.dims) # pb.UINT8=>np.uint8 use int32_data
else:
# TODO: complete the data type: bool, float16, byte, int64, string
raise RuntimeError(
"Tensor data type not supported yet: " + str(tensor.data_type))
def NumpyArrayToCaffe2Tensor(arr, name=None):
tensor = caffe2_pb2.TensorProto()
tensor.dims.extend(arr.shape)
if name:
tensor.name = name
if arr.dtype == np.float32:
tensor.data_type = caffe2_pb2.TensorProto.FLOAT
tensor.float_data.extend(list(arr.flatten().astype(float)))
elif arr.dtype == np.float64:
tensor.data_type = caffe2_pb2.TensorProto.DOUBLE
tensor.double_data.extend(list(arr.flatten().astype(np.float64)))
elif arr.dtype == np.int or arr.dtype == np.int32:
tensor.data_type = caffe2_pb2.TensorProto.INT32
tensor.int32_data.extend(arr.flatten().astype(np.int).tolist())
elif arr.dtype == np.int16:
tensor.data_type = caffe2_pb2.TensorProto.INT16
tensor.int32_data.extend(list(arr.flatten().astype(np.int16))) # np.int16=>pb.INT16 use int32_data
elif arr.dtype == np.uint16:
tensor.data_type = caffe2_pb2.TensorProto.UINT16
        tensor.int32_data.extend(list(arr.flatten().astype(np.uint16))) # np.uint16=>pb.UINT16 use int32_data
elif arr.dtype == np.int8:
tensor.data_type = caffe2_pb2.TensorProto.INT8
tensor.int32_data.extend(list(arr.flatten().astype(np.int8))) # np.int8=>pb.INT8 use int32_data
elif arr.dtype == np.uint8:
tensor.data_type = caffe2_pb2.TensorProto.UINT8
        tensor.int32_data.extend(list(arr.flatten().astype(np.uint8))) # np.uint8=>pb.UINT8 use int32_data
else:
# TODO: complete the data type: bool, float16, byte, int64, string
raise RuntimeError(
"Numpy data type not supported yet: " + str(arr.dtype))
return tensor
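# Round-trip sketch (hypothetical array and name):
#   t = NumpyArrayToCaffe2Tensor(np.zeros((2, 3), dtype=np.float32), 'w')
#   assert Caffe2TensorToNumpyArray(t).shape == (2, 3)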
def MakeArgument(key, value):
"""Makes an argument based on the value type."""
argument = caffe2_pb2.Argument()
argument.name = key
    try:
        # collections.abc on Python 3; the ABCs lived on collections in Python 2
        iterable = isinstance(value, collections.abc.Iterable)
    except AttributeError:
        iterable = isinstance(value, collections.Iterable)
# Fast tracking common use case where a float32 array of tensor parameters
# needs to be serialized. The entire array is guaranteed to have the same
# dtype, so no per-element checking necessary and no need to convert each
# element separately.
if isinstance(value, np.ndarray) and value.dtype.type is np.float32:
argument.floats.extend(value.flatten().tolist())
return argument
if isinstance(value, np.ndarray):
value = value.flatten().tolist()
elif isinstance(value, np.generic):
# convert numpy scalar to native python type
value = np.asscalar(value)
if type(value) is float:
argument.f = value
elif type(value) in integer_types or type(value) is bool:
# We make a relaxation that a boolean variable will also be stored as
# int.
argument.i = value
elif isinstance(value, binary_type):
argument.s = value
elif isinstance(value, text_type):
argument.s = value.encode('utf-8')
elif isinstance(value, caffe2_pb2.NetDef):
argument.n.CopyFrom(value)
elif isinstance(value, Message):
argument.s = value.SerializeToString()
elif iterable and all(type(v) in [float, np.float_] for v in value):
argument.floats.extend(
v.item() if type(v) is np.float_ else v for v in value
)
elif iterable and all(
type(v) in integer_types or type(v) in [bool, np.int_] for v in value
):
argument.ints.extend(
v.item() if type(v) is np.int_ else v for v in value
)
elif iterable and all(
isinstance(v, binary_type) or isinstance(v, text_type) for v in value
):
argument.strings.extend(
v.encode('utf-8') if isinstance(v, text_type) else v
for v in value
)
elif iterable and all(isinstance(v, caffe2_pb2.NetDef) for v in value):
argument.nets.extend(value)
elif iterable and all(isinstance(v, Message) for v in value):
argument.strings.extend(v.SerializeToString() for v in value)
else:
if iterable:
raise ValueError(
"Unknown iterable argument type: key={} value={}, value "
"type={}[{}]".format(
key, value, type(value), set(type(v) for v in value)
)
)
else:
raise ValueError(
"Unknown argument type: key={} value={}, value type={}".format(
key, value, type(value)
)
)
return argument
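# Usage sketch (hypothetical keys and values):
#   MakeArgument("exponent", 2.0).f         == 2.0
#   MakeArgument("kernel", 3).i             == 3
#   MakeArgument("order", u"NCHW").s        == b"NCHW"
#   MakeArgument("pads", [1, 1, 1, 1]).ints == [1, 1, 1, 1]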
def TryReadProtoWithClass(cls, s):
"""Reads a protobuffer with the given proto class.
Inputs:
cls: a protobuffer class.
s: a string of either binary or text protobuffer content.
Outputs:
proto: the protobuffer of cls
Throws:
google.protobuf.message.DecodeError: if we cannot decode the message.
"""
obj = cls()
try:
text_format.Parse(s, obj)
return obj
except text_format.ParseError:
obj.ParseFromString(s)
return obj
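# Usage sketch ('net.pbtxt' is a hypothetical text-format NetDef file):
#   with open('net.pbtxt') as f:
#       net = TryReadProtoWithClass(caffe2_pb2.NetDef, f.read())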
def GetContentFromProto(obj, function_map):
"""Gets a specific field from a protocol buffer that matches the given class
"""
for cls, func in viewitems(function_map):
if type(obj) is cls:
return func(obj)
def GetContentFromProtoString(s, function_map):
for cls, func in viewitems(function_map):
try:
obj = TryReadProtoWithClass(cls, s)
return func(obj)
except DecodeError:
continue
else:
raise DecodeError("Cannot find a fit protobuffer class.")
def ConvertProtoToBinary(proto_class, filename, out_filename):
"""Convert a text file of the given protobuf class to binary."""
    with open(filename) as fin:
        proto = TryReadProtoWithClass(proto_class, fin.read())
    # serialized protobufs are binary, so write in 'wb' mode
    with open(out_filename, 'wb') as fid:
        fid.write(proto.SerializeToString())
def GetGPUMemoryUsageStats():
"""Get GPU memory usage stats from CUDAContext. This requires flag
--caffe2_gpu_memory_tracking to be enabled"""
from caffe2.python import workspace, core
workspace.RunOperatorOnce(
core.CreateOperator(
"GetGPUMemoryUsage",
[],
["____mem____"],
device_option=core.DeviceOption(caffe2_pb2.CUDA, 0),
),
)
b = workspace.FetchBlob("____mem____")
return {
'total_by_gpu': b[0, :],
'max_by_gpu': b[1, :],
'total': np.sum(b[0, :]),
'max_total': np.sum(b[1, :])
}
def ResetBlobs(blobs):
from caffe2.python import workspace, core
workspace.RunOperatorOnce(
core.CreateOperator(
"Free",
list(blobs),
list(blobs),
device_option=core.DeviceOption(caffe2_pb2.CPU),
),
)
class DebugMode(object):
'''
    This class drops you into an interactive debugger if there is
    an unhandled exception in your Python script.
Example of usage:
def main():
# your code here
pass
if __name__ == '__main__':
from caffe2.python.utils import DebugMode
DebugMode.run(main)
'''
@classmethod
def run(cls, func):
try:
return func()
except KeyboardInterrupt:
raise
except Exception:
import pdb
print(
'Entering interactive debugger. Type "bt" to print '
'the full stacktrace. Type "help" to see command listing.')
print(sys.exc_info()[1])
            print()
            pdb.post_mortem()
            sys.exit(1)
def raiseIfNotEqual(a, b, msg):
if a != b:
raise Exception("{}. {} != {}".format(msg, a, b))
def debug(f):
'''
Use this method to decorate your function with DebugMode's functionality
Example:
@debug
def test_foo(self):
raise Exception("Bar")
'''
@functools.wraps(f)
def wrapper(*args, **kwargs):
def func():
return f(*args, **kwargs)
        return DebugMode.run(func)
return wrapper
|
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from future.moves.xmlrpc import client as xmlrpc_client
import os
import mock
from flexget.plugins.clients.rtorrent import RTorrent
torrent_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'private.torrent')
torrent_url = 'file:///%s' % torrent_file
torrent_info_hash = '09977FE761B8D293AD8A929CCAF2E9322D525A6C'
with open(torrent_file, 'rb') as tor_file:
torrent_raw = tor_file.read()
def compare_binary(obj1, obj2):
# Used to compare xmlrpclib.binary objects within a mocked call
if not isinstance(obj1, type(obj2)):
return False
if obj1.data != obj2.data:
return False
return True
class Matcher(object):
def __init__(self, compare, some_obj):
self.compare = compare
self.some_obj = some_obj
def __eq__(self, other):
return self.compare(self.some_obj, other)
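# Matcher lets mock's equality-based assertions apply a custom comparison;
# e.g. Matcher(compare_binary, xmlrpc_client.Binary(torrent_raw)) compares
# equal to any xmlrpc Binary carrying the same payload (used in test_load).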
@mock.patch('flexget.plugins.clients.rtorrent.xmlrpc_client.ServerProxy')
class TestRTorrentClient(object):
def test_version(self, mocked_proxy):
mocked_client = mocked_proxy()
mocked_client.system.client_version.return_value = '0.9.4'
client = RTorrent('http://localhost/RPC2')
assert client.version == [0, 9, 4]
assert mocked_client.system.client_version.called
def test_load(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.execute.throw.return_value = 0
mocked_proxy.load.raw_start.return_value = 0
client = RTorrent('http://localhost/RPC2')
resp = client.load(
torrent_raw,
fields={'priority': 3, 'directory': '/data/downloads', 'custom1': 'testing'},
start=True,
mkdir=True,
)
assert resp == 0
# Ensure mkdir was called
mocked_proxy.execute.throw.assert_called_with('', 'mkdir', '-p', '/data/downloads')
# Ensure load was called
assert mocked_proxy.load.raw_start.called
match_binary = Matcher(compare_binary, xmlrpc_client.Binary(torrent_raw))
called_args = mocked_proxy.load.raw_start.call_args_list[0][0]
assert len(called_args) == 5
assert '' == called_args[0]
assert match_binary in called_args
fields = [p for p in called_args[2:]]
assert len(fields) == 3
assert 'd.directory.set=\\/data\\/downloads' in fields
assert 'd.custom1.set=testing' in fields
assert 'd.priority.set=3' in fields
def test_torrent(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.system.multicall.return_value = [
['/data/downloads'], ['private.torrent'], [torrent_info_hash], ['test_custom1'], [123456]
]
client = RTorrent('http://localhost/RPC2')
torrent = client.torrent(torrent_info_hash, fields=['custom1', 'down_rate']) # Required fields should be added
assert isinstance(torrent, dict)
assert torrent.get('base_path') == '/data/downloads'
assert torrent.get('hash') == torrent_info_hash
assert torrent.get('custom1') == 'test_custom1'
assert torrent.get('name') == 'private.torrent'
assert torrent.get('down_rate') == 123456
        mocked_proxy.system.multicall.assert_called_with([
            {'params': (torrent_info_hash,), 'methodName': 'd.base_path'},
            {'params': (torrent_info_hash,), 'methodName': 'd.name'},
            {'params': (torrent_info_hash,), 'methodName': 'd.hash'},
            {'params': (torrent_info_hash,), 'methodName': 'd.custom1'},
            {'params': (torrent_info_hash,), 'methodName': 'd.down.rate'},
        ])
def test_torrents(self, mocked_proxy):
mocked_proxy = mocked_proxy()
hash1 = '09977FE761AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
hash2 = '09977FE761BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB'
mocked_proxy.d.multicall.return_value = (
['/data/downloads', 'private.torrent', hash1, 'test_custom1'],
['/data/downloads', 'private.torrent', hash2, 'test_custom2'],
)
client = RTorrent('http://localhost/RPC2')
torrents = client.torrents(fields=['custom1']) # Required fields should be added
assert isinstance(torrents, list)
for torrent in torrents:
assert torrent.get('base_path') == '/data/downloads'
assert torrent.get('name') == 'private.torrent'
if torrent.get('hash') == hash1:
assert torrent.get('custom1') == 'test_custom1'
elif torrent.get('hash') == hash2:
assert torrent.get('custom1') == 'test_custom2'
else:
assert False, 'Invalid hash returned'
        mocked_proxy.d.multicall.assert_called_with(
            ['main', 'd.directory_base=', 'd.name=', 'd.hash=', 'd.custom1=']
        )
def test_update(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.system.multicall.return_value = [[0]]
client = RTorrent('http://localhost/RPC2')
update_fields = {
'custom1': 'test_custom1',
'directory_base': '/data/downloads',
'priority': 3,
}
resp = client.update(torrent_info_hash, fields=update_fields)
assert resp == 0
        mocked_proxy.system.multicall.assert_called_with([
            {'params': (torrent_info_hash, '/data/downloads'), 'methodName': 'd.directory_base'},
            {'params': (torrent_info_hash, 'test_custom1'), 'methodName': 'd.custom1'},
            {'params': (torrent_info_hash, 3), 'methodName': 'd.priority'}
        ])
def test_delete(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.d.erase.return_value = 0
client = RTorrent('http://localhost/RPC2')
resp = client.delete(torrent_info_hash)
assert resp == 0
        mocked_proxy.d.erase.assert_called_with(torrent_info_hash)
def test_move(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.system.multicall.return_value = [
['private.torrent'], [torrent_info_hash], ['/data/downloads'],
]
mocked_proxy.move.return_value = 0
mocked_proxy.d.directory.set.return_value = 0
mocked_proxy.execute.throw.return_value = 0
client = RTorrent('http://localhost/RPC2')
client.move(torrent_info_hash, '/new/folder')
mocked_proxy.execute.throw.assert_has_calls([
mock.call('', 'mkdir', '-p', '/new/folder'),
mock.call('', 'mv', '-u', '/data/downloads', '/new/folder'),
])
def test_start(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.d.start.return_value = 0
client = RTorrent('http://localhost/RPC2')
resp = client.start(torrent_info_hash)
assert resp == 0
        mocked_proxy.d.start.assert_called_with(torrent_info_hash)
def test_stop(self, mocked_proxy):
mocked_proxy = mocked_proxy()
mocked_proxy.d.close.return_value = 0
mocked_proxy.d.stop.return_value = 0
client = RTorrent('http://localhost/RPC2')
resp = client.stop(torrent_info_hash)
assert resp == 0
        mocked_proxy.d.stop.assert_called_with(torrent_info_hash)
        mocked_proxy.d.close.assert_called_with(torrent_info_hash)
@mock.patch('flexget.plugins.clients.rtorrent.RTorrent')
class TestRTorrentOutputPlugin(object):
config = """
tasks:
test_add_torrent:
accept_all: yes
mock:
- {title: 'test', url: '""" + torrent_url + """'}
rtorrent:
action: add
start: yes
mkdir: yes
uri: http://localhost/SCGI
priority: high
path: /data/downloads
custom1: test_custom1
test_add_torrent_set:
accept_all: yes
set:
path: /data/downloads
custom1: test_custom1
priority: low
custom2: test_custom2
mock:
- {title: 'test', url: '""" + torrent_url + """'}
rtorrent:
action: add
start: no
mkdir: no
uri: http://localhost/SCGI
test_update:
accept_all: yes
set:
path: /data/downloads
priority: low
mock:
- {title: 'test', url: '""" + torrent_url + """', 'torrent_info_hash': '09977FE761B8D293AD8A929CCAF2E9322D525A6C'}
rtorrent:
action: update
uri: http://localhost/SCGI
custom1: test_custom1
test_update_path:
accept_all: yes
mock:
- {title: 'test', url: '""" + torrent_url + """', 'torrent_info_hash': '09977FE761B8D293AD8A929CCAF2E9322D525A6C'}
rtorrent:
action: update
custom1: test_custom1
uri: http://localhost/SCGI
path: /new/path
test_delete:
accept_all: yes
mock:
- {title: 'test', url: '""" + torrent_url + """', 'torrent_info_hash': '09977FE761B8D293AD8A929CCAF2E9322D525A6C'}
rtorrent:
action: delete
uri: http://localhost/SCGI
custom1: test_custom1
"""
def test_add(self, mocked_client, execute_task):
mocked_client = mocked_client()
mocked_client.load.return_value = 0
mocked_client.version = [0, 9, 4]
mocked_client.torrent.side_effect = [False, {'hash': torrent_info_hash}]
execute_task('test_add_torrent')
mocked_client.load.assert_called_with(
torrent_raw,
fields={'priority': 3, 'directory': '/data/downloads', 'custom1': 'test_custom1'},
start=True,
mkdir=True,
)
def test_add_set(self, mocked_client, execute_task):
mocked_client = mocked_client()
mocked_client.load.return_value = 0
mocked_client.version = [0, 9, 4]
mocked_client.torrent.side_effect = [False, {'hash': torrent_info_hash}]
execute_task('test_add_torrent_set')
mocked_client.load.assert_called_with(
torrent_raw,
fields={
'priority': 1,
'directory': '/data/downloads',
'custom1': 'test_custom1',
'custom2': 'test_custom2'
},
start=False,
mkdir=False,
)
def test_update(self, mocked_client, execute_task):
mocked_client = mocked_client()
mocked_client.version = [0, 9, 4]
mocked_client.update.return_value = 0
# ntpath complains on windows if base_path is a MagicMock
mocked_client.torrent.side_effect = [False, {'base_path': ''}]
execute_task('test_update')
mocked_client.update.assert_called_with(
torrent_info_hash,
{'priority': 1, 'custom1': 'test_custom1'}
)
def test_update_path(self, mocked_client, execute_task):
mocked_client = mocked_client()
mocked_client.version = [0, 9, 4]
mocked_client.update.return_value = 0
mocked_client.move.return_value = 0
mocked_client.torrent.return_value = {'base_path': '/some/path'}
execute_task('test_update_path')
mocked_client.update.assert_called_with(
torrent_info_hash,
{'custom1': 'test_custom1'}
)
mocked_client.move.assert_called_with(
torrent_info_hash,
'/new/path',
)
def test_delete(self, mocked_client, execute_task):
mocked_client = mocked_client()
mocked_client.load.return_value = 0
mocked_client.version = [0, 9, 4]
mocked_client.delete.return_value = 0
execute_task('test_delete')
mocked_client.delete.assert_called_with(torrent_info_hash)
@mock.patch('flexget.plugins.clients.rtorrent.RTorrent')
class TestRTorrentInputPlugin(object):
config = """
tasks:
test_input:
accept_all: yes
from_rtorrent:
uri: http://localhost/RPC2
view: complete
fields:
- custom1
- custom3
- down_rate
"""
def test_input(self, mocked_client, execute_task):
mocked_client = mocked_client()
mocked_client.version = [0, 9, 4]
mocked_torrent = {
'name': 'private.torrent',
'hash': torrent_info_hash,
'base_path': '/data/downloads/private',
'custom1': 'test_custom1',
'custom3': 'test_custom3',
'down_rate': 123456,
}
mocked_client.torrents.return_value = [mocked_torrent, mocked_torrent]
task = execute_task('test_input')
mocked_client.torrents.assert_called_with(
'complete',
fields=['custom1', 'custom3', 'down_rate'],
)
assert len(task.all_entries) == 2
for entry in task.entries:
assert entry['url'] == 'http://localhost/RPC2/%s' % torrent_info_hash
assert entry['name'] == 'private.torrent'
assert entry['torrent_info_hash'] == torrent_info_hash
assert entry['path'] == '/data/downloads/private'
assert entry['custom1'] == 'test_custom1'
assert entry['custom3'] == 'test_custom3'
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import operator
from nova import block_device
from nova.objects import block_device as block_device_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.volume import encryptors
LOG = logging.getLogger(__name__)
class _NotTransformable(Exception):
pass
class _InvalidType(_NotTransformable):
pass
class _NoLegacy(Exception):
pass
def update_db(method):
@functools.wraps(method)
def wrapped(obj, context, *args, **kwargs):
ret_val = method(obj, context, *args, **kwargs)
obj.save(context)
return ret_val
return wrapped
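# Sketch of the intended use (see DriverVolumeBlockDevice.attach below):
#   @update_db
#   def attach(self, context, ...):
#       ...  # mutates the driver dict; the decorator then saves the bdm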
class DriverBlockDevice(dict):
"""A dict subclass that represents block devices used by the virt layer.
Uses block device objects internally to do the database access.
_fields and _legacy_fields class attributes present a set of fields that
are expected on a certain DriverBlockDevice type. We may have more legacy
versions in the future.
If an attribute access is attempted for a name that is found in the
_proxy_as_attr set, it will be proxied to the underlying object. This
allows us to access stuff that is not part of the data model that all
drivers understand.
The save() method allows us to update the database using the underlying
object. _update_on_save class attribute dictionary keeps the following
mapping:
{'object field name': 'driver dict field name (or None if same)'}
These fields will be updated on the internal object, from the values in the
dict, before the actual database update is done.
"""
_fields = set()
_legacy_fields = set()
_proxy_as_attr = set()
_update_on_save = {'disk_bus': None,
'device_name': None,
'device_type': None}
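    # e.g. DriverVolumeBlockDevice below lists 'volume_id' in
    # _proxy_as_attr, so bdm.volume_id reads through to the underlying
    # BlockDeviceMapping object instead of the driver dict.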
def __init__(self, bdm):
# TODO(ndipanov): Remove this check when we have all the rpc methods
# use objects for block devices.
if isinstance(bdm, block_device_obj.BlockDeviceMapping):
self.__dict__['_bdm_obj'] = bdm
else:
self.__dict__['_bdm_obj'] = block_device_obj.BlockDeviceMapping()
self._bdm_obj.update(block_device.BlockDeviceDict(bdm))
self._bdm_obj.obj_reset_changes()
if self._bdm_obj.no_device:
raise _NotTransformable()
self.update(dict((field, None)
for field in self._fields))
self._transform()
def __getattr__(self, name):
if name in self._proxy_as_attr:
return getattr(self._bdm_obj, name)
else:
raise AttributeError("Cannot access %s on DriverBlockDevice "
"class" % name)
def __setattr__(self, name, value):
if name in self._proxy_as_attr:
return setattr(self._bdm_obj, name, value)
else:
raise AttributeError("Cannot access %s on DriverBlockDevice "
"class" % name)
def _transform(self):
"""Transform bdm to the format that is passed to drivers."""
raise NotImplementedError()
def legacy(self):
"""Basic legacy transformation.
Basic method will just drop the fields that are not in
_legacy_fields set. Override this in subclass if needed.
"""
return dict((key, self.get(key)) for key in self._legacy_fields)
def attach(self, **kwargs):
"""Make the device available to be used by VMs.
To be overridden in subclasses with the connecting logic for
the type of device the subclass represents.
"""
raise NotImplementedError()
def save(self, context):
for attr_name, key_name in self._update_on_save.iteritems():
setattr(self._bdm_obj, attr_name, self[key_name or attr_name])
self._bdm_obj.save(context)
class DriverSwapBlockDevice(DriverBlockDevice):
_fields = set(['device_name', 'swap_size', 'disk_bus'])
_legacy_fields = _fields - set(['disk_bus'])
_update_on_save = {'disk_bus': None,
'device_name': None}
def _transform(self):
if not block_device.new_format_is_swap(self._bdm_obj):
raise _InvalidType
self.update({
'device_name': self._bdm_obj.device_name,
'swap_size': self._bdm_obj.volume_size or 0,
'disk_bus': self._bdm_obj.disk_bus
})
class DriverEphemeralBlockDevice(DriverBlockDevice):
_new_only_fields = set(['disk_bus', 'device_type', 'guest_format'])
_fields = set(['device_name', 'size']) | _new_only_fields
_legacy_fields = (_fields - _new_only_fields |
set(['num', 'virtual_name']))
def _transform(self):
if not block_device.new_format_is_ephemeral(self._bdm_obj):
raise _InvalidType
self.update({
'device_name': self._bdm_obj.device_name,
'size': self._bdm_obj.volume_size or 0,
'disk_bus': self._bdm_obj.disk_bus,
'device_type': self._bdm_obj.device_type,
'guest_format': self._bdm_obj.guest_format
})
def legacy(self, num=0):
legacy_bdm = super(DriverEphemeralBlockDevice, self).legacy()
legacy_bdm['num'] = num
legacy_bdm['virtual_name'] = 'ephemeral' + str(num)
return legacy_bdm
class DriverVolumeBlockDevice(DriverBlockDevice):
_legacy_fields = set(['connection_info', 'mount_device',
'delete_on_termination'])
_new_fields = set(['guest_format', 'device_type',
'disk_bus', 'boot_index'])
_fields = _legacy_fields | _new_fields
_valid_source = 'volume'
_valid_destination = 'volume'
_proxy_as_attr = set(['volume_size', 'volume_id'])
_update_on_save = {'disk_bus': None,
'device_name': 'mount_device',
'device_type': None}
def _transform(self):
        if (self._bdm_obj.source_type != self._valid_source
                or self._bdm_obj.destination_type != self._valid_destination):
raise _InvalidType
self.update(
dict((k, v) for k, v in self._bdm_obj.iteritems()
if k in self._new_fields | set(['delete_on_termination']))
)
self['mount_device'] = self._bdm_obj.device_name
try:
self['connection_info'] = jsonutils.loads(
self._bdm_obj.connection_info)
except TypeError:
self['connection_info'] = None
@update_db
def attach(self, context, instance, volume_api, virt_driver,
do_check_attach=True, do_driver_attach=False):
volume = volume_api.get(context, self.volume_id)
if do_check_attach:
volume_api.check_attach(context, volume, instance=instance)
volume_id = volume['id']
context = context.elevated()
connector = virt_driver.get_volume_connector(instance)
connection_info = volume_api.initialize_connection(context,
volume_id,
connector)
if 'serial' not in connection_info:
connection_info['serial'] = self.volume_id
# If do_driver_attach is False, we will attach a volume to an instance
# at boot time. So actual attach is done by instance creation code.
if do_driver_attach:
encryption = encryptors.get_encryption_metadata(
context, volume_api, volume_id, connection_info)
try:
virt_driver.attach_volume(
context, connection_info, instance,
self['mount_device'], disk_bus=self['disk_bus'],
device_type=self['device_type'], encryption=encryption)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
LOG.exception(_("Driver failed to attach volume "
"%(volume_id)s at %(mountpoint)s"),
{'volume_id': volume_id,
'mountpoint': self['mount_device']},
context=context, instance=instance)
volume_api.terminate_connection(context, volume_id,
connector)
self['connection_info'] = connection_info
mode = 'rw'
if 'data' in connection_info:
mode = connection_info['data'].get('access_mode', 'rw')
volume_api.attach(context, volume_id, instance['uuid'],
self['mount_device'], mode=mode)
@update_db
def refresh_connection_info(self, context, instance,
volume_api, virt_driver):
# NOTE (ndipanov): A no-op if there is no connection info already
if not self['connection_info']:
return
connector = virt_driver.get_volume_connector(instance)
connection_info = volume_api.initialize_connection(context,
self.volume_id,
connector)
if 'serial' not in connection_info:
connection_info['serial'] = self.volume_id
self['connection_info'] = connection_info
def save(self, context):
# NOTE(ndipanov): we might want to generalize this by adding it to the
# _update_on_save and adding a transformation function.
try:
self._bdm_obj.connection_info = jsonutils.dumps(
self.get('connection_info'))
except TypeError:
pass
super(DriverVolumeBlockDevice, self).save(context)
class DriverSnapshotBlockDevice(DriverVolumeBlockDevice):
_valid_source = 'snapshot'
_proxy_as_attr = set(['volume_size', 'volume_id', 'snapshot_id'])
def attach(self, context, instance, volume_api,
virt_driver, wait_func=None):
if not self.volume_id:
snapshot = volume_api.get_snapshot(context,
self.snapshot_id)
vol = volume_api.create(context, self.volume_size,
'', '', snapshot)
if wait_func:
wait_func(context, vol['id'])
self.volume_id = vol['id']
# Call the volume attach now
super(DriverSnapshotBlockDevice, self).attach(context, instance,
volume_api, virt_driver)
class DriverImageBlockDevice(DriverVolumeBlockDevice):
_valid_source = 'image'
_proxy_as_attr = set(['volume_size', 'volume_id', 'image_id'])
def attach(self, context, instance, volume_api,
virt_driver, wait_func=None):
if not self.volume_id:
vol = volume_api.create(context, self.volume_size,
'', '', image_id=self.image_id)
if wait_func:
wait_func(context, vol['id'])
self.volume_id = vol['id']
super(DriverImageBlockDevice, self).attach(context, instance,
volume_api, virt_driver)
def _convert_block_devices(device_type, block_device_mapping):
def _is_transformable(bdm):
try:
device_type(bdm)
except _NotTransformable:
return False
return True
return [device_type(bdm)
for bdm in block_device_mapping
if _is_transformable(bdm)]
convert_swap = functools.partial(_convert_block_devices,
DriverSwapBlockDevice)
convert_ephemerals = functools.partial(_convert_block_devices,
DriverEphemeralBlockDevice)
convert_volumes = functools.partial(_convert_block_devices,
DriverVolumeBlockDevice)
convert_snapshots = functools.partial(_convert_block_devices,
DriverSnapshotBlockDevice)
convert_images = functools.partial(_convert_block_devices,
DriverImageBlockDevice)
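# Sketch (hypothetical bdm list): each converter keeps only the BDMs its
# device class can transform, e.g.
#   ephemerals = convert_ephemerals(bdms)
#   volumes = convert_volumes(bdms)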
def attach_block_devices(block_device_mapping, *attach_args, **attach_kwargs):
def _log_and_attach(bdm):
context = attach_args[0]
instance = attach_args[1]
LOG.audit(_('Booting with volume %(volume_id)s at %(mountpoint)s'),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
bdm.attach(*attach_args, **attach_kwargs)
map(_log_and_attach, block_device_mapping)
return block_device_mapping
def refresh_conn_infos(block_device_mapping, *refresh_args, **refresh_kwargs):
map(operator.methodcaller('refresh_connection_info',
*refresh_args, **refresh_kwargs),
block_device_mapping)
return block_device_mapping
def legacy_block_devices(block_device_mapping):
def _has_legacy(bdm):
try:
bdm.legacy()
except _NoLegacy:
return False
return True
bdms = [bdm.legacy()
for bdm in block_device_mapping
if _has_legacy(bdm)]
# Re-enumerate ephemeral devices
if all(isinstance(bdm, DriverEphemeralBlockDevice)
for bdm in block_device_mapping):
for i, dev in enumerate(bdms):
dev['virtual_name'] = dev['virtual_name'][:-1] + str(i)
dev['num'] = i
return bdms
def get_swap(transformed_list):
"""Get the swap device out of the list context.
The block_device_info needs swap to be a single device,
not a list - otherwise this is a no-op.
"""
if not all(isinstance(device, DriverSwapBlockDevice) or
'swap_size' in device
for device in transformed_list):
return transformed_list
try:
return transformed_list.pop()
except IndexError:
return None
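# Usage sketch (hypothetical): the single swap device for block_device_info
#   swap = get_swap(convert_swap(bdms))  # a DriverSwapBlockDevice or None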
_IMPLEMENTED_CLASSES = (DriverSwapBlockDevice, DriverEphemeralBlockDevice,
DriverVolumeBlockDevice, DriverSnapshotBlockDevice,
DriverImageBlockDevice)
def is_implemented(bdm):
for cls in _IMPLEMENTED_CLASSES:
try:
cls(bdm)
return True
except _NotTransformable:
pass
return False
|
|
""" path.py - An object representing a path to a file or directory.
Example:
from path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
This module requires Python 2.2 or later.
URL: http://www.jorendorff.com/articles/python/path
Author: Jason Orendorff <jason.orendorff\x40gmail\x2ecom> (and others - see the url!)
Date: 7 Mar 2004
"""
# TODO
# - Tree-walking functions don't avoid symlink loops. Matt Harrison sent me a patch for this.
# - Tree-walking functions can't ignore errors. Matt Harrison asked for this.
#
# - Two people asked for path.chdir(). This just seems wrong to me,
# I dunno. chdir() is moderately evil anyway.
#
# - Bug in write_text(). It doesn't support Universal newline mode.
# - Better error message in listdir() when self isn't a
# directory. (On Windows, the error message really sucks.)
# - Make sure everything has a good docstring.
# - Add methods for regex find and replace.
# - guess_content_type() method?
# - Perhaps support arguments to touch().
# - Could add split() and join() methods that generate warnings.
from __future__ import generators
import sys, warnings, os, fnmatch, glob, shutil, codecs, re
try:
import grp
except ImportError:
grp = None
__version__ = '2.1'
__all__ = ['path']
# Platform-specific support for path.owner
if os.name == 'nt':
try:
import win32security
except ImportError:
win32security = None
else:
try:
import pwd
except ImportError:
pwd = None
# Pre-2.3 support. Are unicode filenames supported?
_base = str
_getcwd = os.getcwd
try:
if os.path.supports_unicode_filenames:
_base = unicode
_getcwd = os.getcwdu
except AttributeError:
pass
# Pre-2.3 workaround for booleans
try:
True, False
except NameError:
True, False = 1, 0
# Pre-2.3 workaround for basestring.
try:
basestring
except NameError:
basestring = (str, unicode)
# Universal newline support
_textmode = 'r'
if hasattr(file, 'newlines'):
_textmode = 'U'
class PathWalkWarning(Warning):
pass
def _handleException(exc, mode, warningObject):
if mode == 'ignore':
return
elif mode == 'warn':
warnings.warn(warningObject.__class__(warningObject.message % dict(exc=exc)))
else:
raise exc
class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
@classmethod
def getcwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
@property
def drive(self):
"""
The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
"""
drv, r = os.path.splitdrive(self)
return self.__class__(drv)
@property
def parent(self):
"""
This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
"""
return self.dirname()
@property
def name(self):
"""
The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
"""
return self.basename()
@property
def namebase(self):
"""
The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
"""
base, ext = os.path.splitext(self.name)
return base
@property
def ext(self):
"""
The file extension, for example '.py'.
"""
f, extn = os.path.splitext(_base(self))
return extn
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
r""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, '/' or 'C:\\'). The other items in
the list will be strings.
path.path.joinpath(*result) will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
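    # e.g. (sketch, on POSIX): path('/a/b').relpathto('/a/c/d') == path('../c/d')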
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None, realpath=False):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see path.walkdirs).
With the optional 'pattern' argument, this only lists
directories whose names match the given pattern. For
example, d.dirs('build-*').
"""
if realpath:
return [p.realpath() for p in self.listdir(pattern) if p.isdir()]
else:
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see path.walkfiles).
With the optional 'pattern' argument, this only lists files
whose names match the given pattern. For example,
d.files('*.pyc').
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict', regex=None ):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception, exc:
_handleException(exc,errors,
PathWalkWarning("Unable to list directory '%s': %%(exc)s" % self ))
else:
for child in childList:
if ( pattern is None or child.fnmatch(pattern) ) and ( regex is None or re.match( regex, child.name ) ):
yield child
                try:
                    isdir = child.isdir()
                except Exception, exc:
                    # treat inaccessible children as non-directories so a
                    # stale or undefined 'isdir' is never reused
                    isdir = False
                    _handleException(exc,errors,PathWalkWarning("Unable to access '%s': %%(exc)s" % self))
                if isdir:
for item in child.walk(pattern=pattern, errors=errors, regex=regex):
yield item
def walkdirs(self, pattern=None, errors='strict', realpath=False, regex=None):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional 'pattern' argument, this yields only
directories whose names match the given pattern. For
example, mydir.walkdirs('*test') yields only directories
with names ending in 'test'.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs(realpath=realpath)
except Exception, exc:
_handleException(exc,errors,PathWalkWarning("Unable to list directory '%s': %%(exc)s" % self))
else:
parent_realpath = None
for child in dirs:
if ( pattern is None or child.fnmatch(pattern) ) and ( regex is None or re.match( regex, child.name ) ):
if child.islink():
if parent_realpath is None:
parent_realpath = self.realpath()
if realpath:
child_realpath = child
else:
child_realpath = child.realpath()
# check for infinite recursion
if child_realpath == parent_realpath or parent_realpath.startswith( child_realpath + os.path.sep ):
#print "skipping %s to prevent infinite recursion" % child
continue
else:
yield child
else:
yield child
for subsubdir in child.walkdirs(pattern, errors=errors, realpath=realpath, regex=regex):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict', regex=None):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
mydir.walkfiles('*.tmp') yields only files with the .tmp
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception, exc:
_handleException(exc,errors,PathWalkWarning("Unable to list directory '%s': %%(exc)s" % self))
else:
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except Exception, exc:
_handleException(exc,errors,PathWalkWarning("Unable to access '%s': %%(exc)s" % self) )
else:
if isfile:
if ( pattern is None or child.fnmatch(pattern) ) and ( regex is None or re.match( regex, child.name ) ):
yield child
elif isdir:
for f in child.walkfiles(pattern=pattern, errors=errors, regex=regex):
yield f
def fnmatch(self, pattern):
""" Return True if self.name matches the given pattern.
pattern - A filename pattern with wildcards,
for example '*.py'.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self):
""" Calculate the md5 hash for this file.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = None
try:
# Python 2.5+: the standalone md5 module is deprecated in favor of hashlib.
import hashlib
m = hashlib.md5()
except ImportError:
import md5
m = md5.new()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
return m.digest()
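# Illustrative sketch, not part of the original module: read_md5()
# returns the raw 16-byte digest; hex-encode it for display. The file
# name is hypothetical.
#
#   import binascii
#   print binascii.hexlify(path('archive.tar').read_md5())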
# --- Methods for querying the filesystem.
exists = os.path.exists
isdir = os.path.isdir
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
def samepath(self, otherpath):
"""Whether the other path represents the same path as this one.
This will account for symbolic links, absolute/relative paths,
case differences (if on a case-insensitive file system), and '..'
usage (so paths such as A//B, A/./B and A/foo/../B will all compare equal).
This will NOT account for hard links - use 'samefile' for this, if
available on your os.
Essentially just compares self.canonicalpath() to other.canonicalpath().
"""
return self.canonicalpath() == self.__class__(otherpath).canonicalpath()
def canonicalpath(self):
"""Attempt to return a 'canonical' version of the path
This will standardize for symbolic links, absolute/relative paths,
case differences (if on a case-insensitive file system), and '..'
usage (so paths such as A//B, A/./B and A/foo/../B will all compare equal).
The intention is that string comparison of canonical paths will yield
a reasonable guess as to whether two paths represent the same file.
"""
return self.__class__(self.abspath().realpath().normpath().normcase())
def truepath(self):
"""The absolute, real, normalized path.
Shortcut for .abspath().realpath().normpath()
Unlike canonicalpath, this does not normalize case, so on a
case-insensitive filesystem two different truepaths may refer to the
same file. Use it only where a "normal" path from root is desired
while preserving case; where comparison is desired, use
canonicalpath (or samepath) instead.
"""
return self.__class__(self.abspath().realpath().normpath())
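# Illustrative sketch, not part of the original module: the docstring's
# equivalent spellings compare equal once canonicalized (assuming 'A' is
# a real, non-symlinked directory).
#
#   a = path('A/foo/../B')
#   b = path('A/./B')
#   assert a.canonicalpath() == b.canonicalpath()
#   assert a.samepath(b)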
getatime = os.path.getatime
atime = property(
getatime, None, None,
""" Last access time of the file. """)
getmtime = os.path.getmtime
mtime = property(
getmtime, None, None,
""" Last-modified time of the file. """)
if hasattr(os.path, 'getctime'):
getctime = os.path.getctime
ctime = property(
getctime, None, None,
""" Creation time of the file. """)
getsize = os.path.getsize
size = property(
getsize, None, None,
""" Size of the file, in bytes. """)
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def get_owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
owner = property(
get_owner, None, None,
""" Name of the owner of this file or directory. """)
if grp:
def get_groupname(self):
"""get the group name for this file"""
return grp.getgrgid(self.stat().st_gid).gr_name
groupname = property(get_groupname)
def chgrp(self, group):
"""Change the group of this path; 'group' may be a gid or a group name."""
if isinstance(group, basestring):
group = grp.getgrnam(group).gr_gid
os.chown(self, -1, group)
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
def chmod(self, mode):
os.chmod(self, mode)
if hasattr(os, 'chown'):
def chown(self, uid, gid):
os.chown(self, uid, gid)
def rename(self, new):
os.rename(self, new)
def renames(self, new):
os.renames(self, new)
# --- Create/delete operations on directories
def mkdir(self, mode=0777):
os.mkdir(self, mode)
def makedirs(self, mode=0777):
os.makedirs(self, mode)
def rmdir(self):
os.rmdir(self)
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
def remove(self):
os.remove(self)
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = shutil.copyfile
copymode = shutil.copymode
copystat = shutil.copystat
copy = shutil.copy
copy2 = shutil.copy2
copytree = shutil.copytree
if hasattr(shutil, 'move'):
move = shutil.move
rmtree = shutil.rmtree
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
# migrating to PEP8 compliance
Path = path
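# Illustrative sketch, not part of the original module: typical chained
# use of the class, assuming it defines '/' as path joining (as the
# classic path module does). Paths are hypothetical.
#
#   p = Path('/tmp') / 'demo.txt'
#   p.write_text(u'hello\n', encoding='utf-8')
#   if p.exists():
#       print p.size
#   p.remove()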
|
|
"""Copyright 2021 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import logging
import os
import sys
import time
from typing import Any, Dict
import yaml
from oc_config_validate import (context, formatter, runner, schema, target,
testbase)
__version__ = "2.0.0"
LOGGING_FORMAT = "%(levelname)s(%(filename)s:%(lineno)d):%(message)s"
def createArgsParser() -> argparse.ArgumentParser:
"""Create parser for arguments passed into the program from the CLI.
Returns:
argparse.ArgumentParser object.
"""
parser = argparse.ArgumentParser(
description="OpenConfig Configuration Validation utility.")
parser.add_argument(
"-tgt",
"--target",
type=str,
help="The gNMI Target, as hostname:port.",
)
parser.add_argument(
"-user",
"--username",
type=str,
help="Username to use when establishing a gNMI Channel to the Target.",
)
parser.add_argument(
"-pass",
"--password",
type=str,
help="Password to use when establishing a gNMI Channel to the Target.",
)
parser.add_argument(
"-key",
"--private_key",
type=str,
help="Path to the Private key to use when establishing"
"a gNMI Channel to the Target.",
)
parser.add_argument(
"-ca",
"--root_ca_cert",
type=str,
help="Path to Root CA to use when building the gNMI Channel.",
)
parser.add_argument(
"-cert",
"--cert_chain",
type=str,
help="Path to Certificate chain to use when"
"establishing a gNMI Channel to the Target.")
parser.add_argument(
"-tests",
"--tests_file",
type=str,
action="store",
help="YAML file to read the test to run.")
parser.add_argument(
"-init",
"--init_config_file",
type=str,
action="store",
help="JSON file with the initial full OpenConfig configuration to "
"apply.")
parser.add_argument(
"-xpath",
"--init_config_xpath",
type=str,
action="store",
help="gNMI xpath where to apply the initial config.",
default="/")
parser.add_argument(
"-results",
"--results_file",
type=str,
action="store",
help="Filename where to write the test results.")
parser.add_argument(
"-f",
"--format",
type=str,
action="store",
help="Format "
"of the GetResponse to be printed. Default=JSON.",
choices=["json", "protobuff"],
default="json")
parser.add_argument(
"-v", "--version", help="Print program version", action="store_true")
parser.add_argument(
"-V", "--verbose", help="Enable gRPC debugging and extra logging.",
action="store_true")
parser.add_argument(
"-models", "--oc_models_versions", help="Print OC models versions.",
action="store_true")
parser.add_argument(
"--no_tls", help="gRPC insecure mode.", action="store_true")
parser.add_argument(
"-o",
"--tls_host_override",
type=str,
action="store",
help="Hostname to use during the TLS certificate check.",
)
parser.add_argument(
"-set_cooldown",
"--gnmi_set_cooldown_secs",
type=int,
action="store",
help="Seconds to wait after a successful gNMI Set message.",
)
parser.add_argument(
"--stop_on_error",
action="store_true",
help="Stop the execution if a test fails.",
)
parser.add_argument(
"--log_gnmi",
action="store_true",
help="Log the gnmi requests to the tests results.",
)
return parser
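# Illustrative sketch, not part of the original module: parsing a
# hypothetical CLI invocation into the flat args dict that main() uses.
#
#   parser = createArgsParser()
#   args = vars(parser.parse_args([
#       "-tgt", "device:9339",
#       "-tests", "tests.yaml",
#       "-results", "results.json",
#   ]))
#   assert args["target"] == "device:9339"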
def validateArgs(args: Dict[str, Any]):
"""Returns True if the arguments are valid.
Raises:
ValueError if any argument is invalid.
IOError is unable to open a file given in argument.
"""
def isFileOK(filename: str, writable: bool = False):
try:
file = open(filename, "w+" if writable else "r", encoding="utf8")
file.close()
except IOError as io_error:
logging.error("Unable to open %s: %s", filename, io_error)
raise
# Mandatory args for tests
for arg, write in [("tests_file", False), ("results_file", True)]:
if not args[arg]:
raise ValueError("Needed --%s file" % arg)
isFileOK(args[arg], write)
if args["init_config_file"]:
isFileOK(args["init_config_file"], False)
# Output format supported
if (args["format"] and
args["format"].lower() not in formatter.SUPPORTED_FORMATS):
raise ValueError("Output format %s is not supported.")
def main(): # noqa
"""Executes this library."""
argparser = createArgsParser()
args = vars(argparser.parse_args())
if args["version"]:
print(__version__)
sys.exit()
if args["oc_models_versions"]:
print(schema.getOcModelsVersions())
sys.exit()
if args["verbose"]:
# os.environ["GRPC_TRACE"] = "all"
os.environ["GRPC_VERBOSITY"] = "DEBUG"
logging.basicConfig(
level=logging.DEBUG if args["verbose"] else logging.INFO,
format=LOGGING_FORMAT)
try:
validateArgs(args)
except (IOError, ValueError) as error:
sys.exit("Invalid arguments: %s" % error)
if args["log_gnmi"]:
testbase.LOG_GNMI = args["log_gnmi"]
try:
ctx = context.fromFile(args["tests_file"])
except IOError as io_error:
sys.exit("Unable to read %s: %s" % (args["tests_file"], io_error))
except yaml.YAMLError as yaml_error:
sys.exit("Unable to parse YAML file %s: %s" % (args["tests_file"],
yaml_error))
logging.info("Read tests file '%s': %d tests to run",
args["tests_file"], len(ctx.tests))
if not ctx.target:
ctx.target = context.Target()
# Override Target options
for arg in ["target", "username", "password", "no_tls", "private_key",
"cert_chain", "root_ca_cert", "tls_host_override",
"gnmi_set_cooldown_secs"]:
if args[arg]:
setattr(ctx.target, arg, args[arg])
tgt = target.TestTarget(ctx.target)
try:
tgt.validate()
except ValueError as error:
sys.exit("Invalid Target: %s" % error)
logging.info("Testing gNMI Target %s.", tgt)
if tgt.gnmi_set_cooldown_secs:
logging.info("Using gNMI Set Cooldown of %d secs",
tgt.gnmi_set_cooldown_secs)
# Apply initial configuration
if args["init_config_file"]:
ctx.init_configs.append(context.InitConfig(args["init_config_file"],
args["init_config_xpath"]))
if not runner.setInitConfigs(ctx, tgt,
stop_on_error=args["stop_on_error"]):
sys.exit(1)
start_t = time.time()
results = runner.runTests(ctx, tgt, stop_on_error=args["stop_on_error"])
end_t = time.time()
test_run = testbase.TestRun(ctx)
test_run.copyResults(results, start_t, end_t)
logging.info("Results Summary: %s", test_run.summary())
try:
fmtr = formatter.makeFormatter(args["format"])
fmtr.writeResultsToFile(test_run, args["results_file"])
logging.info("Test results written to %s", args["results_file"])
except IOError as io_error:
logging.exception("Unable to write file %s: %s", args["results_file"],
io_error)
except TypeError as type_error:
logging.exception("Unable to parse results into a JSON text: %s",
type_error)
if __name__ == "__main__":
main()
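# Illustrative invocation, not part of the original module; the target
# address and file names are hypothetical:
#
#   python -m oc_config_validate -tgt device:9339 -user admin -pass secret \
#       -tests tests.yaml -results results.json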
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for distributed_epoch processing mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.service import test_base as data_service_test_base
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class DistributedEpochTest(data_service_test_base.TestBase,
parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testBasic(self):
cluster = data_service_test_base.TestCluster(num_workers=2)
num_elements = 100
ds = dataset_ops.Dataset.range(num_elements)
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
self.assertDatasetProduces(
ds, list(range(num_elements)), assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testTensorSlices(self):
cluster = data_service_test_base.TestCluster(num_workers=2)
vals = [5, 1, 2, 4]
ds = dataset_ops.Dataset.from_tensor_slices(vals)
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
self.assertDatasetProduces(ds, vals, assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testInterleave(self):
cluster = data_service_test_base.TestCluster(num_workers=2)
elements = [1, 5, 0]
ds = dataset_ops.Dataset.from_tensor_slices(elements)
ds = ds.interleave(lambda x: dataset_ops.Dataset.from_tensor_slices([x]))
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
self.assertDatasetProduces(ds, elements, assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testParallelInterleave(self):
cluster = data_service_test_base.TestCluster(num_workers=2)
elements = [1, 5, 0]
ds = dataset_ops.Dataset.from_tensor_slices(elements)
ds = ds.interleave(
lambda x: dataset_ops.Dataset.from_tensor_slices([x]),
num_parallel_calls=dataset_ops.AUTOTUNE)
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
self.assertDatasetProduces(ds, elements, assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testFlatMap(self):
cluster = data_service_test_base.TestCluster(num_workers=2)
elements = [1, 5, 0]
ds = dataset_ops.Dataset.from_tensor_slices(elements)
ds = ds.flat_map(lambda x: dataset_ops.Dataset.from_tensor_slices([x]))
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
self.assertDatasetProduces(ds, elements, assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testRepeat(self):
cluster = data_service_test_base.TestCluster(num_workers=2)
num_repeats = 5
num_elements = 20
ds = dataset_ops.Dataset.range(num_elements).repeat(num_repeats)
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
self.assertDatasetProduces(
ds, num_repeats * list(range(num_elements)), assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testForeverRepeat(self):
cluster = data_service_test_base.TestCluster(num_workers=2)
num_elements = 20
elements_to_read = 1000
ds = dataset_ops.Dataset.range(num_elements).repeat()
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
get_next = self.getNext(ds)
results = {}
for _ in range(elements_to_read):
val = self.evaluate(get_next())
if val not in results:
results[val] = 0
results[val] += 1
for i in range(num_elements):
self.assertGreater(results[i], elements_to_read / num_elements / 2)
@combinations.generate(test_base.default_test_combinations())
def testForeverRepeatFewElements(self):
num_workers = 5
cluster = data_service_test_base.TestCluster(num_workers=num_workers)
# Less than the number of workers, so that some workers get zero elements on
# the first repetition.
num_elements = 1
ds = dataset_ops.Dataset.range(num_elements).repeat()
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
get_next = self.getNext(ds)
for _ in range(20):
self.assertEqual(self.evaluate(get_next()), 0)
# Stop all but one worker and check that we can still read.
for i in range(num_workers - 1):
cluster.workers[i].stop()
for _ in range(20):
self.assertEqual(self.evaluate(get_next()), 0)
@combinations.generate(test_base.default_test_combinations())
def testShuffleAndRepeat(self):
cluster = data_service_test_base.TestCluster(num_workers=2)
num_repeats = 5
num_elements = 20
ds = dataset_ops.Dataset.range(num_elements).shuffle(num_elements).repeat(
num_repeats)
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
self.assertDatasetProduces(
ds, num_repeats * list(range(num_elements)), assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testZip(self):
num_elements = 10
cluster = data_service_test_base.TestCluster(num_workers=1)
a = dataset_ops.Dataset.range(num_elements)
ds = dataset_ops.Dataset.zip((a, a))
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
self.assertDatasetProduces(
ds, list(zip(range(num_elements), range(num_elements))))
@combinations.generate(test_base.default_test_combinations())
def testNestedZip(self):
num_elements = 10
cluster = data_service_test_base.TestCluster(num_workers=1)
a = dataset_ops.Dataset.range(num_elements)
ds = dataset_ops.Dataset.zip((a, a))
ds = dataset_ops.Dataset.zip((a, a, ds, a))
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
b = list(range(10))
self.assertDatasetProduces(ds, list(zip(b, b, zip(b, b), b)))
@combinations.generate(test_base.default_test_combinations())
def testImbalancedZip(self):
smaller_num_elements = 200
larger_num_elements = 1000
cluster = data_service_test_base.TestCluster(num_workers=1)
a = dataset_ops.Dataset.range(smaller_num_elements)
b = dataset_ops.Dataset.range(larger_num_elements)
ds = dataset_ops.Dataset.zip((a, b))
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
self.assertDatasetProduces(
ds, list(zip(range(smaller_num_elements), range(smaller_num_elements))))
@combinations.generate(test_base.default_test_combinations())
def testImbalancedZipMultiWorker(self):
smaller_num_elements = 200
larger_num_elements = 1000
cluster = data_service_test_base.TestCluster(num_workers=3)
a = dataset_ops.Dataset.range(smaller_num_elements)
b = dataset_ops.Dataset.range(larger_num_elements)
ds = dataset_ops.Dataset.zip((a, b))
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
# Cannot assert specific elements because the range datasets are split
# nondeterministically and may not line up.
self.assertLen(self.getDatasetOutput(ds), smaller_num_elements)
@combinations.generate(test_base.default_test_combinations())
def testZipDifferentRates(self):
cluster = data_service_test_base.TestCluster(num_workers=3)
a = dataset_ops.Dataset.range(100)
b = dataset_ops.Dataset.range(100).filter(
lambda x: math_ops.equal(x % 10, 0))
ds = dataset_ops.Dataset.zip((a, b))
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
self.assertLen(self.getDatasetOutput(ds), 10)
@combinations.generate(test_base.default_test_combinations())
def testZipDifferentRepeats(self):
cluster = data_service_test_base.TestCluster(num_workers=3)
a = dataset_ops.Dataset.range(50)
b = dataset_ops.Dataset.range(10).repeat(10)
ds = dataset_ops.Dataset.zip((a, b))
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
self.assertLen(self.getDatasetOutput(ds), 50)
@combinations.generate(test_base.default_test_combinations())
def testSampleFromDatasets(self):
cluster = data_service_test_base.TestCluster(num_workers=3)
num_samples = 200
weights = [.6, .3, .1]
classes = len(weights)
# Create a dataset that samples each integer in `[0, classes)`
# with probability given by `weights[i]`.
ds = interleave_ops.sample_from_datasets(
[dataset_ops.Dataset.from_tensors(i).repeat() for i in range(classes)],
weights)
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
ds = ds.take(num_samples)
freqs = np.zeros([classes])
for v in self.getDatasetOutput(ds):
freqs[v] += 1
self.assertGreater(freqs[0], freqs[1])
self.assertGreater(freqs[1], freqs[2])
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_workers=[1, 3])))
def testChooseFromDatasets(self, num_workers):
cluster = data_service_test_base.TestCluster(num_workers=num_workers)
words = [b"foo", b"bar", b"baz"]
datasets = [dataset_ops.Dataset.from_tensors(w).repeat() for w in words]
choice_array = np.random.randint(3, size=(15,), dtype=np.int64)
choice_dataset = dataset_ops.Dataset.from_tensor_slices(choice_array)
ds = interleave_ops.choose_from_datasets(datasets, choice_dataset)
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
expected = [words[i] for i in choice_array]
assert_items_equal = (num_workers > 1)
self.assertDatasetProduces(
ds, expected, assert_items_equal=assert_items_equal)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_workers=[1, 3])))
def testConcatenate(self, num_workers):
cluster = data_service_test_base.TestCluster(num_workers=num_workers)
a = dataset_ops.Dataset.range(100)
b = dataset_ops.Dataset.range(100, 200)
ds = a.concatenate(b)
ds = self.make_distributed_dataset(
ds, cluster, processing_mode="distributed_epoch")
assert_items_equal = (num_workers > 1)
self.assertDatasetProduces(
ds, list(range(200)), assert_items_equal=assert_items_equal)
@combinations.generate(test_base.default_test_combinations())
def testDistributedDataset(self):
cluster_1 = data_service_test_base.TestCluster(num_workers=1)
cluster_2 = data_service_test_base.TestCluster(num_workers=1)
num_sizes = 10
size_repeats = 5
numbers = [1 * i for i in range(num_sizes)] * size_repeats
ds = dataset_ops.Dataset.from_tensor_slices(numbers)
ds = self.make_distributed_dataset(
ds, cluster_1, processing_mode="parallel_epochs")
ds = ds.map(lambda x: x + 1)
ds = self.make_distributed_dataset(
ds, cluster_2, processing_mode="distributed_epoch")
error_regex = ("Cannot create split providers for dataset " +
"of type DataServiceDataset")
with self.assertRaisesRegex(errors.UnimplementedError, error_regex):
self.getDatasetOutput(ds)
if __name__ == "__main__":
test.main()
|
|
from itertools import combinations
import numpy as np
from pyparsing import alphas, Combine, Literal, Optional, nums, Word
from pgmpy.models import BayesianNetwork, MarkovNetwork
from pgmpy.factors.discrete import TabularCPD, DiscreteFactor
class UAIReader(object):
"""
Class for reading UAI file format from files or strings.
"""
def __init__(self, path=None, string=None):
"""
Initialize an instance of UAI reader class
Parameters
----------
path : file or str
Path of the file containing UAI information.
string : str
String containing UAI information.
Examples
--------
>>> from pgmpy.readwrite import UAIReader
>>> reader = UAIReader('TestUai.uai')
>>> model = reader.get_model()
Reference
---------
http://graphmod.ics.uci.edu/uai08/FileFormat
"""
if path:
with open(path, "r") as f:
self.network = f.read()
elif string:
self.network = string
else:
raise ValueError("Must specify either path or string.")
self.grammar = self.get_grammar()
self.network_type = self.get_network_type()
self.variables = self.get_variables()
self.domain = self.get_domain()
self.edges = self.get_edges()
self.tables = self.get_tables()
def get_grammar(self):
"""
Returns the grammar of the UAI file.
"""
network_name = Word(alphas).setResultsName("network_name")
no_variables = Word(nums).setResultsName("no_variables")
grammar = network_name + no_variables
self.no_variables = int(grammar.parseString(self.network)["no_variables"])
domain_variables = (Word(nums) * self.no_variables).setResultsName(
"domain_variables"
)
grammar += domain_variables
no_functions = Word(nums).setResultsName("no_functions")
grammar += no_functions
self.no_functions = int(grammar.parseString(self.network)["no_functions"])
integer = Word(nums).setParseAction(lambda t: int(t[0]))
for function in range(0, self.no_functions):
scope_grammar = Word(nums).setResultsName("fun_scope_" + str(function))
grammar += scope_grammar
function_scope = grammar.parseString(self.network)[
"fun_scope_" + str(function)
]
function_grammar = ((integer) * int(function_scope)).setResultsName(
"fun_" + str(function)
)
grammar += function_grammar
floatnumber = Combine(
Word(nums) + Optional(Literal(".") + Optional(Word(nums)))
)
for function in range(0, self.no_functions):
no_values_grammar = Word(nums).setResultsName(
"fun_no_values_" + str(function)
)
grammar += no_values_grammar
no_values = grammar.parseString(self.network)[
"fun_no_values_" + str(function)
]
values_grammar = ((floatnumber) * int(no_values)).setResultsName(
"fun_values_" + str(function)
)
grammar += values_grammar
return grammar
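# Illustrative sketch, not part of the original module: the UAI preamble
# layout this grammar consumes, for a hypothetical 3-variable MARKOV
# network matching the docstring examples below.
#
#   MARKOV        <- network type
#   3             <- number of variables
#   2 2 3         <- cardinality of each variable
#   3             <- number of functions
#   2 0 1         <- scope of function 0: 2 variables, indices 0 and 1
#   ...           <- remaining scopes, then one value table per function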
def get_network_type(self):
"""
Returns the type of network defined by the file.
Returns
-------
string : str
String containing network type.
Examples
--------
>>> from pgmpy.readwrite import UAIReader
>>> reader = UAIReader('TestUAI.uai')
>>> reader.get_network_type()
'MARKOV'
"""
network_type = self.grammar.parseString(self.network)
return network_type["network_name"]
def get_variables(self):
"""
Returns a list of variables.
Each variable is represented by an index of list.
For example, if the number of variables is 4, the list will be
[var_0, var_1, var_2, var_3].
Returns
-------
list: list of variables
Examples
--------
>>> from pgmpy.readwrite import UAIReader
>>> reader = UAIReader('TestUAI.uai')
>>> reader.get_variables()
['var_0', 'var_1', 'var_2']
"""
variables = []
for var in range(0, self.no_variables):
var_name = "var_" + str(var)
variables.append(var_name)
return variables
def get_domain(self):
"""
Returns the dictionary of variables with keys as variable name
and values as domain of the variables.
Returns
-------
dict: dictionary containing variables and their domains
Examples
--------
>>> from pgmpy.readwrite import UAIReader
>>> reader = UAIReader('TestUAI.uai')
>>> reader.get_domain()
{'var_0': '2', 'var_1': '2', 'var_2': '3'}
"""
domain = {}
var_domain = self.grammar.parseString(self.network)["domain_variables"]
for var in range(0, len(var_domain)):
domain["var_" + str(var)] = var_domain[var]
return domain
def get_edges(self):
"""
Returns the edges of the network.
Returns
-------
set: set containing the edges of the network
Examples
--------
>>> from pgmpy.readwrite import UAIReader
>>> reader = UAIReader('TestUAI.uai')
>>> reader.get_edges()
{('var_0', 'var_1'), ('var_0', 'var_2'), ('var_1', 'var_2')}
"""
edges = []
for function in range(0, self.no_functions):
function_variables = self.grammar.parseString(self.network)[
"fun_" + str(function)
]
if isinstance(function_variables, int):
function_variables = [function_variables]
if self.network_type == "BAYES":
child_var = "var_" + str(function_variables[-1])
function_variables = function_variables[:-1]
for var in function_variables:
edges.append((child_var, "var_" + str(var)))
elif self.network_type == "MARKOV":
function_variables = ["var_" + str(var) for var in function_variables]
edges.extend(list(combinations(function_variables, 2)))
return set(edges)
def get_tables(self):
"""
Returns a list of (child variable, values) tuples for Bayesian
networks, or a list of (variable scope, values) tuples for Markov
networks.
Returns
-------
list : (child variable, values) tuples in the Bayesian case;
(variable scope, values) tuples in the Markov case.
Examples
--------
>>> from pgmpy.readwrite import UAIReader
>>> reader = UAIReader('TestUAI.uai')
>>> reader.get_tables()
[(['var_0', 'var_1'], ['4.000', '2.400', '1.000', '0.000']),
(['var_0', 'var_1', 'var_2'],
['2.2500', '3.2500', '3.7500', '0.0000', '0.0000', '10.0000',
'1.8750', '4.0000', '3.3330', '2.0000', '2.0000', '3.4000'])]
"""
tables = []
for function in range(0, self.no_functions):
function_variables = self.grammar.parseString(self.network)[
"fun_" + str(function)
]
if isinstance(function_variables, int):
function_variables = [function_variables]
if self.network_type == "BAYES":
child_var = "var_" + str(function_variables[-1])
values = self.grammar.parseString(self.network)[
"fun_values_" + str(function)
]
tables.append((child_var, list(values)))
elif self.network_type == "MARKOV":
function_variables = ["var_" + str(var) for var in function_variables]
values = self.grammar.parseString(self.network)[
"fun_values_" + str(function)
]
tables.append((function_variables, list(values)))
return tables
def get_model(self):
"""
Returns an instance of Bayesian Model or Markov Model.
Variables follow the pattern var_0, var_1, var_2, ..., where var_0
is the variable at index 0, var_1 the variable at index 1, and so on.
Returns
-------
model: an instance of Bayesian or Markov Model.
Examples
--------
>>> from pgmpy.readwrite import UAIReader
>>> reader = UAIReader('TestUAI.uai')
>>> reader.get_model()
"""
if self.network_type == "BAYES":
model = BayesianNetwork()
model.add_nodes_from(self.variables)
model.add_edges_from(self.edges)
tabular_cpds = []
for child_var, values in self.tables:
states = int(self.domain[child_var])
values = np.fromiter(values, dtype=float)
values = values.reshape(states, values.size // states)
tabular_cpds.append(TabularCPD(child_var, states, values))
model.add_cpds(*tabular_cpds)
return model
elif self.network_type == "MARKOV":
model = MarkovNetwork(self.edges)
factors = []
for table in self.tables:
variables = table[0]
cardinality = [int(self.domain[var]) for var in variables]
value = list(map(float, table[1]))
factor = DiscreteFactor(
variables=variables, cardinality=cardinality, values=value
)
factors.append(factor)
model.add_factors(*factors)
return model
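# Illustrative sketch, not part of the original module: building a model
# from an in-memory UAI string instead of a file path.
#
#   uai_text = open('TestUAI.uai').read()   # hypothetical file
#   model = UAIReader(string=uai_text).get_model()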
class UAIWriter(object):
"""
Class for writing models in UAI.
"""
def __init__(self, model):
"""
Initialize an instance of UAI writer class
Parameters
----------
model: A Bayesian or Markov model
The model to write
Examples
--------
>>> from pgmpy.readwrite import UAIWriter
>>> from pgmpy.utils import get_example_model
>>> model = get_example_model('asia')
>>> writer = UAIWriter(model)
>>> writer.write_uai('asia.uai')
"""
if isinstance(model, BayesianNetwork):
self.network = "BAYES\n"
elif isinstance(model, MarkovNetwork):
self.network = "MARKOV\n"
else:
raise TypeError("Model must be an instance of Bayesian or Markov model.")
self.model = model
self.no_nodes = self.get_nodes()
self.domain = self.get_domain()
self.functions = self.get_functions()
self.tables = self.get_tables()
def __str__(self):
"""
Returns the UAI file as a string.
"""
self.network += self.no_nodes + "\n"
domain = sorted(self.domain.items(), key=lambda x: (x[1], x[0]))
self.network += " ".join([var[1] for var in domain]) + "\n"
self.network += str(len(self.functions)) + "\n"
for fun in self.functions:
self.network += str(len(fun)) + " "
self.network += " ".join(fun) + "\n"
self.network += "\n"
for table in self.tables:
self.network += str(len(table)) + "\n"
self.network += " ".join(table) + "\n"
return self.network[:-1]
def get_nodes(self):
"""
Adds variables to the network.
Examples
--------
>>> from pgmpy.readwrite import UAIWriter
>>> writer = UAIWriter(model)
>>> writer.get_nodes()
"""
no_nodes = len(self.model.nodes())
return str(no_nodes)
def get_domain(self):
"""
Adds domain of each variable to the network.
Examples
--------
>>> from pgmpy.readwrite import UAIWriter
>>> writer = UAIWriter(model)
>>> writer.get_domain()
"""
if isinstance(self.model, BayesianNetwork):
cpds = self.model.get_cpds()
cpds.sort(key=lambda x: x.variable)
domain = {}
for cpd in cpds:
domain[cpd.variable] = str(cpd.variable_card)
return domain
elif isinstance(self.model, MarkovNetwork):
factors = self.model.get_factors()
domain = {}
for factor in factors:
variables = factor.variables
for var in variables:
if var not in domain:
domain[var] = str(factor.get_cardinality([var])[var])
return domain
else:
raise TypeError("Model must be an instance of Markov or Bayesian model.")
def get_functions(self):
"""
Adds functions to the network.
Examples
--------
>>> from pgmpy.readwrite import UAIWriter
>>> writer = UAIWriter(model)
>>> writer.get_functions()
"""
if isinstance(self.model, BayesianNetwork):
cpds = self.model.get_cpds()
cpds.sort(key=lambda x: x.variable)
variables = sorted(self.domain.items(), key=lambda x: (x[1], x[0]))
functions = []
for cpd in cpds:
child_var = cpd.variable
evidence = cpd.variables[:0:-1]
function = [
str(variables.index((var, self.domain[var]))) for var in evidence
]
function.append(
str(variables.index((child_var, self.domain[child_var])))
)
functions.append(function)
return functions
elif isinstance(self.model, MarkovNetwork):
factors = self.model.get_factors()
functions = []
variables = sorted(self.domain.items(), key=lambda x: (x[1], x[0]))
for factor in factors:
scope = factor.scope()
function = [
str(variables.index((var, self.domain[var]))) for var in scope
]
functions.append(function)
return functions
else:
raise TypeError("Model must be an instance of Markov or Bayesian model.")
def get_tables(self):
"""
Adds tables to the network.
Examples
--------
>>> from pgmpy.readwrite import UAIWriter
>>> writer = UAIWriter(model)
>>> writer.get_tables()
"""
if isinstance(self.model, BayesianNetwork):
cpds = self.model.get_cpds()
cpds.sort(key=lambda x: x.variable)
tables = []
for cpd in cpds:
values = list(map(str, cpd.values.ravel()))
tables.append(values)
return tables
elif isinstance(self.model, MarkovNetwork):
factors = self.model.get_factors()
tables = []
for factor in factors:
values = list(map(str, factor.values.ravel()))
tables.append(values)
return tables
else:
raise TypeError("Model must be an instance of Markov or Bayesian model.")
def write_uai(self, filename):
"""
Write the UAI data into the file.
Parameters
----------
filename: Name of the file.
Examples
--------
>>> from pgmpy.readwrite import UAIWriter
>>> from pgmpy.utils import get_example_model
>>> model = get_example_model('asia')
>>> writer = UAIWriter(model)
>>> writer.write_uai('asia.uai')
"""
writer = self.__str__()
with open(filename, "w") as fout:
fout.write(writer)
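# Illustrative round trip, not part of the original module, assuming a
# BayesianNetwork or MarkovNetwork instance 'model' and a writable
# working directory:
#
#   UAIWriter(model).write_uai('model.uai')
#   model_back = UAIReader('model.uai').get_model()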
|
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
import sys
import time
from . import junit_output
ABS_PATH_PREFIX = os.getcwd() + os.sep
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class ProgressIndicator(object):
def __init__(self):
self.runner = None
def Starting(self):
pass
def Done(self):
pass
def AboutToRun(self, test):
pass
def HasRun(self, test, has_unexpected_output):
pass
def PrintFailureHeader(self, test):
if test.suite.IsNegativeTest(test):
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
class SimpleProgressIndicator(ProgressIndicator):
"""Abstract base class for {Verbose,Dots}ProgressIndicator"""
def Starting(self):
print 'Running %i tests' % self.runner.total
def Done(self):
print
for failed in self.runner.failed:
self.PrintFailureHeader(failed)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(self.runner.GetCommand(failed))
if failed.output.HasCrashed():
print "exit code: %d" % failed.output.exit_code
print "--- CRASHED ---"
if failed.output.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.runner.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.runner.failed)
if self.runner.crashed > 0:
print "=== %i tests CRASHED" % self.runner.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, test):
print 'Starting %s...' % test.GetLabel()
sys.stdout.flush()
def HasRun(self, test, has_unexpected_output):
if has_unexpected_output:
if test.output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def HasRun(self, test, has_unexpected_output):
total = self.runner.succeeded + len(self.runner.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if has_unexpected_output:
if test.output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif test.output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class CompactProgressIndicator(ProgressIndicator):
"""Abstract base class for {Color,Monochrome}ProgressIndicator"""
def __init__(self, templates):
super(CompactProgressIndicator, self).__init__()
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Done(self):
self.PrintProgress('Done')
print "" # Line break.
def AboutToRun(self, test):
self.PrintProgress(test.GetLabel())
def HasRun(self, test, has_unexpected_output):
if has_unexpected_output:
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(test)
stdout = test.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = test.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
if test.output.HasCrashed():
print "exit code: %d" % test.output.exit_code
print "--- CRASHED ---"
if test.output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, string, length):
if length and (len(string) > (length - 3)):
return string[:(length - 3)] + "..."
else:
return string
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.runner.succeeded,
'remaining': (((self.runner.total - self.runner.remaining) * 100) //
self.runner.total),
'failed': len(self.runner.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|"
"\033[34m%%%(remaining) 4d\033[0m|"
"\033[32m+%(passed) 4d\033[0m|"
"\033[31m-%(failed) 4d\033[0m]: %(test)s"),
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|%%%(remaining) 4d|"
"+%(passed) 4d|-%(failed) 4d]: %(test)s"),
'stdout': '%s',
'stderr': '%s',
}
super(MonochromeProgressIndicator, self).__init__(templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
class JUnitTestProgressIndicator(ProgressIndicator):
def __init__(self, progress_indicator, junitout, junittestsuite):
self.progress_indicator = progress_indicator
self.outputter = junit_output.JUnitTestOutput(junittestsuite)
if junitout:
self.outfile = open(junitout, "w")
else:
self.outfile = sys.stdout
def Starting(self):
self.progress_indicator.runner = self.runner
self.progress_indicator.Starting()
def Done(self):
self.progress_indicator.Done()
self.outputter.FinishAndWrite(self.outfile)
if self.outfile != sys.stdout:
self.outfile.close()
def AboutToRun(self, test):
self.progress_indicator.AboutToRun(test)
def HasRun(self, test, has_unexpected_output):
self.progress_indicator.HasRun(test, has_unexpected_output)
fail_text = ""
if has_unexpected_output:
stdout = test.output.stdout.strip()
if len(stdout):
fail_text += "stdout:\n%s\n" % stdout
stderr = test.output.stderr.strip()
if len(stderr):
fail_text += "stderr:\n%s\n" % stderr
fail_text += "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
if test.output.HasCrashed():
fail_text += "exit code: %d\n--- CRASHED ---" % test.output.exit_code
if test.output.HasTimedOut():
fail_text += "--- TIMEOUT ---"
self.outputter.HasRunTest(
[test.GetLabel()] + self.runner.context.mode_flags + test.flags,
test.duration,
fail_text)
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, progress_indicator, json_test_results, arch, mode):
self.progress_indicator = progress_indicator
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
self.results = []
def Starting(self):
self.progress_indicator.runner = self.runner
self.progress_indicator.Starting()
def Done(self):
self.progress_indicator.Done()
complete_results = []
if os.path.exists(self.json_test_results):
with open(self.json_test_results, "r") as f:
# Buildbot might start out with an empty file.
complete_results = json.loads(f.read() or "[]")
complete_results.append({
"arch": self.arch,
"mode": self.mode,
"results": self.results,
})
with open(self.json_test_results, "w") as f:
f.write(json.dumps(complete_results))
def AboutToRun(self, test):
self.progress_indicator.AboutToRun(test)
def HasRun(self, test, has_unexpected_output):
self.progress_indicator.HasRun(test, has_unexpected_output)
if not has_unexpected_output:
# Omit tests that ran as expected. Passing tests from reruns after
# failures have unexpected output, so they are reported here as well.
return
self.results.append({
"name": test.GetLabel(),
"flags": test.flags,
"command": EscapeCommand(self.runner.GetCommand(test)).replace(
ABS_PATH_PREFIX, ""),
"run": test.run,
"stdout": test.output.stdout,
"stderr": test.output.stderr,
"exit_code": test.output.exit_code,
"result": test.suite.GetOutcome(test),
"expected": list(test.outcomes or ["PASS"]),
})
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'mono': MonochromeProgressIndicator
}
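# Illustrative sketch, not part of the original module: selecting an
# indicator by name; 'mono' is a safe choice for terminals without ANSI
# color support. The runner is expected to attach itself before use.
#
#   indicator = PROGRESS_INDICATORS['mono']()
#   indicator.runner = runner   # hypothetical test runner
#   indicator.Starting()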
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division
import warnings
from psd_tools.utils import be_array_from_bytes
from psd_tools.constants import Compression, ChannelID, ColorMode, ImageResourceID
from psd_tools import icc_profiles
try:
from PIL import Image
if hasattr(Image, 'frombytes'):
frombytes = Image.frombytes
else:
frombytes = Image.fromstring # PIL and older Pillow versions
except ImportError:
Image = None
try:
from PIL import ImageCms
except ImportError:
ImageCms = None
def tobytes(image):
# Some versions of PIL are missing the tobytes alias for tostring
if hasattr(image, 'tobytes'):
return image.tobytes()
else:
return image.tostring()
def extract_layer_image(decoded_data, layer_index):
"""
Converts a layer from the ``decoded_data`` to a PIL image.
"""
if Image is None:
raise Exception("This module requires PIL (or Pillow) installed.")
layers = decoded_data.layer_and_mask_data.layers
layer = layers.layer_records[layer_index]
return _channel_data_to_PIL(
channel_data = layers.channel_image_data[layer_index],
channel_ids = _get_layer_channel_ids(layer),
color_mode = decoded_data.header.color_mode, # XXX?
size = (layer.width(), layer.height()),
depth = decoded_data.header.depth,
icc_profile = get_icc_profile(decoded_data)
)
def extract_composite_image(decoded_data):
"""
Converts a composite (merged) image from the ``decoded_data``
to a PIL image.
"""
if Image is None:
raise Exception("This module requires PIL (or Pillow) installed.")
header = decoded_data.header
size = header.width, header.height
if size == (0, 0):
return
channel_ids = _get_header_channel_ids(header)
if channel_ids is None:
warnings.warn(
"This number of channels (%d) is unsupported for this color mode (%s)" % (
header.number_of_channels, header.color_mode
))
return
return _channel_data_to_PIL(
channel_data = decoded_data.image_data,
channel_ids = channel_ids,
color_mode = header.color_mode,
size = size,
depth = header.depth,
icc_profile = get_icc_profile(decoded_data)
)
def get_icc_profile(decoded_data):
"""
Return ICC image profile if it exists and was correctly decoded.
"""
# fixme: move this function somewhere?
icc_profiles = [res.data for res in decoded_data.image_resource_blocks
if res.resource_id == ImageResourceID.ICC_PROFILE]
if not icc_profiles:
return None
icc_profile = icc_profiles[0]
if isinstance(icc_profile, bytes):
# profile was not decoded
return None
return icc_profile
def apply_opacity(im, opacity):
""" Apply opacity to an image. """
if opacity == 255:
return im
if im.mode == 'RGB':
im.putalpha(opacity)
return im
elif im.mode in ('RGBA', 'LA'):
alpha_index = len(im.mode) - 1
a = im.split()[alpha_index]
opacity_scale = opacity / 255
a = a.point(lambda i: i * opacity_scale)
im.putalpha(a)
return im
else:
raise NotImplementedError()
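# Illustrative sketch, not part of the original module: halving the
# opacity of a hypothetical RGBA image (255 * 128/255 == 128).
#
#   im = Image.new('RGBA', (4, 4), (255, 0, 0, 255))
#   im = apply_opacity(im, 128)
#   assert im.getpixel((0, 0))[3] == 128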
def try_remove_alpha(im):
""" Removes an alpha channel if the image is fully opaque. """
if im.mode == 'RGBA':
alpha_channel_data = im.getdata(3)
elif im.mode == 'LA':
alpha_channel_data = im.getdata(1)
else:
raise NotImplementedError()
for value in alpha_channel_data:
if value < 255:
return im
return im.convert(im.mode[:-1])
def _channel_data_to_PIL(channel_data, channel_ids, color_mode, size, depth, icc_profile):
bands = _get_band_images(
channel_data = channel_data,
channel_ids = channel_ids,
color_mode = color_mode,
size = size,
depth = depth
)
return _merge_bands(bands, color_mode, size, icc_profile)
def _merge_bands(bands, color_mode, size, icc_profile):
if color_mode == ColorMode.RGB:
merged_image = Image.merge('RGB', [bands[key] for key in 'RGB'])
elif color_mode == ColorMode.CMYK:
merged_image = Image.merge('CMYK', [bands[key] for key in 'CMYK'])
merged_bytes = tobytes(merged_image)
# colors are inverted in Photoshop CMYK images; invert them back
merged_image = frombytes('CMYK', size, merged_bytes, 'raw', 'CMYK;I')
elif color_mode == ColorMode.GRAYSCALE:
merged_image = bands['L']
else:
raise NotImplementedError()
if icc_profile is not None:
assert ImageCms is not None
try:
if color_mode in (ColorMode.RGB, ColorMode.CMYK):
merged_image = ImageCms.profileToProfile(merged_image, icc_profile, icc_profiles.sRGB, outputMode='RGB')
elif color_mode == ColorMode.GRAYSCALE:
ImageCms.profileToProfile(merged_image, icc_profile, icc_profiles.gray, inPlace=True, outputMode='L')
except ImageCms.PyCMSError as e:
# PIL/Pillow/(old littlecms?) can't convert some ICC profiles
warnings.warn(repr(e))
if color_mode == ColorMode.CMYK:
merged_image = merged_image.convert('RGB')
alpha = bands.get('A')
if alpha:
merged_image.putalpha(alpha)
return merged_image
def _get_band_images(channel_data, channel_ids, color_mode, size, depth):
bands = {}
for channel, channel_id in zip(channel_data, channel_ids):
pil_band = _channel_id_to_PIL(channel_id, color_mode)
if pil_band is None:
warnings.warn("Unsupported channel type (%d)" % channel_id)
continue
if channel.compression in (Compression.RAW, Compression.ZIP, Compression.ZIP_WITH_PREDICTION):
if depth == 8:
im = _from_8bit_raw(channel.data, size)
elif depth == 16:
im = _from_16bit_raw(channel.data, size)
elif depth == 32:
im = _from_32bit_raw(channel.data, size)
else:
warnings.warn("Unsupported depth (%s)" % depth)
continue
elif channel.compression == Compression.PACK_BITS:
if depth != 8:
warnings.warn("Depth %s is unsupported for PackBits compression" % depth)
continue
im = frombytes('L', size, channel.data, "packbits", 'L')
else:
if Compression.is_known(channel.compression):
warnings.warn("Compression method is not implemented (%s)" % channel.compression)
else:
warnings.warn("Unknown compression method (%s)" % channel.compression)
continue
bands[pil_band] = im
return bands
def _from_8bit_raw(data, size):
return frombytes('L', size, data, "raw", 'L')
def _from_16bit_raw(data, size):
im = frombytes('I', size, data, "raw", 'I;16B')
return im.point(lambda i: i * (1/(256.0))).convert('L')
def _from_32bit_raw(data, size):
pixels = be_array_from_bytes("f", data)
im = Image.new("F", size)
im.putdata(pixels, 255, 0)
return im.convert('L')
def _channel_id_to_PIL(channel_id, color_mode):
if ChannelID.is_known(channel_id):
if channel_id == ChannelID.TRANSPARENCY_MASK:
return 'A'
warnings.warn("Channel %s (%s) is not handled" % (channel_id, ChannelID.name_of(channel_id)))
return None
try:
assert channel_id >= 0
if color_mode == ColorMode.RGB:
return 'RGB'[channel_id]
elif color_mode == ColorMode.CMYK:
return 'CMYK'[channel_id]
elif color_mode == ColorMode.GRAYSCALE:
return 'L'[channel_id]
except IndexError:
# spot channel
warnings.warn("Spot channel %s is not handled" % channel_id)
return None
def _get_header_channel_ids(header):
if header.color_mode == ColorMode.RGB:
if header.number_of_channels == 3:
return (0, 1, 2)
elif header.number_of_channels == 4:
return (0, 1, 2, ChannelID.TRANSPARENCY_MASK)
elif header.color_mode == ColorMode.CMYK:
if header.number_of_channels == 4:
return (0, 1, 2, 3)
elif header.number_of_channels == 5:
# XXX: how to distinguish
# "4 CMYK + 1 alpha" and "4 CMYK + 1 spot"?
return (0, 1, 2, 3, ChannelID.TRANSPARENCY_MASK)
elif header.color_mode == ColorMode.GRAYSCALE:
if header.number_of_channels == 1:
return (0,)
elif header.number_of_channels == 2:
return (0, ChannelID.TRANSPARENCY_MASK)
else:
warnings.warn("Unsupported color mode (%s)" % header.color_mode)
def _get_layer_channel_ids(layer):
return [info.id for info in layer.channels]
|
|
# Copyright (c) 2016 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``parser.py``
`Suricata rules parsing support`
"""
import itertools
from collections import namedtuple
from pygments import lexer
from pygments import token
class TreeNodeBase(object):
"""
"""
def __init__(self):
super(TreeNodeBase, self).__init__()
def is_root(self):
pass
def is_leaf(self):
pass
def iter_nodes(self):
pass
_TN_NO_PARENT = TreeNodeBase()
class TreeNode(TreeNodeBase):
TN_NO_PARENT = _TN_NO_PARENT
def __init__(self, parent=_TN_NO_PARENT, data=None):
super(TreeNode, self).__init__()
if not parent:
parent = _TN_NO_PARENT
self.parent = parent
self.nodes = {}
self.data = data
def is_root(self):
# A root's parent is the shared no-parent sentinel.
return self.parent is self.TN_NO_PARENT
def is_leaf(self):
# A leaf has no child nodes.
return not self.nodes
def iter_nodes(self):
return iter(self.nodes)
class AST_Node(TreeNode):
def is_leaf(self):
return AST_Node_Pred.NP_IS_LEAF(self)
class AST_Node_Pred(object):
"""AST Node predicates helper.
"""
@classmethod
def NP_IS_TERMINAL(cls, node):
return isinstance(node, AST_T_Node)
@classmethod
def NP_IS_NONTERMINAL(cls, node):
return isinstance(node, AST_N_Node)
    @classmethod
    def NP_GEN_DATA(cls, data_predicate):
        def wrapper(node):
            return data_predicate(node.data)
        return wrapper
    @classmethod
    def NP_IS_LEAF(cls, node):
        return cls.NP_IS_TERMINAL(node) or (cls.NP_IS_NONTERMINAL(node) and not node.data)
class AST_T_Node(AST_Node):
"""AST Terminal - leaf nodes.
"""
def is_leaf(self):
return True
def __str__(self):
return 'Node T: T: {}'.format(self.data)
class AST_N_Node(AST_Node):
"""AST Nonterminal nodes.
"""
@classmethod
def from_products(cls, products, parent=TreeNode.TN_NO_PARENT, is_terminal=None):
_data = [AST_T_Node(parent=parent, data=p) if is_terminal(p)
else AST_N_Node(p, parent=parent)
for p in products]
return _data
def __init__(self, n, parent=TreeNode.TN_NO_PARENT, data=None):
super(AST_N_Node, self).__init__(parent=parent, data=data)
self.n = n
def __iter__(self):
return AST_Node_Iterator(self)
    def get_nodes(self, predicate=None, depth=None, add_self=False):
        """Collect nodes from this subtree that satisfy ``predicate``,
        descending at most ``depth`` levels (``None`` means unlimited).
        """
        nodes = []
        if depth != 0:
            kwargs = {
                'predicate': predicate,
                'add_self': add_self,
            }
            if depth is not None:
                assert depth > 0
                kwargs['depth'] = depth - 1
for node in self.data:
if isinstance(node, AST_N_Node):
nodes.extend(node.get_nodes(**kwargs))
if not predicate or predicate(node):
nodes.append(node)
if add_self:
if not predicate or predicate(self):
nodes.append(self)
return nodes
def get_child_nodes(self, **kwargs):
kwargs['depth'] = 1
return self.get_nodes(**kwargs)
@classmethod
def NP_IS_TERMINAL(cls, node):
return isinstance(node, AST_T_Node)
@classmethod
def NP_IS_NONTERMINAL(cls, node):
return isinstance(node, AST_N_Node)
    @classmethod
    def NP_GEN_DATA(cls, data_predicate):
        def wrapper(node):
            return data_predicate(node.data)
        return wrapper
def expand_node(self, products, is_terminal=None):
self.data = self.from_products(products, parent=self, is_terminal=is_terminal)
def __str__(self):
if not self.data:
return 'some N-T Node({})'.format(self.n)
return 'Node N: N: {0}, {1} nodes: [ {2} ]'.format(
self.n,
len(self.data),
' '.join(map(str, self.data)))
class AST_Node_Iterator(object):
    """Depth-first, pre-order iterator over an AST subtree."""
def __init__(self, ast_node):
super(AST_Node_Iterator, self).__init__()
self.root_node = ast_node
self.it = ast_node
self.begin = True
def __next__(self):
if self.begin:
self.begin = False
return self.it
try:
while True:
try:
if self.it is self.root_node.parent:
raise StopIteration
if isinstance(self.it, AST_N_Node) and self.it.data:
self.it = self.it.data[0]
break
_i = self.it.parent.data.index(self.it)
self.it = self.it.parent.data[_i + 1] # advance sibling via parent
break
except IndexError:
while True:
if self.it is self.root_node:
raise StopIteration
self.it = self.it.parent
try:
_i = self.it.parent.data.index(self.it)
self.it = self.it.parent.data[_i + 1]
break
except IndexError:
continue
break
except AttributeError:
raise StopIteration
return self.it
AST_Node_Iterator.next = AST_Node_Iterator.__next__
class Tree(object):
    """Thin wrapper holding the root node of a tree."""
def __init__(self, root):
super(Tree, self).__init__()
self.root = root
class LL_Parser(object):
    """Table-driven LL(1) parser.

    ``grammar`` is a dict with keys 'N' (nonterminals), 'T' (terminals),
    'R' (rules as ``(left, right)`` tuples) and 'S' (the start symbol).
    """
__EPSILON = object()
__DOLLAR = object()
@classmethod
def _format_product(cls, product):
        if product is cls.__EPSILON:
            return '"__EPSILON__"'
        if product is cls.__DOLLAR:
            return '"__DOLLAR__"'
return '"{0}"'.format(str(product))
def __init__(self, grammar):
super(LL_Parser, self).__init__()
self.N, self.T, self.R, self.S = [grammar.get(item) for item in ['N', 'T', 'R', 'S']]
assert self._build_LL_table(grammar)
@classmethod
def SyntaxError(cls, node_root=None, node_ptr=None, tokens=None, got=None):
if isinstance(node_ptr, AST_T_Node):
node_ptr = next(iter(node_ptr.parent))
_expected_lst = [_n for _n in node_ptr]
_expected_str = ', '.join(map(str, [_n.data if isinstance(_n, AST_T_Node)
else _n.n if isinstance(_n, AST_N_Node)
else str(_n)
for _n in _expected_lst]))
raise Exception('Syntax Error: Unexpected token: {}. Expected: [{}]'
.format(got, _expected_str))
def parse(self, tokens, ignore_ws=False):
"""
"""
if ignore_ws:
tokens = [_t for _t in tokens
if not token.is_token_subtype(token.Token.Text.Whitespace, _t)]
tokens.extend([self.__DOLLAR])
t_it = iter(tokens)
t = next(t_it)
_root_parent = AST_N_Node('<$>', parent=AST_Node.TN_NO_PARENT)
ast_root = AST_N_Node(self.S, parent=_root_parent)
_root_parent.data = [ast_root, self.__DOLLAR]
top_node = None
for top_node in ast_root:
try:
if top_node is self.__DOLLAR or t is self.__DOLLAR:
break
elif isinstance(top_node, AST_T_Node): # Terminal -> Match
term = top_node.data
if self._match_T(term, t):
top_node.data = t
else:
self.SyntaxError(node_root=ast_root, node_ptr=top_node,
tokens=tokens, got=t)
t = next(t_it)
elif isinstance(top_node, AST_N_Node): # Nonterminal -> Predict/Expand
nonterm = top_node.n
rule = self._match_N(nonterm, t)
if not rule:
self.SyntaxError(node_root=ast_root, node_ptr=top_node,
tokens=tokens, got=t)
production = rule[1][:]
if production:
top_node.expand_node(production, lambda p: p in self.T)
else:
top_node.expand_node([])
else:
                    raise Exception('Lexical Error: Unknown lexeme: {0}'.format(type(top_node)))
except StopIteration:
break
if top_node is self.__DOLLAR and t is self.__DOLLAR: # ok
pass
elif top_node is self.__DOLLAR: # Overflow
self.SyntaxError(node_root=ast_root, node_ptr=top_node, tokens=tokens, got=t)
elif t is self.__DOLLAR: # Underflow
self.SyntaxError(node_root=ast_root, node_ptr=top_node, tokens=tokens, got=t)
else:
raise Exception('Unknown Error: It: {}, Node: {}'.format(t, str(top_node)))
        # Debug aid: dump every node of the resulting parse tree.
        _nodes = ast_root.get_nodes(depth=None)
for _n in _nodes:
print(str(_n.n if isinstance(_n, AST_N_Node) else _n.data))
return ast_root
def _match_N(self, s_top_N, _token):
_rules_LL = self.predict[s_top_N]
_rule = None
for term, rule in _rules_LL.items():
if token.is_token_subtype(_token[1], term):
_rule = rule
break
return _rule
def _match_T(self, s_top_T, _token):
return token.is_token_subtype(_token[1], s_top_T)
def _build_LL_table(self, grammar):
self.first = {}
_new_t = set()
for t_key in self.T:
self.first[t_key] = {t_key}
while t_key.parent:
if t_key.parent not in self.first:
self.first[t_key.parent] = set()
self.first[t_key.parent] |= self.first[t_key]
_new_t |= {t_key.parent}
t_key = t_key.parent
self.T |= _new_t
for n in self.N:
self.first[n] = set()
# the First sets
while True:
first_changed = False
for rule in self.R:
left, right = rule
assert 1 == len(left)
n = left[0]
assert n in self.N
_first_products = self._first_of_products(right, self.first)
if _first_products - self.first[n]:
first_changed = True
self.first[n] |= _first_products
if not first_changed:
break
# the Follow sets
self.follow = {}
for n in self.N:
self.follow[n] = set()
while True:
follow_changed = False
for rule in self.R:
left, right = rule
n = left[0]
_r_firsts = []
for p in reversed(right):
if p in self.N:
_first_products = self._first_of_products(_r_firsts, self.first)
_difference = _first_products - self.follow[p]
if _difference and _difference != {self.__EPSILON}:
follow_changed = True
self.follow[p] |= _difference - {self.__EPSILON}
if self.__EPSILON in _first_products:
if self.follow[n] - self.follow[p]:
follow_changed = True
self.follow[p] |= self.follow[n]
_r_firsts = [p] + _r_firsts
if not follow_changed:
break
# the Predict sets
self.predict = {}
for rule in self.R:
left, right = rule
n = left[0]
if not self.predict.get(n):
self.predict[n] = {}
_first_products = self._first_of_products(right, self.first)
if self.__EPSILON in _first_products:
for t in (_first_products - {self.__EPSILON}) | self.follow[n]:
self.predict[n][t] = rule
else:
for t in _first_products:
self.predict[n][t] = rule
return True
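    # Worked example (illustrative, not from the original code): for a toy
    # grammar S -> 'a' S | <empty>, the passes above compute
    # FIRST(S) = {'a', EPSILON} and FOLLOW(S) = {DOLLAR}, so the predict
    # table maps (S, 'a') to the recursive rule and (S, DOLLAR) to the
    # epsilon rule.  Each pass iterates until its sets stop changing.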
@classmethod
def _first_of_products(cls, products, first):
if not products:
return {cls.__EPSILON}
first_set = set()
it = iter(products)
        while True:
            try:
                p = next(it)
                # assert p in first
                first_set |= first[p] - {cls.__EPSILON}
                if cls.__EPSILON not in first[p]:
                    return first_set
            except StopIteration:
                # Every symbol in the production can derive epsilon.
                first_set |= {cls.__EPSILON}
                return first_set
class Semantic(object):
pass
SuricataRule = namedtuple('SuricataRule', ['action', 'header', 'options'])
SR_Header = namedtuple(
'SR_Header',
['proto',
'src_host', 'src_port',
'direction',
'dst_host', 'dst_port'])
TT_ANY = token.Keyword.Any
TT_PASS = token.Keyword.Action.Pass
TT_REJECT = token.Keyword.Action.Reject
TT_DROP = token.Keyword.Action.Drop
TT_ALERT = token.Keyword.Action.Alert
TT_IP = token.Keyword.Proto.Ip
TT_TCP = token.Keyword.Proto.Tcp
TT_UDP = token.Keyword.Proto.Udp
TT_ICMP = token.Keyword.Proto.Icmp
TT_BR_LEFT = token.Operator.LeftBracket
TT_BR_RIGHT = token.Operator.RightBracket
TT_EXCLMARK = token.Operator.ExclMark
TT_COMMA = token.Operator.Comma
TT_IPv4 = token.Token.IPv4
TT_IPv4_ADDR = token.Token.IPv4.Addr
TT_IPv4_MASK = token.Token.IPv4.Addr_w_Mask
TT_VARIABLE = token.Token.Variable
class HostLexer(lexer.RegexLexer):
"""
"""
tokens = {
'root': [
(r'\s+', token.Token.Text.Whitespace),
(r'any', TT_ANY),
(r'pass', TT_PASS),
(r'reject', TT_REJECT),
(r'drop', TT_DROP),
(r'alert', TT_ALERT),
(r'ip', TT_IP),
(r'tcp', TT_TCP),
(r'udp', TT_UDP),
(r'icmp', TT_ICMP),
(r'\[', TT_BR_LEFT),
(r']', TT_BR_RIGHT),
(r'!', TT_EXCLMARK),
(r',', TT_COMMA),
(r'\d+\.\d+\.\d+\.\d+/\d+', TT_IPv4_MASK),
(r'\d+\.\d+\.\d+\.\d+', TT_IPv4_ADDR),
(r'\$[\w_][\w_\d]*', TT_VARIABLE),
],
}
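# Illustrative sketch (not in the original module): tokenizing a host
# expression with HostLexer.  get_tokens_unprocessed() is the standard
# pygments API and yields (position, token_type, value) triples, the tuple
# shape that LL_Parser._match_T()/_match_N() expect.
def _demo_lex_hosts(text='[!$HOME_NET, 10.0.0.0/8]'):
    return [t for t in HostLexer().get_tokens_unprocessed(text)
            if not token.is_token_subtype(t[1], token.Token.Text.Whitespace)]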
class HostParser(LL_Parser):
"""
"""
HOST_GRAMMAR = {
'T': {
TT_IPv4_ADDR,
TT_IPv4_MASK,
TT_VARIABLE,
TT_BR_LEFT,
TT_BR_RIGHT,
TT_EXCLMARK,
TT_COMMA,
},
'N': {
'<HOST_GRP>',
'<HOST_EXPR>',
'<HOST_PARENS_CONTD>',
},
'R': {
(('<HOST_GRP>', ), ('<HOST_EXPR>', )),
(('<HOST_GRP>', ), (TT_EXCLMARK, '<HOST_GRP>')),
(('<HOST_GRP>', ), (TT_BR_LEFT, '<HOST_GRP>', '<HOST_PARENS_CONTD>', TT_BR_RIGHT)),
(('<HOST_EXPR>', ), (token.Token.IPv4, )),
(('<HOST_EXPR>', ), (TT_VARIABLE, )),
(('<HOST_PARENS_CONTD>', ), ()),
(('<HOST_PARENS_CONTD>', ), (TT_COMMA, '<HOST_GRP>', '<HOST_PARENS_CONTD>')),
},
'S': '<HOST_GRP>',
}
def __init__(self):
super(HostParser, self).__init__(self.HOST_GRAMMAR)
    @classmethod
    def disjoint_sets(cls, *args):
        # Sets are pairwise disjoint iff merging them loses no elements.
        if len(args) < 2:
            return True  # zero or one set is vacuously disjoint
        _total_cnt = sum(map(len, args))
        merged = set(itertools.chain(*args))
        return _total_cnt == len(merged)
@classmethod
def interpret_host(cls, host_node, symbol_table):
assert host_node
assert isinstance(host_node, AST_T_Node)
_pos, _type, _value = host_node.data
if token.is_token_subtype(_type, TT_VARIABLE):
return symbol_table[_value]
if token.is_token_subtype(_type, TT_IPv4):
return _value
@classmethod
def semantics(cls, tree_node, symbol_table, check=False):
if isinstance(tree_node, AST_N_Node):
if tree_node.n == '<HOST_GRP>':
assert tree_node.data
# if TT_EXCLMARK == tree_node.data[0][1]: # invert
if len(tree_node.data) == 2\
and isinstance(tree_node.data[0], AST_T_Node)\
and token.is_token_subtype(tree_node.data[0].data[1], TT_EXCLMARK):
_yes, _no = cls.semantics(tree_node.data[1], symbol_table, check=check)
return _no, _yes
elif len(tree_node.data) >= 3\
and tree_node.data[1].n == '<HOST_GRP>'\
and tree_node.data[2].n == '<HOST_PARENS_CONTD>':
_gyes, _gno = cls.semantics(tree_node.data[1], symbol_table, check=check)
_pyes, _pno = cls.semantics(tree_node.data[2], symbol_table, check=check)
if check:
assert cls.disjoint_sets(_gyes, _gno, _pyes, _pno)
return _gyes | _pyes, _gno | _pno
elif len(tree_node.data) == 1 and tree_node.data[0].n == '<HOST_EXPR>':
return cls.semantics(tree_node.data[0], symbol_table, check=check)
else:
assert False
elif tree_node.n == '<HOST_EXPR>':
assert tree_node.data
assert len(tree_node.data) == 1
_host = cls.interpret_host(tree_node.data[0], symbol_table)
return {_host}, set()
elif tree_node.n == '<HOST_PARENS_CONTD>':
if tree_node.data:
assert tree_node.data[1].n == '<HOST_GRP>'
_gyes, _gno = cls.semantics(tree_node.data[1], symbol_table, check=check)
if len(tree_node.data) == 3:
_pyes, _pno = cls.semantics(tree_node.data[2], symbol_table, check=check)
if check:
assert cls.disjoint_sets(_gyes, _gno, _pyes, _pno)
_gyes |= _pyes
_gno |= _pno
return _gyes, _gno
else:
assert False
return set(), set()
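# Illustrative sketch (not in the original module): end-to-end use of the
# lexer, parser and semantics above.  The symbol-table entry is an assumed
# value for the example; semantics() returns (included, excluded) host sets.
def _demo_parse_host_group():
    tokens = list(HostLexer().get_tokens_unprocessed('[$HOME_NET, !10.0.0.1]'))
    ast_root = HostParser().parse(tokens, ignore_ws=True)
    return HostParser.semantics(
        ast_root, symbol_table={'$HOME_NET': '192.168.0.0/24'})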
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011,2012 Nicira, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import session as sql_session
from nova import exception
from nova import flags
from nova import log as logging
from nova.network.quantum import client as quantum_client
from nova.network.quantum import fake_client
from nova.network.quantum import manager as quantum_manager
from nova.network.quantum import melange_connection
from nova.network.quantum import melange_ipam_lib
from nova.network.quantum import quantum_connection
from nova import test
from nova import utils
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
networks = [{'label': 'project1-net1',
'injected': False,
'multi_host': False,
'cidr': '100.168.0.0/24',
'cidr_v6': '100:1db8::/64',
'gateway_v6': '100:1db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': None,
'bridge_interface': None,
'gateway': '100.168.0.1',
'broadcast': '100.168.0.255',
'dns1': '8.8.8.8',
'vlan': None,
'host': None,
'vpn_public_address': None,
'project_id': 'fake_project1',
'priority': 1},
{'label': 'project2-net1',
'injected': False,
'multi_host': False,
'cidr': '101.168.1.0/24',
'cidr_v6': '101:1db9::/64',
'gateway_v6': '101:1db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': None,
'bridge_interface': None,
'gateway': '101.168.1.1',
'broadcast': '101.168.1.255',
'dns1': '8.8.8.8',
'vlan': None,
'host': None,
'project_id': 'fake_project2',
'priority': 1},
{'label': "public",
'injected': False,
'multi_host': False,
'cidr': '102.0.0.0/24',
'cidr_v6': '102:1dba::/64',
'gateway_v6': '102:1dba::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': None,
'bridge_interface': None,
'gateway': '102.0.0.1',
'broadcast': '102.0.0.255',
'dns1': '8.8.8.8',
'vlan': None,
'host': None,
'project_id': None,
'priority': 0},
{'label': "project2-net2",
'injected': False,
'multi_host': False,
'cidr': '103.0.0.0/24',
'cidr_v6': '103:1dbb::/64',
'gateway_v6': '103:1dbb::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': None,
'bridge_interface': None,
'gateway': '103.0.0.1',
'broadcast': '103.0.0.255',
'dns1': '8.8.8.8',
'vlan': None,
'host': None,
'project_id': "fake_project2",
'priority': 2}]
class QuantumConnectionTestCase(test.TestCase):
def test_connection(self):
fc = fake_client.FakeClient(LOG)
qc = quantum_connection.QuantumClientConnection(client=fc)
t = "tenant1"
net1_name = "net1"
net1_uuid = qc.create_network(t, net1_name)
self.assertEquals(net1_name, qc.get_network_name(t, net1_uuid))
self.assertTrue(qc.network_exists(t, net1_uuid))
self.assertFalse(qc.network_exists(t, "fake-uuid"))
self.assertFalse(qc.network_exists("fake-tenant", net1_uuid))
nets = qc.get_networks(t)['networks']
self.assertEquals(len(nets), 1)
self.assertEquals(nets[0]['id'], net1_uuid)
num_ports = 10
for i in range(0, num_ports):
qc.create_and_attach_port(t, net1_uuid,
'iface' + str(i), state='ACTIVE')
self.assertEquals(len(qc.get_attached_ports(t, net1_uuid)), num_ports)
for i in range(0, num_ports):
port_uuid = qc.get_port_by_attachment(t, net1_uuid,
'iface' + str(i))
self.assertTrue(port_uuid)
qc.detach_and_delete_port(t, net1_uuid, port_uuid)
self.assertEquals(len(qc.get_attached_ports(t, net1_uuid)), 0)
# test port not found
qc.create_and_attach_port(t, net1_uuid, 'foo', state='ACTIVE')
port_uuid = qc.get_port_by_attachment(t, net1_uuid, 'foo')
qc.detach_and_delete_port(t, net1_uuid, port_uuid)
self.assertRaises(quantum_client.QuantumNotFoundException,
qc.detach_and_delete_port, t,
net1_uuid, port_uuid)
qc.delete_network(t, net1_uuid)
self.assertFalse(qc.network_exists(t, net1_uuid))
self.assertEquals(len(qc.get_networks(t)['networks']), 0)
self.assertRaises(quantum_client.QuantumNotFoundException,
qc.get_network_name, t, net1_uuid)
# this is a base class to be used by other QuantumManager Test classes
class QuantumNovaTestCase(test.TestCase):
def setUp(self):
super(QuantumNovaTestCase, self).setUp()
self.flags(quantum_use_dhcp=True)
self.flags(l3_lib="nova.network.l3.LinuxNetL3")
linuxdrv = "nova.network.linux_net.LinuxOVSInterfaceDriver"
self.flags(linuxnet_interface_driver=linuxdrv)
fc = fake_client.FakeClient(LOG)
qc = quantum_connection.QuantumClientConnection(client=fc)
self.net_man = quantum_manager.QuantumManager(
ipam_lib="nova.network.quantum.nova_ipam_lib",
q_conn=qc)
def func(arg1, arg2):
pass
def func2(arg1, arg2, arg3):
pass
def func1(arg1):
pass
self.net_man.driver.update_dhcp_hostfile_with_text = func
self.net_man.driver.restart_dhcp = func2
self.net_man.driver.kill_dhcp = func1
# Tests seem to create some networks by default, which
# we don't want. So we delete them.
ctx = context.RequestContext('user1', 'fake_project1').elevated()
for n in db.network_get_all(ctx):
db.network_delete_safe(ctx, n['id'])
# Other unit tests (e.g., test_compute.py) have a nasty
        # habit of creating fixed IPs and not cleaning up, which
# can confuse these tests, so we remove all existing fixed
# ips before starting.
session = sql_session.get_session()
result = session.query(models.FixedIp).all()
with session.begin():
for fip_ref in result:
session.delete(fip_ref)
self.net_man.init_host()
def _create_network(self, n):
ctx = context.RequestContext('user1', n['project_id'])
nwks = self.net_man.create_networks(
ctx,
label=n['label'], cidr=n['cidr'],
multi_host=n['multi_host'],
num_networks=1, network_size=256,
cidr_v6=n['cidr_v6'],
gateway=n['gateway'],
gateway_v6=n['gateway_v6'], bridge=None,
bridge_interface=None, dns1=n['dns1'],
project_id=n['project_id'],
priority=n['priority'])
n['uuid'] = nwks[0]['uuid']
class QuantumAllocationTestCase(QuantumNovaTestCase):
def test_get_network_in_db(self):
context = self.mox.CreateMockAnything()
context.elevated().AndReturn('elevated')
self.mox.StubOutWithMock(db, 'network_get_by_uuid')
self.net_man.context = context
db.network_get_by_uuid('elevated', 'quantum_net_id').AndReturn(
{'uuid': 1})
self.mox.ReplayAll()
network = self.net_man.get_network(context, ('quantum_net_id',
'net_tenant_id'))
self.assertEquals(network['quantum_net_id'], 'quantum_net_id')
self.assertEquals(network['uuid'], 1)
def test_get_network_not_in_db(self):
context = self.mox.CreateMockAnything()
context.elevated().AndReturn('elevated')
self.mox.StubOutWithMock(db, 'network_get_by_uuid')
self.net_man.context = context
db.network_get_by_uuid('elevated', 'quantum_net_id').AndReturn(None)
self.mox.ReplayAll()
network = self.net_man.get_network(context, ('quantum_net_id',
'net_tenant_id'))
self.assertEquals(network['quantum_net_id'], 'quantum_net_id')
self.assertEquals(network['uuid'], 'quantum_net_id')
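# Illustrative sketch (not part of the original tests): the mox record/replay
# pattern that the test cases above and below rely on.  Expected calls are
# recorded on a stub, ReplayAll() switches to replay mode, and the code under
# test must then make exactly the recorded calls.
def _demo_mox_pattern(test_case):
    # `test_case` is assumed to be a mox-enabled TestCase instance.
    stub = test_case.mox.CreateMockAnything()
    stub.lookup('key').AndReturn('value')  # record phase
    test_case.mox.ReplayAll()              # switch to replay
    assert stub.lookup('key') == 'value'   # call must match the recording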
class QuantumDeallocationTestCase(QuantumNovaTestCase):
def test_deallocate_port(self):
quantum = self.mox.CreateMock(
quantum_connection.QuantumClientConnection)
quantum.get_port_by_attachment('q_tenant_id', 'net_id',
'interface_id').AndReturn('port_id')
quantum.detach_and_delete_port('q_tenant_id', 'net_id', 'port_id')
self.net_man.q_conn = quantum
self.mox.ReplayAll()
self.net_man.deallocate_port('interface_id', 'net_id', 'q_tenant_id',
'instance_id')
def test_deallocate_port_logs_error(self):
quantum = self.mox.CreateMock(
quantum_connection.QuantumClientConnection)
quantum.get_port_by_attachment('q_tenant_id', 'net_id',
'interface_id').AndRaise(Exception)
self.net_man.q_conn = quantum
self.mox.StubOutWithMock(quantum_manager.LOG, 'exception')
quantum_manager.LOG.exception(mox.Regex(r'port deallocation failed'))
self.mox.ReplayAll()
self.net_man.deallocate_port('interface_id', 'net_id', 'q_tenant_id',
'instance_id')
def test_deallocate_ip_address(self):
ipam = self.mox.CreateMock(melange_ipam_lib.QuantumMelangeIPAMLib)
ipam.get_tenant_id_by_net_id('context', 'net_id', {'uuid': 1},
'project_id').AndReturn('ipam_tenant_id')
self.net_man.ipam = ipam
self.mox.ReplayAll()
self.net_man.deallocate_ip_address('context', 'net_id', 'project_id',
{'uuid': 1}, 'instance_id')
    def test_deallocate_ip_address_logs_error(self):
ipam = self.mox.CreateMock(melange_ipam_lib.QuantumMelangeIPAMLib)
ipam.get_tenant_id_by_net_id('context', 'net_id', {'uuid': 1},
'project_id').AndRaise(Exception())
self.net_man.ipam = ipam
self.mox.StubOutWithMock(quantum_manager.LOG, 'exception')
quantum_manager.LOG.exception(mox.Regex(r'ipam deallocation failed'))
self.mox.ReplayAll()
self.net_man.deallocate_ip_address('context', 'net_id', 'project_id',
{'uuid': 1}, 'instance_id')
class QuantumManagerTestCase(QuantumNovaTestCase):
def test_create_and_delete_nets(self):
self._create_nets()
self._delete_nets()
def _create_nets(self):
for n in networks:
self._create_network(n)
def _delete_nets(self):
for n in networks:
ctx = context.RequestContext('user1', n['project_id'])
self.net_man.delete_network(ctx, None, n['uuid'])
self.assertRaises(exception.NoNetworksFound,
db.network_get_all, ctx.elevated())
def _validate_nw_info(self, nw_info, expected_net_labels):
self.assertEquals(len(nw_info), len(expected_net_labels))
ctx = context.RequestContext('user1', 'foo').elevated()
all_net_map = {}
for n in db.network_get_all(ctx):
all_net_map[n['label']] = n
for i in range(0, len(nw_info)):
vif = nw_info[i]
net = all_net_map[expected_net_labels[i]]
# simple test assumes that each starting prefix is unique
expected_v4_cidr_start = net['cidr'].split(".")[0].lower()
expected_v6_cidr_start = net['cidr_v6'].split(":")[0].lower()
for subnet in vif['network']['subnets']:
addr = subnet['ips'][0]['address']
if subnet['version'] == 4:
address_start = addr.split(".")[0].lower()
                    self.assertEqual(expected_v4_cidr_start, address_start)
else:
address_start = addr.split(":")[0].lower()
                    self.assertEqual(expected_v6_cidr_start, address_start)
# confirm that there is a DHCP device on corresponding net
for l in expected_net_labels:
n = all_net_map[l]
tenant_id = (n['project_id'] or
FLAGS.quantum_default_tenant_id)
ports = self.net_man.q_conn.get_attached_ports(
tenant_id, n['uuid'])
self.assertEquals(len(ports), 2) # gw + instance VIF
# make sure we aren't allowed to delete network with
# active port
self.assertRaises(exception.NetworkBusy,
self.net_man.delete_network,
ctx, None, n['uuid'])
def _check_vifs(self, expect_num_vifs):
ctx = context.RequestContext('user1', "").elevated()
self.assertEqual(len(db.virtual_interface_get_all(ctx)),
expect_num_vifs)
def _allocate_and_deallocate_instance(self, project_id, requested_networks,
expected_labels):
ctx = context.RequestContext('user1', project_id)
self._check_vifs(0)
instance_ref = db.instance_create(ctx,
{"project_id": project_id})
nw_info = self.net_man.allocate_for_instance(ctx.elevated(),
instance_id=instance_ref['id'], host="",
rxtx_factor=3,
project_id=project_id,
requested_networks=requested_networks)
self._check_vifs(len(nw_info))
self._validate_nw_info(nw_info, expected_labels)
nw_info = self.net_man.get_instance_nw_info(ctx, instance_ref['id'],
instance_ref['uuid'],
instance_ref['instance_type_id'], "",
project_id=project_id)
self._check_vifs(len(nw_info))
self._validate_nw_info(nw_info, expected_labels)
port_net_pairs = []
for vif in nw_info:
nid = vif['network']['id']
pid = self.net_man.q_conn.get_port_by_attachment(
project_id, nid, vif['id'])
if pid is None:
pid = self.net_man.q_conn.get_port_by_attachment(
FLAGS.quantum_default_tenant_id,
nid, vif['id'])
self.assertTrue(pid is not None)
port_net_pairs.append((pid, nid))
self.net_man.deallocate_for_instance(ctx,
instance_id=instance_ref['id'],
project_id=project_id)
for pid, nid in port_net_pairs:
self.assertRaises(quantum_client.QuantumNotFoundException,
self.net_man.q_conn.detach_and_delete_port,
project_id, nid, pid)
self.assertRaises(quantum_client.QuantumNotFoundException,
self.net_man.q_conn.detach_and_delete_port,
FLAGS.quantum_default_tenant_id, nid, pid)
self._check_vifs(0)
def test_allocate_and_deallocate_instance_static(self):
self._create_nets()
self._allocate_and_deallocate_instance("fake_project1", None,
['public', 'project1-net1'])
self._delete_nets()
def test_allocate_and_deallocate_instance_dynamic(self):
self._create_nets()
project_id = "fake_project2"
ctx = context.RequestContext('user1', project_id)
all_valid_networks = self.net_man.ipam.get_project_and_global_net_ids(
ctx, project_id)
requested_networks = [(n[0], None) for n in all_valid_networks]
self.net_man.validate_networks(ctx, requested_networks)
label_map = {}
for n in db.network_get_all(ctx.elevated()):
label_map[n['uuid']] = n['label']
expected_labels = [label_map[uid] for uid, _i in requested_networks]
self._allocate_and_deallocate_instance(project_id, requested_networks,
expected_labels)
self._delete_nets()
def test_validate_bad_network(self):
ctx = context.RequestContext('user1', 'fake_project1')
self.assertRaises(exception.NetworkNotFound,
self.net_man.validate_networks, ctx, [("", None)])
def test_create_net_external_uuid(self):
"""Tests use case where network can be created directly via
Quantum API, then the UUID is passed in via nova-manage"""
project_id = "foo_project"
ctx = context.RequestContext('user1', project_id)
net_id = self.net_man.q_conn.create_network(project_id, 'net1')
self.net_man.create_networks(
ctx,
label='achtungbaby',
cidr="9.9.9.0/24",
multi_host=False,
num_networks=1,
network_size=256,
cidr_v6=None,
gateway="9.9.9.1",
gateway_v6=None,
bridge=None,
bridge_interface=None,
dns1="8.8.8.8",
project_id=project_id,
priority=9,
uuid=net_id)
net = db.network_get_by_uuid(ctx.elevated(), net_id)
self.assertTrue(net is not None)
self.assertEquals(net['uuid'], net_id)
def test_create_net_external_uuid_and_host_is_set(self):
"""Make sure network['host'] is set when creating a network via the
network manager"""
project_id = "foo_project"
ctx = context.RequestContext('user1', project_id)
net_id = self.net_man.q_conn.create_network(project_id, 'net2')
self.net_man.create_networks(
ctx, label='achtungbaby2', cidr="9.9.8.0/24", multi_host=False,
num_networks=1, network_size=256, cidr_v6=None,
gateway="9.9.8.1", gateway_v6=None, bridge=None,
bridge_interface=None, dns1="8.8.8.8", project_id=project_id,
priority=8, uuid=net_id)
net = db.network_get_by_uuid(ctx.elevated(), net_id)
self.assertTrue(net is not None)
self.assertEquals(net['uuid'], net_id)
        self.assertTrue(net['host'] is not None)
class QuantumNovaMACGenerationTestCase(QuantumNovaTestCase):
def test_local_mac_address_creation(self):
self.flags(use_melange_mac_generation=False)
fake_mac = "ab:cd:ef:ab:cd:ef"
self.stubs.Set(utils, "generate_mac_address",
lambda: fake_mac)
project_id = "fake_project1"
ctx = context.RequestContext('user1', project_id)
self._create_network(networks[0])
all_valid_networks = self.net_man.ipam.get_project_and_global_net_ids(
ctx, project_id)
requested_networks = [(n[0], None) for n in all_valid_networks]
instance_ref = db.api.instance_create(ctx,
{"project_id": project_id})
nw_info = self.net_man.allocate_for_instance(ctx,
instance_id=instance_ref['id'], host="",
rxtx_factor=3,
project_id=project_id,
requested_networks=requested_networks)
self.assertEqual(nw_info[0]['address'], fake_mac)
def test_melange_mac_address_creation(self):
self.flags(use_melange_mac_generation=True)
fake_mac = "ab:cd:ef:ab:cd:ef"
self.stubs.Set(melange_connection.MelangeConnection, "create_vif",
lambda w, x, y, z: fake_mac)
project_id = "fake_project1"
ctx = context.RequestContext('user1', project_id)
self._create_network(networks[0])
all_valid_networks = self.net_man.ipam.get_project_and_global_net_ids(
ctx, project_id)
requested_networks = [(n[0], None) for n in all_valid_networks]
instance_ref = db.api.instance_create(ctx,
{"project_id": project_id})
nw_info = self.net_man.allocate_for_instance(ctx,
instance_id=instance_ref['id'], host="",
rxtx_factor=3,
project_id=project_id,
requested_networks=requested_networks)
self.assertEqual(nw_info[0]['address'], fake_mac)
class QuantumNovaPortSecurityTestCase(QuantumNovaTestCase):
    def test_port_security(self):
self.flags(use_melange_mac_generation=True)
self.flags(quantum_use_port_security=True)
fake_mac = "ab:cd:ef:ab:cd:ef"
self.stubs.Set(melange_connection.MelangeConnection, "create_vif",
lambda w, x, y, z: fake_mac)
project_id = "fake_project1"
ctx = context.RequestContext('user1', project_id)
self._create_network(networks[0])
all_valid_networks = self.net_man.ipam.get_project_and_global_net_ids(
ctx, project_id)
requested_networks = [(n[0], None) for n in all_valid_networks]
instance_ref = db.api.instance_create(ctx,
{"project_id": project_id})
oldfunc = self.net_man.q_conn.create_and_attach_port
# Make sure we get the appropriate mac set in allowed_address_pairs
# if port security is enabled.
def _instrumented_create_and_attach_port(tenant_id, net_id,
interface_id, **kwargs):
self.assertTrue('allowed_address_pairs' in kwargs.keys())
pairs = kwargs['allowed_address_pairs']
self.assertTrue(pairs[0]['mac_address'] == fake_mac)
self.net_man.q_conn.create_and_attach_port = oldfunc
return oldfunc(tenant_id, net_id, interface_id, **kwargs)
_port_attach = _instrumented_create_and_attach_port
self.net_man.q_conn.create_and_attach_port = _port_attach
nw_info = self.net_man.allocate_for_instance(ctx,
instance_id=instance_ref['id'], host="",
rxtx_factor=3,
project_id=project_id,
requested_networks=requested_networks)
self.assertEqual(nw_info[0]['address'], fake_mac)
    def test_port_security_negative(self):
self.flags(use_melange_mac_generation=True)
self.flags(quantum_use_port_security=False)
fake_mac = "ab:cd:ef:ab:cd:ef"
self.stubs.Set(melange_connection.MelangeConnection, "create_vif",
lambda w, x, y, z: fake_mac)
project_id = "fake_project1"
ctx = context.RequestContext('user1', project_id)
self._create_network(networks[0])
all_valid_networks = self.net_man.ipam.get_project_and_global_net_ids(
ctx, project_id)
requested_networks = [(n[0], None) for n in all_valid_networks]
instance_ref = db.api.instance_create(ctx,
{"project_id": project_id})
oldfunc = self.net_man.q_conn.create_and_attach_port
# Make sure no pairs are passed in if port security is turned off
def _instrumented_create_and_attach_port(tenant_id, net_id,
interface_id, **kwargs):
self.assertTrue('allowed_address_pairs' in kwargs.keys())
pairs = kwargs['allowed_address_pairs']
self.assertTrue(len(pairs) == 0)
self.net_man.q_conn.create_and_attach_port = oldfunc
return oldfunc(tenant_id, net_id, interface_id, **kwargs)
_port_attach = _instrumented_create_and_attach_port
self.net_man.q_conn.create_and_attach_port = _port_attach
nw_info = self.net_man.allocate_for_instance(ctx,
instance_id=instance_ref['id'], host="",
rxtx_factor=3,
project_id=project_id,
requested_networks=requested_networks)
self.assertEqual(nw_info[0]['address'], fake_mac)
class QuantumMelangeTestCase(test.TestCase):
def setUp(self):
super(QuantumMelangeTestCase, self).setUp()
fc = fake_client.FakeClient(LOG)
qc = quantum_connection.QuantumClientConnection(client=fc)
self.net_man = quantum_manager.QuantumManager(
ipam_lib="nova.network.quantum.nova_ipam_lib",
q_conn=qc)
def test_get_instance_uuids_by_ip_filter(self):
fake_context = context.RequestContext('user', 'project')
address = '1.2.3.4'
filters = {'ip': address}
self.net_man.ipam = self.mox.CreateMockAnything()
self.net_man.ipam.get_instance_ids_by_ip_address(fake_context,
address).AndReturn(['instance_id'])
instance = self.mox.CreateMockAnything()
instance.uuid = 'instance_uuid'
self.mox.StubOutWithMock(db, 'instance_get')
db.instance_get(fake_context, 'instance_id').AndReturn(instance)
self.mox.ReplayAll()
uuids = self.net_man.get_instance_uuids_by_ip_filter(fake_context,
filters)
self.assertEquals(uuids, [{'instance_uuid':'instance_uuid'}])
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for barrier ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
class BarrierTest(test.TestCase):
def testConstructorWithShapes(self):
with ops.Graph().as_default():
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((1, 2, 3), (8,)),
shared_name="B",
name="B")
self.assertTrue(isinstance(b.barrier_ref, ops.Tensor))
self.assertProtoEquals("""
name:'B' op:'Barrier'
attr {
key: "capacity"
value {
i: -1
}
}
attr { key: 'component_types'
value { list { type: DT_FLOAT type: DT_FLOAT } } }
attr {
key: 'shapes'
value {
list {
shape {
dim { size: 1 } dim { size: 2 } dim { size: 3 }
}
shape {
dim { size: 8 }
}
}
}
}
attr { key: 'container' value { s: "" } }
attr { key: 'shared_name' value: { s: 'B' } }
""", b.barrier_ref.op.node_def)
@test_util.run_deprecated_v1
def testInsertMany(self):
with self.cached_session():
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
self.assertEqual([], size_t.get_shape())
keys = [b"a", b"b", b"c"]
insert_0_op = b.insert_many(0, keys, [10.0, 20.0, 30.0])
insert_1_op = b.insert_many(1, keys, [100.0, 200.0, 300.0])
self.assertEquals(size_t.eval(), [0])
insert_0_op.run()
self.assertEquals(size_t.eval(), [0])
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
def testInsertManyEmptyTensor(self):
with self.cached_session():
error_message = ("Empty tensors are not supported, but received shape "
r"\'\(0,\)\' at index 1")
with self.assertRaisesRegexp(ValueError, error_message):
data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((1,), (0,)), name="B")
@test_util.run_deprecated_v1
def testInsertManyEmptyTensorUnknown(self):
with self.cached_session():
b = data_flow_ops.Barrier((dtypes.float32, dtypes.float32), name="B")
size_t = b.ready_size()
self.assertEqual([], size_t.get_shape())
keys = [b"a", b"b", b"c"]
insert_0_op = b.insert_many(0, keys, np.array([[], [], []], np.float32))
self.assertEquals(size_t.eval(), [0])
with self.assertRaisesOpError(
".*Tensors with no elements are not supported.*"):
insert_0_op.run()
@test_util.run_deprecated_v1
def testTakeMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
values_1 = [100.0, 200.0, 300.0]
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
take_t = b.take_many(3)
insert_0_op.run()
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
for k, v0, v1 in zip(keys, values_0, values_1):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
@test_util.run_deprecated_v1
def testTakeManySmallBatch(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
size_i = b.incomplete_size()
keys = [b"a", b"b", b"c", b"d"]
values_0 = [10.0, 20.0, 30.0, 40.0]
values_1 = [100.0, 200.0, 300.0, 400.0]
insert_0_op = b.insert_many(0, keys, values_0)
# Split adding of the second component into two independent operations.
# After insert_1_1_op, we'll have two ready elements in the barrier,
# 2 will still be incomplete.
insert_1_1_op = b.insert_many(1, keys[0:2], values_1[0:2]) # add "a", "b"
insert_1_2_op = b.insert_many(1, keys[2:3], values_1[2:3]) # add "c"
insert_1_3_op = b.insert_many(1, keys[3:], values_1[3:]) # add "d"
insert_empty_op = b.insert_many(0, [], [])
close_op = b.close()
close_op_final = b.close(cancel_pending_enqueues=True)
index_t, key_t, value_list_t = b.take_many(3, allow_small_batch=True)
insert_0_op.run()
insert_1_1_op.run()
close_op.run()
# Now we have a closed barrier with 2 ready elements. Running take_t
# should return a reduced batch with 2 elements only.
self.assertEquals(size_i.eval(), [2]) # assert that incomplete size = 2
self.assertEquals(size_t.eval(), [2]) # assert that ready size = 2
_, keys_val, values_0_val, values_1_val = sess.run(
[index_t, key_t, value_list_t[0], value_list_t[1]])
# Check that correct values have been returned.
for k, v0, v1 in zip(keys[0:2], values_0[0:2], values_1[0:2]):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# The next insert completes the element with key "c". The next take_t
# should return a batch with just 1 element.
insert_1_2_op.run()
self.assertEquals(size_i.eval(), [1]) # assert that incomplete size = 1
self.assertEquals(size_t.eval(), [1]) # assert that ready size = 1
_, keys_val, values_0_val, values_1_val = sess.run(
[index_t, key_t, value_list_t[0], value_list_t[1]])
# Check that correct values have been returned.
for k, v0, v1 in zip(keys[2:3], values_0[2:3], values_1[2:3]):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# Adding nothing ought to work, even if the barrier is closed.
insert_empty_op.run()
      # Keys "a" and "b" have already been taken from the barrier; adding
      # them again after it has been closed ought to cause a failure.
with self.assertRaisesOpError("is closed"):
insert_1_1_op.run()
close_op_final.run()
# These ops should fail because the barrier has now been closed with
# cancel_pending_enqueues = True.
with self.assertRaisesOpError("is closed"):
insert_empty_op.run()
with self.assertRaisesOpError("is closed"):
insert_1_3_op.run()
@test_util.run_deprecated_v1
def testUseBarrierWithShape(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((2, 2), (8,)), name="B")
size_t = b.ready_size()
keys = [b"a", b"b", b"c"]
values_0 = np.array(
[[[10.0] * 2] * 2, [[20.0] * 2] * 2, [[30.0] * 2] * 2], np.float32)
values_1 = np.array([[100.0] * 8, [200.0] * 8, [300.0] * 8], np.float32)
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
take_t = b.take_many(3)
insert_0_op.run()
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
self.assertShapeEqual(keys_val, take_t[1])
self.assertShapeEqual(values_0_val, take_t[2][0])
self.assertShapeEqual(values_1_val, take_t[2][1])
for k, v0, v1 in zip(keys, values_0, values_1):
idx = keys_val.tolist().index(k)
self.assertAllEqual(values_0_val[idx], v0)
self.assertAllEqual(values_1_val[idx], v1)
@test_util.run_deprecated_v1
def testParallelInsertMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(dtypes.float32, shapes=())
size_t = b.ready_size()
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_ops = [b.insert_many(0, [k], [v]) for k, v in zip(keys, values)]
take_t = b.take_many(10)
self.evaluate(insert_ops)
self.assertEquals(size_t.eval(), [10])
indices_val, keys_val, values_val = sess.run(
[take_t[0], take_t[1], take_t[2][0]])
self.assertAllEqual(indices_val, [-2**63 + x for x in range(10)])
for k, v in zip(keys, values):
idx = keys_val.tolist().index(k)
self.assertEqual(values_val[idx], v)
@test_util.run_deprecated_v1
def testParallelTakeMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(dtypes.float32, shapes=())
size_t = b.ready_size()
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_op = b.insert_many(0, keys, values)
take_t = [b.take_many(1) for _ in keys]
insert_op.run()
self.assertEquals(size_t.eval(), [10])
index_fetches = []
key_fetches = []
value_fetches = []
for ix_t, k_t, v_t in take_t:
index_fetches.append(ix_t)
key_fetches.append(k_t)
value_fetches.append(v_t[0])
vals = sess.run(index_fetches + key_fetches + value_fetches)
index_vals = vals[:len(keys)]
key_vals = vals[len(keys):2 * len(keys)]
value_vals = vals[2 * len(keys):]
taken_elems = []
for k, v in zip(key_vals, value_vals):
taken_elems.append((k[0], v[0]))
self.assertAllEqual(np.hstack(index_vals), [-2**63] * 10)
self.assertItemsEqual(
zip(keys, values), [(k[0], v[0]) for k, v in zip(key_vals, value_vals)])
@test_util.run_deprecated_v1
def testBlockingTakeMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(dtypes.float32, shapes=())
keys = [str(x).encode("ascii") for x in range(10)]
values = [float(x) for x in range(10)]
insert_ops = [b.insert_many(0, [k], [v]) for k, v in zip(keys, values)]
take_t = b.take_many(10)
def take():
indices_val, keys_val, values_val = sess.run(
[take_t[0], take_t[1], take_t[2][0]])
self.assertAllEqual(indices_val,
[int(x.decode("ascii")) - 2**63 for x in keys_val])
self.assertItemsEqual(zip(keys, values), zip(keys_val, values_val))
t = self.checkedThread(target=take)
t.start()
time.sleep(0.1)
for insert_op in insert_ops:
insert_op.run()
t.join()
@test_util.run_deprecated_v1
def testParallelInsertManyTakeMany(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 100
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i)
for i in range(num_iterations)
]
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i)
for i in range(num_iterations)
]
take_ops = [b.take_many(10) for _ in range(num_iterations)]
def take(sess, i, taken):
indices_val, keys_val, values_0_val, values_1_val = sess.run([
take_ops[i][0], take_ops[i][1], take_ops[i][2][0], take_ops[i][2][1]
])
taken.append({
"indices": indices_val,
"keys": keys_val,
"values_0": values_0_val,
"values_1": values_1_val
})
def insert(sess, i):
sess.run([insert_0_ops[i], insert_1_ops[i]])
taken = []
take_threads = [
self.checkedThread(
target=take, args=(sess, i, taken)) for i in range(num_iterations)
]
insert_threads = [
self.checkedThread(
target=insert, args=(sess, i)) for i in range(num_iterations)
]
for t in take_threads:
t.start()
time.sleep(0.1)
for t in insert_threads:
t.start()
for t in take_threads:
t.join()
for t in insert_threads:
t.join()
self.assertEquals(len(taken), num_iterations)
flatten = lambda l: [item for sublist in l for item in sublist]
all_indices = sorted(flatten([t_i["indices"] for t_i in taken]))
all_keys = sorted(flatten([t_i["keys"] for t_i in taken]))
expected_keys = sorted(
flatten([keys_i(i) for i in range(num_iterations)]))
expected_indices = sorted(
flatten([-2**63 + j] * 10 for j in range(num_iterations)))
self.assertAllEqual(all_indices, expected_indices)
self.assertAllEqual(all_keys, expected_keys)
for taken_i in taken:
outer_indices_from_keys = np.array(
[int(k.decode("ascii").split(":")[0]) for k in taken_i["keys"]])
inner_indices_from_keys = np.array(
[int(k.decode("ascii").split(":")[1]) for k in taken_i["keys"]])
self.assertAllEqual(taken_i["values_0"],
outer_indices_from_keys + inner_indices_from_keys)
expected_values_1 = np.vstack(
(1 + outer_indices_from_keys + inner_indices_from_keys,
2 + outer_indices_from_keys + inner_indices_from_keys)).T
self.assertAllEqual(taken_i["values_1"], expected_values_1)
@test_util.run_deprecated_v1
def testClose(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
incomplete_t = b.incomplete_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
values_1 = [100.0, 200.0, 300.0]
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys, values_1)
close_op = b.close()
fail_insert_op = b.insert_many(0, ["f"], [60.0])
take_t = b.take_many(3)
take_too_many_t = b.take_many(4)
self.assertEquals(size_t.eval(), [0])
self.assertEquals(incomplete_t.eval(), [0])
insert_0_op.run()
self.assertEquals(size_t.eval(), [0])
self.assertEquals(incomplete_t.eval(), [3])
close_op.run()
# This op should fail because the barrier is closed.
with self.assertRaisesOpError("is closed"):
fail_insert_op.run()
# This op should succeed because the barrier has not canceled
# pending enqueues
insert_1_op.run()
self.assertEquals(size_t.eval(), [3])
self.assertEquals(incomplete_t.eval(), [0])
# This op should fail because the barrier is closed.
with self.assertRaisesOpError("is closed"):
fail_insert_op.run()
# This op should fail because we requested more elements than are
# available in incomplete + ready queue.
with self.assertRaisesOpError(r"is closed and has insufficient elements "
r"\(requested 4, total size 3\)"):
sess.run(take_too_many_t[0]) # Sufficient to request just the indices
# This op should succeed because there are still completed elements
# to process.
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 3)
for k, v0, v1 in zip(keys, values_0, values_1):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# This op should fail because there are no more completed elements and
# the queue is closed.
with self.assertRaisesOpError("is closed and has insufficient elements"):
sess.run(take_t[0])
@test_util.run_deprecated_v1
def testCancel(self):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
size_t = b.ready_size()
incomplete_t = b.incomplete_size()
keys = [b"a", b"b", b"c"]
values_0 = [10.0, 20.0, 30.0]
values_1 = [100.0, 200.0, 300.0]
insert_0_op = b.insert_many(0, keys, values_0)
insert_1_op = b.insert_many(1, keys[0:2], values_1[0:2])
insert_2_op = b.insert_many(1, keys[2:], values_1[2:])
cancel_op = b.close(cancel_pending_enqueues=True)
fail_insert_op = b.insert_many(0, ["f"], [60.0])
take_t = b.take_many(2)
take_too_many_t = b.take_many(3)
self.assertEquals(size_t.eval(), [0])
insert_0_op.run()
insert_1_op.run()
self.assertEquals(size_t.eval(), [2])
self.assertEquals(incomplete_t.eval(), [1])
cancel_op.run()
# This op should fail because the queue is closed.
with self.assertRaisesOpError("is closed"):
fail_insert_op.run()
# This op should fail because the queue is canceled.
with self.assertRaisesOpError("is closed"):
insert_2_op.run()
# This op should fail because we requested more elements than are
# available in incomplete + ready queue.
with self.assertRaisesOpError(r"is closed and has insufficient elements "
r"\(requested 3, total size 2\)"):
sess.run(take_too_many_t[0]) # Sufficient to request just the indices
# This op should succeed because there are still completed elements
# to process.
indices_val, keys_val, values_0_val, values_1_val = sess.run(
[take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
self.assertAllEqual(indices_val, [-2**63] * 2)
for k, v0, v1 in zip(keys[0:2], values_0[0:2], values_1[0:2]):
idx = keys_val.tolist().index(k)
self.assertEqual(values_0_val[idx], v0)
self.assertEqual(values_1_val[idx], v1)
# This op should fail because there are no more completed elements and
# the queue is closed.
with self.assertRaisesOpError("is closed and has insufficient elements"):
sess.run(take_t[0])
def _testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(self, cancel):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), name="B")
take_t = b.take_many(1, allow_small_batch=True)
self.evaluate(b.close(cancel))
with self.assertRaisesOpError("is closed and has insufficient elements"):
self.evaluate(take_t)
@test_util.run_deprecated_v1
def testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(self):
self._testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(cancel=False)
self._testClosedEmptyBarrierTakeManyAllowSmallBatchRaises(cancel=True)
def _testParallelInsertManyTakeManyCloseHalfwayThrough(self, cancel):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 50
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(0, keys_i(i), values_0 + i)
for i in range(num_iterations)
]
insert_1_ops = [
b.insert_many(1, keys_i(i), values_1 + i)
for i in range(num_iterations)
]
take_ops = [b.take_many(10) for _ in range(num_iterations)]
close_op = b.close(cancel_pending_enqueues=cancel)
def take(sess, i, taken):
try:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run([
take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
take_ops[i][2][1]
])
taken.append(len(indices_val))
except errors_impl.OutOfRangeError:
taken.append(0)
def insert(sess, i):
try:
sess.run([insert_0_ops[i], insert_1_ops[i]])
except errors_impl.CancelledError:
pass
taken = []
take_threads = [
self.checkedThread(
target=take, args=(sess, i, taken)) for i in range(num_iterations)
]
insert_threads = [
self.checkedThread(
target=insert, args=(sess, i)) for i in range(num_iterations)
]
first_half_insert_threads = insert_threads[:num_iterations // 2]
second_half_insert_threads = insert_threads[num_iterations // 2:]
for t in take_threads:
t.start()
for t in first_half_insert_threads:
t.start()
for t in first_half_insert_threads:
t.join()
close_op.run()
for t in second_half_insert_threads:
t.start()
for t in take_threads:
t.join()
for t in second_half_insert_threads:
t.join()
self.assertEqual(
sorted(taken),
[0] * (num_iterations // 2) + [10] * (num_iterations // 2))
@test_util.run_deprecated_v1
def testParallelInsertManyTakeManyCloseHalfwayThrough(self):
self._testParallelInsertManyTakeManyCloseHalfwayThrough(cancel=False)
@test_util.run_deprecated_v1
def testParallelInsertManyTakeManyCancelHalfwayThrough(self):
self._testParallelInsertManyTakeManyCloseHalfwayThrough(cancel=True)
def _testParallelPartialInsertManyTakeManyCloseHalfwayThrough(self, cancel):
with self.cached_session() as sess:
b = data_flow_ops.Barrier(
(dtypes.float32, dtypes.int64), shapes=((), (2,)))
num_iterations = 100
keys = [str(x) for x in range(10)]
values_0 = np.asarray(range(10), dtype=np.float32)
values_1 = np.asarray([[x + 1, x + 2] for x in range(10)], dtype=np.int64)
keys_i = lambda i: [("%d:%s" % (i, k)).encode("ascii") for k in keys]
insert_0_ops = [
b.insert_many(
0, keys_i(i), values_0 + i, name="insert_0_%d" % i)
for i in range(num_iterations)
]
close_op = b.close(cancel_pending_enqueues=cancel)
take_ops = [
b.take_many(
10, name="take_%d" % i) for i in range(num_iterations)
]
# insert_1_ops will only run after closure
insert_1_ops = [
b.insert_many(
1, keys_i(i), values_1 + i, name="insert_1_%d" % i)
for i in range(num_iterations)
]
def take(sess, i, taken):
if cancel:
try:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run(
[
take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
take_ops[i][2][1]
])
taken.append(len(indices_val))
except errors_impl.OutOfRangeError:
taken.append(0)
else:
indices_val, unused_keys_val, unused_val_0, unused_val_1 = sess.run([
take_ops[i][0], take_ops[i][1], take_ops[i][2][0],
take_ops[i][2][1]
])
taken.append(len(indices_val))
def insert_0(sess, i):
insert_0_ops[i].run(session=sess)
def insert_1(sess, i):
if cancel:
try:
insert_1_ops[i].run(session=sess)
except errors_impl.CancelledError:
pass
else:
insert_1_ops[i].run(session=sess)
taken = []
take_threads = [
self.checkedThread(
target=take, args=(sess, i, taken)) for i in range(num_iterations)
]
insert_0_threads = [
self.checkedThread(
target=insert_0, args=(sess, i)) for i in range(num_iterations)
]
insert_1_threads = [
self.checkedThread(
target=insert_1, args=(sess, i)) for i in range(num_iterations)
]
for t in insert_0_threads:
t.start()
for t in insert_0_threads:
t.join()
for t in take_threads:
t.start()
close_op.run()
for t in insert_1_threads:
t.start()
for t in take_threads:
t.join()
for t in insert_1_threads:
t.join()
if cancel:
self.assertEqual(taken, [0] * num_iterations)
else:
self.assertEqual(taken, [10] * num_iterations)
@test_util.run_deprecated_v1
def testParallelPartialInsertManyTakeManyCloseHalfwayThrough(self):
self._testParallelPartialInsertManyTakeManyCloseHalfwayThrough(cancel=False)
@test_util.run_deprecated_v1
def testParallelPartialInsertManyTakeManyCancelHalfwayThrough(self):
self._testParallelPartialInsertManyTakeManyCloseHalfwayThrough(cancel=True)
@test_util.run_deprecated_v1
def testIncompatibleSharedBarrierErrors(self):
with self.cached_session():
# Do component types and shapes.
b_a_1 = data_flow_ops.Barrier(
(dtypes.float32,), shapes=(()), shared_name="b_a")
b_a_2 = data_flow_ops.Barrier(
(dtypes.int32,), shapes=(()), shared_name="b_a")
b_a_1.barrier_ref.eval()
with self.assertRaisesOpError("component types"):
b_a_2.barrier_ref.eval()
b_b_1 = data_flow_ops.Barrier(
(dtypes.float32,), shapes=(()), shared_name="b_b")
b_b_2 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.int32), shapes=((), ()), shared_name="b_b")
b_b_1.barrier_ref.eval()
with self.assertRaisesOpError("component types"):
b_b_2.barrier_ref.eval()
b_c_1 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_c")
b_c_2 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shared_name="b_c")
b_c_1.barrier_ref.eval()
with self.assertRaisesOpError("component shapes"):
b_c_2.barrier_ref.eval()
b_d_1 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32), shapes=((), ()), shared_name="b_d")
b_d_2 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_d")
b_d_1.barrier_ref.eval()
with self.assertRaisesOpError("component shapes"):
b_d_2.barrier_ref.eval()
b_e_1 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((2, 2), (8,)),
shared_name="b_e")
b_e_2 = data_flow_ops.Barrier(
(dtypes.float32, dtypes.float32),
shapes=((2, 5), (8,)),
shared_name="b_e")
b_e_1.barrier_ref.eval()
with self.assertRaisesOpError("component shapes"):
b_e_2.barrier_ref.eval()
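# Illustrative sketch (not part of the original tests): the minimal Barrier
# lifecycle exercised above, using the TF1 graph/session API.  Every
# component must be inserted for a key before take_many() will release it.
def _demo_barrier():
  from tensorflow.python.client import session as session_lib
  with ops.Graph().as_default():
    b = data_flow_ops.Barrier((dtypes.float32,), shapes=((),))
    insert_op = b.insert_many(0, [b"k1", b"k2"], [1.0, 2.0])
    take_t = b.take_many(2)
    with session_lib.Session() as sess:
      insert_op.run(session=sess)
      # take_t is (indices, keys, [values for each component]).
      return sess.run([take_t[1], take_t[2][0]])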
if __name__ == "__main__":
test.main()
|
|
'''
matrix2tree.py - build tree from a distance matrix
==================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python matrix2tree.py --help
Type::
python matrix2tree.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import string
import re
import getopt
import time
import optparse
import math
import tempfile
import subprocess
import random
from types import *
import CGAT.Experiment as E
import CGAT.TreeTools as TreeTools
import CGAT.Tree as Tree
import CGAT.IOTools as IOTools
import CGAT.WrapperPhylip as WrapperPhylip
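# Illustrative note (not part of the original script): the expected input is
# a phylip-style distance matrix, a taxon count followed by one row per
# taxon, for example:
#
#     3
#     seq1 0.0 0.2 0.4
#     seq2 0.2 0.0 0.3
#     seq3 0.4 0.3 0.0
#
# With --replicates, each distance is followed by its replicate count, which
# is why the parsing code below indexes interleaved columns (col * 2).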
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: matrix2tree.py 2782 2009-09-10 11:40:29Z andreas $")
parser.add_option("-i", "--invert-map", dest="invert_map", action="store_true",
help="""invert map.""")
parser.add_option("--input-format", dest="input_format", type="choice",
choices=("phylip", "full"),
help="""input format.""")
parser.add_option("-t", "--tree-nh-file", dest="filename_tree", type="string",
help="""filename with tree to fit.""")
parser.add_option("-m", "--method", dest="method", type="choice",
choices=("nj", "kitsch", "fitch"),
help="""algorithm to run.""")
parser.add_option("-e", "--replicates", dest="replicates", action="store_true",
help="replicates.")
parser.add_option("-r", "--root", dest="root", action="store_true",
help="midpoint root (if it is not rooted).")
parser.add_option("-u", "--unroot", dest="unroot", action="store_true",
help="unroot tree (if it is rooted).")
parser.add_option("--skip-separators", dest="write_separators", action="store_false",
help="do not echo separators (starting with >)")
# parser.add_option("-i", "--iterations", dest="iterations", type="int",
# help="number of iterations." )
parser.add_option("-p", "--power", dest="power", type="float",
help="power.")
parser.add_option("--prune-tree", dest="prune_tree", action="store_true",
help="prune tree such to include only taxa which are part of the input matrix.")
parser.add_option("--add-random", dest="add_random", action="store_true",
help="add small random value to off-diagonal zero elements in matrix.")
parser.add_option("--pseudo-replicates", dest="pseudo_replicates", action="store_true",
help="add small random value to off-diagonal zero elements in matrix, even if they have no replicates.")
parser.add_option("--debug", dest="debug", action="store_true",
help="dump debug information.")
parser.set_defaults(
value=0,
method="nj",
input_format="phylip",
filename_tree=None,
outgroup=None,
replicates=False,
root=False,
unroot=False,
power=0,
write_separators=True,
prune_tree=False,
add_random=False,
debug=False,
)
(options, args) = E.Start(parser, add_pipe_options=True)
phylip = WrapperPhylip.Phylip()
if options.debug:
phylip.setLogLevel(options.loglevel)
phylip.setPruneTree(options.prune_tree)
lines = filter(lambda x: x[0] != "#", sys.stdin.readlines())
chunks = filter(lambda x: lines[x][0] == ">", range(len(lines)))
if not chunks:
options.write_separators = False
chunks = [-1]
chunks.append(len(lines))
for x in range(len(chunks) - 1):
matrix = lines[chunks[x] + 1:chunks[x + 1]]
# parse phylip matrix
if options.add_random:
mm = []
ids = []
for l in range(1, len(matrix)):
values = re.split("\s+", matrix[l][:-1])
ids.append(values[0])
mm.append(map(lambda x: x.strip(), values[1:]))
d = len(mm)
if options.replicates:
for row in range(d - 1):
for col in range(row + 1, d):
cc = col * 2
rr = row * 2
if mm[row][cc] == "0" and mm[row][cc + 1] != "0":
mm[row][cc + 1] = "1"
mm[col][rr + 1] = "1"
v = str(random.random() / 10000.0)
mm[row][cc] = v
mm[col][rr] = v
else:
for row in range(d - 1):
for col in range(row + 1, d):
if mm[row][col] == "0":
v = str(random.random() / 10000.0)
mm[row][col] = v
mm[col][row] = v
matrix = ["%i\n" % d]
for row in range(d):
matrix.append(ids[row] + " " + " ".join(mm[row]) + "\n")
# parse phylip matrix
if options.pseudo_replicates:
mm = []
ids = []
for l in range(1, len(matrix)):
values = re.split("\s+", matrix[l][:-1])
ids.append(values[0])
mm.append(map(lambda x: x.strip(), values[1:]))
d = len(mm)
if options.replicates:
for row in range(d - 1):
for col in range(row + 1, d):
cc = col * 2
rr = row * 2
if mm[row][cc + 1] == "0":
mm[row][cc + 1] = "1"
mm[col][rr + 1] = "1"
v = str(random.random() / 10000.0)
mm[row][cc] = v
mm[col][rr] = v
else:
mm[row][cc + 1] = "100"
mm[col][rr + 1] = "100"
else:
for row in range(d - 1):
for col in range(row + 1, d):
if mm[row][col] == "0":
v = str(random.random() / 10000.0)
mm[row][col] = v
mm[col][row] = v
matrix = ["%i\n" % d]
for row in range(d):
matrix.append(ids[row] + " " + " ".join(mm[row]) + "\n")
phylip.setMatrix(matrix)
phylip_options = []
if options.filename_tree:
nexus = TreeTools.Newick2Nexus(open(options.filename_tree, "r"))
ref_tree = nexus.trees[0]
phylip.setTree(ref_tree)
phylip_options.append("U")
else:
ref_tree = None
if options.method == "nj":
phylip.setProgram("neighbor")
elif options.method == "fitch":
phylip.setProgram("fitch")
elif options.method == "kitsch":
phylip.setProgram("kitsch")
if options.replicates:
phylip_options.append("S")
if options.power > 0:
phylip_options.append("P")
phylip_options.append("%f" % options.power)
phylip_options.append("Y")
phylip.setOptions(phylip_options)
result = phylip.run()
# root with outgroup
if options.root:
if options.outgroup:
pass
# midpoint root
else:
for tree in result.mNexus.trees:
tree.root_midpoint()
        # explicitly unroot
elif options.unroot:
phylip.setOptions(("Y", "W", "U", "Q"))
phylip.setProgram("retree")
for x in range(len(result.mNexus.trees)):
phylip.setTree(result.mNexus.trees[x])
xresult = phylip.run()
result.mNexus.trees[x] = xresult.mNexus.trees[0]
if options.write_separators:
options.stdout.write(lines[chunks[x]])
if result.mNexus:
options.stdout.write(TreeTools.Nexus2Newick(result.mNexus) + "\n")
if options.loglevel >= 1:
if ref_tree:
nref = len(ref_tree.get_terminals())
else:
nref = 0
for tree in result.mNexus.trees:
options.stdlog.write("# ninput=%i, nreference=%i, noutput=%i\n" % (
len(matrix) - 1, nref, len(tree.get_terminals())))
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function
from chainer import utils
from chainer.utils import collections_abc
from chainer.utils import type_check
def _logsumexp(a, xp, axis=None):
vmax = xp.amax(a, axis=axis, keepdims=True)
if xp is numpy:
vmax += xp.log(xp.sum(xp.exp(a - vmax),
axis=axis, keepdims=True, dtype=a.dtype))
else:
_logsumexp_impl = cuda.reduce(
'T x, T vmax', 'T y',
'exp(x - vmax)', 'a + b', 'y += log(a)', '0',
'logsumexp_impl')
_logsumexp_impl(a, vmax, vmax, axis=axis, keepdims=True)
return xp.squeeze(vmax, axis=axis)
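# Example (illustrative sketch, assuming a NumPy backend):
#   a = numpy.log(numpy.array([[1., 3.]], dtype=numpy.float32))
#   _logsumexp(a, numpy, axis=1)  # ~ [numpy.log(4.)] == [1.386...]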
def _softmax(x, xp):
val = xp.exp(x - xp.amax(x, axis=2, keepdims=True))
val /= xp.sum(val, axis=2, keepdims=True)
return val
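# Note: _softmax normalizes over axis 2 (the vocabulary axis of a
# (time, batch, vocab) array), subtracting the per-row maximum first for
# numerical stability.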
def _label_to_path(labels, blank_symbol, xp):
path = xp.full((len(labels), labels.shape[1] * 2 + 1),
blank_symbol, dtype=numpy.int32)
path[:, 1::2] = labels
return path
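# Example (illustrative): with blank_symbol == 0,
#   _label_to_path(numpy.array([[1, 2]], dtype=numpy.int32), 0, numpy)
# returns [[0, 1, 0, 2, 0]], i.e. the labels interleaved with blanks.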
def _flip_path(path, path_length, xp):
"""Flips label sequence.
This function rotates a label sequence and flips it.
``path[b, t]`` stores a label at time ``t`` in ``b``-th batch.
The rotated matrix ``r`` is defined as
``r[b, t] = path[b, t + path_length[b]]``
.. ::
a b c d . . a b c d d c b a .
e f . . . -> . . . e f -> f e . . .
g h i j k g h i j k k j i h g
"""
n_batch, n_label = path.shape
rotate = (xp.arange(n_label) + path_length[:, None]) % n_label
return path[xp.arange(n_batch, dtype='i')[:, None],
rotate][:, ::-1]
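# Example (illustrative sketch, NumPy backend):
#   path = numpy.array([[1, 2, 3, 0, 0]], dtype=numpy.int32)
#   _flip_path(path, numpy.array([3], dtype=numpy.int32), numpy)
# returns [[3, 2, 1, 0, 0]]: the valid prefix reversed, padding pushed right.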
def _flip_label_probability(y, input_length, xp):
"""Flips a label probability matrix.
This function rotates a label probability matrix and flips it.
``y[i, b, l]`` stores log probability of label ``l`` at ``i``-th
input in ``b``-th batch.
The rotated matrix ``r`` is defined as
``r[i, b, l] = y[i + input_length[b], b, l]``
"""
seq, n_batch, n_vocab = y.shape
rotate = (xp.arange(seq, dtype='i')[:, None] + input_length) % seq
return y[
rotate[:, :, None],
xp.arange(n_batch, dtype='i')[None, :, None],
xp.arange(n_vocab, dtype='i')[None, None, :]][::-1]
def _flip_path_probability(prob, input_length, path_length, xp):
"""Flips a path probability matrix.
    This function rotates a path probability matrix and flips it.
``prob[i, b, t]`` stores log probability at ``i``-th input and
at time ``t`` in a output sequence in ``b``-th batch.
The rotated matrix ``r`` is defined as
``r[i, j, k] = prob[i + input_length[j], j, k + path_length[j]]``
"""
seq, n_batch, n_label = prob.shape
rotate_input = (xp.arange(seq, dtype='i')[:, None] + input_length) % seq
rotate_label = (
xp.arange(n_label, dtype='i') + path_length[:, None]) % n_label
return prob[
rotate_input[:, :, None],
xp.arange(n_batch, dtype='i')[None, :, None],
rotate_label][::-1, :, ::-1]
class ConnectionistTemporalClassification(function.Function):
"""The implementation of Connectionist Temporal Classfication loss functions.
To make it usable for real-world cases, this class has two policies below.
1. This class computes forward and backward variables in the log domain.
2. This class applies the softmax function to inputs. The Backward
values of CTC loss is often overflows. This is avoided by computing
backward values before the activation function is applied.
"""
def __init__(self, blank_symbol, reduce='mean'):
self.blank_symbol = blank_symbol
self.zero_padding = -10000000000.0
if reduce not in ('mean', 'no'):
raise ValueError(
"only 'mean' and 'no' are valid "
"for 'reduce', but '%s' is given" % reduce)
self.reduce = reduce
def check_type_forward(self, in_types):
type_check.argname(
in_types, ('input_length', 'label_length', 't', 'x'))
input_length_type, label_length_type, t_type, x_type = in_types
type_check.expect(
input_length_type.dtype == numpy.int32,
input_length_type.ndim == 1,
label_length_type.dtype == numpy.int32,
label_length_type.ndim == 1,
t_type.ndim == 2,
t_type.dtype == numpy.int32,
x_type.ndim == 3,
x_type.dtype == numpy.float32,
)
n_batch = x_type.shape[1]
type_check.expect(
t_type.shape[0] == n_batch,
input_length_type.shape[0] == n_batch,
label_length_type.shape[0] == n_batch,
)
def log_matrix(self, x, xp):
if xp == numpy:
res = numpy.ma.log(x).filled(fill_value=self.zero_padding)
else:
create_recurrence_relation = cuda.elementwise(
'T x, T e', 'T y',
'y = x == 0 ? e : log(x)',
'create_recurrence_relation')
res = create_recurrence_relation(x, self.zero_padding)
return res.astype(numpy.float32)
    # path probability to label probability
def label_probability(self, label_size, path, path_length,
multiply_seq, xp):
seq_length = len(multiply_seq)
n_batch = len(path)
dtype = multiply_seq.dtype
ret = xp.zeros((seq_length, n_batch, label_size), dtype)
if xp == numpy:
for b in six.moves.range(len(path)):
target_path = path[b, :path_length[b]]
chars = {c for c in target_path}
for c in chars:
ret[:, b, c] = xp.sum(
multiply_seq[:, b, 0:path_length[b]]
[:, target_path == c], axis=1)
else:
cuda.elementwise(
'T prob, I path, I path_length, I max_path_length',
'raw T cum_prob',
'''
I t = i % max_path_length;
if (t < path_length) {
int n_batch = cum_prob.shape()[1];
I s = i / (max_path_length * n_batch);
I b = (i - s * (max_path_length * n_batch))
/ max_path_length;
int ind[] = {s, b, path};
atomicAdd(&cum_prob[ind], prob);
}
''', 'ctc_label_prob_sum'
)(multiply_seq, path, path_length[:, None], path.shape[1], ret)
return ret
def _computes_transition(
self, prev_prob, path, path_length, cum_prob, y):
xp = cuda.get_array_module(prev_prob)
if xp == numpy:
n_batch, max_path_length = path.shape
mat = xp.full(
(3, n_batch, max_path_length), self.zero_padding, 'f')
mat[0, :, :] = prev_prob
mat[1, :, 1:] = prev_prob[:, :-1]
mat[2, :, 2:] = prev_prob[:, :-2]
# disable transition between the same symbols
# (including blank-to-blank)
same_transition = (path[:, :-2] == path[:, 2:])
mat[2, :, 2:][same_transition] = self.zero_padding
prob = _logsumexp(mat, xp, axis=0)
outside = xp.arange(max_path_length) >= path_length[:, None]
prob[outside] = self.zero_padding
cum_prob += prob
batch_index = xp.arange(n_batch, dtype='i')
prob += y[batch_index[:, None], path]
else:
prob = xp.empty_like(prev_prob)
cuda.elementwise(
'raw T prob, raw I path, I path_length, T zero, raw T y',
'T z, T cum_prob',
'''
int length = prob.shape()[1];
int b = i / length;
int t = i - b * length;
if (t >= path_length) {
z = zero;
cum_prob += zero;
return;
}
int ind1[] = {b, t};
int ind2[] = {b, t - 1};
int ind3[] = {b, t - 2};
float f1 = prob[ind1];
float f2 = (0 <= t - 1) ? prob[ind2] : zero;
float f3 = (0 <= t - 2 && path[ind3] != path[ind1]) ?
prob[ind3] : zero;
// calculates log-sum-exp
float m = max(f1, max(f2, f3));
z = m + log(exp(f1 - m) + exp(f2 - m) + exp(f3 - m));
cum_prob += z;
int y_ind[] = {b, path[ind1]};
z += y[y_ind];
''', 'ctc_transition'
)(prev_prob, path, path_length[:, None], self.zero_padding, y,
prob, cum_prob)
return prob
def calc_trans(self, yseq, input_length,
label, label_length, path, path_length, xp):
max_input_length, n_batch, n_unit = yseq.shape
max_label_length = label.shape[1]
max_path_length = path.shape[1]
assert label.shape == (n_batch, max_label_length), label.shape
assert path.shape == (n_batch, max_label_length * 2 + 1)
forward_prob = xp.full(
(n_batch, max_path_length), self.zero_padding, dtype='f')
forward_prob[:, 0] = 0
backward_prob = forward_prob
batch_index = xp.arange(n_batch, dtype='i')
seq_index = xp.arange(len(yseq), dtype='i')
prob = yseq[seq_index[:, None, None], batch_index[:, None], path]
# forward computation.
for i, y in enumerate(yseq):
forward_prob = self._computes_transition(
forward_prob, path, path_length, prob[i], y)
r_path = _flip_path(path, path_length, xp)
yseq_inv = _flip_label_probability(yseq, input_length, xp)
prob = _flip_path_probability(prob, input_length, path_length, xp)
for i, y_inv in enumerate(yseq_inv):
backward_prob = self._computes_transition(
backward_prob, r_path, path_length, prob[i], y_inv)
return _flip_path_probability(prob, input_length, path_length, xp)
def forward(self, inputs):
xp = cuda.get_array_module(inputs[0])
self.input_length, label_length, t, xs = inputs
if chainer.is_debug():
assert len(xs) >= xp.max(self.input_length)
assert t.shape[1] >= xp.max(label_length)
self.path_length = 2 * label_length + 1
self.yseq = _softmax(xs, xp)
log_yseq = self.log_matrix(self.yseq, xp)
self.path = _label_to_path(t, self.blank_symbol, xp)
self.prob_trans = self.calc_trans(
log_yseq, self.input_length, t,
label_length, self.path, self.path_length, xp)
loss = -_logsumexp(self.prob_trans[0], xp, axis=1)
if self.reduce == 'mean':
loss = utils.force_array(xp.mean(loss))
return loss,
def backward(self, inputs, grad_output):
xp = cuda.get_array_module(inputs[0])
batch_size = len(inputs[2])
total_probability = _logsumexp(self.prob_trans[0], xp, axis=1)
label_prob = self.label_probability(
self.yseq.shape[2], self.path, self.path_length,
xp.exp(self.prob_trans - total_probability[:, None]), xp)
self.yseq -= label_prob
if self.reduce == 'mean':
self.yseq *= grad_output[0] / batch_size
else:
self.yseq *= grad_output[0][..., None]
# mask
self.yseq *= (
xp.arange(len(self.yseq))[:, None] < self.input_length)[..., None]
return None, None, None, self.yseq
def connectionist_temporal_classification(
x, t, blank_symbol, input_length=None, label_length=None,
reduce='mean'):
"""Connectionist Temporal Classification loss function.
    Connectionist Temporal Classification (CTC) [Graves2006]_ is a loss
    function for sequence labeling where the alignment between the inputs
    and targets is unknown. See also [Graves2012]_.
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds the samplewise
loss values. If it is ``'mean'``, it takes the mean of loss values.
Args:
x (list or tuple of :class:`~chainer.Variable`):
A list of unnormalized probabilities for labels.
Each element of ``x``, ``x[i]`` is a :class:`~chainer.Variable`
object, which has shape ``(B, V)``, where ``B``
is the batch size and ``V`` is the number of labels.
The softmax of ``x[i]`` represents the probabilities of the labels
at time ``i``.
t (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
A matrix including expected label sequences.
Its shape is ``(B, M)``, where ``B`` is the batch size and ``M`` is
the maximum length of the label sequences.
All elements in ``t`` must be less than ``V``, the number of
labels.
blank_symbol (int): Index of blank_symbol.
This value must be non-negative.
input_length (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray` or ``None``):
Length of sequence for each of mini batch ``x`` (optional).
Its shape must be ``(B,)``.
If the ``input_length`` is omitted or ``None``, it assumes that
all of ``x`` is valid input.
label_length (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray` or ``None``):
Length of sequence for each of mini batch ``t`` (optional).
Its shape must be ``(B,)``.
If the ``label_length`` is omitted or ``None``, it assumes that
all of ``t`` is valid input.
reduce (str): Reduction option. Its value must be either
``'mean'`` or ``'no'``. Otherwise,
:class:`ValueError` is raised.
Returns:
~chainer.Variable:
A variable holding a scalar value of the CTC loss.
If ``reduce`` is ``'no'``, the output variable holds array
whose shape is `(B,)` where `B` is the number of samples.
If it is ``'mean'``, it holds a scalar.
.. note::
        You need to input ``x`` without applying an activation function (e.g.
        the softmax function), because this function applies the softmax to
        ``x`` before calculating the CTC loss to avoid numerical limitations.
        You also need to apply the softmax function to the forwarded values
        before you decode them.
.. note::
This function is differentiable only by ``x``.
.. note::
This function supports (batch, sequence, 1-dimensional input)-data.
.. [Graves2006] Alex Graves, Santiago Fernandez,\
Faustino Gomez, Jurgen Schmidhuber,\
`Connectionist Temporal Classification: Labelling Unsegmented\
Sequence Data with Recurrent Neural Networks\
<ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf>`_
.. [Graves2012] Alex Graves,\
`Supervised Sequence Labelling with Recurrent Neural Networks\
<https://www.cs.toronto.edu/~graves/preprint.pdf>`_
"""
if not isinstance(x, collections_abc.Sequence):
raise TypeError('x must be a list of Variables')
if not isinstance(blank_symbol, int):
raise TypeError('blank_symbol must be non-negative integer.')
assert 0 <= blank_symbol < x[0].shape[1]
# This implementation only supports 1-dimensional data.
    # TODO(jnishi): Support d(>1)-dimensional inputs.
assert x[0].ndim == 2
xp = cuda.get_array_module(x[0])
if input_length is None:
input_length = xp.full(len(x[0]), len(x), dtype=numpy.int32)
if label_length is None:
label_length = xp.full(len(t), t.shape[1], dtype=numpy.int32)
return ConnectionistTemporalClassification(blank_symbol, reduce)(
input_length, label_length, t, chainer.functions.stack(x))
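# Minimal usage sketch (illustrative only; the shapes and values below are
# assumptions, not taken from the docstring above):
#   import numpy as np
#   xs = [chainer.Variable(np.random.randn(2, 5).astype(np.float32))
#         for _ in range(4)]                       # 4 time steps, B=2, V=5
#   t = np.array([[1, 2], [3, 4]], dtype=np.int32)  # label sequences
#   loss = connectionist_temporal_classification(xs, t, blank_symbol=0)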
|
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from utils import Utils
import json
import logging
import random
import config
logger = logging.getLogger(__name__)
class Botnet:
ut = Utils()
def __init__(self, player):
self.username = player.username
self.password = player.password
self.uhash = player.uhash
self.botNetServers = 3
self.botnet = []
self.p = player
self.ofwhat = config.BotNet_updates
self.energy = 0
self._initbot()
def _initbot(self):
"""
        Grab the number of bots in the botnet
        and populate an array of Bot objects.
:return: none
"""
if(self.ofwhat == "ALL"):
self.ofwhat = ["fw", "av", "smash", "mwk"]
data = self._botnetInfo()
bots = json.loads(data)
self.botnet = []
if int(bots['count']) > 0:
for i in bots['data']:
bot = Bot(i['running'], self.ofwhat[random.randint(0,len(self.ofwhat)-1)], self.energy, i['hostname'], self.username, self.password, self.uhash)
self.botnet.append(bot)
def printbots(self):
"""
Print a list of player PCs in the botnet
:return: None
"""
for bot in self.botnet:
logger.info(bot)
def getbotnetdata(self):
"""
        Return the list of Bot objects.
        Contains all the bots in the botnet.
        :return: list of Bot objects
"""
return self.botnet
def getInfo(self):
"""
        Get info about the entire botnet, including whether the botnet
        servers can be attacked, plus per-PC info.
        :return: list of vHack servers that can be hacked, e.g.
        ['1','2','1']. '1' = can be hacked, '2' = timer not yet elapsed.
"""
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_botnetInfo.php")
response = json.loads(response)
return response
def attack(self):
"""
        Check if the vHack botnet servers are attackable,
        then attack the ones that are.
:return: none
"""
self._initbot()
logger.info("Trying Bot Net")
cinfo = self.getInfo()
for i in range(1, self.botNetServers + 1):
if cinfo[i - 1] == '1':
logger.debug('I am attacking #{}'.format(i))
if i == 1:
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_attackCompany.php", company=str(i))
else:
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_attackCompany" + str(i) + ".php", company=str(i))
logger.debug('I attacked #{} with response {}'.format(i, response))
if response == '0':
logger.info('#{} Netcoins gained'.format(i))
else:
logger.info('#{} Failed! No netcoins...'.format(i))
else:
logger.info("Botnet #{} not hackable yet".format(i))
def upgradebotnet(self, hostname, running, count):
"""
Check if there is enough money to upgrade a botnet PC.
Cycle through and upgrade until no money.
        :return: bool
"""
ofwhat = self.ofwhat[random.randint(0,len(self.ofwhat)-1)]
logger.info("Prepare attempting to upgrade bot net PC '"+ hostname +"'")
get_infobot = self.getInfo()
if (int(get_infobot['data'][count]['strength']) == 1120 and int(get_infobot['data'][count]['stars']) == 4):
logger.info("Bot '"+hostname+"' has max strength [1120] for level " + str(get_infobot['data'][count]['stars']))
return False
elif (int(get_infobot['data'][count]['strength']) == 840 and int(get_infobot['data'][count]['stars']) == 3):
logger.info("Bot '"+hostname+"' has max strength [840] for level " + str(get_infobot['data'][count]['stars']))
return False
elif (int(get_infobot['data'][count]['strength']) == 600 and int(get_infobot['data'][count]['stars']) == 2):
logger.info("Bot '"+hostname+"' has max strength [600] for level " + str(get_infobot['data'][count]['stars']))
return False
elif (int(get_infobot['data'][count]['strength']) == 400 and int(get_infobot['data'][count]['stars']) == 1):
logger.info("Bot '"+hostname+"' has max strength [400] for level " + str(get_infobot['data'][count]['stars']))
return False
elif (int(get_infobot['data'][count]['strength']) == 3000 and int(get_infobot['data'][count]['stars']) == 0):
logger.info("Bot '"+hostname+"' has max strength [3000] for level " + str(get_infobot['data'][count]['stars']))
return False
if (int(get_infobot['data'][count]['running']) == 0 and int(get_infobot['energy']) > 0):
if int(get_infobot['data'][count]['stars']) > 0:
maxofwhat = 20 + (5*int(get_infobot['data'][count]['stars']))
elif int(get_infobot['data'][count]['stars']) == 0:
maxofwhat = 250
remove = 0
for a, i in enumerate(xrange(0, len(self.ofwhat)-1)):
if int(get_infobot['data'][count][unicode(self.ofwhat[i-remove])]) == int(maxofwhat):
self.ofwhat.remove(self.ofwhat[i-remove])
remove = remove + 1
if i == 3:
break
ofwhat = self.ofwhat[random.randint(0,(len(self.ofwhat)-1))]
new_bal = self.upgradesinglebot(hostname, ofwhat)
if new_bal:
logger.info("Waiting! Doing updates for bot '" + hostname + "', [" + ofwhat + "]")
return True
elif (int(get_infobot['energy']) == 0):
logger.info("You don't have enough energy to upgrade '" + hostname + "'! :(")
return False
elif (int(get_infobot['data'][count]['running']) == 1):
logger.info("Waiting! Doing updates for bot '" + hostname + "', [" + ofwhat + "]")
return False
logger.debug("The bot '{}' is not upgradeable".format(hostname))
return False
def _botnetInfo(self):
"""
Get the botnet information including vHack servers and PC data.
:return: string
'{"count":"14",
"data":[{"bID":"1","bLVL":"100","bSTR":"100","bPRICE":"10000000"},
{"bID":"2","bLVL":"100","bSTR":"100","bPRICE":"10000000"}],
"strength":23,"resethours1":"","resetminutes1":"14","resethours2":"4","resetminutes2":"15",
"resethours3":"3","resetminutes3":"15",
"canAtt1":"2","canAtt2":"2","canAtt3":"2"}'
"""
temp = self.ut.requestString(self.username, self.password, self.uhash, "vh_botnetInfo.php")
return temp
def upgradesinglebot(self, hostname, ofwhat):
"""
Pass in bot class object and call upgrade function based on bot ID.
details :
{u'strength': u'22', u'old': u'30', u'mm': u'68359859',
u'money': u'66259859', u'costs': u'2100000',
u'lvl': u'21', u'new': u'22'}
current lvl, bot number, x, x, upgrade cost, lvl, next lvl
        :return: bool
"""
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_upgradePC.php", hostname=hostname, ofwhat=ofwhat, inst="0", much="1")
jsons = json.loads(response)
if int(jsons['result']) == 0:
return True
else:
logger.error("Upgrades on " + hostname + " Failed !")
return False
def __repr__(self):
return "Botnet details: vHackServers: {0}, Bot Net PC's: {1}".format(self.botNetServers, self.botnet)
class Bot:
ut = Utils()
def __init__(self, running, ofwhat, energy, hostname, username, password, uhash):
self.username = username
self.uhash = uhash
self.password = password
self.running = int(running)
self.ofwhat = ofwhat
self.energy = energy
self.hostname = hostname
def botupgradable(self, running):
"""
        Determine if the botnet PC is idle (not currently running an upgrade).
:return: Bool
"""
if running == 0:
return True
else:
return False
def nextlevelcostenergy(self):
"""
Return the cost of upgrading bot to the next level
:return:int
"""
return self.energy
def parse_json_stream(self, stream):
decoder = json.JSONDecoder()
while stream:
obj, idx = decoder.raw_decode(stream)
yield obj
stream = stream[idx:].lstrip()
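    # Illustrative behaviour (assumption): given the concatenated stream
    # '{"a": 1}{"b": 2}', this generator yields {'a': 1} and then {'b': 2}.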
def upgradesinglebot(self, hostname, ofwhat):
"""
Pass in bot class object and call upgrade function based on bot ID.
details :
{u'strength': u'22', u'old': u'30', u'mm': u'68359859',
u'money': u'66259859', u'costs': u'2100000',
u'lvl': u'21', u'new': u'22'}
current lvl, bot number, x, x, upgrade cost, lvl, next lvl
        :return: bool
"""
response = self.ut.requestString(self.username, self.password, self.uhash, "vh_upgradePC.php", hostname=hostname, ofwhat=ofwhat)
#response = response.split('}{')[0] + '}'
#jsons = json.loads(response)
#logger.info(jsons)
return True
def __repr__(self):
return "Bot details: running: {0}, energy: {1}, upgrade: {2}, botname: {3}".format(self.running, self.energy, self.ofwhat, self.hostname)
|
|
#!/usr/bin/python
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from google.cloud import datacatalog
from google.protobuf import timestamp_pb2
from google.datacatalog_connectors.commons import prepare
from google.datacatalog_connectors.qlik.prepare import constants
class DataCatalogEntryFactory(prepare.BaseEntryFactory):
__INCOMING_TIMESTAMP_UTC_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
def __init__(self, project_id, location_id, entry_group_id,
user_specified_system, site_url):
self.__project_id = project_id
self.__location_id = location_id
self.__entry_group_id = entry_group_id
self.__user_specified_system = user_specified_system
self.__site_url = site_url
def make_entry_for_app(self, app_metadata):
entry = datacatalog.Entry()
generated_id = self.__format_id(constants.ENTRY_ID_PART_APP,
app_metadata.get('id'))
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
generated_id)
entry.user_specified_system = self.__user_specified_system
entry.user_specified_type = constants.USER_SPECIFIED_TYPE_APP
entry.display_name = self._format_display_name(
app_metadata.get('name'))
entry.description = app_metadata.get('description')
entry.linked_resource = f'{self.__site_url}' \
f'/sense/app/{app_metadata.get("id")}'
created_datetime = datetime.strptime(
app_metadata.get('createdDate'),
self.__INCOMING_TIMESTAMP_UTC_FORMAT)
create_timestamp = timestamp_pb2.Timestamp()
create_timestamp.FromDatetime(created_datetime)
entry.source_system_timestamps.create_time = create_timestamp
modified_date = app_metadata.get('modifiedDate')
resolved_modified_date = modified_date or app_metadata.get(
'createdDate')
modified_datetime = datetime.strptime(
resolved_modified_date, self.__INCOMING_TIMESTAMP_UTC_FORMAT)
update_timestamp = timestamp_pb2.Timestamp()
update_timestamp.FromDatetime(modified_datetime)
entry.source_system_timestamps.update_time = update_timestamp
return generated_id, entry
def make_entry_for_custom_property_definition(
self, custom_property_def_metadata):
entry = datacatalog.Entry()
generated_id = self.__format_id(
constants.ENTRY_ID_PART_CUSTOM_PROPERTY_DEFINITION,
custom_property_def_metadata.get('id'))
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
generated_id)
entry.user_specified_system = self.__user_specified_system
entry.user_specified_type = \
constants.USER_SPECIFIED_TYPE_CUSTOM_PROPERTY_DEFINITION
entry.display_name = self._format_display_name(
custom_property_def_metadata.get('name'))
entry.description = custom_property_def_metadata.get('description')
# The linked_resource field is not fulfilled because there is no way to
# jump directly to an 'edit' page in the QlikView Management Console
        # (QMC). The ID we see in the URL of the Custom Property
        # Definition edit page is generated at the client side as a wrapper
        # around the object. The reason for this is: if someone selects a bunch
        # of things in the QMC, it can't pick one or keep a list, so it
        # generates a new 'synthetic' key for the edit page.
# -- from the Qlik Analytics Platform Architecture Team
created_datetime = datetime.strptime(
custom_property_def_metadata.get('createdDate'),
self.__INCOMING_TIMESTAMP_UTC_FORMAT)
create_timestamp = timestamp_pb2.Timestamp()
create_timestamp.FromDatetime(created_datetime)
entry.source_system_timestamps.create_time = create_timestamp
modified_date = custom_property_def_metadata.get('modifiedDate')
resolved_modified_date = \
modified_date or custom_property_def_metadata.get('createdDate')
modified_datetime = datetime.strptime(
resolved_modified_date, self.__INCOMING_TIMESTAMP_UTC_FORMAT)
update_timestamp = timestamp_pb2.Timestamp()
update_timestamp.FromDatetime(modified_datetime)
entry.source_system_timestamps.update_time = update_timestamp
return generated_id, entry
def make_entry_for_dimension(self, dimension_metadata):
entry = datacatalog.Entry()
app_metadata = dimension_metadata.get('app')
        # The Dimension ID is usually a 7-letter string, so the App ID is
        # prepended to prevent collisions.
generated_id = self.__format_id(
constants.ENTRY_ID_PART_DIMENSION, f'{app_metadata.get("id")}'
f'_{dimension_metadata.get("qInfo").get("qId")}')
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
generated_id)
entry.user_specified_system = self.__user_specified_system
entry.user_specified_type = constants.USER_SPECIFIED_TYPE_DIMENSION
q_meta_def = dimension_metadata.get('qMetaDef')
entry.display_name = self._format_display_name(q_meta_def.get('title'))
entry.description = q_meta_def.get('description')
# The linked_resource field is not fulfilled because there is no way to
# jump directly to a Dimension 'edit' page in Qlik Sense.
# The create_time and update_time fields are not fulfilled because
# there is no such info in the Dimension metadata.
return generated_id, entry
def make_entry_for_measure(self, measure_metadata):
entry = datacatalog.Entry()
app_metadata = measure_metadata.get('app')
        # The Measure ID is usually a 7-letter string, so the App ID is
        # prepended to prevent collisions.
generated_id = self.__format_id(
constants.ENTRY_ID_PART_MEASURE, f'{app_metadata.get("id")}'
f'_{measure_metadata.get("qInfo").get("qId")}')
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
generated_id)
entry.user_specified_system = self.__user_specified_system
entry.user_specified_type = constants.USER_SPECIFIED_TYPE_MEASURE
q_meta_def = measure_metadata.get('qMetaDef')
entry.display_name = self._format_display_name(q_meta_def.get('title'))
entry.description = q_meta_def.get('description')
# The linked_resource field is not fulfilled because there is no way to
# jump directly to a Measure 'edit' page in Qlik Sense.
# The create_time and update_time fields are not fulfilled because
# there is no such info in the Measure metadata.
return generated_id, entry
def make_entry_for_sheet(self, sheet_metadata):
entry = datacatalog.Entry()
sheet_id = sheet_metadata.get('qInfo').get('qId')
generated_id = self.__format_id(constants.ENTRY_ID_PART_SHEET,
sheet_id)
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
generated_id)
entry.user_specified_system = self.__user_specified_system
entry.user_specified_type = constants.USER_SPECIFIED_TYPE_SHEET
q_meta = sheet_metadata.get('qMeta')
entry.display_name = self._format_display_name(q_meta.get('title'))
entry.description = q_meta.get('description')
entry.linked_resource = f'{self.__site_url}' \
f'/sense/app/' \
f'{sheet_metadata.get("app").get("id")}' \
f'/sheet/{sheet_id}'
created_datetime = datetime.strptime(
q_meta.get('createdDate'), self.__INCOMING_TIMESTAMP_UTC_FORMAT)
create_timestamp = timestamp_pb2.Timestamp()
create_timestamp.FromDatetime(created_datetime)
entry.source_system_timestamps.create_time = create_timestamp
modified_date = q_meta.get('modifiedDate')
resolved_modified_date = modified_date or q_meta.get('createdDate')
modified_datetime = datetime.strptime(
resolved_modified_date, self.__INCOMING_TIMESTAMP_UTC_FORMAT)
update_timestamp = timestamp_pb2.Timestamp()
update_timestamp.FromDatetime(modified_datetime)
entry.source_system_timestamps.update_time = update_timestamp
return generated_id, entry
def make_entry_for_stream(self, stream_metadata):
entry = datacatalog.Entry()
generated_id = self.__format_id(constants.ENTRY_ID_PART_STREAM,
stream_metadata.get('id'))
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
generated_id)
entry.user_specified_system = self.__user_specified_system
entry.user_specified_type = constants.USER_SPECIFIED_TYPE_STREAM
entry.display_name = self._format_display_name(
stream_metadata.get('name'))
entry.linked_resource = f'{self.__site_url}' \
f'/hub/stream/{stream_metadata.get("id")}'
created_datetime = datetime.strptime(
stream_metadata.get('createdDate'),
self.__INCOMING_TIMESTAMP_UTC_FORMAT)
create_timestamp = timestamp_pb2.Timestamp()
create_timestamp.FromDatetime(created_datetime)
entry.source_system_timestamps.create_time = create_timestamp
modified_date = stream_metadata.get('modifiedDate')
resolved_modified_date = modified_date or stream_metadata.get(
'createdDate')
modified_datetime = datetime.strptime(
resolved_modified_date, self.__INCOMING_TIMESTAMP_UTC_FORMAT)
update_timestamp = timestamp_pb2.Timestamp()
update_timestamp.FromDatetime(modified_datetime)
entry.source_system_timestamps.update_time = update_timestamp
return generated_id, entry
def make_entry_for_visualization(self, visualization_metadata):
entry = datacatalog.Entry()
viz_id = visualization_metadata.get('qInfo').get('qId')
generated_id = self.__format_id(constants.ENTRY_ID_PART_VISUALIZATION,
viz_id)
entry.name = datacatalog.DataCatalogClient.entry_path(
self.__project_id, self.__location_id, self.__entry_group_id,
generated_id)
entry.user_specified_system = self.__user_specified_system
entry.user_specified_type = constants.USER_SPECIFIED_TYPE_VISUALIZATION
q_meta_def = visualization_metadata.get('qMetaDef')
entry.display_name = self._format_display_name(q_meta_def.get('title'))
entry.description = q_meta_def.get('description')
# The linked_resource field is not fulfilled because Data Catalog
# currently does not accept ``?`` and ``=`` in the field value.
# The below statements can be uncommented once
# https://issuetracker.google.com/issues/176912978
# has been fixed.
#
# app_id = visualization_metadata.get('app').get('id')
# entry.linked_resource = f'{self.__site_url}/sense/single' \
# f'?appid={app_id}' \
# f'&obj={viz_id}'
# The create_time and update_time fields are not fulfilled because
# there is no such info in the Visualization metadata.
return generated_id, entry
@classmethod
def __format_id(cls, source_type_identifier, source_id):
no_prefix_fmt_id = cls._format_id(
f'{source_type_identifier}{source_id}')
if len(no_prefix_fmt_id) > constants.NO_PREFIX_ENTRY_ID_MAX_LENGTH:
no_prefix_fmt_id = \
no_prefix_fmt_id[:constants.NO_PREFIX_ENTRY_ID_MAX_LENGTH]
return f'{constants.ENTRY_ID_PREFIX}{no_prefix_fmt_id}'
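    # Illustrative sketch (the identifier value 'app_' is an assumption):
    # __format_id('app_', 'abc') returns constants.ENTRY_ID_PREFIX followed by
    # the formatted 'app_abc', truncated so the suffix stays within
    # constants.NO_PREFIX_ENTRY_ID_MAX_LENGTH characters.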
|
|
'''
chop.py
Takes a list of input ingredient names. Chops each ingredient, and adds the
resulting model to the current blend or a new blend.
Thomas Storey
2016
'''
import sys
import argparse
import bpy
import numpy as np
import os
import bmesh
from math import *
from mathutils import *
import random
def createMesh(name, origin, verts, faces):
mesh = bpy.data.meshes.new(name+"Mesh")
obj = bpy.data.objects.new(name, mesh)
obj.location = origin
obj.show_name = True
scn = bpy.context.scene
scn.objects.link(obj)
mesh.from_pydata(verts, [], faces)
mesh.update()
return obj
def deleteObject(obj):
bpy.context.scene.objects.unlink(obj)
obj.user_clear()
bpy.data.objects.remove(obj)
def getObject(objdir, objname):
if (bpy.data.objects.get(objname) == None):
objpath = os.path.join(objdir, objname+".obj")
bpy.ops.import_scene.obj(filepath=objpath,axis_forward='Y',axis_up='Z')
return bpy.data.objects[objname]
def generateChopGeometry(obj):
# number of times to divide bounding box
div = 2
# dimensions of bounding box
(l, w, h) = (obj.dimensions.x, obj.dimensions.y, obj.dimensions.z)
# dimensions of cells
(cl, cw, ch) = (l/div, w/div, h/div)
# position of first cell
cpos = (obj.bound_box[0][0]+cl*0.5,
obj.bound_box[0][1]+cw*0.5,
obj.bound_box[0][2]+ch*0.5)
# cell faces
faces = ((0,3,2,1),(0,1,5,4),(1,2,6,5),
(3,7,6,2),(0,4,7,3),(4,5,6,7))
# cells which will be used to chop the object
cells = []
for z in range(div):
for y in range(div):
for x in range(div):
origin = (cpos[0]+(cl*x), cpos[1]+(cw*y), cpos[2]+(ch*z))
verts = (( (cl/2), (cw/2), (ch/2)), #0
( (cl/2), -(cw/2), (ch/2)), #1
(-(cl/2), -(cw/2), (ch/2)), #2
(-(cl/2), (cw/2), (ch/2)), #3
( (cl/2), (cw/2), -(ch/2)), #4
( (cl/2), -(cw/2), -(ch/2)), #5
(-(cl/2), -(cw/2), -(ch/2)), #6
(-(cl/2), (cw/2), -(ch/2))) #7
cells.append(createMesh("cell", origin, verts, faces))
return cells
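# Note: with div == 2 the bounding box is split into 2**3 == 8 equal cells,
# each returned as its own cube mesh; chop() intersects the object with each.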
def chop(scn, obj, cells):
# for each cell, make a duplicate of the object
dups = []
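    # The first pass below is discarded: it duplicates the object, intersects
    # it with cells[0], then deletes the result; presumably a warm-up for
    # Blender's boolean modifier evaluation.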
dup = obj.copy()
dup.data = obj.data.copy()
scn.objects.link(dup)
# add a boolean intersection modifier
mod = dup.modifiers.new('intersection', 'BOOLEAN')
mod.object = cells[0]
mod.operation = 'INTERSECT'
# apply modifier
dup.data = dup.to_mesh(scn, True, 'RENDER')
scn.update()
dup.modifiers.remove(mod)
deleteObject(dup)
for cell in cells:
dup = obj.copy()
dup.data = obj.data.copy()
scn.objects.link(dup)
# add a boolean intersection modifier
mod = dup.modifiers.new('intersection', 'BOOLEAN')
mod.object = cell
mod.operation = 'INTERSECT'
# apply modifier
dup.data = dup.to_mesh(scn, True, 'RENDER')
scn.update()
dup.modifiers.remove(mod)
if len(dup.data.vertices) <= 0:
# if after modifier, duplicate has zero verts, delete it
deleteObject(dup)
else:
dups.append(dup)
set_uvs(dup.data)
# delete cells
deleteObject(cell)
return dups
def addGroundPlane(obj):
faces = ((0,3,2,1),(0,1,5,4),(1,2,6,5),
(3,7,6,2),(0,4,7,3),(4,5,6,7))
(l, w, h) = (obj.dimensions.x, obj.dimensions.y, obj.dimensions.z)
po = (obj.bound_box[0][0]+(l*0.5),
obj.bound_box[0][1]+(w*0.5),
obj.bound_box[0][2]-1)
pv = ((l*10,l*10,1),(l*10,l*-10,1),(l*-10,l*-10,1),(l*-10,l*10,1),
(l*10,l*10,-1),(l*10,l*-10,-1),(l*-10,l*-10,-1),(l*-10,l*10,-1))
return createMesh("plane",po,pv,faces)
def addPlate(obj):
cwd = os.getcwd()
objdir = os.path.join(cwd, 'objs')
objpath = os.path.join(objdir, "plate.obj")
(l, w, h) = (obj.dimensions.x, obj.dimensions.y, obj.dimensions.z)
po = (obj.bound_box[0][0]+(l*0.5),
obj.bound_box[0][1]+(w*0.5),
obj.bound_box[0][2]-1)
bpy.ops.import_scene.obj(filepath=objpath,axis_forward='Z',axis_up='Y')
plate = getObjectsBySubstring("Plate")[0]
plate.location = po
return plate
def addRigidbody(scn, obj, e, k, f, cs):
bpy.ops.object.select_all(action='DESELECT')
obj.select = True
scn.objects.active = obj
bpy.ops.rigidbody.object_add(type='ACTIVE')
obj.rigid_body.enabled = e
obj.rigid_body.kinematic = k
obj.rigid_body.friction = f
obj.rigid_body.collision_shape = cs
obj.rigid_body.collision_margin = 0.200
if cs == 'MESH':
obj.rigid_body.mesh_source = 'BASE'
obj.select = False
def removeRigidbody(scn, obj):
bpy.ops.object.select_all(action='DESELECT')
obj.select = True
scn.objects.active = obj
bpy.ops.object.visual_transform_apply()
bpy.ops.rigidbody.object_remove()
obj.select = False
def setOriginToGeometry(scn, obj):
obj.select = True;
scn.objects.active = obj
bpy.ops.object.origin_set(type="ORIGIN_GEOMETRY")
obj.select = False;
def joinObjects(scn, objs, name):
bpy.ops.object.select_all(action='DESELECT')
for obj in objs:
obj.select = True;
activeobj = objs[0]
scn.objects.active = activeobj
bpy.ops.object.join()
activeobj.name = name
activeobj.data.name = name
return activeobj
def set_uvs_for_face(bm, fi, uv_layer):
face = bm.faces[fi]
zv = Vector((0.0, 0.0))
normal = face.normal
dx=abs(normal[0])
dy=abs(normal[1])
dz=abs(normal[2])
if (dz > dx):
u = Vector([1,0,0])
if (dz>dy):
v = Vector([0,1,0])
else:
v = Vector([0,0,1])
else:
v = Vector([0,0,1])
if dx>dy:
u = Vector([0,1,0])
else:
u = Vector([1,0,0])
for i in range(len(face.loops)):
if(face.loops[i][uv_layer].uv == zv):
l = face.loops[i]
l[uv_layer].uv = [ u.dot(l.vert.co),v.dot(l.vert.co)]
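# Note: set_uvs_for_face is a box-style projection: the dominant axis of the
# face normal decides which two world axes become (u, v), and only loops whose
# UV is still the default (0, 0) are written.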
def set_uvs(mesh):
uv = mesh.uv_textures[0]
bm = bmesh.new()
bm.from_mesh(mesh)
bm.faces.ensure_lookup_table()
uv_layer = bm.loops.layers.uv[uv.name]
for fi in range(len(bm.faces)):
set_uvs_for_face(bm, fi, uv_layer)
bm.to_mesh(mesh)
def indexOfSubstring(the_list, substring):
for i, s in enumerate(the_list):
if substring in s:
return i
return -1
def getObjectsBySubstring(objname):
copies = []
for obj in bpy.data.objects:
if(objname in obj.name):
copies.append(obj)
return copies
def execute(inputs, output):
ctx = bpy.context
scn = ctx.scene
cwd = os.getcwd()
objdir = os.path.join(cwd, 'objs')
for objname in inputs:
# import file, or get it if it's already here
obj = getObject(objdir, objname)
cells = generateChopGeometry(obj)
chopped = chop(scn, obj, cells)
# add a plane under the cells
plane = addPlate(obj)
# delete original object
deleteObject(obj)
# ensure we are at the beginning of the timeline
scn.frame_current = scn.frame_start
scn.frame_set(scn.frame_start)
# add 'Animated' rigidbody to plane
try:
bpy.ops.rigidbody.world_add()
except RuntimeError:
pass
scn.rigidbody_world.group = bpy.data.groups.new("rigidbodies")
addRigidbody(scn, plane, False, True, 0.150, 'MESH')
scn.update()
# # for each cell, add a 'Dynamic' rigidbody
for chunk in chopped:
setOriginToGeometry(scn, chunk)
addRigidbody(scn, chunk, True, False, 0.150, 'CONVEX_HULL')
# # bake simulation and apply result
bpy.ops.ptcache.free_bake_all()
bpy.ops.ptcache.bake_all(bake=True)
scn.frame_current = scn.frame_end
scn.frame_set(scn.frame_end)
for chunk in chopped:
removeRigidbody(scn, chunk)
# # group dups together into one object
joined = joinObjects(scn, chopped, objname)
setOriginToGeometry(scn,joined)
joined.location = Vector([0,0,0])
# clean up - delete ground plane
deleteObject(plane)
# save out .blend
if not output == None:
bpy.ops.wm.save_as_mainfile(filepath=output,
check_existing=False,relative_remap=False)
else:
bpy.ops.wm.save_mainfile(check_existing=False,relative_remap=False)
def main():
argv = sys.argv
if "--" not in argv:
argv = []
else:
argv = argv[argv.index("--") + 1:]
usage_text =\
"Usage: blender -b [.blend file] --python " + __file__ + " -- [options]"
parser = argparse.ArgumentParser(description=usage_text)
parser.add_argument("-i", "--input", dest="input", type=str, required=True,
help="Comma delimited list of .objs to import. Exclude the file extension.")
parser.add_argument("-o", "--output", dest="output", type=str, required=False,
help="Name of blend file to save to, if not the same as the one being opened.")
args = parser.parse_args(argv)
output = ""
if not argv:
parser.print_help()
return
if not args.input:
print("input argument not given. aborting.")
parser.print_help()
return
if not args.output:
output = None
else:
output = args.output+".blend"
inputs = args.input.split(",")
execute(inputs, output)
print("chopped " + ", ".join(inputs))
if __name__ == "__main__":
main()
|
|
from Cookie import SimpleCookie
from pprint import pformat
from urllib import urlencode
from django.utils import datastructures
class HttpRequest(object): # needs to be new-style class because subclasses define "property"s
"A basic HTTP request"
def __init__(self):
self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
self.path = ''
def __repr__(self):
return '<HttpRequest\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' % \
(pformat(self.GET), pformat(self.POST), pformat(self.COOKIES),
pformat(self.META))
def __getitem__(self, key):
for d in (self.POST, self.GET):
if d.has_key(key):
return d[key]
raise KeyError, "%s not found in either POST or GET" % key
def has_key(self, key):
return self.GET.has_key(key) or self.POST.has_key(key)
def get_full_path(self):
return ''
def parse_file_upload(header_dict, post_data):
"Returns a tuple of (POST MultiValueDict, FILES MultiValueDict)"
import email, email.Message
from cgi import parse_header
raw_message = '\r\n'.join(['%s:%s' % pair for pair in header_dict.items()])
raw_message += '\r\n\r\n' + post_data
msg = email.message_from_string(raw_message)
POST = datastructures.MultiValueDict()
FILES = datastructures.MultiValueDict()
for submessage in msg.get_payload():
if isinstance(submessage, email.Message.Message):
name_dict = parse_header(submessage['Content-Disposition'])[1]
# name_dict is something like {'name': 'file', 'filename': 'test.txt'} for file uploads
# or {'name': 'blah'} for POST fields
# We assume all uploaded files have a 'filename' set.
if name_dict.has_key('filename'):
assert type([]) != type(submessage.get_payload()), "Nested MIME messages are not supported"
if not name_dict['filename'].strip():
continue
# IE submits the full path, so trim everything but the basename.
# (We can't use os.path.basename because it expects Linux paths.)
filename = name_dict['filename'][name_dict['filename'].rfind("\\")+1:]
FILES.appendlist(name_dict['name'], {
'filename': filename,
'content-type': (submessage.has_key('Content-Type') and submessage['Content-Type'] or None),
'content': submessage.get_payload(),
})
else:
POST.appendlist(name_dict['name'], submessage.get_payload())
return POST, FILES
class QueryDict(datastructures.MultiValueDict):
"""A specialized MultiValueDict that takes a query string when initialized.
This is immutable unless you create a copy of it."""
def __init__(self, query_string):
try:
from mod_python.util import parse_qsl
except ImportError:
from cgi import parse_qsl
if not query_string:
self.data = {}
self._keys = []
else:
self.data = {}
self._keys = []
for name, value in parse_qsl(query_string, True): # keep_blank_values=True
if name in self.data:
self.data[name].append(value)
else:
self.data[name] = [value]
if name not in self._keys:
self._keys.append(name)
self._mutable = False
def __setitem__(self, key, value):
if not self._mutable:
raise AttributeError, "This QueryDict instance is immutable"
else:
self.data[key] = [value]
if not key in self._keys:
self._keys.append(key)
def setlist(self, key, list_):
if not self._mutable:
raise AttributeError, "This QueryDict instance is immutable"
else:
self.data[key] = list_
if not key in self._keys:
self._keys.append(key)
def copy(self):
"Returns a mutable copy of this object"
cp = datastructures.MultiValueDict.copy(self)
cp._mutable = True
return cp
def assert_synchronized(self):
assert(len(self._keys) == len(self.data.keys())), \
"QueryDict data structure is out of sync: %s %s" % (str(self._keys), str(self.data))
def items(self):
"Respect order preserved by self._keys"
self.assert_synchronized()
items = []
for key in self._keys:
if key in self.data:
items.append((key, self.data[key][0]))
return items
def keys(self):
self.assert_synchronized()
return self._keys
def urlencode(self):
output = []
for k, list_ in self.data.items():
output.extend([urlencode({k: v}) for v in list_])
return '&'.join(output)
def parse_cookie(cookie):
if cookie == '':
return {}
c = SimpleCookie()
c.load(cookie)
cookiedict = {}
for key in c.keys():
cookiedict[key] = c.get(key).value
return cookiedict
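# Illustrative behaviour: parse_cookie('name=Ada; session=abc123') returns
# {'name': 'Ada', 'session': 'abc123'}.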
class HttpResponse:
"A basic HTTP response, with content and dictionary-accessed headers"
def __init__(self, content='', mimetype=None):
if not mimetype:
from django.conf.settings import DEFAULT_CONTENT_TYPE, DEFAULT_CHARSET
mimetype = "%s; charset=%s" % (DEFAULT_CONTENT_TYPE, DEFAULT_CHARSET)
self.content = content
self.headers = {'Content-Type':mimetype}
self.cookies = SimpleCookie()
self.status_code = 200
def __str__(self):
"Full HTTP message, including headers"
return '\n'.join(['%s: %s' % (key, value)
for key, value in self.headers.items()]) \
+ '\n\n' + self.content
def __setitem__(self, header, value):
self.headers[header] = value
def __delitem__(self, header):
try:
del self.headers[header]
except KeyError:
pass
def __getitem__(self, header):
return self.headers[header]
def has_header(self, header):
"Case-insensitive check for a header"
header = header.lower()
for key in self.headers.keys():
if key.lower() == header:
return True
return False
def set_cookie(self, key, value='', max_age=None, expires=None, path='/', domain=None, secure=None):
self.cookies[key] = value
for var in ('max_age', 'path', 'domain', 'secure', 'expires'):
val = locals()[var]
if val is not None:
self.cookies[key][var.replace('_', '-')] = val
def delete_cookie(self, key):
try:
self.cookies[key]['max_age'] = 0
except KeyError:
pass
def get_content_as_string(self, encoding):
"""
Returns the content as a string, encoding it from a Unicode object if
necessary.
"""
if isinstance(self.content, unicode):
return self.content.encode(encoding)
return self.content
# The remaining methods partially implement the file-like object interface.
# See http://docs.python.org/lib/bltin-file-objects.html
def write(self, content):
self.content += content
def flush(self):
pass
def tell(self):
return len(self.content)
class HttpResponseRedirect(HttpResponse):
def __init__(self, redirect_to):
HttpResponse.__init__(self)
self['Location'] = redirect_to
self.status_code = 302
class HttpResponseNotModified(HttpResponse):
def __init__(self):
HttpResponse.__init__(self)
self.status_code = 304
class HttpResponseNotFound(HttpResponse):
def __init__(self, *args, **kwargs):
HttpResponse.__init__(self, *args, **kwargs)
self.status_code = 404
class HttpResponseForbidden(HttpResponse):
def __init__(self, *args, **kwargs):
HttpResponse.__init__(self, *args, **kwargs)
self.status_code = 403
class HttpResponseGone(HttpResponse):
def __init__(self, *args, **kwargs):
HttpResponse.__init__(self, *args, **kwargs)
self.status_code = 410
class HttpResponseServerError(HttpResponse):
def __init__(self, *args, **kwargs):
HttpResponse.__init__(self, *args, **kwargs)
self.status_code = 500
|
|
#!/usr/bin/env python
"""Launchpad to github bug migration script.
There's a ton of code from Hydrazine copied here:
https://launchpad.net/hydrazine
Usage
-----
This code is meant to port a bug database for a project from Launchpad to
GitHub. It was used to port the IPython bug history.
The code is meant to be used interactively. I ran it multiple times in one long
IPython session, until the data structures I was getting from Launchpad looked
right. Then I turned off (see 'if 0' markers below) the Launchpad part, and ran
it again with the github part executing and using the 'bugs' variable from my
interactive namespace (via "%run -i" in IPython).
This code is NOT fire and forget, it's meant to be used with some intelligent
supervision at the wheel. Start by making a test repository (I made one called
ipython/BugsTest) and upload only a few issues into that. Once you are sure
that everything is OK, run it against your real repo with all your issues.
You should read all the code below and roughly understand what's going on
before using this. Since I didn't intend to use this more than once, it's not
particularly robust or documented. It got the job done and I've never used it
again.
Configuration
-------------
To pull things off LP, you need to log in first (see the Hydrazine docs). Your
Hydrazine credentials will be cached locally and this script can reuse them.
To push to GH, you need to set below the GH repository owner, API token and
repository name you want to push issues into. See the GH section for the
necessary variables.
"""
import collections
import os.path
import subprocess
import sys
import time
from pprint import pformat
import launchpadlib
from launchpadlib.credentials import Credentials
from launchpadlib.launchpad import (
Launchpad, STAGING_SERVICE_ROOT, EDGE_SERVICE_ROOT )
#-----------------------------------------------------------------------------
# Launchpad configuration
#-----------------------------------------------------------------------------
# The official LP project name
PROJECT_NAME = 'statsmodels'
# How LP marks your bugs, I don't know where this is stored, but they use it to
# generate bug descriptions and we need to split on this string to create
# shorter Github bug titles
PROJECT_ID = 'statsmodels'
# Default Launchpad server, see their docs for details
service_root = EDGE_SERVICE_ROOT
#-----------------------------------------------------------------------------
# Code copied/modified from Hydrazine (https://launchpad.net/hydrazine)
#-----------------------------------------------------------------------------
# Constants for the names in LP of certain bug fields (importance, status)
lp_importances = ['Critical', 'High', 'Medium', 'Low', 'Wishlist', 'Undecided']
lp_status = ['Confirmed', 'Triaged', 'Fix Committed', 'Fix Released',
'In Progress',"Won't Fix", "Incomplete", "Invalid", "New"]
def squish(a):
return a.lower().replace(' ', '_').replace("'",'')
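# e.g. squish("Won't Fix") -> 'wont_fix'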
lp_importances_c = set(map(squish, lp_importances))
lp_status_c = set(map(squish, lp_status))
def trace(s):
sys.stderr.write(s + '\n')
def create_session():
lplib_cachedir = os.path.expanduser("~/.cache/launchpadlib/")
hydrazine_cachedir = os.path.expanduser("~/.cache/hydrazine/")
rrd_dir = os.path.expanduser("~/.cache/hydrazine/rrd")
for d in [lplib_cachedir, hydrazine_cachedir, rrd_dir]:
if not os.path.isdir(d):
os.makedirs(d, mode=0700)
hydrazine_credentials_filename = os.path.join(hydrazine_cachedir,
'credentials')
if os.path.exists(hydrazine_credentials_filename):
credentials = Credentials()
credentials.load(file(
os.path.expanduser("~/.cache/hydrazine/credentials"),
"r"))
trace('loaded existing credentials')
return Launchpad(credentials, service_root,
lplib_cachedir)
# TODO: handle the case of having credentials that have expired etc
else:
launchpad = Launchpad.get_token_and_login(
'Hydrazine',
service_root,
lplib_cachedir)
trace('saving credentials...')
launchpad.credentials.save(file(
hydrazine_credentials_filename,
"w"))
return launchpad
def canonical_enum(entered, options):
entered = squish(entered)
return entered if entered in options else None
def canonical_importance(from_importance):
return canonical_enum(from_importance, lp_importances_c)
def canonical_status(entered):
return canonical_enum(entered, lp_status_c)
#-----------------------------------------------------------------------------
# Functions and classes
#-----------------------------------------------------------------------------
class Base(object):
def __str__(self):
a = dict([(k,v) for (k,v) in self.__dict__.iteritems()
if not k.startswith('_')])
return pformat(a)
__repr__ = __str__
class Message(Base):
def __init__(self, m):
self.content = m.content
o = m.owner
self.owner = o.name
self.owner_name = o.display_name
self.date = m.date_created
class Bug(Base):
def __init__(self, bt):
# Cache a few things for which launchpad will make a web request each
# time.
bug = bt.bug
o = bt.owner
a = bt.assignee
dupe = bug.duplicate_of
# Store from the launchpadlib bug objects only what we want, and as
# local data
self.id = bug.id
self.lp_url = 'https://bugs.launchpad.net/%s/+bug/%i' % \
(PROJECT_NAME, self.id)
self.title = bt.title
self.description = bug.description
# Every bug has an owner (who created it)
self.owner = o.name
self.owner_name = o.display_name
# Not all bugs have been assigned to someone yet
try:
self.assignee = a.name
self.assignee_name = a.display_name
except AttributeError:
self.assignee = self.assignee_name = None
# Store status/importance in canonical format
self.status = canonical_status(bt.status)
self.importance = canonical_importance(bt.importance)
self.tags = bug.tags
# Store the bug discussion messages, but skip m[0], which is the same
# as the bug description we already stored
self.messages = map(Message, list(bug.messages)[1:])
self.milestone = getattr(bt.milestone, 'name', None)
# Duplicate handling disabled, since the default query already filters
# out the duplicates. Keep the code here in case we ever want to look
# into this...
if 0:
# Track duplicates conveniently
try:
self.duplicate_of = dupe.id
self.is_duplicate = True
except AttributeError:
self.duplicate_of = None
self.is_duplicate = False
# dbg dupe info
if bug.number_of_duplicates > 0:
self.duplicates = [b.id for b in bug.duplicates]
else:
self.duplicates = []
# tmp - debug
self._bt = bt
self._bug = bug
#-----------------------------------------------------------------------------
# Main script
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Launchpad part
#-----------------------------------------------------------------------------
# launchpad = create_session()
launchpad = Launchpad.login_with('statsmodels', 'production')
project = launchpad.projects[PROJECT_NAME]
# Note: by default, searchTasks() gives us all bugs except duplicates and
# those with status "Won't Fix" or "Invalid"; we pass status=lp_status so
# that every status listed above is included.
bug_tasks = project.searchTasks(status=lp_status)
bugs = {}
for bt in list(bug_tasks):
b = Bug(bt)
bugs[b.id] = b
print b.title
sys.stdout.flush()
#-----------------------------------------------------------------------------
# Github part
#-----------------------------------------------------------------------------
#http://pypi.python.org/pypi/github2
#http://github.com/ask/python-github2
# Github libraries
from github2 import core, issues, client
for mod in (core, issues, client):
reload(mod)
def format_title(bug):
return bug.title.split('{0}: '.format(PROJECT_ID), 1)[1].strip('"')
def format_body(bug):
body = \
"""Original Launchpad bug {bug.id}: {bug.lp_url}
Reported by: {bug.owner} ({owner_name}).
{description}""".format(bug=bug, owner_name=bug.owner_name.encode('utf-8'),
description=bug.description.encode('utf-8'))
return body
def format_message(num, m):
body = \
"""[ LP comment {num} by: {owner_name}, on {m.date!s} ]
{content}""".format(num=num, m=m, owner_name=m.owner_name.encode('utf-8'),
content=m.content.encode('utf-8'))
return body
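# Illustrative sketch (assumes the usual Launchpad bug task title format,
# 'Bug #N in <project>: "<title>"'; the sample bug is hypothetical).
def _demo_format_title():
    class _FakeBug(object):
        title = 'Bug #1 in statsmodels: "OLS crashes on empty input"'
    assert format_title(_FakeBug()) == 'OLS crashes on empty input'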
# Config
user = 'wesm'
token = '12efaff85b8e17f63ee835c5632b8cf0'
repo = 'statsmodels/statsmodels'
#repo = 'ipython/ipython'
# Skip bugs with this status:
# to_skip = set([u'fix_committed', u'incomplete'])
to_skip = set()
# Only label these importance levels:
gh_importances = set([u'critical', u'high', u'low', u'medium', u'wishlist'])
# Start script
gh = client.Github(username=user, api_token=token)
# Filter out the full LP bug dict to process only the ones we want
bugs_todo = dict( (id, b) for (id, b) in bugs.iteritems()
                  if b.status not in to_skip )
# Select which bug ids to run
#bids = bugs_todo.keys()[50:100]
# bids = bugs_todo.keys()[12:]
bids = bugs_todo.keys()
#bids = bids[:5]+[502787]
# Start loop over bug ids and file them on Github
nbugs = len(bids)
gh_issues = [] # for reporting at the end
for n, bug_id in enumerate(bids):
bug = bugs[bug_id]
title = format_title(bug)
body = format_body(bug)
print
    progress = '[{0}/{1}]'.format(n+1, nbugs)
    if len(title) < 65:
        print bug.id, progress, title
    else:
        print bug.id, progress, title[:65] + '...'
# still check bug.status, in case we manually added other bugs to the list
# above (mostly during testing)
if bug.status in to_skip:
print '--- Skipping - status:',bug.status
continue
print '+++ Filing...',
sys.stdout.flush()
# Create github issue for this bug
issue = gh.issues.open(repo, title=title, body=body)
print 'created GitHub #', issue.number
gh_issues.append(issue.number)
sys.stdout.flush()
# Mark status as a label
#status = 'status-{0}'.format(b.status)
#gh.issues.add_label(repo, issue.number, status)
# Mark any extra tags we might have as labels
    for tag in bug.tags:
label = 'tag-{0}'.format(tag)
gh.issues.add_label(repo, issue.number, label)
# If bug has assignee, add it as label
if bug.assignee:
gh.issues.add_label(repo, issue.number,
#bug.assignee
# Github bug, gets confused with dots in labels.
bug.assignee.replace('.','_')
)
if bug.importance in gh_importances:
if bug.importance == 'wishlist':
label = bug.importance
else:
label = 'prio-{0}'.format(bug.importance)
gh.issues.add_label(repo, issue.number, label)
if bug.milestone:
label = 'milestone-{0}'.format(bug.milestone).replace('.','_')
gh.issues.add_label(repo, issue.number, label)
# Add original message thread
for num, message in enumerate(bug.messages):
# Messages on LP are numbered from 1
comment = format_message(num+1, message)
gh.issues.comment(repo, issue.number, comment)
time.sleep(0.5) # soft sleep after each message to prevent gh block
if bug.status in ['fix_committed', 'fix_released', 'invalid']:
gh.issues.close(repo, issue.number)
# too many fast requests and gh will block us, so sleep for a while
# I just eyeballed these values by trial and error.
time.sleep(1) # soft sleep after each request
# And longer one after every batch
batch_size = 10
tsleep = 60
if (len(gh_issues) % batch_size)==0:
print
print '*** SLEEPING for {0} seconds to avoid github blocking... ***'.format(tsleep)
sys.stdout.flush()
time.sleep(tsleep)
# Summary report
print
print '*'*80
print 'Summary of GitHub issues filed:'
print gh_issues
print 'Total:', len(gh_issues)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from comic_dl import globalFunctions
import json
import os
import logging
import base64
"""A HUGE thanks to @abcfy2 for his amazing implementation of the ac.qq.com APIs.
Original code for ac.qq.com : https://github.com/abcfy2/getComic/
"""
class AcQq(object):
def __init__(self, manga_url, download_directory, chapter_range, **kwargs):
current_directory = kwargs.get("current_directory")
conversion = kwargs.get("conversion")
keep_files = kwargs.get("keep_files")
self.logging = kwargs.get("log_flag")
self.sorting = kwargs.get("sorting_order")
self.comic_name = self.name_cleaner(manga_url)
self.print_index = kwargs.get("print_index")
if "/index/" in str(manga_url):
self.single_chapter(manga_url, self.comic_name, download_directory, conversion=conversion,
keep_files=keep_files)
else:
self.full_series(comic_url=manga_url, comic_name=self.comic_name, sorting=self.sorting,
download_directory=download_directory, chapter_range=chapter_range, conversion=conversion,
keep_files=keep_files)
def name_cleaner(self, url):
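        # The comic id is the run of digits after "id/" in the URL, e.g.
        # "https://ac.qq.com/Comic/comicInfo/id/538359" -> "538359".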
initial_name = re.search(r"id/(\d+)", str(url)).group(1)
        # Drop anything that is not alphanumeric or a space (a no-op for a
        # purely numeric id, kept as a safety net).
        safe_name = re.sub(r"[^0-9a-zA-Z ]", "", str(initial_name))
manga_name = str(safe_name.title()).replace("_", " ")
return manga_name
def single_chapter(self, comic_url, comic_name, download_directory, conversion, keep_files):
chapter_number = re.search(r"cid/(\d+)", str(comic_url)).group(1)
source, cookies_main = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url)
base64data = re.findall(r"DATA\s*=\s*'(.+?)'", str(source))[0][1:]
data = re.findall(r"data:\s*'(.+?)',", str(source))
nonce = re.findall(r'data-mpmvr="(.+?)"', str(source))[0]
logging.debug("base64data : %s" % base64data)
img_detail_json = json.loads(self.__decode_base64_data(base64data))
logging.debug("img_detail_json : %s" % img_detail_json)
img_list = []
for img_url in img_detail_json.get('picture'):
img_list.append(img_url['url'])
logging.debug("img_list : %s" % img_list)
file_directory = globalFunctions.GlobalFunctions().create_file_directory(chapter_number, comic_name)
# directory_path = os.path.realpath(file_directory)
directory_path = os.path.realpath(str(download_directory) + "/" + str(file_directory))
if not os.path.exists(directory_path):
os.makedirs(directory_path)
links = []
file_names = []
for current_chapter, image_link in enumerate(img_list):
# file_name = "0" + str(img_list.index(image_link)) + "." + str(image_link).split(".")[-1]
# file_name = str(current_chapter) + '.' + str(image_link).split(".")[-1]
current_chapter += 1
file_name = str(globalFunctions.GlobalFunctions().prepend_zeroes(current_chapter, len(img_list))) + ".jpg"
logging.debug("image_link : %s" % image_link)
file_names.append(file_name)
links.append(image_link)
globalFunctions.GlobalFunctions().multithread_download(chapter_number, comic_name, comic_url, directory_path,
file_names, links, self.logging)
globalFunctions.GlobalFunctions().conversion(directory_path, conversion, keep_files, comic_name,
chapter_number)
return 0
def full_series(self, comic_url, comic_name, sorting, download_directory, chapter_range, conversion, keep_files):
# TODO fix, broken, doesn't return a json anymore
chapter_list = "https://ac.qq.com/Comic/comicInfo/id/" + str(comic_name)
source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=chapter_list)
all_links = []
raw_chapters_table = source.find_all('ol', {'class': 'chapter-page-all works-chapter-list'})
for table_data in raw_chapters_table:
x = table_data.findAll('a')
for a in x:
if "/ComicView/" in str(a['href']):
all_links.append("https://ac.qq.com" + str(a['href']).strip())
# content_json = json.loads(str(source))
# logging.debug("content_json : %s" % content_json)
# last = int(content_json['last'])
# first = int(content_json['first'])
# logging.debug("first : %s" % first)
# logging.debug("last : %s" % last)
#
# all_links = []
#
# for chapter_number in range(first, last + 1):
# "http://ac.qq.com/ComicView/index/id/538359/cid/114"
# chapter_url = "http://ac.qq.com/ComicView/index/id/%s/cid/%s" % (comic_name, chapter_number)
# all_links.append(chapter_url)
logging.debug("all_links : %s" % all_links)
if chapter_range != "All":
            # Subtract 1 to convert the 1-based chapter number to a 0-based list index.
starting = int(str(chapter_range).split("-")[0]) - 1
if str(chapter_range).split("-")[1].isdigit():
ending = int(str(chapter_range).split("-")[1])
else:
ending = len(all_links)
indexes = [x for x in range(starting, ending)]
            # The [::-1] reverses the selected slice so that it starts from
            # the 1st episode; the sorting branches below reverse it again.
all_links = [all_links[x] for x in indexes][::-1]
if self.print_index:
idx = 0
for chap_link in all_links:
idx = idx + 1
print(str(idx) + ": " + str(chap_link))
return
if str(sorting).lower() in ['new', 'desc', 'descending', 'latest']:
for chap_link in all_links:
try:
logging.debug("chap_link : %s" % chap_link)
self.single_chapter(comic_url=str(chap_link), comic_name=comic_name,
download_directory=download_directory, conversion=conversion,
keep_files=keep_files)
# if chapter range contains "__EnD__" write new value to config.json
# @Chr1st-oo - modified condition due to some changes on automatic download and config.
if chapter_range != "All" and (chapter_range.split("-")[1] == "__EnD__" or len(chapter_range.split("-")) == 3):
globalFunctions.GlobalFunctions().addOne(comic_url)
except Exception as single_chapter_exception:
logging.debug("Single Chapter Exception : %s" % single_chapter_exception)
print("Some excpetion occured with the details : \n%s" % single_chapter_exception)
pass
elif str(sorting).lower() in ['old', 'asc', 'ascending', 'oldest', 'a']:
for chap_link in all_links[::-1]:
try:
logging.debug("chap_link : %s" % chap_link)
self.single_chapter(comic_url=str(chap_link), comic_name=comic_name,
download_directory=download_directory, conversion=conversion,
keep_files=keep_files)
# if chapter range contains "__EnD__" write new value to config.json
# @Chr1st-oo - modified condition due to some changes on automatic download and config.
if chapter_range != "All" and (chapter_range.split("-")[1] == "__EnD__" or len(chapter_range.split("-")) == 3):
globalFunctions.GlobalFunctions().addOne(comic_url)
except Exception as single_chapter_exception:
logging.debug("Single Chapter Exception : %s" % single_chapter_exception)
print("Some excpetion occured with the details : \n%s" % single_chapter_exception)
pass
return 0
    def __decode_data(self, data, nonce):
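        # The obfuscated DATA payload is a base64 string with junk characters
        # spliced in; the nonce encodes (index, junk) pairs, e.g. "12ab7xY"
        # means junk 'ab' at index 12 and 'xY' at index 7. The pairs are
        # undone in reverse order before base64-decoding the result.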
t = list(data)
n = re.findall(r'(\d+)([a-zA-Z]+)', nonce)
n_len = len(n)
index = n_len - 1
while index >= 0:
locate = int(n[index][0]) & 255
del t[locate:locate + len(n[index][1])]
index = index - 1
base64_str = ''.join(t)
json_str = base64.b64decode(base64_str).decode('utf-8')
return json.loads(json_str)
def __decode_base64_data(self, base64data):
        base64DecodeChars = [
            -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
            -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
            -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, -1, 63,
            52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1,
            -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
            15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1,
            -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
            41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, -1, -1]
data_length = len(base64data)
i = 0
out = ""
c1 = c2 = c3 = c4 = 0
while i < data_length:
while True:
c1 = base64DecodeChars[ord(base64data[i]) & 255]
i += 1
if not (i < data_length and c1 == -1):
break
if c1 == -1:
break
while True:
c2 = base64DecodeChars[ord(base64data[i]) & 255]
i += 1
if not (i < data_length and c2 == -1):
break
if c2 == -1:
break
out += chr(c1 << 2 | (c2 & 48) >> 4)
while True:
c3 = ord(base64data[i]) & 255
i += 1
if c3 == 61:
return out
c3 = base64DecodeChars[c3]
                if not (i < data_length and c3 == -1):
break
if c3 == -1:
break
out += chr((c2 & 15) << 4 | (c3 & 60) >> 2)
while True:
c4 = ord(base64data[i]) & 255
i += 1
if c4 == 61:
return out
c4 = base64DecodeChars[c4]
                if not (i < data_length and c4 == -1):
break
out += chr((c3 & 3) << 6 | c4)
return out
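# Minimal sketch (not part of the original module): exercising the
# hand-rolled base64 decoder above without triggering the network-heavy
# constructor. The sample payload is hypothetical.
def _demo_decode_base64_data():
    import base64 as _b64
    dummy = object.__new__(AcQq)  # bypass __init__, which starts downloads
    encoded = _b64.b64encode(b'{"picture": []}').decode('ascii')
    assert dummy._AcQq__decode_base64_data(encoded) == '{"picture": []}'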
|
|
#!/usr/bin/env python
import numpy as np
from openff.toolkit.typing.engines.smirnoff.forcefield import ForceField
# Function definitions for parsing sections within parameter file
def _parse_nonbon_line( line ):
"""Parse an AMBER frcmod nonbon line and return relevant parameters in a dictionary. AMBER uses rmin_half and epsilon in angstroms and kilocalories per mole."""
tmp = line.split()
params = {}
params['smirks'] = tmp[0]
params['rmin_half'] = tmp[1]
params['epsilon'] = tmp[2]
return params
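# Illustrative sketch (hypothetical frcmod line): the first column is a
# SMIRKS pattern, followed by rmin_half (angstroms) and epsilon (kcal/mol).
def _demo_parse_nonbon_line():
    params = _parse_nonbon_line('[#1:1] 0.6000 0.0157')
    assert params == {'smirks': '[#1:1]',
                      'rmin_half': '0.6000',
                      'epsilon': '0.0157'}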
def _parse_bond_line( line ):
"""Parse an AMBER frcmod BOND line and return relevant parameters in a dictionary. AMBER uses length and force constant, with the factor of two dropped. Here we multiply by the factor of two before returning. Units are angstroms and kilocalories per mole per square angstrom."""
tmp = line.split()
params = {}
params['smirks'] = tmp[0]
params['k'] = str(2*float(tmp[1]))
params['length'] = tmp[2]
return params
def _parse_angl_line( line ):
"""Parse an AMBER frcmod ANGL line and return relevant parameters in a dictionary. AMBER uses angle and force constant, with the factor of two dropped. Here we multiply by the factor of two before returning. Units are degrees and kilocalories per mole."""
tmp = line.split()
params = {}
params['smirks'] = tmp[0]
params['k'] = str(2*float(tmp[1]))
params['angle'] = tmp[2]
return params
def _parse_dihe_line( line ):
"""Parse an AMBER frcmod DIHE line and return relevant parameters in a dictionary. Units for k are kilocalories per mole."""
tmp = line.split()
params = {}
params['smirks'] = tmp[0]
params['idivf1'] = tmp[1]
params['k1'] = tmp[2]
params['phase1'] = tmp[3]
params['periodicity1'] = str(int(np.abs(float(tmp[4]))))
return params
def _parse_impr_line( line ):
"""Parse an AMBER frcmod DIHE line and return relevant parameters in a dictionary. Units for k are kilocalories per mole."""
tmp = line.split()
params = {}
params['smirks'] = tmp[0]
params['k1'] = tmp[1]
params['phase1'] = tmp[2]
params['periodicity1'] = str(int(np.abs(float(tmp[3]))))
return params
def parse_frcmod(frcmod_file_name):
"""
Parse a smirnoffish FRCMOD file, returning a dict of lists containing the individual parameters that would go
into a "SMIRNOFF data" dictionary. When combined with section headers, this dictionary is suitable to be fed
into the ForceField initializer
Parameters
----------
frcmod_file_name : str
path to a smirnoffish FRCMOD file
Returns
-------
parameter_lists : dict of list
Hierarchical dict of lists, containing parameter entries compliant with the SMIRNOFF spec version 0.2
author : str or None
Contents of the Author field, if any
date: str or None
Contents of the Date field, if any
"""
# Obtain sections from target file
file = open(frcmod_file_name, 'r')
text = file.readlines()
file.close()
sections = {}
# Section names from frcmod which we will parse
sec_names = ['NONBON', 'BOND', 'ANGL', 'IMPR', 'DIHE']
sec_name_2_param_prefix = {'NONBON':'n' , 'BOND':'b', 'ANGL':'a', 'DIHE':'t', 'IMPR':'i'}
sec_name_2_param_index = dict((sn, 1) for sn in sec_names)
# Tags that will be used in the FFXML for these (same order)
force_tags = ['Atom', 'Bond', 'Angle', 'Improper', 'Proper']
# Force names in the FFXML (same order)
force_sections = ['vdW', 'Bonds', 'Angles', 'ImproperTorsions', 'ProperTorsions']
sec_name_2_force_tag = dict((sn, t) for sn, t in zip(sec_names, force_tags))
sec_name_2_force_section = dict((sn, fs) for sn, fs in zip(sec_names, force_sections))
date = None
author = None
for line in text:
linesp = line.split()
# Skip lines starting with comment or which are blank
if line[0]=='#' or len(linesp) < 1:
continue
# Check first entry to see if it's a section name, if so initialize storage
if linesp[0] in sec_names:
this_sec = linesp[0]
sections[this_sec] = []
elif linesp[0] in ['DATE', 'AUTHOR']:
this_sec = linesp[0]
# Otherwise store
else:
if this_sec == 'DATE':
date = line.strip()
elif this_sec == 'AUTHOR':
author = line.strip()
else:
sections[this_sec].append(line)
parameter_lists = {}
# Use functions to parse sections from target file and add parameters to force field
for (sec_idx, sec_name) in enumerate(sec_names):
param_list = []
# sections[sec_name] = []
for line in sections[sec_name]:
# Parse line for parameters
if sec_name=='NONBON':
param = _parse_nonbon_line(line)
elif sec_name=='BOND':
param = _parse_bond_line(line)
elif sec_name=='DIHE':
param = _parse_dihe_line(line)
elif sec_name=='IMPR':
param = _parse_impr_line(line)
elif sec_name=='ANGL':
param = _parse_angl_line(line)
# Add parameter ID
param_prefix = sec_name_2_param_prefix[sec_name]
param_index = sec_name_2_param_index[sec_name]
param['id'] = param_prefix + str( param_index )
# If we're dealing with a simple parameter, just append it to the list
            if sec_name not in ['IMPR', 'DIHE']:
param_list.append(param)
# Increment parameter index
sec_name_2_param_index[sec_name] = sec_name_2_param_index[sec_name] + 1
# If we're dealing with a potentially multi-term parameter, check if this SMIRKS is already in the list
else:
param_matched = False
for existing_param in param_list:
if existing_param['smirks'] != param['smirks']:
continue
param_matched = True
# Find the lowest unoccupied torsion term index
term_index = 1
while f'k{term_index}' in existing_param:
term_index += 1
existing_param[f'k{term_index}'] = param['k1']
existing_param[f'phase{term_index}'] = param['phase1']
existing_param[f'periodicity{term_index}'] = param['periodicity1']
existing_param[f'idivf{term_index}'] = param['idivf1']
break
# If the SMIRKS isn't already known, initialize this as a new parameter, since it could be the
# first term of a multiterm torsion
                if not param_matched:
param_list.append(param)
# Increment parameter index ONLY for the first term of a torsion that's found.
sec_name_2_param_index[sec_name] = sec_name_2_param_index[sec_name] + 1
force_section = sec_name_2_force_section[sec_name]
force_tag = sec_name_2_force_tag[sec_name]
parameter_lists[force_section] = {force_tag: param_list.copy()}
return parameter_lists, author, date
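# Sketch (hypothetical frcmod content, not part of the original module): two
# DIHE lines sharing a SMIRKS pattern are merged into a single multi-term
# torsion parameter (k1/k2, phase1/phase2, ...).
def _demo_multiterm_merge():
    import tempfile
    content = ('NONBON\nBOND\nANGL\nIMPR\nDIHE\n'
               '[*:1]~[#6:2]~[#6:3]~[*:4]  1  0.156  0.0  3.\n'
               '[*:1]~[#6:2]~[#6:3]~[*:4]  1  0.250  180.0  2.\n')
    with tempfile.NamedTemporaryFile('w', suffix='.frcmod',
                                     delete=False) as handle:
        handle.write(content)
    parameter_lists, _, _ = parse_frcmod(handle.name)
    torsion = parameter_lists['ProperTorsions']['Proper'][0]
    assert torsion['k1'] == '0.156' and torsion['k2'] == '0.250'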
# Main conversion functionality
def convert_frcmod_to_ffxml( infile, inxml, outxml ):
"""Convert a modified AMBER frcmod (with SMIRKS replacing atom types) to SMIRNOFF ffxml format by inserting parameters into a template ffxml file.
Parameters
----------
infile : str
File name of input SMIRKS-ified frcmod file containing parameters
inxml : str
File name of template SMIRNOFF FFXML file into which to insert these parameters.
outxml : str
File name of resulting output SMIRNOFF FFXML
    Notes
    -----
Input XML file will normally be the template of a SMIRNOFF XML file without any parameters present (but with requisite force types already specified).
"""
from openff.toolkit.typing.engines.smirnoff import XMLParameterIOHandler
io_handler = XMLParameterIOHandler()
smirnoff_data = io_handler.parse_file(inxml)
parameter_lists, author, date = parse_frcmod(infile)
for section_tag in parameter_lists:
# Nest the actual parameter lists one level deeper, by their parameter_list_tag (eg Bonds > Bond > List)
parameter_list_tag = list(parameter_lists[section_tag].keys())[0]
smirnoff_data['SMIRNOFF'][section_tag][parameter_list_tag] = parameter_lists[section_tag][parameter_list_tag]
ff=ForceField()
ff._load_smirnoff_data(smirnoff_data)
print(ff.to_string())
# TODO: Add author and date
ff.to_file(outxml, io_format='XML')
# # Write SMIRNOFF XML file
# ff.writeFile(outxml)
#
# # Roundtrip to fix formatting (for some reason etree won't format it properly on first write after modification)
# tmp = ForceField(outxml)
# tmp.writeFile(outxml)
if __name__=="__main__":
from optparse import OptionParser
usage_string="""\
Convert specified SMIRKS-ified AMBER frcmod file into SMIRNOFF FFXML format, inserting converted parameters into a template FFXML file and writing to a new output file.
usage: convert_frcmod_0.2.py --frcmod test.frcmod --template template.offxml --xml test.offxml
"""
parser = OptionParser(usage=usage_string)
parser.add_option('-f', '--frcmod', type = "string", dest='infile', default = None, action="store", help="Name of input smirks-ified frcmod file.")
parser.add_option('-t', '--template', type="string", dest='inxml', default = None, action ="store", help="Name of template SMIRNOFF offxml file.")
parser.add_option('-o', '--xml', type="string", dest='outxml', default =None, action="store", help="Name of output SMIRNOFF offxml file.")
(options,args) = parser.parse_args()
if (options.infile is None) or (options.inxml is None) or (options.outxml is None):
parser.print_help()
parser.error("Input frcmod and template files and output FFXML file must be specified.")
convert_frcmod_to_ffxml( options.infile, options.inxml, options.outxml )
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example / benchmark for building a PTB LSTM model.
Trains the model described in:
(Zaremba, et al.) Recurrent Neural Network Regularization
http://arxiv.org/abs/1409.2329
There are 3 supported model configurations:
===========================================
| config | epochs | train | valid | test
===========================================
| small | 13 | 37.99 | 121.39 | 115.91
| medium | 39 | 48.45 | 86.16 | 82.07
| large | 55 | 37.87 | 82.62 | 78.29
The exact results may vary depending on the random initialization.
The hyperparameters used in the model:
- init_scale - the initial scale of the weights
- learning_rate - the initial value of the learning rate
- max_grad_norm - the maximum permissible norm of the gradient
- num_layers - the number of LSTM layers
- num_steps - the number of unrolled steps of LSTM
- hidden_size - the number of LSTM units
- max_epoch - the number of epochs trained with the initial learning rate
- max_max_epoch - the total number of epochs for training
- keep_prob - the probability of keeping weights in the dropout layer
- lr_decay - the decay of the learning rate for each epoch after "max_epoch"
- batch_size - the batch size
The data required for this example is in the data/ dir of the
PTB dataset from Tomas Mikolov's webpage:
$ wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
$ tar xvf simple-examples.tgz
To run:
$ python ptb_word_lm.py --data_path=simple-examples/data/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.models.rnn import rnn_cell
from tensorflow.models.rnn import seq2seq
from tensorflow.models.rnn.ptb import reader
flags = tf.flags
logging = tf.logging
flags.DEFINE_string(
"model", "small",
"A type of model. Possible options are: small, medium, large.")
flags.DEFINE_string("data_path", None, "data_path")
FLAGS = flags.FLAGS
class PTBModel(object):
"""The PTB model."""
def __init__(self, is_training, config):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
size = config.hidden_size
vocab_size = config.vocab_size
self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])
# Slightly better results can be obtained with forget gate biases
# initialized to 1 but the hyperparameters of the model would need to be
# different than reported in the paper.
lstm_cell = rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
if is_training and config.keep_prob < 1:
lstm_cell = rnn_cell.DropoutWrapper(
lstm_cell, output_keep_prob=config.keep_prob)
cell = rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers)
self._initial_state = cell.zero_state(batch_size, tf.float32)
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [vocab_size, size])
inputs = tf.nn.embedding_lookup(embedding, self._input_data)
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
# Simplified version of tensorflow.models.rnn.rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
# The alternative version of the code below is:
#
# from tensorflow.models.rnn import rnn
# inputs = [tf.squeeze(input_, [1])
# for input_ in tf.split(1, num_steps, inputs)]
# outputs, states = rnn.rnn(cell, inputs, initial_state=self._initial_state)
outputs = []
states = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step in range(num_steps):
if time_step > 0: tf.get_variable_scope().reuse_variables()
(cell_output, state) = cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
states.append(state)
output = tf.reshape(tf.concat(1, outputs), [-1, size])
logits = tf.nn.xw_plus_b(output,
tf.get_variable("softmax_w", [size, vocab_size]),
tf.get_variable("softmax_b", [vocab_size]))
loss = seq2seq.sequence_loss_by_example([logits],
[tf.reshape(self._targets, [-1])],
[tf.ones([batch_size * num_steps])],
vocab_size)
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = states[-1]
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self.lr)
self._train_op = optimizer.apply_gradients(zip(grads, tvars))
def assign_lr(self, session, lr_value):
session.run(tf.assign(self.lr, lr_value))
@property
def input_data(self):
return self._input_data
@property
def targets(self):
return self._targets
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
class SmallConfig(object):
"""Small config."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 13
keep_prob = 1.0
lr_decay = 0.5
batch_size = 20
vocab_size = 10000
class MediumConfig(object):
"""Medium config."""
init_scale = 0.05
learning_rate = 1.0
max_grad_norm = 5
num_layers = 2
num_steps = 35
hidden_size = 650
max_epoch = 6
max_max_epoch = 39
keep_prob = 0.5
lr_decay = 0.8
batch_size = 20
vocab_size = 10000
class LargeConfig(object):
"""Large config."""
init_scale = 0.04
learning_rate = 1.0
max_grad_norm = 10
num_layers = 2
num_steps = 35
hidden_size = 1500
max_epoch = 14
max_max_epoch = 55
keep_prob = 0.35
lr_decay = 1 / 1.15
batch_size = 20
vocab_size = 10000
def run_epoch(session, m, data, eval_op, verbose=False):
"""Runs the model on the given data."""
epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
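  # Each step consumes a (batch_size x num_steps) block of tokens; the -1
  # leaves room for the one-step-shifted targets.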
start_time = time.time()
costs = 0.0
iters = 0
state = m.initial_state.eval()
for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
m.num_steps)):
cost, state, _ = session.run([m.cost, m.final_state, eval_op],
{m.input_data: x,
m.targets: y,
m.initial_state: state})
costs += cost
iters += m.num_steps
if verbose and step % (epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / epoch_size, np.exp(costs / iters),
iters * m.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
def get_config():
if FLAGS.model == "small":
return SmallConfig()
elif FLAGS.model == "medium":
return MediumConfig()
elif FLAGS.model == "large":
return LargeConfig()
else:
raise ValueError("Invalid model: %s", FLAGS.model)
def main(unused_args):
if not FLAGS.data_path:
raise ValueError("Must set --data_path to PTB data directory")
raw_data = reader.ptb_raw_data(FLAGS.data_path)
train_data, valid_data, test_data, _ = raw_data
config = get_config()
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
with tf.Graph().as_default(), tf.Session() as session:
initializer = tf.random_uniform_initializer(-config.init_scale,
config.init_scale)
with tf.variable_scope("model", reuse=None, initializer=initializer):
m = PTBModel(is_training=True, config=config)
with tf.variable_scope("model", reuse=True, initializer=initializer):
mvalid = PTBModel(is_training=False, config=config)
mtest = PTBModel(is_training=False, config=eval_config)
tf.initialize_all_variables().run()
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
m.assign_lr(session, config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
train_perplexity = run_epoch(session, m, train_data, m.train_op,
verbose=True)
print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
print("Test Perplexity: %.3f" % test_perplexity)
if __name__ == "__main__":
tf.app.run()
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Helpers for evaluating an agent on Jumpy World."""
import io
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow.compat.v2 as tf
sns.set_style('white')
def create_evaluation_grid(nn_model,
imitation_data,
mc_samples=1,
color_name='WHITE'):
"""Evaluates an agent on all environments in imitation_data."""
obstacle_positions = sorted(imitation_data.keys())
floor_heights = sorted(imitation_data[obstacle_positions[0]].keys())
evaluation_grid = np.zeros((len(obstacle_positions), len(floor_heights)))
for i, pos in enumerate(obstacle_positions):
for j, height in enumerate(floor_heights):
input_observations, optimal_actions, _ = imitation_data[pos][height]
predictions = tf.nn.softmax(
nn_model(input_observations, training=False), axis=-1)
# MC Averaging if using RandConv
for _ in range(mc_samples - 1):
predictions += tf.nn.softmax(
nn_model(input_observations, training=False), axis=-1)
predictions /= mc_samples
greedy_actions = np.array(
[1 if pi[1] > pi[0] else 0 for pi in predictions])
action_diff = greedy_actions - np.array(optimal_actions)
if color_name == 'GREEN':
# The collision happens when the agent touches the block
argmax_val = pos - 5
elif color_name in ['WHITE', 'RED']:
argmax_val = np.argmax(optimal_actions)
else:
raise ValueError(f'{color_name} is not a valid obstacle color.')
binary_mask = np.arange(len(optimal_actions)) <= argmax_val
is_optimal = sum(binary_mask * np.abs(action_diff)) == 0
evaluation_grid[i][j] = is_optimal
return evaluation_grid
def neighbour_indices(x, y, max_x, max_y):
  valid_indices = []
  for index in [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]:
is_x_valid = (0 <= index[0]) and (index[0] < max_x)
is_y_valid = (0 <= index[1]) and (index[1] < max_y)
if is_x_valid and is_y_valid:
valid_indices.append(index)
return valid_indices
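# Illustrative sketch (not part of the original module): the corner cell of a
# 3x3 grid has exactly two in-bounds neighbours.
def _demo_neighbour_indices():
  assert sorted(neighbour_indices(0, 0, 3, 3)) == [(0, 1), (1, 0)]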
def generate_validation_positions(training_positions, min_obs_position,
min_floor_height, num_positions, num_heights):
"""Generate validation positions."""
val_pos = []
for (obstacle_pos, floor_height) in training_positions:
pos_index = obstacle_pos - min_obs_position
height_index = floor_height - min_floor_height
    validation_indices = neighbour_indices(
        pos_index, height_index, num_positions, num_heights)
for val_pos_index, val_height_index in validation_indices:
val_pos.append((val_pos_index + min_obs_position,
val_height_index + min_floor_height))
return list(set(val_pos))
def num_solved_tasks(evaluation_grid, training_positions, validation_positions,
min_obs_position, min_floor_height):
"""Calculates number of tasks solved in training, validation and test sets."""
solved_envs = {'train': 0, 'test': 0}
if validation_positions:
solved_envs['validation'] = 0
num_positions, num_heights = evaluation_grid.shape
is_train_or_validation = np.zeros_like(evaluation_grid, dtype=np.int32)
for (obstacle_pos, floor_height) in training_positions:
pos_index = obstacle_pos - min_obs_position
height_index = floor_height - min_floor_height
is_train_or_validation[pos_index][height_index] = 1
for (obstacle_pos, floor_height) in validation_positions:
pos_index = obstacle_pos - min_obs_position
height_index = floor_height - min_floor_height
is_train_or_validation[pos_index][height_index] = 2
for pos_index in range(num_positions):
for height_index in range(num_heights):
if is_train_or_validation[pos_index][height_index] == 1:
solved_envs['train'] += evaluation_grid[pos_index][height_index]
elif is_train_or_validation[pos_index][height_index] == 2:
solved_envs['validation'] += evaluation_grid[pos_index][height_index]
else:
solved_envs['test'] += evaluation_grid[pos_index][height_index]
return solved_envs
def plot_evaluation_grid(grid, training_positions, min_obs_position,
min_floor_height):
"""Plots the evaluation grid."""
fig, ax = plt.subplots(figsize=(7, 9))
grid_x, grid_y = grid.shape
extent = (0, grid_x, grid_y, 0)
ax.imshow(grid.T, extent=extent, origin='lower')
x_ticks = np.arange(grid_x)
y_ticks = np.arange(grid_y)
ax.set_xticks(x_ticks)
ax.set_yticks(y_ticks)
ax.tick_params(labelbottom=False, labelleft=False)
# Loop over data dimensions and create text annotations.
for (obstacle_pos, floor_height) in training_positions:
pos_index = obstacle_pos - min_obs_position
height_index = floor_height - min_floor_height
ax.text(
pos_index + 0.5,
height_index + 0.5,
'T',
ha='center',
va='center',
color='r')
ax.grid(color='w', linewidth=1)
fig.tight_layout()
return fig
def plot_to_image(figure):
"""Converts the plot specified by 'figure' to a PNG image and returns it."""
# Save the plot to a PNG in memory.
buf = io.BytesIO()
figure.savefig(buf, format='png')
# Closing the figure prevents it from being displayed directly inside
# the notebook.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
def np_array_figure(arr):
fig, ax = plt.subplots(figsize=(6, 6))
im = ax.imshow(arr.T, origin='lower', cmap='hot', interpolation='nearest')
fig.colorbar(im, ax=ax)
return plot_to_image(fig)
def sinkhorn_logspace(logits_rows, logits_cols, costs, n_steps,
entropy_strength):
"""Sinkhorn algorithm for (unbalanced) entropy-regularized optimal transport.
The updates are computed in log-space and are thus more stable.
Args:
logits_rows: (..., n) tensor with the logits of the row-sum constraint
logits_cols: (..., m) tensor with the logits of the column-sum constraint
costs: (..., n, m) tensor holding the transportation costs
n_steps: How many Sinkhorn iterations to perform.
entropy_strength: The strength of the entropic regularizer
Returns:
    (..., n, m) tensor with the computed optimal transportation matrices
"""
assert n_steps > 0
assert entropy_strength > 0
logits_rows = tf.expand_dims(logits_rows, axis=-1)
logits_cols = tf.expand_dims(logits_cols, axis=-2)
log_kernel = -costs / entropy_strength + logits_rows + logits_cols
log_lbd_cols = tf.zeros_like(logits_cols)
for _ in range(n_steps):
log_lbd_rows = logits_rows - tf.reduce_logsumexp(
log_kernel + log_lbd_cols, axis=-1, keepdims=True)
log_lbd_cols = logits_cols - tf.reduce_logsumexp(
log_kernel + log_lbd_rows, axis=-2, keepdims=True)
return tf.exp(log_lbd_cols + log_kernel + log_lbd_rows)
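# Sketch (not part of the original module): with uniform marginals, the
# transport plan returned by sinkhorn_logspace should have rows and columns
# summing approximately to those marginals.
def _demo_sinkhorn_marginals():
  logits = tf.math.log(tf.fill([3], 1.0 / 3.0))
  costs = tf.random.uniform([3, 3])
  plan = sinkhorn_logspace(logits, logits, costs,
                           n_steps=50, entropy_strength=0.1)
  np.testing.assert_allclose(
      tf.reduce_sum(plan, axis=-1).numpy(), np.full(3, 1.0 / 3.0), atol=1e-3)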
@tf.function
def induced_coupling(similarity_matrix, n_steps=3, entropy_strength=0.0001):
"""Calculates the coupling induced by the similarity matrix."""
dist_v = tf.ones(similarity_matrix.shape[0])
dist_v /= tf.reduce_sum(dist_v)
dist_v = tf.math.log(dist_v)
coupling = tf.stop_gradient(sinkhorn_logspace(
dist_v,
dist_v,
1 - similarity_matrix,
n_steps=n_steps,
entropy_strength=entropy_strength))
return coupling
|
|
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
This script installs RackHD from GitHub source onto blank Ubuntu 14 or 16 OS via Ansible installer.
This script performs the following functions:
- loads prerequisite packages git, ansible, etc.
- downloads RackHD source to management server from repos specified in global_config.json
- installs using rackhd_local.yml playbook
- sets up networking
- loads configuration files
- starts up and verifies operations
NOTES:
If the host is rebooted, RackHD must be restarted by typing 'sudo nf start' at the console.
usage:
python run_tests.py -ova <ip or host> -test deploy/rackhd_source_install.py
or
python run_tests.py -stack <stack ID> -test deploy/rackhd_source_install.py
'''
import os
import sys
import subprocess
# set path to common libraries
sys.path.append(subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip("\n") + "/test/fit_tests/common")
import fit_common
# set proxy if required
PROXYVARS = ''
if 'proxy' in fit_common.GLOBAL_CONFIG['repos'] and fit_common.GLOBAL_CONFIG['repos']['proxy'] != '':
PROXYVARS = "export http_proxy=" + fit_common.GLOBAL_CONFIG['repos']['proxy'] + ";" + \
"export https_proxy=" + fit_common.GLOBAL_CONFIG['repos']['proxy'] + ";"
# maven proxy settings
maven_proxy = open('settings.xml', 'w')
maven_proxy.write(
'<settings '
'xmlns="http://maven.apache.org/SETTINGS/1.0.0" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 '
'http://maven.apache.org/xsd/settings-1.0.0.xsd">'
'<proxies>'
'<proxy>'
'<id>mavenproxy</id>'
'<active>true</active>'
'<protocol>https</protocol>'
'<host>' + fit_common.GLOBAL_CONFIG['repos']['proxyhost'] + '</host>'
'<port>' + fit_common.GLOBAL_CONFIG['repos']['proxyport'] + '</port>'
'<nonProxyHosts>localhost</nonProxyHosts>'
'</proxy>'
'</proxies>'
'</settings>')
maven_proxy.close()
fit_common.scp_file_to_ora('settings.xml')
fit_common.remote_shell('mkdir -p ~/.m2;cp settings.xml ~/.m2;mkdir -p /root/.m2;cp settings.xml /root/.m2')
os.remove('settings.xml')
class rackhd_source_install(fit_common.unittest.TestCase):
def test01_install_rackhd_dependencies(self):
print "**** Installing RackHD dependencies."
# update sudoers to preserve proxy environment
sudoersproxy = open("sudoersproxy", 'w')
sudoersproxy.write('Defaults env_keep="HOME no_proxy http_proxy https_proxy"\n')
sudoersproxy.close()
fit_common.remote_shell('pwd')
fit_common.scp_file_to_ora("sudoersproxy")
self.assertEqual(fit_common.remote_shell('cp sudoersproxy /etc/sudoers.d/'
)['exitcode'], 0, "sudoersproxy config failure.")
os.remove('sudoersproxy')
# install git
self.assertEqual(fit_common.remote_shell(PROXYVARS + "apt-get -y install git")['exitcode'], 0, "Git install failure.")
self.assertEqual(fit_common.remote_shell("git config --global http.sslverify false")['exitcode'], 0, "Git config failure.")
if 'proxy' in fit_common.GLOBAL_CONFIG['repos'] and fit_common.GLOBAL_CONFIG['repos']['proxy'] != '':
self.assertEqual(fit_common.remote_shell("git config --global http.proxy " + fit_common.GLOBAL_CONFIG['repos']['proxy']
)['exitcode'], 0, "Git proxy config failure.")
# install Ansible
self.assertEqual(fit_common.remote_shell(PROXYVARS + "apt-get -y update")['exitcode'], 0, "Update failure.")
self.assertEqual(fit_common.remote_shell(PROXYVARS + "cd ~;apt-get -y install ansible")['exitcode'], 0, "Ansible Install failure.")
# create startup files
self.assertEqual(fit_common.remote_shell(
"touch /etc/default/on-dhcp-proxy /etc/default/on-http /etc/default/on-tftp /etc/default/on-syslog /etc/default/on-taskgraph"
)['exitcode'], 0, "Startup files failure.")
def test02_clone_rackhd_source(self):
print "**** Cloning RackHD source."
modules = [
"on-core",
"on-dhcp-proxy",
"on-http",
"on-statsd",
"on-syslog",
"on-taskgraph",
"on-tasks",
"on-tftp",
"on-tools",
"on-wss"
]
# clone base repo
fit_common.remote_shell('rm -rf ~/rackhd')
self.assertEqual(fit_common.remote_shell(PROXYVARS + "git clone "
+ fit_common.GLOBAL_CONFIG['repos']['install']['rackhd']['repo']
+ " ~/rackhd"
)['exitcode'], 0, "RackHD git clone failure.")
self.assertEqual(fit_common.remote_shell("cd ~/rackhd/" + ";git checkout "
+ fit_common.GLOBAL_CONFIG['repos']['install']['rackhd']['branch']
)['exitcode'], 0, "Branch not found on RackHD repo.")
# clone modules
for repo in modules:
self.assertEqual(fit_common.remote_shell(PROXYVARS
+ "rm -rf ~/rackhd/" + repo + ";"
+ "git clone "
+ fit_common.GLOBAL_CONFIG['repos']['install'][repo]['repo']
+ " ~/rackhd/" + repo
)['exitcode'], 0, "RackHD git clone module failure:" + repo)
self.assertEqual(fit_common.remote_shell("cd ~/rackhd/" + repo + ";git checkout "
+ fit_common.GLOBAL_CONFIG['repos']['install'][repo]['branch']
)['exitcode'], 0, "Branch not found on module:" + repo)
def test03_run_ansible_installer(self):
print "**** Run RackHD Ansible installer."
self.assertEqual(fit_common.remote_shell(PROXYVARS +
"cd ~/rackhd/packer/ansible/;"
"ansible-playbook -i 'local,' -c local rackhd_local.yml",
timeout=2000,
)['exitcode'], 0, "RackHD Install failure.")
def test04_install_network_config(self):
print "**** Installing RackHD network config."
# collect nic names
getifs = fit_common.remote_shell("ifconfig -s -a |tail -n +2 |grep -v -e Iface -e lo")
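        # 'ifconfig -s -a' prints a netstat-style table whose first column is
        # the interface name; tail/grep strip the header and the loopback
        # device.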
# clean out login stuff
splitifs = getifs['stdout'].split('\n')
ifslit = [] # array of valid eth ports
for item in splitifs:
if "assword" not in item and item.split(" ")[0]:
ifslit.append(item.split(" ")[0])
# install control network config
control_cfg = open('control.cfg', 'w')
control_cfg.write(
'auto ' + ifslit[1] + '\n'
'iface ' + ifslit[1] + ' inet static\n'
'address 172.31.128.1\n'
'netmask 255.255.252.0\n'
)
control_cfg.close()
# copy file to ORA
fit_common.scp_file_to_ora('control.cfg')
self.assertEqual(fit_common.remote_shell('cp control.cfg /etc/network/interfaces.d/')['exitcode'], 0, "Control network config failure.")
os.remove('control.cfg')
# startup NIC
fit_common.remote_shell('ip addr add 172.31.128.1/22 dev ' + ifslit[1])
fit_common.remote_shell('ip link set ' + ifslit[1] + ' up')
self.assertEqual(fit_common.remote_shell('ping -c 1 -w 5 172.31.128.1')['exitcode'], 0, 'Control NIC failure.')
# If PDU network adapter is present, configure
try:
ifslit[2]
except IndexError:
print "**** No PDU network will be configured"
else:
pdudirect_cfg = open('pdudirect.cfg', 'w')
pdudirect_cfg.write(
'auto ' + ifslit[2] + '\n'
'iface ' + ifslit[2] + ' inet static\n'
'address 192.168.1.1\n'
'netmask 255.255.255.0\n'
)
pdudirect_cfg.close()
# copy file to ORA
fit_common.scp_file_to_ora('pdudirect.cfg')
self.assertEqual(fit_common.remote_shell('cp pdudirect.cfg /etc/network/interfaces.d/')['exitcode'], 0, "DHCP Config failure.")
os.remove('pdudirect.cfg')
# startup NIC
fit_common.remote_shell('ip addr add 192.168.1.1/24 dev ' + ifslit[2])
fit_common.remote_shell('ip link set ' + ifslit[2] + ' up')
self.assertEqual(fit_common.remote_shell('ping -c 1 -w 5 192.168.1.1')['exitcode'], 0, 'PDU NIC failure.')
#create DHCP config
fit_common.remote_shell('echo INTERFACES=' + ifslit[1] + ' > /etc/default/isc-dhcp-server')
dhcp_conf = open('dhcpd.conf', 'w')
dhcp_conf.write(
'ddns-update-style none;\n'
'option domain-name "example.org";\n'
'option domain-name-servers ns1.example.org, ns2.example.org;\n'
'default-lease-time 600;\n'
'max-lease-time 7200;\n'
'log-facility local7;\n'
'deny duplicates;\n'
'ignore-client-uids true;\n'
'subnet 172.31.128.0 netmask 255.255.252.0 {\n'
' range 172.31.128.2 172.31.131.254;\n'
' option vendor-class-identifier "PXEClient";\n'
'}\n'
)
dhcp_conf.close()
# copy file to ORA
fit_common.scp_file_to_ora('dhcpd.conf')
self.assertEqual(fit_common.remote_shell('cp dhcpd.conf /etc/dhcp/')['exitcode'], 0, "DHCP Config failure.")
os.remove('dhcpd.conf')
def test05_install_rackhd_config_files(self):
print "**** Installing RackHD config files."
# create RackHD config
hdconfig = {
"CIDRNet": "172.31.128.0/22",
"amqp": "amqp://localhost",
"apiServerAddress": "172.31.128.1",
"apiServerPort": 9080,
"arpCacheEnabled": True,
"broadcastaddr": "172.31.131.255",
"dhcpGateway": "172.31.128.1",
"dhcpProxyBindAddress": "172.31.128.1",
"dhcpProxyBindPort": 4011,
"dhcpSubnetMask": "255.255.252.0",
"gatewayaddr": "172.31.128.1",
"httpEndpoints": [
{
"address": "0.0.0.0",
"port": fit_common.GLOBAL_CONFIG['ports']['http'],
"httpsEnabled": False,
"proxiesEnabled": True,
"authEnabled": False,
"routers": "northbound-api-router"
},
{
"address": "0.0.0.0",
"port": fit_common.GLOBAL_CONFIG['ports']['https'],
"httpsEnabled": True,
"proxiesEnabled": True,
"authEnabled": True,
"routers": "northbound-api-router"
},
{
"address": "172.31.128.1",
"port": 9080,
"httpsEnabled": False,
"proxiesEnabled": True,
"authEnabled": False,
"routers": "southbound-api-router"
}
],
"httpDocsRoot": "./build/apidoc",
"httpFileServiceRoot": "./static/files",
"httpFileServiceType": "FileSystem",
"httpProxies": [{
"localPath": "/mirror",
"remotePath": "/",
"server": fit_common.GLOBAL_CONFIG['repos']['mirror']
}],
"httpStaticRoot": "/opt/monorail/static/http",
"minLogLevel": 3,
"authUsername": "admin",
"authPasswordHash": "KcBN9YobNV0wdux8h0fKNqi4uoKCgGl/j8c6YGlG7iA0PB3P9ojbmANGhDlcSBE0iOTIsYsGbtSsbqP4wvsVcw==",
"authPasswordSalt": "zlxkgxjvcFwm0M8sWaGojh25qNYO8tuNWUMN4xKPH93PidwkCAvaX2JItLA3p7BSCWIzkw4GwWuezoMvKf3UXg==",
"authTokenSecret": "RackHDRocks!",
"authTokenExpireIn": 86400,
"mongo": "mongodb://localhost/pxe",
"sharedKey": "qxfO2D3tIJsZACu7UA6Fbw0avowo8r79ALzn+WeuC8M=",
"statsd": "127.0.0.1:8125",
"subnetmask": "255.255.252.0",
"syslogBindAddress": "172.31.128.1",
"syslogBindPort": 514,
"tftpBindAddress": "172.31.128.1",
"tftpBindPort": 69,
"tftpRoot": "./static/tftp",
}
config_json = open('config.json', 'w')
config_json.write(fit_common.json.dumps(hdconfig, sort_keys=True, indent=4))
config_json.close()
# AMQP config files
rabbitmq_config = open('rabbitmq.config', 'w')
rabbitmq_config.write('[{rabbit,[{tcp_listeners, [5672]},{loopback_users, []}]},{rabbitmq_management,[{listener, [{port, 15672},{ip,"127.0.0.1"}]}]}].')
rabbitmq_config.close()
# copy files to ORA
fit_common.scp_file_to_ora('config.json')
fit_common.scp_file_to_ora('rabbitmq.config')
self.assertEqual(fit_common.remote_shell('cp config.json /opt/monorail/')['exitcode'], 0, "RackHD Config file failure.")
self.assertEqual(fit_common.remote_shell('cp rabbitmq.config /etc/rabbitmq/')['exitcode'], 0, "AMQP Config file failure.")
os.remove('config.json')
os.remove('rabbitmq.config')
fit_common.remote_shell('mkdir -p ~/src/on-http/static/swagger-ui')
def test06_startup(self):
print "Start services."
startup = open('startup.sh', 'w')
startup.write('cd ~/;nf start&\n')
startup.close()
fit_common.scp_file_to_ora('startup.sh')
self.assertEqual(fit_common.remote_shell("chmod 777 startup.sh;/etc/init.d/isc-dhcp-server restart")['exitcode'], 0, "dhcp startup failure.")
self.assertEqual(fit_common.remote_shell("nohup ./startup.sh")['exitcode'], 0, "RackHD startup failure.")
print "**** Check installation."
for dummy in range(0, 10):
try:
fit_common.rackhdapi("/api/2.0/config")
except:
fit_common.time.sleep(10)
else:
break
self.assertEqual(fit_common.rackhdapi("/api/2.0/config")['status'], 200, "Unable to contact RackHD.")
if __name__ == '__main__':
fit_common.unittest.main()
|
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from keystoneclient import exceptions as k_exceptions
from keystoneclient.v2_0 import client as k_client
from oslo.config import cfg
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron import context as neutron_context
from neutron.db import agents_db
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
from neutron.openstack.common import uuidutils
from neutron.plugins.cisco.common import cisco_constants as c_constants
from neutron.plugins.cisco.db.l3 import l3_models
from neutron.plugins.cisco.l3 import service_vm_lib
from neutron.plugins.common import constants as svc_constants
LOG = logging.getLogger(__name__)
DEVICE_HANDLING_OPTS = [
cfg.StrOpt('l3_admin_tenant', default='L3AdminTenant',
help=_('Name of the L3 admin tenant.')),
cfg.StrOpt('management_network', default='osn_mgmt_nw',
help=_('Name of management network for device configuration. '
                      'Default value is osn_mgmt_nw.')),
cfg.StrOpt('default_security_group', default='mgmt_sec_grp',
help=_('Default security group applied on management port. '
'Default value is mgmt_sec_grp.')),
cfg.IntOpt('cfg_agent_down_time', default=60,
help=_('Seconds of no status update until a cfg agent '
'is considered down.')),
cfg.BoolOpt('ensure_nova_running', default=True,
help=_('Ensure that Nova is running before attempting to '
'create any VM.'))
]
CSR1KV_OPTS = [
cfg.StrOpt('csr1kv_image', default='csr1kv_openstack_img',
help=_('Name of Glance image for CSR1kv.')),
cfg.StrOpt('csr1kv_flavor', default=621,
help=_('UUID of Nova flavor for CSR1kv.')),
cfg.StrOpt('csr1kv_plugging_driver',
default=('neutron.plugins.cisco.l3.plugging_drivers.'
'n1kv_trunking_driver.N1kvTrunkingPlugDriver'),
help=_('Plugging driver for CSR1kv.')),
cfg.StrOpt('csr1kv_device_driver',
default=('neutron.plugins.cisco.l3.hosting_device_drivers.'
'csr1kv_hd_driver.CSR1kvHostingDeviceDriver'),
help=_('Hosting device driver for CSR1kv.')),
cfg.StrOpt('csr1kv_cfgagent_router_driver',
default=('neutron.plugins.cisco.cfg_agent.device_drivers.'
'csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver'),
help=_('Config agent driver for CSR1kv.')),
cfg.IntOpt('csr1kv_booting_time', default=420,
help=_('Booting time in seconds before a CSR1kv '
'becomes operational.')),
cfg.StrOpt('csr1kv_username', default='stack',
help=_('Username to use for CSR1kv configurations.')),
cfg.StrOpt('csr1kv_password', default='cisco',
help=_('Password to use for CSR1kv configurations.'))
]
cfg.CONF.register_opts(DEVICE_HANDLING_OPTS, "general")
cfg.CONF.register_opts(CSR1KV_OPTS, "hosting_devices")
class DeviceHandlingMixin(object):
"""A class implementing some functionality to handle devices."""
# The all-mighty tenant owning all hosting devices
_l3_tenant_uuid = None
# The management network for hosting devices
_mgmt_nw_uuid = None
_mgmt_sec_grp_id = None
# Loaded driver modules for CSR1kv
_hosting_device_driver = None
_plugging_driver = None
# Service VM manager object that interacts with Nova
_svc_vm_mgr = None
    # Flag indicating if the needed Nova services are reported as up.
_nova_running = False
@classmethod
def l3_tenant_id(cls):
"""Returns id of tenant owning hosting device resources."""
if cls._l3_tenant_uuid is None:
auth_url = cfg.CONF.keystone_authtoken.identity_uri + "/v2.0"
user = cfg.CONF.keystone_authtoken.admin_user
pw = cfg.CONF.keystone_authtoken.admin_password
tenant = cfg.CONF.keystone_authtoken.admin_tenant_name
keystone = k_client.Client(username=user, password=pw,
tenant_name=tenant,
auth_url=auth_url)
try:
tenant = keystone.tenants.find(
name=cfg.CONF.general.l3_admin_tenant)
cls._l3_tenant_uuid = tenant.id
except k_exceptions.NotFound:
LOG.error(_('No tenant with a name or ID of %s exists.'),
cfg.CONF.general.l3_admin_tenant)
except k_exceptions.NoUniqueMatch:
                LOG.error(_('Multiple tenant matches found for %s'),
cfg.CONF.general.l3_admin_tenant)
return cls._l3_tenant_uuid
@classmethod
def mgmt_nw_id(cls):
"""Returns id of the management network."""
if cls._mgmt_nw_uuid is None:
tenant_id = cls.l3_tenant_id()
if not tenant_id:
return
net = manager.NeutronManager.get_plugin().get_networks(
neutron_context.get_admin_context(),
{'tenant_id': [tenant_id],
'name': [cfg.CONF.general.management_network]},
['id', 'subnets'])
if len(net) == 1:
num_subnets = len(net[0]['subnets'])
if num_subnets == 0:
LOG.error(_('The virtual management network has no '
'subnet. Please assign one.'))
return
elif num_subnets > 1:
LOG.info(_('The virtual management network has %d '
'subnets. The first one will be used.'),
num_subnets)
cls._mgmt_nw_uuid = net[0].get('id')
elif len(net) > 1:
# Management network must have a unique name.
                LOG.error(_('The virtual management network does not have '
                            'a unique name. Please ensure that it does.'))
else:
# Management network has not been created.
LOG.error(_('There is no virtual management network. Please '
'create one.'))
return cls._mgmt_nw_uuid
@classmethod
def mgmt_sec_grp_id(cls):
"""Returns id of security group used by the management network."""
if not utils.is_extension_supported(
manager.NeutronManager.get_plugin(), "security-group"):
return
if cls._mgmt_sec_grp_id is None:
# Get the id for the _mgmt_security_group_id
tenant_id = cls.l3_tenant_id()
res = manager.NeutronManager.get_plugin().get_security_groups(
neutron_context.get_admin_context(),
{'tenant_id': [tenant_id],
'name': [cfg.CONF.general.default_security_group]},
['id'])
if len(res) == 1:
cls._mgmt_sec_grp_id = res[0].get('id')
elif len(res) > 1:
# the mgmt sec group must be unique.
                LOG.error(_('The security group for the virtual management '
                            'network does not have a unique name. Please '
                            'ensure that it does.'))
else:
# CSR Mgmt security group is not present.
LOG.error(_('There is no security group for the virtual '
'management network. Please create one.'))
return cls._mgmt_sec_grp_id
@classmethod
    def get_hosting_device_driver(cls):
        """Returns device driver."""
        if cls._hosting_device_driver:
            return cls._hosting_device_driver
        else:
            try:
                cls._hosting_device_driver = importutils.import_object(
                    cfg.CONF.hosting_devices.csr1kv_device_driver)
            except (ImportError, TypeError, n_exc.NeutronException):
                LOG.exception(_('Error loading hosting device driver'))
            return cls._hosting_device_driver
@classmethod
def get_hosting_device_plugging_driver(cls):
"""Returns plugging driver."""
if cls._plugging_driver:
return cls._plugging_driver
else:
try:
cls._plugging_driver = importutils.import_object(
cfg.CONF.hosting_devices.csr1kv_plugging_driver)
except (ImportError, TypeError, n_exc.NeutronException):
LOG.exception(_('Error loading plugging driver'))
return cls._plugging_driver
def get_hosting_devices_qry(self, context, hosting_device_ids,
load_agent=True):
"""Returns hosting devices with <hosting_device_ids>."""
query = context.session.query(l3_models.HostingDevice)
if load_agent:
query = query.options(joinedload('cfg_agent'))
if len(hosting_device_ids) > 1:
query = query.filter(l3_models.HostingDevice.id.in_(
hosting_device_ids))
else:
query = query.filter(l3_models.HostingDevice.id ==
hosting_device_ids[0])
return query
def handle_non_responding_hosting_devices(self, context, host,
hosting_device_ids):
with context.session.begin(subtransactions=True):
e_context = context.elevated()
hosting_devices = self.get_hosting_devices_qry(
e_context, hosting_device_ids).all()
# 'hosting_info' is a dictionary with ids of removed hosting
# devices and the affected logical resources for each
# removed hosting device:
# {'hd_id1': {'routers': [id1, id2, ...],
#             'fw': [id1, ...],
#             ...},
#  'hd_id2': {'routers': [id3, id4, ...],
#             'fw': [id1, ...],
#             ...},
#  ...}
hosting_info = dict((id, {}) for id in hosting_device_ids)
try:
# TODO(bobmel): Modify so service plugins register themselves
self._handle_non_responding_hosting_devices(
context, hosting_devices, hosting_info)
except AttributeError:
pass
for hd in hosting_devices:
if not self._process_non_responsive_hosting_device(e_context,
hd):
# exclude this device since we did not remove it
del hosting_info[hd['id']]
self.l3_cfg_rpc_notifier.hosting_devices_removed(
context, hosting_info, False, host)
def get_device_info_for_agent(self, hosting_device):
"""Returns information about <hosting_device> needed by config agent.
Convenience function that service plugins can use to populate
their resources with information about the device hosting their
logical resource.
"""
credentials = {'username': cfg.CONF.hosting_devices.csr1kv_username,
'password': cfg.CONF.hosting_devices.csr1kv_password}
mgmt_ip = (hosting_device.management_port['fixed_ips'][0]['ip_address']
if hosting_device.management_port else None)
return {'id': hosting_device.id,
'credentials': credentials,
'management_ip_address': mgmt_ip,
'protocol_port': hosting_device.protocol_port,
'created_at': str(hosting_device.created_at),
'booting_time': cfg.CONF.hosting_devices.csr1kv_booting_time,
'cfg_agent_id': hosting_device.cfg_agent_id}
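# Note: the default for `timeout` on is_agent_down below is evaluated
# once, when this class body is executed at import time and cfg.CONF is
# first read; callers that need a value that can change at runtime
# should pass `timeout` explicitly.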
@classmethod
def is_agent_down(cls, heart_beat_time,
timeout=cfg.CONF.general.cfg_agent_down_time):
return timeutils.is_older_than(heart_beat_time, timeout)
def get_cfg_agents_for_hosting_devices(self, context, hosting_device_ids,
admin_state_up=None, active=None,
schedule=False):
if not hosting_device_ids:
return []
query = self.get_hosting_devices_qry(context, hosting_device_ids)
if admin_state_up is not None:
query = query.filter(
agents_db.Agent.admin_state_up == admin_state_up)
if schedule:
agents = []
for hosting_device in query:
if hosting_device.cfg_agent is None:
agent = self._select_cfgagent(context, hosting_device)
if agent is not None:
agents.append(agent)
else:
agents.append(hosting_device.cfg_agent)
else:
agents = [hosting_device.cfg_agent for hosting_device in query
if hosting_device.cfg_agent is not None]
if active is not None:
agents = [agent for agent in agents if not
self.is_agent_down(agent['heartbeat_timestamp'])]
return agents
def auto_schedule_hosting_devices(self, context, agent_host):
"""Schedules unassociated hosting devices to Cisco cfg agent.
Schedules hosting devices to agent running on <agent_host>.
"""
with context.session.begin(subtransactions=True):
# Check if there is a valid Cisco cfg agent on the host
query = context.session.query(agents_db.Agent)
query = query.filter_by(agent_type=c_constants.AGENT_TYPE_CFG,
host=agent_host, admin_state_up=True)
try:
cfg_agent = query.one()
except (exc.MultipleResultsFound, exc.NoResultFound):
LOG.debug('No enabled Cisco cfg agent on host %s',
agent_host)
return False
if self.is_agent_down(
cfg_agent.heartbeat_timestamp):
LOG.warn(_('Cisco cfg agent %s is not alive'), cfg_agent.id)
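# Scheduling proceeds even when the heartbeat is stale: the warning
# above is informational only, and the devices assigned here will be
# picked up once the agent reports in again.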
query = context.session.query(l3_models.HostingDevice)
query = query.filter_by(cfg_agent_id=None)
for hd in query:
hd.cfg_agent = cfg_agent
context.session.add(hd)
return True
def _setup_device_handling(self):
auth_url = cfg.CONF.keystone_authtoken.identity_uri + "/v2.0"
u_name = cfg.CONF.keystone_authtoken.admin_user
pw = cfg.CONF.keystone_authtoken.admin_password
tenant = cfg.CONF.general.l3_admin_tenant
self._svc_vm_mgr = service_vm_lib.ServiceVMManager(
user=u_name, passwd=pw, l3_admin_tenant=tenant, auth_url=auth_url)
def _process_non_responsive_hosting_device(self, context, hosting_device):
"""Host type specific processing of non responsive hosting devices.
:param hosting_device: db object for hosting device
:return: True if hosting_device has been deleted, otherwise False
"""
self._delete_service_vm_hosting_device(context, hosting_device)
return True
def _create_csr1kv_vm_hosting_device(self, context):
"""Creates a CSR1kv VM instance."""
# Note(bobmel): Nova does not handle VM dispatching well before all
# its services have started. This creates problems for the Neutron
# devstack script that creates a Neutron router, which in turn
# triggers service VM dispatching.
# Only perform pool maintenance if needed Nova services have started
if cfg.CONF.general.ensure_nova_running and not self._nova_running:
if self._svc_vm_mgr.nova_services_up():
self.__class__._nova_running = True
else:
LOG.info(_('Not all Nova services are up and running. '
'Skipping this CSR1kv vm create request.'))
return
plugging_drv = self.get_hosting_device_plugging_driver()
hosting_device_drv = self.get_hosting_device_driver()
if plugging_drv is None or hosting_device_drv is None:
return
# These resources are owned by the L3AdminTenant
complementary_id = uuidutils.generate_uuid()
dev_data = {'complementary_id': complementary_id,
'device_id': 'CSR1kv',
'admin_state_up': True,
'protocol_port': 22,
'created_at': timeutils.utcnow()}
res = plugging_drv.create_hosting_device_resources(
context, complementary_id, self.l3_tenant_id(),
self.mgmt_nw_id(), self.mgmt_sec_grp_id(), 1)
if res.get('mgmt_port') is None:
# Required ports could not be created
return
vm_instance = self._svc_vm_mgr.dispatch_service_vm(
context, 'CSR1kv_nrouter', cfg.CONF.hosting_devices.csr1kv_image,
cfg.CONF.hosting_devices.csr1kv_flavor, hosting_device_drv,
res['mgmt_port'], res.get('ports'))
with context.session.begin(subtransactions=True):
if vm_instance is not None:
dev_data.update(
{'id': vm_instance['id'],
'management_port_id': res['mgmt_port']['id']})
hosting_device = self._create_hosting_device(
context, {'hosting_device': dev_data})
else:
# Fundamental error, e.g. could not contact Nova.
# Clean up anything we created.
plugging_drv.delete_hosting_device_resources(
context, self.l3_tenant_id(), **res)
return
LOG.info(_('Created a CSR1kv hosting device VM'))
return hosting_device
def _delete_service_vm_hosting_device(self, context, hosting_device):
"""Deletes a <hosting_device> service VM.
This will indirectly make all of its hosted resources unscheduled.
"""
if hosting_device is None:
return
plugging_drv = self.get_hosting_device_plugging_driver()
if plugging_drv is None:
return
res = plugging_drv.get_hosting_device_resources(
context, hosting_device['id'], hosting_device['complementary_id'],
self.l3_tenant_id(), self.mgmt_nw_id())
if not self._svc_vm_mgr.delete_service_vm(context,
hosting_device['id']):
LOG.error(_('Failed to delete hosting device %s service VM. '
'Will un-register it anyway.'),
hosting_device['id'])
plugging_drv.delete_hosting_device_resources(
context, self.l3_tenant_id(), **res)
with context.session.begin(subtransactions=True):
context.session.delete(hosting_device)
def _create_hosting_device(self, context, hosting_device):
LOG.debug('create_hosting_device() called')
hd = hosting_device['hosting_device']
tenant_id = self._get_tenant_id_for_create(context, hd)
with context.session.begin(subtransactions=True):
hd_db = l3_models.HostingDevice(
id=hd.get('id') or uuidutils.generate_uuid(),
complementary_id=hd.get('complementary_id'),
tenant_id=tenant_id,
device_id=hd.get('device_id'),
admin_state_up=hd.get('admin_state_up', True),
management_port_id=hd['management_port_id'],
protocol_port=hd.get('protocol_port'),
cfg_agent_id=hd.get('cfg_agent_id'),
created_at=hd.get('created_at', timeutils.utcnow()),
status=hd.get('status', svc_constants.ACTIVE))
context.session.add(hd_db)
return hd_db
def _select_cfgagent(self, context, hosting_device):
"""Selects Cisco cfg agent that will configure <hosting_device>."""
if not hosting_device:
LOG.debug('Hosting device to schedule not specified')
return
elif hosting_device.cfg_agent:
LOG.debug('Hosting device %(hd_id)s has already been '
'assigned to Cisco cfg agent %(agent_id)s',
{'hd_id': hosting_device.id,
'agent_id': hosting_device.cfg_agent.id})
return
with context.session.begin(subtransactions=True):
active_cfg_agents = self._get_cfg_agents(context, active=True)
if not active_cfg_agents:
LOG.warn(_('There are no active Cisco cfg agents'))
# No worries, once a Cisco cfg agent is started and
# announces itself any "dangling" hosting devices
# will be scheduled to it.
return
chosen_agent = random.choice(active_cfg_agents)
hosting_device.cfg_agent = chosen_agent
context.session.add(hosting_device)
return chosen_agent
def _get_cfg_agents(self, context, active=None, filters=None):
query = context.session.query(agents_db.Agent)
query = query.filter(
agents_db.Agent.agent_type == c_constants.AGENT_TYPE_CFG)
if active is not None:
query = (query.filter(agents_db.Agent.admin_state_up == active))
if filters:
for key, value in filters.iteritems():
column = getattr(agents_db.Agent, key, None)
if column:
query = query.filter(column.in_(value))
cfg_agents = query.all()
if active is not None:
cfg_agents = [cfg_agent for cfg_agent in cfg_agents
if not self.is_agent_down(
cfg_agent['heartbeat_timestamp'])]
return cfg_agents
|
|
"""
This tutorial introduces denoising auto-encoders (dA) using Theano.
Denoising autoencoders are the building blocks for SdA.
They are based on auto-encoders as the ones used in Bengio et al. 2007.
An autoencoder takes an input x and first maps it to a hidden representation
y = f_{\theta}(x) = s(Wx+b), parameterized by \theta={W,b}. The resulting
latent representation y is then mapped back to a "reconstructed" vector
z \in [0,1]^d in input space z = g_{\theta'}(y) = s(W'y + b'). The weight
matrix W' can optionally be constrained such that W' = W^T, in which case
the autoencoder is said to have tied weights. The network is trained
to minimize the reconstruction error (the error between x and z).
For the denoising autoencoder, during training, first x is corrupted into
\tilde{x}, where \tilde{x} is a partially destroyed version of x by means
of a stochastic mapping. Afterwards y is computed as before (using
\tilde{x}), y = s(W\tilde{x} + b) and z as s(W'y + b'). The reconstruction
error is now measured between z and the uncorrupted input x, which is
computed as the cross-entropy :
- \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)]
References :
- P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and
Composing Robust Features with Denoising Autoencoders, ICML'08, 1096-1103,
2008
- Y. Bengio, P. Lamblin, D. Popovici, H. Larochelle: Greedy Layer-Wise
Training of Deep Networks, Advances in Neural Information Processing
Systems 19, 2007
"""
from inputParser import get_parser_AE
import cPickle as pickle
import gzip
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from logistic_sgd import load_data
from utils import tile_raster_images
# try:
# import PIL.Image as Image
# except ImportError:
# import Image
class dA(object):
"""Denoising Auto-Encoder class (dA)
A denoising autoencoder tries to reconstruct the input from a corrupted
version of it by projecting it first into a latent space and reprojecting
it afterwards back into the input space. Please refer to Vincent et al., 2008
for more details. If x is the input then equation (1) computes a partially
destroyed version of x by means of a stochastic mapping q_D. Equation (2)
computes the projection of the input into the latent space. Equation (3)
computes the reconstruction of the input, while equation (4) computes the
reconstruction error.
.. math::
\tilde{x} \sim q_D(\tilde{x}|x)                                  (1)
y = s(W \tilde{x} + b)                                           (2)
z = s(W' y + b')                                                 (3)
L(x, z) = -\sum_{k=1}^d [x_k \log z_k + (1-x_k) \log(1-z_k)]     (4)
"""
def __init__(self, numpy_rng, theano_rng=None, input=None,
n_visible=784, n_hidden=1000,
W=None, bhid=None, bvis=None):
"""
Initialize the dA class by specifying the number of visible units (the
dimension d of the input) and the number of hidden units (the dimension
d' of the latent or hidden space). The
constructor also receives symbolic variables for the input, weights and
bias. Such symbolic variables are useful when, for example, the input
is the result of some computations, or when weights are shared between
the dA and an MLP layer. When dealing with SdAs this always happens,
the dA on layer 2 gets as input the output of the dA on layer 1,
and the weights of the dA are used in the second stage of training
to construct an MLP.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to generate weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `numpy_rng`
:type input: theano.tensor.TensorType
:param input: a symbolic description of the input or None for
standalone dA
:type n_visible: int
:param n_visible: number of visible units
:type n_hidden: int
:param n_hidden: number of hidden units
:type W: theano.tensor.TensorType
:param W: Theano variable pointing to a set of weights that should be
shared between the dA and another architecture; if dA should
be standalone set this to None
:type bhid: theano.tensor.TensorType
:param bhid: Theano variable pointing to a set of biases values (for
hidden units) that should be shared between the dA and another
architecture; if dA should be standalone set this to None
:type bvis: theano.tensor.TensorType
:param bvis: Theano variable pointing to a set of biases values (for
visible units) that should be shared between the dA and another
architecture; if dA should be standalone set this to None
"""
self.n_visible = n_visible
self.n_hidden = n_hidden
# create a Theano random generator that gives symbolic random values
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# note : W' was written as `W_prime` and b' as `b_prime`
if not W:
# W is initialized with `initial_W`, which is uniformly sampled
# from -4*sqrt(6./(n_visible+n_hidden)) to
# 4*sqrt(6./(n_hidden+n_visible)); the output of uniform is
# converted using asarray to dtype theano.config.floatX so
# that the code is runnable on GPU
initial_W = numpy.asarray(numpy_rng.uniform(
low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),
high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),
size=(n_visible, n_hidden)), dtype=theano.config.floatX)
W = theano.shared(value=initial_W, name='W', borrow=True)
if not bvis:
bvis = theano.shared(value=numpy.zeros(n_visible,
dtype=theano.config.floatX),
borrow=True)
if not bhid:
bhid = theano.shared(value=numpy.zeros(n_hidden,
dtype=theano.config.floatX),
name='b',
borrow=True)
self.W = W
# b corresponds to the bias of the hidden
self.b = bhid
# b_prime corresponds to the bias of the visible
self.b_prime = bvis
# tied weights, therefore W_prime is W transpose
self.W_prime = self.W.T
self.theano_rng = theano_rng
# if no input is given, generate a variable representing the input
if input is None:
# we use a matrix because we expect a minibatch of several
# examples, each example being a row
self.x = T.dmatrix(name='input')
else:
self.x = input
self.params = [self.W, self.b, self.b_prime]
self.print_inf()
def print_inf(self):
print "hidden : ",self.n_hidden
print "visible : ",self.n_visible
def get_corrupted_input(self, input, corruption_level):
"""This function keeps ``1-corruption_level`` entries of the inputs the
same and zero-out randomly selected subset of size ``coruption_level``
Note : first argument of theano.rng.binomial is the shape(size) of
random numbers that it should produce
second argument is the number of trials
third argument is the probability of success of any trial
this will produce an array of 0s and 1s where 1 has a
probability of 1 - ``corruption_level`` and 0 with
``corruption_level``
The binomial function return int64 data type by
default. int64 multiplicated by the input
type(floatX) always return float64. To keep all data
in floatX when floatX is float32, we set the dtype of
the binomial to floatX. As in our case the value of
the binomial is always 0 or 1, this don't change the
result. This is needed to allow the gpu to work
correctly as it only support float32 for now.
"""
return self.theano_rng.binomial(size=input.shape, n=1,
p=1 - corruption_level,
dtype=theano.config.floatX) * input
def get_hidden_values(self, input):
""" Computes the values of the hidden layer """
return T.nnet.sigmoid(T.dot(input, self.W) + self.b)
def get_reconstructed_input(self, hidden):
"""Computes the reconstructed input given the values of the
hidden layer
"""
return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)
def get_cost_updates(self, corruption_level, learning_rate):
""" This function computes the cost and the updates for one trainng
step of the dA """
tilde_x = self.get_corrupted_input(self.x, corruption_level)
y = self.get_hidden_values(tilde_x)
z = self.get_reconstructed_input(y)
# note : we sum over the size of a datapoint; if we are using
# minibatches, L will be a vector, with one entry per
# example in minibatch
L = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
# note : L is now a vector, where each element is the
# cross-entropy cost of the reconstruction of the
# corresponding example of the minibatch. We need to
# compute the average of all these to get the cost of
# the minibatch
cost = T.mean(L)
# compute the gradients of the cost of the `dA` with respect
# to its parameters
gparams = T.grad(cost, self.params)
# generate the list of updates
updates = []
for param, gparam in zip(self.params, gparams):
updates.append((param, param - learning_rate * gparam))
return (cost, updates)
def test_dA(learning_rate=0.1, n_epochs=50,
dataset='mnist.pkl.gz', batch_size=20,
visible_size=784, hidden_size=1000, corruption_rate=0.3,
fname='da-1000-030-mnist.pkl.out'):
"""
This demo is tested on MNIST
:type learning_rate: float
:param learning_rate: learning rate used for training the denoising
autoencoder
:type n_epochs: int
:param n_epochs: number of training epochs
:type dataset: string
:param dataset: path to the pickled dataset
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
# compute number of minibatches for training
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
####################################
# BUILDING THE MODEL
####################################
rng = numpy.random.RandomState(123)
theano_rng = RandomStreams(rng.randint(2 ** 30))
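# Fixing the numpy seed (123) makes the weight initialization and the
# corruption masks reproducible; the Theano stream is seeded from it.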
da = dA(numpy_rng=rng, theano_rng=theano_rng, input=x,
n_visible= visible_size, n_hidden=hidden_size)
cost, updates = da.get_cost_updates(corruption_level=corruption_rate,
learning_rate=learning_rate)
train_da = theano.function([index], cost, updates=updates,
givens={x: train_set_x[index * batch_size:
(index + 1) * batch_size]})
start_time = time.clock()
############
# TRAINING #
############
# go through training epochs
for epoch in xrange(n_epochs):
# go through the training set
c = []
for batch_index in xrange(n_train_batches):
c.append(train_da(batch_index))
print 'Training epoch %d, cost ' % epoch, numpy.mean(c)
end_time = time.clock()
training_time = (end_time - start_time)
print >> sys.stderr, ('Training with corruption ' + str(corruption_rate) +
' for file ' + os.path.split(__file__)[1] +
' ran for %.2fm' % (training_time / 60.))
output = open(fname, 'wb')
pickle.dump(da, output)
output.close()
if __name__ == '__main__':
parser = get_parser_AE()
p = parser.parse_args()
test_dA(learning_rate=p.learning_rate, n_epochs=p.n_epochs,
visible_size=p.visible_size, hidden_size=p.hidden_size,
corruption_rate=p.corruption_rate, fname=p.fname,
dataset=p.benchmark)
|
|
# -*- coding: utf-8 -*-
import json
import time
import mock
import pytest
from py_zipkin.exception import ZipkinError
from py_zipkin.zipkin import ZipkinAttrs
from webtest import TestApp as WebTestApp
from .app import main
from tests.acceptance.test_helper import generate_app_main
@pytest.mark.parametrize(['set_post_handler_hook', 'called'], [
(False, 0),
(True, 1),
])
def test_sample_server_span_with_100_percent_tracing(
default_trace_id_generator,
get_span,
set_post_handler_hook,
called,
):
settings = {
'zipkin.tracing_percent': 100,
'zipkin.trace_id_generator': default_trace_id_generator,
}
mock_post_handler_hook = mock.Mock()
if set_post_handler_hook:
settings['zipkin.post_handler_hook'] = mock_post_handler_hook
app_main, transport, _ = generate_app_main(settings)
old_time = time.time() * 1000000
with mock.patch(
'pyramid_zipkin.request_helper.generate_random_64bit_string'
) as mock_generate_random_64bit_string:
mock_generate_random_64bit_string.return_value = '1'
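# Pin the generated span id to '1' so the exact-match assertion on
# span['id'] below is deterministic.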
WebTestApp(app_main).get('/sample', status=200)
assert mock_post_handler_hook.call_count == called
assert len(transport.output) == 1
spans = json.loads(transport.output[0])
assert len(spans) == 1
span = spans[0]
assert span['id'] == '1'
assert span['kind'] == 'SERVER'
assert span['timestamp'] > old_time
assert span['duration'] > 0
assert 'shared' not in span
assert span == get_span
def test_upstream_zipkin_headers_sampled(default_trace_id_generator):
settings = {'zipkin.trace_id_generator': default_trace_id_generator}
app_main, transport, _ = generate_app_main(settings)
trace_hex = 'aaaaaaaaaaaaaaaa'
span_hex = 'bbbbbbbbbbbbbbbb'
parent_hex = 'cccccccccccccccc'
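# These X-B3-* headers simulate an upstream client that already started
# the trace; since the server reuses the incoming span id, the resulting
# span is expected to carry the 'shared' flag.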
WebTestApp(app_main).get(
'/sample',
status=200,
headers={
'X-B3-TraceId': trace_hex,
'X-B3-SpanId': span_hex,
'X-B3-ParentSpanId': parent_hex,
'X-B3-Flags': '0',
'X-B3-Sampled': '1',
},
)
spans = json.loads(transport.output[0])
assert len(spans) == 1
span = spans[0]
assert span['traceId'] == trace_hex
assert span['id'] == span_hex
assert span['parentId'] == parent_hex
assert span['kind'] == 'SERVER'
assert span['shared'] is True
@pytest.mark.parametrize(['set_post_handler_hook', 'called'], [
(False, 0),
(True, 1),
])
def test_unsampled_request_has_no_span(
default_trace_id_generator,
set_post_handler_hook,
called,
):
settings = {
'zipkin.tracing_percent': 0,
'zipkin.trace_id_generator': default_trace_id_generator,
}
mock_post_handler_hook = mock.Mock()
if set_post_handler_hook:
settings['zipkin.post_handler_hook'] = mock_post_handler_hook
app_main, transport, _ = generate_app_main(settings)
WebTestApp(app_main).get('/sample', status=200)
assert len(transport.output) == 0
assert mock_post_handler_hook.call_count == called
def test_blacklisted_route_has_no_span(default_trace_id_generator):
settings = {
'zipkin.tracing_percent': 100,
'zipkin.trace_id_generator': default_trace_id_generator,
'zipkin.blacklisted_routes': ['sample_route'],
}
app_main, transport, firehose = generate_app_main(settings, firehose=True)
WebTestApp(app_main).get('/sample', status=200)
assert len(transport.output) == 0
assert len(firehose.output) == 0
def test_blacklisted_path_has_no_span(default_trace_id_generator):
settings = {
'zipkin.tracing_percent': 100,
'zipkin.trace_id_generator': default_trace_id_generator,
'zipkin.blacklisted_paths': [r'^/sample'],
}
app_main, transport, firehose = generate_app_main(settings, firehose=True)
WebTestApp(app_main).get('/sample', status=200)
assert len(transport.output) == 0
assert len(firehose.output) == 0
def test_no_transport_handler_throws_error():
app_main = main({})
del app_main.registry.settings['zipkin.transport_handler']
assert 'zipkin.transport_handler' not in app_main.registry.settings
with pytest.raises(ZipkinError):
WebTestApp(app_main).get('/sample', status=200)
def test_binary_annotations(default_trace_id_generator):
def set_extra_binary_annotations(dummy_request, response):
return {'other': dummy_request.registry.settings['other_attr']}
settings = {
'zipkin.tracing_percent': 100,
'zipkin.trace_id_generator': default_trace_id_generator,
'zipkin.set_extra_binary_annotations': set_extra_binary_annotations,
'other_attr': '42',
}
app_main, transport, _ = generate_app_main(settings)
WebTestApp(app_main).get('/pet/123?test=1', status=200)
assert len(transport.output) == 1
spans = json.loads(transport.output[0])
assert len(spans) == 1
span = spans[0]
assert span['tags'] == {
'http.uri': '/pet/123',
'http.uri.qs': '/pet/123?test=1',
'http.route': '/pet/{petId}',
'response_status_code': '200',
'other': '42',
}
def test_binary_annotations_404(default_trace_id_generator):
settings = {
'zipkin.tracing_percent': 100,
'zipkin.trace_id_generator': default_trace_id_generator,
}
app_main, transport, _ = generate_app_main(settings)
WebTestApp(app_main).get('/abcd?test=1', status=404)
assert len(transport.output) == 1
spans = json.loads(transport.output[0])
assert len(spans) == 1
span = spans[0]
assert span['tags'] == {
'http.uri': '/abcd',
'http.uri.qs': '/abcd?test=1',
'http.route': '',
'response_status_code': '404',
}
def test_custom_create_zipkin_attr():
custom_create_zipkin_attr = mock.Mock(return_value=ZipkinAttrs(
trace_id='1234',
span_id='1234',
parent_span_id='5678',
flags=0,
is_sampled=True,
))
settings = {
'zipkin.create_zipkin_attr': custom_create_zipkin_attr
}
app_main, transport, _ = generate_app_main(settings)
WebTestApp(app_main).get('/sample?test=1', status=200)
assert custom_create_zipkin_attr.called
def test_report_root_timestamp():
settings = {
'zipkin.report_root_timestamp': True,
'zipkin.tracing_percent': 100.0,
}
app_main, transport, _ = generate_app_main(settings)
old_time = time.time() * 1000000
WebTestApp(app_main).get('/sample', status=200)
assert len(transport.output) == 1
spans = json.loads(transport.output[0])
assert len(spans) == 1
span = spans[0]
# report_root_timestamp means there's no client span with the
# same id, so the 'shared' flag should not be set.
assert 'shared' not in span
assert span['timestamp'] > old_time
assert span['duration'] > 0
def test_host_and_port_in_span():
settings = {
'zipkin.tracing_percent': 100,
'zipkin.host': '1.2.2.1',
'zipkin.port': 1231,
}
app_main, transport, _ = generate_app_main(settings)
WebTestApp(app_main).get('/sample?test=1', status=200)
assert len(transport.output) == 1
spans = json.loads(transport.output[0])
assert len(spans) == 1
span = spans[0]
assert span['localEndpoint'] == {
'ipv4': '1.2.2.1',
'port': 1231,
'serviceName': 'acceptance_service',
}
def test_sample_server_span_with_firehose_tracing(
default_trace_id_generator, get_span):
settings = {
'zipkin.tracing_percent': 0,
'zipkin.trace_id_generator': default_trace_id_generator,
'zipkin.firehose_handler': default_trace_id_generator,
}
app_main, normal_transport, firehose_transport = generate_app_main(
settings,
firehose=True,
)
old_time = time.time() * 1000000
with mock.patch(
'pyramid_zipkin.request_helper.generate_random_64bit_string'
) as mock_generate_random_64bit_string:
mock_generate_random_64bit_string.return_value = '1'
WebTestApp(app_main).get('/sample', status=200)
assert len(normal_transport.output) == 0
assert len(firehose_transport.output) == 1
spans = json.loads(firehose_transport.output[0])
assert len(spans) == 1
span = spans[0]
assert span['timestamp'] > old_time
assert span['duration'] > 0
assert span == get_span
def test_max_span_batch_size(default_trace_id_generator):
settings = {
'zipkin.tracing_percent': 0,
'zipkin.trace_id_generator': default_trace_id_generator,
'zipkin.max_span_batch_size': 1,
}
app_main, normal_transport, firehose_transport = generate_app_main(
settings,
firehose=True,
)
WebTestApp(app_main).get('/decorator_context', status=200)
# Assert the expected number of batches for two spans
assert len(normal_transport.output) == 0
assert len(firehose_transport.output) == 2
# Assert proper hierarchy
batch_one = json.loads(firehose_transport.output[0])
assert len(batch_one) == 1
child_span = batch_one[0]
batch_two = json.loads(firehose_transport.output[1])
assert len(batch_two) == 1
server_span = batch_two[0]
assert child_span['parentId'] == server_span['id']
assert child_span['name'] == 'my_span'
def test_use_pattern_as_span_name(default_trace_id_generator):
settings = {
'zipkin.tracing_percent': 100,
'zipkin.trace_id_generator': default_trace_id_generator,
'other_attr': '42',
'zipkin.use_pattern_as_span_name': True,
}
app_main, transport, _ = generate_app_main(settings)
WebTestApp(app_main).get('/pet/123?test=1', status=200)
assert len(transport.output) == 1
spans = json.loads(transport.output[0])
assert len(spans) == 1
span = spans[0]
# Check that the span name is the pyramid pattern and not the raw url
assert span['name'] == 'GET /pet/{petId}'
def test_defaults_at_using_raw_url_path(default_trace_id_generator):
settings = {
'zipkin.tracing_percent': 100,
'zipkin.trace_id_generator': default_trace_id_generator,
'other_attr': '42',
}
app_main, transport, _ = generate_app_main(settings)
WebTestApp(app_main).get('/pet/123?test=1', status=200)
assert len(transport.output) == 1
spans = json.loads(transport.output[0])
assert len(spans) == 1
span = spans[0]
# Check that the span name is the raw url by default
assert span['name'] == 'GET /pet/123'
def test_sample_server_ipv6(
default_trace_id_generator,
get_span,
):
# Assert that pyramid_zipkin and py_zipkin correctly handle ipv6 addresses.
settings = {
'zipkin.tracing_percent': 100,
'zipkin.trace_id_generator': default_trace_id_generator,
}
app_main, transport, _ = generate_app_main(settings)
# py_zipkin uses `socket.gethostbyname` to get the current host ip if it's not
# set in settings.
with mock.patch(
'socket.gethostbyname',
return_value='2001:db8:85a3::8a2e:370:7334',
autospec=True,
):
WebTestApp(app_main).get('/sample', status=200)
assert len(transport.output) == 1
spans = json.loads(transport.output[0])
assert len(spans) == 1
span = spans[0]
# Check that the mocked ipv6 address ends up on the local endpoint
assert span['localEndpoint'] == {
'serviceName': 'acceptance_service',
'port': 80,
'ipv6': '2001:db8:85a3::8a2e:370:7334',
}
|
|
"""
Copyright (c) 2020 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import, unicode_literals
import copy
from collections import Counter
import pytest
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from atomic_reactor.util import chain_get
from atomic_reactor.utils.operator import (
OperatorCSV,
OperatorManifest,
NotOperatorCSV,
default_pullspec_heuristic,
)
from osbs.utils import ImageName
yaml = YAML()
SHA = "5d141ae1081640587636880dbe8489439353df883379158fa8742d5a3be75475"
@pytest.mark.parametrize("text, expected", [
# Trivial cases
("a.b/c:1", ["a.b/c:1"]),
("a.b/c/d:1", ["a.b/c/d:1"]),
# Digests in tag
("a.b/c@sha256:{sha}".format(sha=SHA), ["a.b/c@sha256:{sha}".format(sha=SHA)]),
("a.b/c/d@sha256:{sha}".format(sha=SHA), ["a.b/c/d@sha256:{sha}".format(sha=SHA)]),
# Port in registry
("a.b:1/c:1", ["a.b:1/c:1"]),
("a.b:5000/c/d:1", ["a.b:5000/c/d:1"]),
# Special characters everywhere
("a-b.c_d/e-f.g_h/i-j.k_l@sha256:{sha}".format(sha=SHA),
["a-b.c_d/e-f.g_h/i-j.k_l@sha256:{sha}".format(sha=SHA)]),
("a-._b/c-._d/e-._f:g-._h", ["a-._b/c-._d/e-._f:g-._h"]),
("1.2-3_4/5.6-7_8/9.0-1_2:3.4-5_6", ["1.2-3_4/5.6-7_8/9.0-1_2:3.4-5_6"]),
# Multiple namespaces
("a.b/c/d/e:1", ["a.b/c/d/e:1"]),
("a.b/c/d/e/f/g/h/i:1", ["a.b/c/d/e/f/g/h/i:1"]),
# Enclosed in various non-pullspec characters
(" a.b/c:1 ", ["a.b/c:1"]),
("\na.b/c:1\n", ["a.b/c:1"]),
("\ta.b/c:1\t", ["a.b/c:1"]),
(",a.b/c:1,", ["a.b/c:1"]),
(";a.b/c:1;", ["a.b/c:1"]),
("'a.b/c:1'", ["a.b/c:1"]),
('"a.b/c:1"', ["a.b/c:1"]),
("<a.b/c:1>", ["a.b/c:1"]),
("`a.b/c:1`", ["a.b/c:1"]),
("*a.b/c:1*", ["a.b/c:1"]),
("(a.b/c:1)", ["a.b/c:1"]),
("[a.b/c:1]", ["a.b/c:1"]),
("{a.b/c:1}", ["a.b/c:1"]),
# Enclosed in various pullspec characters
(".a.b/c:1.", ["a.b/c:1"]),
("-a.b/c:1-", ["a.b/c:1"]),
("_a.b/c:1_", ["a.b/c:1"]),
("/a.b/c:1/", ["a.b/c:1"]),
("@a.b/c:1@", ["a.b/c:1"]),
(":a.b/c:1:", ["a.b/c:1"]),
# Enclosed in multiple pullspec characters
("...a.b/c:1...", ["a.b/c:1"]),
# Redundant but important interaction of ^ with tags
("a.b/c:latest:", ["a.b/c:latest"]),
("a.b/c@sha256:{sha}:".format(sha=SHA), ["a.b/c@sha256:{sha}".format(sha=SHA)]),
("a.b/c@sha256:{sha}...".format(sha=SHA), ["a.b/c@sha256:{sha}".format(sha=SHA)]),
("a.b/c:v1.1...", ["a.b/c:v1.1"]),
# Empty-ish strings
("", []),
("!", []),
(".", []),
("!!!", []),
("...", []),
# Not enough parts
("a.bc:1", []),
# No '.' in registry
("ab/c:1", []),
# No tag
("a.b/c", []),
("a.b/c:", []),
("a.b/c:...", []),
# Invalid digest
("a.b/c:@123", []),
("a.b/c:@:123", []),
("a.b/c:@sha256", []),
("a.b/c:@sha256:", []),
("a.b/c:@sha256:...", []),
("a.b/c:@sha256:123456", []), # Must be 64 characters
("a.b/c:@sha256:{not_b16}".format(not_b16=("a" * 63 + "g")), []),
# Empty part
("a.b//c:1", []),
("https://a.b/c:1", []),
# '@' in registry
("a@b.c/d:1", []),
("a.b@c/d:1", []),
# '@' or ':' in namespace
("a.b/c@d/e:1", []),
("a.b/c:d/e:1", []),
("a.b/c/d@e/f:1", []),
("a.b/c/d:e/f:1", []),
# Invalid port in registry
("a:b.c/d:1", []),
("a.b:c/d:1", []),
("a.b:/c:1", []),
("a.b:11ff/c:1", []),
# Some part does not start/end with an alphanumeric character
("a.b-/c:1", []),
("a.b/-c:1", []),
("a.b/c-:1", []),
("a.b/c:-1", []),
("a.b/-c/d:1", []),
("a.b/c-/d:1", []),
("a.b/c/-d:1", []),
("a.b/c/d-:1", []),
("a.b/c/d:-1", []),
# Separated by various non-pullspec characters
("a.b/c:1 d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1\td.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1\nd.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1\n\t d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1,d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1;d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1, d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1; d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1 , d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1 ; d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
# Separated by pullspec characters
# Note the space on at least one side of the separator; it will not work otherwise
("a.b/c:1/ d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1 /d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1- d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1 -d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1: d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1 :d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1. d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1 .d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1_ d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1 _d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1@ d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("a.b/c:1 @d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
# Sentences
("First is a.b/c:1. Second is d.e/f:1.", ["a.b/c:1", "d.e/f:1"]),
("My pullspecs are a.b/c:1 and d.e/f:1.", ["a.b/c:1", "d.e/f:1"]),
("There is/are some pullspec(s) in registry.io: a.b/c:1, d.e/f:1", ["a.b/c:1", "d.e/f:1"]),
("""
Find more info on https://my-site.com/here.
Some pullspec are <i>a.b/c:1<i> and __d.e/f:1__.
There is also g.h/i:latest: that one is cool.
And you can email me at name@server.com for info
about the last one: j.k/l:v1.1.
""", ["a.b/c:1", "d.e/f:1", "g.h/i:latest", "j.k/l:v1.1"]),
("""
I might also decide to do some math: 50.0/2 = 25.0.
Perhaps even with variables: 0.5x/2 = x/4.
And, because I am a psychopath, I will write this: 0.5/2:2 = 1/8,
Which will be a false positive.
""", ["0.5/2:2"]),
# JSON/YAML strings
('["a.b/c:1","d.e/f:1", "g.h/i:1"]', ["a.b/c:1", "d.e/f:1", "g.h/i:1"]),
('{"a":"a.b/c:1","b": "d.e/f:1", "c": "g.h/i:1"}', ["a.b/c:1", "d.e/f:1", "g.h/i:1"]),
("[a.b/c:1,d.e/f:1, g.h/i:1]", ["a.b/c:1", "d.e/f:1", "g.h/i:1"]),
("{a: a.b/c:1,b: d.e/f:1, c: g.h/i:1}", ["a.b/c:1", "d.e/f:1", "g.h/i:1"]),
("""
a: a.b/c:1
b: d.e/f:1
c: g.h/i:1
""", ["a.b/c:1", "d.e/f:1", "g.h/i:1"]),
])
def test_pullspec_heuristic(text, expected):
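# default_pullspec_heuristic yields (start, end) index pairs into `text`;
# slicing with them recovers the candidate pullspecs in order.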
pullspecs = [text[i:j] for i, j in default_pullspec_heuristic(text)]
assert pullspecs == expected
class PullSpec(object):
def __init__(self, name, value, replace, path):
self._name = name
self._value = ImageName.parse(value)
self._replace = ImageName.parse(replace)
self._path = path
@property
def name(self):
return self._name
@property
def value(self):
return self._value
@property
def replace(self):
return self._replace
@property
def path(self):
return tuple(self._path)
@property
def key(self):
return self.path[-1]
def __str__(self):
return str(self.value)
def find_in_data(self, data):
return ImageName.parse(chain_get(data, self.path))
# Names based on location of pullspec:
# RI = relatedImages
# C = containers
# CE = containers env
# IC = initContainers
# ICE = initContainers env
# AN = annotations
RI1 = PullSpec(
"ri1", "foo:1", "r-foo:2",
["spec", "relatedImages", 0, "image"]
)
RI2 = PullSpec(
"ri2", "registry/bar:1", "r-registry/r-bar:2",
["spec", "relatedImages", 1, "image"]
)
C1 = PullSpec(
"c1", "registry/namespace/spam:1", "r-registry/r-namespace/r-spam:2",
["spec", "install", "spec", "deployments", 0,
"spec", "template", "spec", "containers", 0, "image"]
)
CE1 = PullSpec(
"ce1", "eggs:1", "r-eggs:2",
["spec", "install", "spec", "deployments", 0,
"spec", "template", "spec", "containers", 0, "env", 0, "value"]
)
C2 = PullSpec(
"c2", "ham:1", "r-ham:2",
["spec", "install", "spec", "deployments", 0,
"spec", "template", "spec", "containers", 1, "image"]
)
C3 = PullSpec(
"c3", "jam:1", "r-jam:2",
["spec", "install", "spec", "deployments", 1,
"spec", "template", "spec", "containers", 0, "image"]
)
AN1 = PullSpec(
"an1", "registry/namespace/baz:latest", "r-registry/r-namespace/r-baz:latest",
["metadata", "annotations", "containerImage"]
)
IC1 = PullSpec(
"ic1", "pullspec:1", "r-pullspec:1",
["spec", "install", "spec", "deployments", 1,
"spec", "template", "spec", "initContainers", 0, "image"]
)
ICE1 = PullSpec(
"ice1", "pullspec:2", "r-pullspec:2",
["spec", "install", "spec", "deployments", 1,
"spec", "template", "spec", "initContainers", 0, "env", 0, "value"]
)
AN2 = PullSpec(
"an2", "registry.io/an2:1", "registry.io/r-an2:1",
["metadata", "annotations", "some_pullspec"]
)
AN3 = PullSpec(
"an3", "registry.io/an3:1", "registry.io/r-an3:1",
["metadata", "annotations", "two_pullspecs"]
)
AN4 = PullSpec(
"an4", "registry.io/an4:1", "registry.io/r-an4:1",
["metadata", "annotations", "two_pullspecs"]
)
AN5 = PullSpec(
"an5", "registry.io/an5:1", "registry.io/r-an5:1",
["spec", "install", "spec", "deployments", 0,
"spec", "template", "metadata", "annotations", "some_other_pullspec"]
)
AN6 = PullSpec(
"an6", "registry.io/an6:1", "registry.io/r-an6:1",
["random", "annotations", 0, "metadata", "annotations", "duplicate_pullspecs"]
)
AN7 = PullSpec(
"an7", "registry.io/an7:1", "registry.io/r-an7:1",
["random", "annotations", 0, "metadata", "annotations", "duplicate_pullspecs"]
)
PULLSPECS = {
p.name: p for p in [
RI1, RI2, C1, CE1, C2, C3, AN1, IC1, ICE1, AN2, AN3, AN4, AN5, AN6, AN7
]
}
ORIGINAL_CONTENT = """\
# A meaningful comment
kind: ClusterServiceVersion
metadata:
annotations:
containerImage: {an1}
some_pullspec: {an2}
two_pullspecs: {an3}, {an4}
spec:
relatedImages:
- name: ri1
image: {ri1}
- name: ri2
image: {ri2}
install:
spec:
deployments:
- spec:
template:
metadata:
annotations:
some_other_pullspec: {an5}
spec:
containers:
- name: c1
image: {c1}
env:
- name: RELATED_IMAGE_CE1
value: {ce1}
- name: UNRELATED_IMAGE
value: {ce1}
- name: c2
image: {c2}
- spec:
template:
spec:
containers:
- name: c3
image: {c3}
initContainers:
- name: ic1
image: {ic1}
env:
- name: RELATED_IMAGE_ICE1
value: {ice1}
random:
annotations:
- metadata:
annotations:
duplicate_pullspecs: {an6}, {an7}, {an6}, {an7}
nested:
dict:
a: {ri1}
b: {ri2}
c: {c1}
d: {ce1}
e: {c2}
f: {c3}
g: {an1}
h: {ic1}
i: {ice1}
list:
- {ri1}
- {ri2}
- {c1}
- {ce1}
- {c2}
- {c3}
- {an1}
- {ic1}
- {ice1}
""".format(**PULLSPECS)
REPLACED_CONTENT = """\
# A meaningful comment
kind: ClusterServiceVersion
metadata:
annotations:
containerImage: {an1.replace}
some_pullspec: {an2.replace}
two_pullspecs: {an3.replace}, {an4.replace}
spec:
relatedImages:
- name: ri1
image: {ri1.replace}
- name: ri2
image: {ri2.replace}
install:
spec:
deployments:
- spec:
template:
metadata:
annotations:
some_other_pullspec: {an5.replace}
spec:
containers:
- name: c1
image: {c1.replace}
env:
- name: RELATED_IMAGE_CE1
value: {ce1.replace}
- name: UNRELATED_IMAGE
value: {ce1}
- name: c2
image: {c2.replace}
- spec:
template:
spec:
containers:
- name: c3
image: {c3.replace}
initContainers:
- name: ic1
image: {ic1.replace}
env:
- name: RELATED_IMAGE_ICE1
value: {ice1.replace}
random:
annotations:
- metadata:
annotations:
duplicate_pullspecs: {an6.replace}, {an7.replace}, {an6.replace}, {an7.replace}
nested:
dict:
a: {ri1}
b: {ri2}
c: {c1}
d: {ce1}
e: {c2}
f: {c3}
g: {an1}
h: {ic1}
i: {ice1}
list:
- {ri1}
- {ri2}
- {c1}
- {ce1}
- {c2}
- {c3}
- {an1}
- {ic1}
- {ice1}
""".format(**PULLSPECS)
REPLACED_EVERYWHERE_CONTENT = """\
# A meaningful comment
kind: ClusterServiceVersion
metadata:
annotations:
containerImage: {an1.replace}
some_pullspec: {an2.replace}
two_pullspecs: {an3.replace}, {an4.replace}
spec:
relatedImages:
- name: ri1
image: {ri1.replace}
- name: ri2
image: {ri2.replace}
install:
spec:
deployments:
- spec:
template:
metadata:
annotations:
some_other_pullspec: {an5.replace}
spec:
containers:
- name: c1
image: {c1.replace}
env:
- name: RELATED_IMAGE_CE1
value: {ce1.replace}
- name: UNRELATED_IMAGE
value: {ce1.replace}
- name: c2
image: {c2.replace}
- spec:
template:
spec:
containers:
- name: c3
image: {c3.replace}
initContainers:
- name: ic1
image: {ic1.replace}
env:
- name: RELATED_IMAGE_ICE1
value: {ice1.replace}
random:
annotations:
- metadata:
annotations:
duplicate_pullspecs: {an6.replace}, {an7.replace}, {an6.replace}, {an7.replace}
nested:
dict:
a: {ri1.replace}
b: {ri2.replace}
c: {c1.replace}
d: {ce1.replace}
e: {c2.replace}
f: {c3.replace}
g: {an1.replace}
h: {ic1.replace}
i: {ice1.replace}
list:
- {ri1.replace}
- {ri2.replace}
- {c1.replace}
- {ce1.replace}
- {c2.replace}
- {c3.replace}
- {an1.replace}
- {ic1.replace}
- {ice1.replace}
""".format(**PULLSPECS)
YAML_LIST_CONTENT = """\
- op: replace
path: /spec/foo
value:
type: object
properties:
name:
type: string
enum:
- "bar"
"""
class CSVFile(object):
def __init__(self, content):
self.content = content
self._data = yaml.load(content)
@property
def data(self):
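# Return a deep copy so each test can freely mutate the parsed data
# without affecting the shared module-level fixtures.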
return copy.deepcopy(self._data)
ORIGINAL = CSVFile(ORIGINAL_CONTENT)
REPLACED = CSVFile(REPLACED_CONTENT)
REPLACED_EVERYWHERE = CSVFile(REPLACED_EVERYWHERE_CONTENT)
YAML_LIST = CSVFile(YAML_LIST_CONTENT)
def delete_all_annotations(obj):
if isinstance(obj, (dict, CommentedMap)):
obj.get("metadata", {}).pop("annotations", None)
for v in obj.values():
delete_all_annotations(v)
elif isinstance(obj, (list, CommentedSeq)):
for item in obj:
delete_all_annotations(item)
class TestOperatorCSV(object):
_original_pullspecs = {p.value for p in PULLSPECS.values()}
_replacement_pullspecs = {p.value: p.replace for p in PULLSPECS.values()}
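# _original_pullspecs is the full set of ImageName objects expected from
# get_pullspecs(); _replacement_pullspecs maps each one to its
# replacement counterpart defined above.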
def test_wrong_kind(self):
data = ORIGINAL.data
del data["kind"]
with pytest.raises(NotOperatorCSV) as exc_info:
OperatorCSV("original.yaml", data)
assert str(exc_info.value) == "Not a ClusterServiceVersion"
data["kind"] = "ClusterResourceDefinition"
with pytest.raises(NotOperatorCSV) as exc_info:
OperatorCSV("original.yaml", data)
assert str(exc_info.value) == "Not a ClusterServiceVersion"
def test_yaml_not_object(self):
data = YAML_LIST.data
with pytest.raises(NotOperatorCSV) as exc_info:
OperatorCSV("original.yaml", data)
assert str(exc_info.value) == "File does not contain a YAML object"
def test_from_file(self, tmpdir):
path = tmpdir.join("original.yaml")
path.write(ORIGINAL.content)
csv = OperatorCSV.from_file(str(path))
assert csv.path == str(path)
assert csv.data == ORIGINAL.data
def test_get_pullspecs(self, caplog):
csv = OperatorCSV("original.yaml", ORIGINAL.data)
pullspecs = csv.get_pullspecs()
assert pullspecs == self._original_pullspecs
expected_logs = [
"original.yaml - Found pullspec for relatedImage ri1: {ri1}",
"original.yaml - Found pullspec for relatedImage ri2: {ri2}",
"original.yaml - Found pullspec for RELATED_IMAGE_CE1 var: {ce1}",
"original.yaml - Found pullspec for RELATED_IMAGE_ICE1 var: {ice1}",
"original.yaml - Found pullspec for container c1: {c1}",
"original.yaml - Found pullspec for container c2: {c2}",
"original.yaml - Found pullspec for container c3: {c3}",
"original.yaml - Found pullspec for initContainer ic1: {ic1}",
"original.yaml - Found pullspec for {an1.key} annotation: {an1}",
"original.yaml - Found pullspec for {an2.key} annotation: {an2}",
"original.yaml - Found pullspec for {an2.key} annotation: {an2}",
"original.yaml - Found pullspec for {an3.key} annotation: {an3}",
"original.yaml - Found pullspec for {an4.key} annotation: {an4}",
"original.yaml - Found pullspec for {an5.key} annotation: {an5}",
"original.yaml - Found pullspec for {an6.key} annotation: {an6}",
"original.yaml - Found pullspec for {an7.key} annotation: {an7}",
]
for log in expected_logs:
assert log.format(**PULLSPECS) in caplog.text
def test_replace_pullspecs(self, caplog):
csv = OperatorCSV("original.yaml", ORIGINAL.data)
csv.replace_pullspecs(self._replacement_pullspecs)
assert csv.data == REPLACED.data
expected_logs = [
"{file} - Replaced pullspec for relatedImage ri1: {ri1} -> {ri1.replace}",
"{file} - Replaced pullspec for relatedImage ri2: {ri2} -> {ri2.replace}",
"{file} - Replaced pullspec for RELATED_IMAGE_CE1 var: {ce1} -> {ce1.replace}",
"{file} - Replaced pullspec for RELATED_IMAGE_ICE1 var: {ice1} -> {ice1.replace}",
"{file} - Replaced pullspec for container c1: {c1} -> {c1.replace}",
"{file} - Replaced pullspec for container c2: {c2} -> {c2.replace}",
"{file} - Replaced pullspec for container c3: {c3} -> {c3.replace}",
"{file} - Replaced pullspec for initContainer ic1: {ic1} -> {ic1.replace}",
"{file} - Replaced pullspec for {an1.key} annotation: {an1} -> {an1.replace}",
"{file} - Replaced pullspec for {an2.key} annotation: {an2} -> {an2.replace}",
"{file} - Replaced pullspec for {an3.key} annotation: {an3} -> {an3.replace}",
"{file} - Replaced pullspec for {an4.key} annotation: {an4} -> {an4.replace}",
"{file} - Replaced pullspec for {an5.key} annotation: {an5} -> {an5.replace}",
"{file} - Replaced pullspec for {an6.key} annotation: {an6} -> {an6.replace}",
"{file} - Replaced pullspec for {an7.key} annotation: {an7} -> {an7.replace}",
]
for log in expected_logs:
assert log.format(file="original.yaml", **PULLSPECS) in caplog.text
def test_replace_pullspecs_everywhere(self, caplog):
csv = OperatorCSV("original.yaml", ORIGINAL.data)
csv.replace_pullspecs_everywhere(self._replacement_pullspecs)
assert csv.data == REPLACED_EVERYWHERE.data
expected_logs = {
"{file} - Replaced pullspec: {ri1} -> {ri1.replace}": 3,
"{file} - Replaced pullspec: {ri2} -> {ri2.replace}": 3,
"{file} - Replaced pullspec: {ce1} -> {ce1.replace}": 4,
"{file} - Replaced pullspec: {c1} -> {c1.replace}": 3,
"{file} - Replaced pullspec: {c2} -> {c2.replace}": 3,
"{file} - Replaced pullspec: {c3} -> {c3.replace}": 3,
"{file} - Replaced pullspec: {an1} -> {an1.replace}": 2,
"{file} - Replaced pullspec: {ic1} -> {ic1.replace}": 3,
"{file} - Replaced pullspec: {ice1} -> {ice1.replace}": 3,
"{file} - Replaced pullspec for {an1.key} annotation: {an1} -> {an1.replace}": 1,
"{file} - Replaced pullspec for {an2.key} annotation: {an2} -> {an2.replace}": 1,
"{file} - Replaced pullspec for {an3.key} annotation: {an3} -> {an3.replace}": 1,
"{file} - Replaced pullspec for {an4.key} annotation: {an4} -> {an4.replace}": 1,
"{file} - Replaced pullspec for {an5.key} annotation: {an5} -> {an5.replace}": 1,
"{file} - Replaced pullspec for {an6.key} annotation: {an6} -> {an6.replace}": 2,
"{file} - Replaced pullspec for {an7.key} annotation: {an7} -> {an7.replace}": 2,
}
# NOTE: an1 gets replaced once as an annotation and twice in other places
# an6 and an7 both get replaced twice in the same annotation
for log, count in expected_logs.items():
assert caplog.text.count(log.format(file="original.yaml", **PULLSPECS)) == count
def test_dump(self, tmpdir):
path = tmpdir.join("original.yaml")
csv = OperatorCSV(str(path), ORIGINAL.data)
csv.dump()
content = path.read()
# Formatting does not necessarily have to match, at least check the data...
assert yaml.load(content) == csv.data
# ...and that the comment was preserved
assert content.startswith('# A meaningful comment')
def test_replace_only_some_pullspecs(self, caplog):
replacement_pullspecs = self._replacement_pullspecs.copy()
# ri1 won't be replaced because replacement is identical
replacement_pullspecs[RI1.value] = RI1.value
# ri2 won't be replaced because no replacement available
del replacement_pullspecs[RI2.value]
csv = OperatorCSV("original.yaml", ORIGINAL.data)
csv.replace_pullspecs(replacement_pullspecs)
assert RI1.find_in_data(csv.data) == RI1.value
assert RI2.find_in_data(csv.data) == RI2.value
ri1_log = "original.yaml - Replaced pullspec for relatedImage ri1: {ri1}"
ri2_log = "original.yaml - Replaced pullspec for relatedImage ri2: {ri2}"
assert ri1_log.format(ri1=RI1) not in caplog.text
assert ri2_log.format(ri2=RI2) not in caplog.text
@pytest.mark.parametrize("rel_images", [True, False])
@pytest.mark.parametrize("rel_envs, containers", [
(False, False),
(False, True),
# (True, False) - Cannot have envs without containers
(True, True),
])
@pytest.mark.parametrize("annotations", [True, False])
@pytest.mark.parametrize("init_rel_envs, init_containers", [
(False, False),
(False, True),
# (True, False) - Cannot have initContainer envs without initContainers
(True, True),
])
def test_get_pullspecs_some_locations(self, rel_images, rel_envs, containers,
annotations, init_rel_envs, init_containers):
data = ORIGINAL.data
expected = {p.value for p in PULLSPECS.values()}
if not rel_images:
expected -= {RI1.value, RI2.value}
del data["spec"]["relatedImages"]
deployments = chain_get(data, ["spec", "install", "spec", "deployments"])
if not rel_envs:
expected -= {CE1.value}
for d in deployments:
for c in chain_get(d, ["spec", "template", "spec", "containers"]):
c.pop("env", None)
if not containers:
expected -= {C1.value, C2.value, C3.value}
for d in deployments:
del d["spec"]["template"]["spec"]["containers"]
if not annotations:
expected -= {AN1.value, AN2.value, AN3.value,
AN4.value, AN5.value, AN6.value, AN7.value}
delete_all_annotations(data)
if not init_rel_envs:
expected -= {ICE1.value}
for d in deployments:
for c in chain_get(d, ["spec", "template", "spec", "initContainers"], default=[]):
c.pop("env", None)
if not init_containers:
expected -= {IC1.value}
for d in deployments:
d["spec"]["template"]["spec"].pop("initContainers", None)
csv = OperatorCSV("x.yaml", data)
assert csv.get_pullspecs() == expected
def test_valuefrom_references_not_allowed(self):
data = ORIGINAL.data
env_path = CE1.path[:-1]
env = chain_get(data, env_path)
env["valueFrom"] = "somewhere"
csv = OperatorCSV("original.yaml", data)
with pytest.raises(RuntimeError) as exc_info:
csv.get_pullspecs()
assert '"valueFrom" references are not supported' in str(exc_info.value)
def test_set_related_images(self, caplog):
data = ORIGINAL.data
csv = OperatorCSV("original.yaml", data)
csv.set_related_images()
# the order is:
# 1. existing relatedImages
# 2. known annotations
# 3. containers
# 4. initContainers
# 5. container env vars
# 6. initContainer env vars
# 7. other annotations (in reverse order - quirky, I know)
expected_related_images = [
CommentedMap([("name", name), ("image", pullspec.value.to_str())])
for name, pullspec in [
("ri1", RI1),
("ri2", RI2),
("baz-latest-annotation", AN1),
("c1", C1),
("c2", C2),
("c3", C3),
("ic1", IC1),
("ce1", CE1),
("ice1", ICE1),
("an7-1-annotation", AN7),
("an6-1-annotation", AN6),
("an5-1-annotation", AN5),
("an4-1-annotation", AN4),
("an3-1-annotation", AN3),
("an2-1-annotation", AN2),
]
]
assert csv.data["spec"]["relatedImages"] == expected_related_images
expected_logs = [
"{path} - Set relatedImage ri1 (from relatedImage ri1): {ri1}",
"{path} - Set relatedImage ri2 (from relatedImage ri2): {ri2}",
"{path} - Set relatedImage baz-latest-annotation (from {an1.key} annotation): {an1}",
"{path} - Set relatedImage c1 (from container c1): {c1}",
"{path} - Set relatedImage c2 (from container c2): {c2}",
"{path} - Set relatedImage c3 (from container c3): {c3}",
"{path} - Set relatedImage ic1 (from initContainer ic1): {ic1}",
"{path} - Set relatedImage ce1 (from RELATED_IMAGE_CE1 var): {ce1}",
"{path} - Set relatedImage ice1 (from RELATED_IMAGE_ICE1 var): {ice1}",
"{path} - Set relatedImage an2-1-annotation (from {an2.key} annotation): {an2}",
"{path} - Set relatedImage an3-1-annotation (from {an3.key} annotation): {an3}",
"{path} - Set relatedImage an4-1-annotation (from {an4.key} annotation): {an4}",
"{path} - Set relatedImage an5-1-annotation (from {an5.key} annotation): {an5}",
"{path} - Set relatedImage an6-1-annotation (from {an6.key} annotation): {an6}",
"{path} - Set relatedImage an7-1-annotation (from {an7.key} annotation): {an7}",
]
for log in expected_logs:
assert log.format(path="original.yaml", **PULLSPECS) in caplog.text
@pytest.mark.parametrize('pullspec, name', [
('registry.io/foo:v1', 'foo-v1-annotation'),
('registry.io/namespace/foo:v1', 'foo-v1-annotation'),
('registry.io/foo@sha256:{}'.format(SHA), 'foo-{}-annotation'.format(SHA)),
('registry.io/namespace/foo@sha256:{}'.format(SHA), 'foo-{}-annotation'.format(SHA)),
])
def test_related_annotation_names(self, pullspec, name):
data = {
'kind': 'ClusterServiceVersion',
'metadata': {
'annotations': {
'foo': pullspec
}
}
}
csv = OperatorCSV("original.yaml", data)
csv.set_related_images()
generated_name = csv.data["spec"]["relatedImages"][0]["name"]
assert generated_name == name
@pytest.mark.parametrize('p1, p2, should_fail', [
# Different tag, no conflict
('registry.io/foo:v1', 'registry.io/foo:v2', False),
# Identical pullspec, no conflict
('registry.io/foo:v1', 'registry.io/foo:v1', False),
# Same repo and tag but different pullspec
('registry.io/foo:v1', 'registry.io/namespace/foo:v1', True),
# Sha in digest happens to be the same as the tag
('registry.io/foo@sha256:{0}'.format(SHA), 'registry.io/foo:{0}'.format(SHA), True),
])
def test_related_annotation_name_conflicts(self, p1, p2, should_fail):
data = {
'kind': 'ClusterServiceVersion',
'metadata': {
'annotations': {
'foo': "{}, {}".format(p1, p2)
}
}
}
csv = OperatorCSV("original.yaml", data)
if should_fail:
with pytest.raises(RuntimeError) as exc_info:
csv.set_related_images()
msg = ("original.yaml - Found conflicts when setting relatedImages:\n"
"foo annotation: {} X foo annotation: {}".format(p2, p1))
assert str(exc_info.value) == msg
else:
csv.set_related_images()
@pytest.mark.parametrize("related_images, containers, err_msg", [
(
# conflict in original relatedImages
[{"name": "foo", "image": "foo"}, {"name": "foo", "image": "bar"}],
[],
("{path} - Found conflicts when setting relatedImages:\n"
"relatedImage foo: foo X relatedImage foo: bar")
),
(
# conflict in new relatedImages
[],
[{"name": "foo", "image": "foo"}, {"name": "foo", "image": "bar"}],
("{path} - Found conflicts when setting relatedImages:\n"
"container foo: foo X container foo: bar")
),
(
# conflict between original and new relatedImages
[{"name": "foo", "image": "foo"}],
[{"name": "foo", "image": "bar"}],
("{path} - Found conflicts when setting relatedImages:\n"
"relatedImage foo: foo X container foo: bar")
),
(
# duplicate in original relatedImages, no conflict
[{"name": "foo", "image": "foo"}, {"name": "foo", "image": "foo"}],
[],
None
),
(
# duplicate in new relatedImages, no conflict
[],
[{"name": "foo", "image": "foo"}, {"name": "foo", "image": "foo"}],
None
),
(
# duplicate between original and new relatedImages, no conflict
[{"name": "foo", "image": "foo"}],
[{"name": "foo", "image": "foo"}],
None
),
(
# multiple conflicts in original and new relatedImages
[{"name": "foo", "image": "foo"}, {"name": "foo", "image": "bar"}],
[{"name": "foo", "image": "baz"}, {"name": "foo", "image": "spam"}],
# all messages should be (first found pullspec X conflicting pullspec)
("{path} - Found conflicts when setting relatedImages:\n"
"relatedImage foo: foo X relatedImage foo: bar\n"
"relatedImage foo: foo X container foo: baz\n"
"relatedImage foo: foo X container foo: spam")
)
])
def test_set_related_images_conflicts(self, related_images, containers, err_msg):
data = {
"kind": "ClusterServiceVersion",
"spec": {
"relatedImages": related_images,
"install": {
"spec": {
"deployments": [
{
"spec": {
"template": {
"spec": {
"containers": containers
}
}
}
}
]
}
}
}
}
csv = OperatorCSV("original.yaml", data)
if err_msg is not None:
with pytest.raises(RuntimeError) as exc_info:
csv.set_related_images()
assert str(exc_info.value) == err_msg.format(path="original.yaml")
else:
csv.set_related_images()
updated_counts = Counter(x['name'] for x in csv.data['spec']['relatedImages'])
# check that there are no duplicates in .spec.relatedImages
for name, count in updated_counts.items():
assert count == 1, 'Duplicate in relatedImages: {}'.format(name)
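
    # Conceptually, the conflict rule exercised above is: group pullspecs by
    # their relatedImage name, fail when one name maps to two different
    # images, and silently collapse exact duplicates. A minimal sketch of
    # that rule (hypothetical helper, not the real implementation):
    @staticmethod
    def _find_conflicts_sketch(named_pullspecs):
        # named_pullspecs: iterable of (name, image) pairs; first image wins
        seen = {}
        conflicts = []
        for name, image in named_pullspecs:
            if name in seen and seen[name] != image:
                conflicts.append((name, seen[name], image))
            seen.setdefault(name, image)
        return conflicts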
@pytest.mark.parametrize('pullspecs, does_have', [
(None, False),
([], False),
({'name': 'foo', 'image': 'bar'}, True),
])
def test_has_related_images(self, pullspecs, does_have):
data = {
'kind': 'ClusterServiceVersion',
'spec': {}
}
if pullspecs is not None:
data['spec']['relatedImages'] = pullspecs
csv = OperatorCSV('original.yaml', data)
assert csv.has_related_images() == does_have
@pytest.mark.parametrize('var, does_have', [
(None, False),
({'name': 'UNRELATED_IMAGE', 'value': 'foo'}, False),
({'name': 'RELATED_IMAGE_BAR', 'value': 'baz'}, True),
])
def test_has_related_image_envs(self, var, does_have):
data = {
'kind': 'ClusterServiceVersion',
'spec': {
'install': {
'spec': {
'deployments': [
{
'spec': {
'template': {
'spec': {
'containers': [
{'name': 'spam', 'image': 'eggs', 'env': []}
]
}
}
}
}
]
}
}
}
}
if var is not None:
deployment = data['spec']['install']['spec']['deployments'][0]
deployment['spec']['template']['spec']['containers'][0]['env'].append(var)
csv = OperatorCSV('original.yaml', data)
assert csv.has_related_image_envs() == does_have
@pytest.mark.parametrize('pullspecs, replacements, expected', [
# 1st is a substring of 2nd
(['a.b/c:1', 'a.b/c:1.1'],
{'a.b/c:1': 'foo:1', 'a.b/c:1.1': 'bar:1'},
['foo:1', 'bar:1']),
# Same but reversed
(['a.b/c:1.1', 'a.b/c:1'],
{'a.b/c:1': 'foo:1', 'a.b/c:1.1': 'bar:1'},
['bar:1', 'foo:1']),
# 2nd is 1st after replacement
(['a.b/c:1', 'd.e/f:1'],
{'a.b/c:1': 'd.e/f:1', 'd.e/f:1': 'g.h/i:1'},
['d.e/f:1', 'g.h/i:1']),
# Same but reversed
(['d.e/f:1', 'a.b/c:1'],
{'a.b/c:1': 'd.e/f:1', 'd.e/f:1': 'g.h/i:1'},
['g.h/i:1', 'd.e/f:1']),
# Replacement is a swap
(['a.b/c:1', 'd.e/f:1'],
{'a.b/c:1': 'd.e/f:1', 'd.e/f:1': 'a.b/c:1'},
['d.e/f:1', 'a.b/c:1']),
])
def test_tricky_annotation_replacements(self, pullspecs, replacements, expected):
replacements = {
ImageName.parse(old): ImageName.parse(new)
for old, new in replacements.items()
}
data = {
'kind': 'ClusterServiceVersion',
'metadata': {
'annotations': {
'foo': ", ".join(pullspecs)
}
}
}
csv = OperatorCSV("original.yaml", data)
csv.replace_pullspecs(replacements)
assert csv.data['metadata']['annotations']['foo'] == ", ".join(expected)
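
    # The cases above only pass if every original pullspec is mapped through
    # the replacement dict exactly once: a swap must not cascade, and an
    # already-replaced value must never be replaced again. A minimal sketch
    # of that single-pass behavior on a comma-separated annotation
    # (hypothetical helper, not the real implementation):
    @staticmethod
    def _replace_once_sketch(annotation, replacements):
        # keyed by string form to avoid assuming ImageName hashing details
        by_str = {str(old): str(new) for old, new in replacements.items()}
        pullspecs = [p.strip() for p in annotation.split(',')]
        return ", ".join(by_str.get(p, p) for p in pullspecs)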
def test_known_vs_other_annotations(self):
        # All annotations must be found and replaced exactly once; the
        # heuristic must not look in keys that are known pullspec sources
data = {
'kind': 'ClusterServiceVersion',
'metadata': {
'annotations': {
'containerImage': 'a.b/c:1',
'notContainerImage': 'a.b/c:1'
}
},
'spec': {
'metadata': {
'annotations': {
'containerImage': 'a.b/c:1',
'notContainerImage': 'a.b/c:1'
}
}
}
}
replacements = {
ImageName.parse(old): ImageName.parse(new) for old, new in [
('a.b/c:1', 'd.e/f:1'),
('d.e/f:1', 'g.h/i:1'),
]
}
csv = OperatorCSV("original.yaml", data)
csv.replace_pullspecs(replacements)
assert csv.data["metadata"]["annotations"]["containerImage"] == 'd.e/f:1'
assert csv.data["metadata"]["annotations"]["notContainerImage"] == 'd.e/f:1'
assert csv.data["spec"]["metadata"]["annotations"]["containerImage"] == 'd.e/f:1'
assert csv.data["spec"]["metadata"]["annotations"]["notContainerImage"] == 'd.e/f:1'
def test_ignored_annotations(self):
data = {
'kind': 'ClusterServiceVersion',
'metadata': {
'annotations': {
'some_text': 'abcdef',
'some_number': 123,
'some_array': [],
'some_object': {},
'metadata': {
'annotations': {
'pullspec': 'metadata.annotations/nested.in:metadata.annotations'
}
}
},
'not_an_annotation': 'something.that/looks-like:a-pullspec',
'not_annotations': {
'also_not_an_annotation': 'other.pullspec/lookalike:thing'
},
'metadata': {
'annotations': {
'pullspec': 'metadata.annotations/nested.in:metadata'
}
}
}
}
csv = OperatorCSV("original.yaml", data)
assert csv.get_pullspecs() == set()
class TestOperatorManifest(object):
def test_from_directory(self, tmpdir):
subdir = tmpdir.mkdir("nested")
original = tmpdir.join("original.yaml")
original.write(ORIGINAL.content)
replaced = subdir.join("replaced.yaml")
replaced.write(REPLACED.content)
manifest = OperatorManifest.from_directory(str(tmpdir))
original_csv = manifest.files[0]
replaced_csv = manifest.files[1]
assert original_csv.path == str(original)
assert replaced_csv.path == str(replaced)
assert original_csv.data == ORIGINAL.data
assert replaced_csv.data == REPLACED.data
def test_from_directory_no_csvs(self, tmpdir):
subdir = tmpdir.mkdir("nested")
original = tmpdir.join("original.yaml")
replaced = subdir.join("replaced.yaml")
original_data = ORIGINAL.data
original_data["kind"] = "IDK"
with open(str(original), "w") as f:
yaml.dump(original_data, f)
replaced_data = REPLACED.data
del replaced_data["kind"]
with open(str(replaced), "w") as f:
yaml.dump(replaced_data, f)
manifest = OperatorManifest.from_directory(str(tmpdir))
assert manifest.files == []
def test_from_directory_yaml_list(self, tmpdir):
yaml_list = tmpdir.join("list.yaml")
yaml_list_data = YAML_LIST.data
with open(str(yaml_list), "w") as f:
yaml.dump(yaml_list_data, f)
manifest = OperatorManifest.from_directory(str(tmpdir))
assert manifest.files == []
def test_directory_does_not_exist(self, tmpdir):
nonexistent = tmpdir.join("nonexistent")
with pytest.raises(RuntimeError) as exc_info:
OperatorManifest.from_directory(str(nonexistent))
msg = "Path does not exist or is not a directory: {}".format(nonexistent)
assert str(exc_info.value) == msg
regular_file = tmpdir.join("some_file")
regular_file.write("hello")
with pytest.raises(RuntimeError) as exc_info:
OperatorManifest.from_directory(str(regular_file))
msg = "Path does not exist or is not a directory: {}".format(regular_file)
assert str(exc_info.value) == msg
|
|
from __future__ import unicode_literals
from django import forms
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.test import TestCase
from django.utils import six
from .models import (TaggedItem, ValuableTaggedItem, Comparison, Animal,
Vegetable, Mineral, Gecko, Rock, ManualPK,
ForProxyModelModel, ForConcreteModelModel,
ProxyRelatedModel, ConcreteRelatedModel, AllowsNullGFK)
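# The tests below assume models roughly like the following (a condensed
# sketch of this test app's models.py, not a verbatim copy): TaggedItem
# carries a GenericForeignKey, and models such as Animal declare a reverse
# GenericRelation to it.
#
#   from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
#   from django.db import models
#
#   class TaggedItem(models.Model):
#       tag = models.SlugField()
#       content_type = models.ForeignKey(ContentType)
#       object_id = models.PositiveIntegerField()
#       content_object = GenericForeignKey('content_type', 'object_id')
#
#   class Animal(models.Model):
#       common_name = models.CharField(max_length=150)
#       latin_name = models.CharField(max_length=150)
#       tags = GenericRelation(TaggedItem)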
class GenericRelationsTests(TestCase):
def test_generic_relations(self):
# Create the world in 7 lines of code...
lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo")
platypus = Animal.objects.create(
common_name="Platypus", latin_name="Ornithorhynchus anatinus"
)
Vegetable.objects.create(name="Eggplant", is_yucky=True)
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
# Objects with declared GenericRelations can be tagged directly -- the
# API mimics the many-to-many API.
bacon.tags.create(tag="fatty")
bacon.tags.create(tag="salty")
lion.tags.create(tag="yellow")
lion.tags.create(tag="hairy")
platypus.tags.create(tag="fatty")
self.assertQuerysetEqual(lion.tags.all(), [
"<TaggedItem: hairy>",
"<TaggedItem: yellow>"
])
self.assertQuerysetEqual(bacon.tags.all(), [
"<TaggedItem: fatty>",
"<TaggedItem: salty>"
])
# You can easily access the content object like a foreign key.
t = TaggedItem.objects.get(tag="salty")
self.assertEqual(t.content_object, bacon)
qs = TaggedItem.objects.filter(animal__isnull=False).order_by('animal__common_name', 'tag')
self.assertQuerysetEqual(
qs, ["<TaggedItem: hairy>", "<TaggedItem: yellow>", "<TaggedItem: fatty>"]
)
mpk = ManualPK.objects.create(id=1)
mpk.tags.create(tag='mpk')
from django.db.models import Q
qs = TaggedItem.objects.filter(Q(animal__isnull=False) | Q(manualpk__id=1)).order_by('tag')
self.assertQuerysetEqual(
qs, ["fatty", "hairy", "mpk", "yellow"], lambda x: x.tag)
mpk.delete()
# Recall that the Mineral class doesn't have an explicit GenericRelation
# defined. That's OK, because you can create TaggedItems explicitly.
tag1 = TaggedItem.objects.create(content_object=quartz, tag="shiny")
TaggedItem.objects.create(content_object=quartz, tag="clearish")
# However, excluding GenericRelations means your lookups have to be a
# bit more explicit.
ctype = ContentType.objects.get_for_model(quartz)
q = TaggedItem.objects.filter(
content_type__pk=ctype.id, object_id=quartz.id
)
self.assertQuerysetEqual(q, [
"<TaggedItem: clearish>",
"<TaggedItem: shiny>"
])
# You can set a generic foreign key in the way you'd expect.
tag1.content_object = platypus
tag1.save()
self.assertQuerysetEqual(platypus.tags.all(), [
"<TaggedItem: fatty>",
"<TaggedItem: shiny>"
])
q = TaggedItem.objects.filter(
content_type__pk=ctype.id, object_id=quartz.id
)
self.assertQuerysetEqual(q, ["<TaggedItem: clearish>"])
# Queries across generic relations respect the content types. Even
# though there are two TaggedItems with a tag of "fatty", this query
# only pulls out the one with the content type related to Animals.
self.assertQuerysetEqual(Animal.objects.order_by('common_name'), [
"<Animal: Lion>",
"<Animal: Platypus>"
])
# Create another fatty tagged instance with different PK to ensure
# there is a content type restriction in the generated queries below.
mpk = ManualPK.objects.create(id=lion.pk)
mpk.tags.create(tag="fatty")
self.assertQuerysetEqual(Animal.objects.filter(tags__tag='fatty'), [
"<Animal: Platypus>"
])
self.assertQuerysetEqual(Animal.objects.exclude(tags__tag='fatty'), [
"<Animal: Lion>"
])
mpk.delete()
# If you delete an object with an explicit Generic relation, the related
# objects are deleted when the source object is deleted.
# Original list of tags:
comp_func = lambda obj: (
obj.tag, obj.content_type.model_class(), obj.object_id
)
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('clearish', Mineral, quartz.pk),
('fatty', Animal, platypus.pk),
('fatty', Vegetable, bacon.pk),
('hairy', Animal, lion.pk),
('salty', Vegetable, bacon.pk),
('shiny', Animal, platypus.pk),
('yellow', Animal, lion.pk)
],
comp_func
)
lion.delete()
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('clearish', Mineral, quartz.pk),
('fatty', Animal, platypus.pk),
('fatty', Vegetable, bacon.pk),
('salty', Vegetable, bacon.pk),
('shiny', Animal, platypus.pk)
],
comp_func
)
# If Generic Relation is not explicitly defined, any related objects
# remain after deletion of the source object.
quartz_pk = quartz.pk
quartz.delete()
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('clearish', Mineral, quartz_pk),
('fatty', Animal, platypus.pk),
('fatty', Vegetable, bacon.pk),
('salty', Vegetable, bacon.pk),
('shiny', Animal, platypus.pk)
],
comp_func
)
# If you delete a tag, the objects using the tag are unaffected
# (other than losing a tag)
tag = TaggedItem.objects.order_by("id")[0]
tag.delete()
self.assertQuerysetEqual(bacon.tags.all(), ["<TaggedItem: salty>"])
self.assertQuerysetEqual(TaggedItem.objects.all(), [
('clearish', Mineral, quartz_pk),
('fatty', Animal, platypus.pk),
('salty', Vegetable, bacon.pk),
('shiny', Animal, platypus.pk)
],
comp_func
)
TaggedItem.objects.filter(tag='fatty').delete()
ctype = ContentType.objects.get_for_model(lion)
self.assertQuerysetEqual(Animal.objects.filter(tags__content_type=ctype), [
"<Animal: Platypus>"
])
def test_generic_relation_related_name_default(self):
# Test that GenericRelation by default isn't usable from
# the reverse side.
with self.assertRaises(FieldError):
TaggedItem.objects.filter(vegetable__isnull=True)
def test_multiple_gfk(self):
# Simple tests for multiple GenericForeignKeys
# only uses one model, since the above tests should be sufficient.
tiger = Animal.objects.create(common_name="tiger")
cheetah = Animal.objects.create(common_name="cheetah")
bear = Animal.objects.create(common_name="bear")
# Create directly
Comparison.objects.create(
first_obj=cheetah, other_obj=tiger, comparative="faster"
)
Comparison.objects.create(
first_obj=tiger, other_obj=cheetah, comparative="cooler"
)
# Create using GenericRelation
tiger.comparisons.create(other_obj=bear, comparative="cooler")
tiger.comparisons.create(other_obj=cheetah, comparative="stronger")
self.assertQuerysetEqual(cheetah.comparisons.all(), [
"<Comparison: cheetah is faster than tiger>"
])
# Filtering works
self.assertQuerysetEqual(tiger.comparisons.filter(comparative="cooler"), [
"<Comparison: tiger is cooler than cheetah>",
"<Comparison: tiger is cooler than bear>",
], ordered=False)
# Filtering and deleting works
subjective = ["cooler"]
tiger.comparisons.filter(comparative__in=subjective).delete()
self.assertQuerysetEqual(Comparison.objects.all(), [
"<Comparison: cheetah is faster than tiger>",
"<Comparison: tiger is stronger than cheetah>"
], ordered=False)
# If we delete cheetah, Comparisons with cheetah as 'first_obj' will be
# deleted since Animal has an explicit GenericRelation to Comparison
# through first_obj. Comparisons with cheetah as 'other_obj' will not
# be deleted.
cheetah.delete()
self.assertQuerysetEqual(Comparison.objects.all(), [
"<Comparison: tiger is stronger than None>"
])
def test_gfk_subclasses(self):
# GenericForeignKey should work with subclasses (see #8309)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
valuedtag = ValuableTaggedItem.objects.create(
content_object=quartz, tag="shiny", value=10
)
self.assertEqual(valuedtag.content_object, quartz)
def test_generic_inline_formsets(self):
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet()
self.assertHTMLEqual(''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>""")
formset = GenericFormSet(instance=Animal())
self.assertHTMLEqual(''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p>""")
platypus = Animal.objects.create(
common_name="Platypus", latin_name="Ornithorhynchus anatinus"
)
platypus.tags.create(tag="shiny")
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet(instance=platypus)
tagged_item_id = TaggedItem.objects.get(
tag='shiny', object_id=platypus.id
).id
self.assertHTMLEqual(''.join(form.as_p() for form in formset.forms), """<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" value="shiny" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE" /><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" value="%s" id="id_generic_relations-taggeditem-content_type-object_id-0-id" /></p><p><label for="id_generic_relations-taggeditem-content_type-object_id-1-tag">Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-1-tag" type="text" name="generic_relations-taggeditem-content_type-object_id-1-tag" maxlength="50" /></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">Delete:</label> <input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-1-DELETE" id="id_generic_relations-taggeditem-content_type-object_id-1-DELETE" /><input type="hidden" name="generic_relations-taggeditem-content_type-object_id-1-id" id="id_generic_relations-taggeditem-content_type-object_id-1-id" /></p>""" % tagged_item_id)
lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo")
formset = GenericFormSet(instance=lion, prefix='x')
self.assertHTMLEqual(''.join(form.as_p() for form in formset.forms), """<p><label for="id_x-0-tag">Tag:</label> <input id="id_x-0-tag" type="text" name="x-0-tag" maxlength="50" /></p>
<p><label for="id_x-0-DELETE">Delete:</label> <input type="checkbox" name="x-0-DELETE" id="id_x-0-DELETE" /><input type="hidden" name="x-0-id" id="id_x-0-id" /></p>""")
def test_gfk_manager(self):
# GenericForeignKey should not use the default manager (which may filter objects) #16048
tailless = Gecko.objects.create(has_tail=False)
tag = TaggedItem.objects.create(content_object=tailless, tag="lizard")
self.assertEqual(tag.content_object, tailless)
def test_subclasses_with_gen_rel(self):
"""
Test that concrete model subclasses with generic relations work
correctly (ticket 11263).
"""
granite = Rock.objects.create(name='granite', hardness=5)
TaggedItem.objects.create(content_object=granite, tag="countertop")
self.assertEqual(Rock.objects.filter(tags__tag="countertop").count(), 1)
def test_generic_inline_formsets_initial(self):
"""
Test for #17927 Initial values support for BaseGenericInlineFormSet.
"""
quartz = Mineral.objects.create(name="Quartz", hardness=7)
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
ctype = ContentType.objects.get_for_model(quartz)
initial_data = [{
'tag': 'lizard',
'content_type': ctype.pk,
'object_id': quartz.pk,
}]
formset = GenericFormSet(initial=initial_data)
self.assertEqual(formset.forms[0].initial, initial_data[0])
def test_get_or_create(self):
# get_or_create should work with virtual fields (content_object)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
tag, created = TaggedItem.objects.get_or_create(tag="shiny",
defaults={'content_object': quartz})
self.assertTrue(created)
self.assertEqual(tag.tag, "shiny")
self.assertEqual(tag.content_object.id, quartz.id)
def test_update_or_create_defaults(self):
# update_or_create should work with virtual fields (content_object)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
diamond = Mineral.objects.create(name="Diamond", hardness=7)
tag, created = TaggedItem.objects.update_or_create(tag="shiny",
defaults={'content_object': quartz})
self.assertTrue(created)
self.assertEqual(tag.content_object.id, quartz.id)
tag, created = TaggedItem.objects.update_or_create(tag="shiny",
defaults={'content_object': diamond})
self.assertFalse(created)
self.assertEqual(tag.content_object.id, diamond.id)
def test_query_content_type(self):
with six.assertRaisesRegex(self, FieldError, "^Cannot resolve keyword 'content_object' into field."):
TaggedItem.objects.get(content_object='')
class GetOrCreateAndUpdateOrCreateTests(TestCase):
"""
GenericRelationsTests has changed significantly on master, this
standalone TestCase is part of the backport for #23611.
"""
def setUp(self):
self.bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
self.bacon.tags.create(tag="fatty")
self.bacon.tags.create(tag="salty")
def test_generic_update_or_create_when_created(self):
"""
Should be able to use update_or_create from the generic related manager
to create a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag, created = self.bacon.tags.update_or_create(tag='stinky')
self.assertTrue(created)
self.assertEqual(count + 1, self.bacon.tags.count())
def test_generic_update_or_create_when_updated(self):
"""
Should be able to use update_or_create from the generic related manager
to update a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag = self.bacon.tags.create(tag='stinky')
self.assertEqual(count + 1, self.bacon.tags.count())
tag, created = self.bacon.tags.update_or_create(defaults={'tag': 'juicy'}, id=tag.id)
self.assertFalse(created)
self.assertEqual(count + 1, self.bacon.tags.count())
self.assertEqual(tag.tag, 'juicy')
def test_generic_get_or_create_when_created(self):
"""
Should be able to use get_or_create from the generic related manager
to create a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag, created = self.bacon.tags.get_or_create(tag='stinky')
self.assertTrue(created)
self.assertEqual(count + 1, self.bacon.tags.count())
def test_generic_get_or_create_when_exists(self):
"""
Should be able to use get_or_create from the generic related manager
to get a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag = self.bacon.tags.create(tag="stinky")
self.assertEqual(count + 1, self.bacon.tags.count())
tag, created = self.bacon.tags.get_or_create(id=tag.id, defaults={'tag': 'juicy'})
self.assertFalse(created)
self.assertEqual(count + 1, self.bacon.tags.count())
        # shouldn't have changed the tag
self.assertEqual(tag.tag, 'stinky')
class CustomWidget(forms.TextInput):
pass
class TaggedItemForm(forms.ModelForm):
class Meta:
model = TaggedItem
fields = '__all__'
widgets = {'tag': CustomWidget}
class GenericInlineFormsetTest(TestCase):
def test_generic_inlineformset_factory(self):
"""
Regression for #14572: Using base forms with widgets
defined in Meta should not raise errors.
"""
Formset = generic_inlineformset_factory(TaggedItem, TaggedItemForm)
form = Formset().forms[0]
self.assertIsInstance(form['tag'].field.widget, CustomWidget)
def test_save_new_uses_form_save(self):
"""
Regression for #16260: save_new should call form.save()
"""
class SaveTestForm(forms.ModelForm):
def save(self, *args, **kwargs):
self.instance.saved_by = "custom method"
return super(SaveTestForm, self).save(*args, **kwargs)
Formset = generic_inlineformset_factory(
ForProxyModelModel, fields='__all__', form=SaveTestForm)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj = formset.save()[0]
self.assertEqual(new_obj.saved_by, "custom method")
def test_save_new_for_proxy(self):
Formset = generic_inlineformset_factory(ForProxyModelModel,
fields='__all__', for_concrete_model=False)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj, = formset.save()
self.assertEqual(new_obj.obj, instance)
def test_save_new_for_concrete(self):
Formset = generic_inlineformset_factory(ForProxyModelModel,
fields='__all__', for_concrete_model=True)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj, = formset.save()
self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel)
class ProxyRelatedModelTest(TestCase):
def test_default_behavior(self):
"""
The default for for_concrete_model should be True
"""
base = ForConcreteModelModel()
base.obj = rel = ProxyRelatedModel.objects.create()
base.save()
base = ForConcreteModelModel.objects.get(pk=base.pk)
rel = ConcreteRelatedModel.objects.get(pk=rel.pk)
self.assertEqual(base.obj, rel)
def test_works_normally(self):
"""
When for_concrete_model is False, we should still be able to get
an instance of the concrete class.
"""
base = ForProxyModelModel()
base.obj = rel = ConcreteRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
self.assertEqual(base.obj, rel)
def test_proxy_is_returned(self):
"""
Instances of the proxy should be returned when
for_concrete_model is False.
"""
base = ForProxyModelModel()
base.obj = ProxyRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
self.assertIsInstance(base.obj, ProxyRelatedModel)
def test_query(self):
base = ForProxyModelModel()
base.obj = rel = ConcreteRelatedModel.objects.create()
base.save()
self.assertEqual(rel, ConcreteRelatedModel.objects.get(bases__id=base.id))
def test_query_proxy(self):
base = ForProxyModelModel()
base.obj = rel = ProxyRelatedModel.objects.create()
base.save()
self.assertEqual(rel, ProxyRelatedModel.objects.get(bases__id=base.id))
def test_generic_relation(self):
base = ForProxyModelModel()
base.obj = ProxyRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
rel = ProxyRelatedModel.objects.get(pk=base.obj.pk)
self.assertEqual(base, rel.bases.get())
def test_generic_relation_set(self):
base = ForProxyModelModel()
base.obj = ConcreteRelatedModel.objects.create()
base.save()
newrel = ConcreteRelatedModel.objects.create()
newrel.bases = [base]
newrel = ConcreteRelatedModel.objects.get(pk=newrel.pk)
self.assertEqual(base, newrel.bases.get())
class TestInitWithNoneArgument(TestCase):
def test_none_not_allowed(self):
# TaggedItem requires a content_type, initializing with None should
# raise a ValueError.
with six.assertRaisesRegex(self, ValueError,
'Cannot assign None: "TaggedItem.content_type" does not allow null values'):
TaggedItem(content_object=None)
def test_none_allowed(self):
# AllowsNullGFK doesn't require a content_type, so None argument should
# also be allowed.
AllowsNullGFK(content_object=None)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Virtual batch normalization.
This technique was first introduced in `Improved Techniques for Training GANs`
(Salimans et al, https://arxiv.org/abs/1606.03498). Instead of using batch
normalization on a minibatch, it fixes a reference subset of the data to use for
calculating normalization statistics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
__all__ = [
'VBN',
]
def _static_or_dynamic_batch_size(tensor, batch_axis):
"""Returns the static or dynamic batch size."""
batch_size = array_ops.shape(tensor)[batch_axis]
static_batch_size = tensor_util.constant_value(batch_size)
return static_batch_size or batch_size
def _statistics(x, axes):
"""Calculate the mean and mean square of `x`.
Modified from the implementation of `tf.nn.moments`.
Args:
x: A `Tensor`.
axes: Array of ints. Axes along which to compute mean and
variance.
Returns:
Two `Tensor` objects: `mean` and `square mean`.
"""
# The dynamic range of fp16 is too limited to support the collection of
# sufficient statistics. As a workaround we simply perform the operations
# on 32-bit floats before converting the mean and variance back to fp16
y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x
# Compute true mean while keeping the dims for proper broadcasting.
shift = array_ops.stop_gradient(math_ops.reduce_mean(y, axes, keepdims=True))
shifted_mean = math_ops.reduce_mean(y - shift, axes, keepdims=True)
mean = shifted_mean + shift
mean_squared = math_ops.reduce_mean(math_ops.square(y), axes, keepdims=True)
mean = array_ops.squeeze(mean, axes)
mean_squared = array_ops.squeeze(mean_squared, axes)
if x.dtype == dtypes.float16:
return (math_ops.cast(mean, dtypes.float16),
math_ops.cast(mean_squared, dtypes.float16))
else:
return (mean, mean_squared)
def _validate_init_input_and_get_axis(reference_batch, axis):
"""Validate input and return the used axis value."""
if reference_batch.shape.ndims is None:
raise ValueError('`reference_batch` has unknown dimensions.')
ndims = reference_batch.shape.ndims
if axis < 0:
used_axis = ndims + axis
else:
used_axis = axis
if used_axis < 0 or used_axis >= ndims:
raise ValueError('Value of `axis` argument ' + str(used_axis) +
' is out of range for input with rank ' + str(ndims))
return used_axis
def _validate_call_input(tensor_list, batch_dim):
"""Verifies that tensor shapes are compatible, except for `batch_dim`."""
def _get_shape(tensor):
shape = tensor.shape.as_list()
del shape[batch_dim]
return shape
base_shape = tensor_shape.TensorShape(_get_shape(tensor_list[0]))
for tensor in tensor_list:
base_shape.assert_is_compatible_with(_get_shape(tensor))
class VBN(object):
"""A class to perform virtual batch normalization.
This technique was first introduced in `Improved Techniques for Training GANs`
(Salimans et al, https://arxiv.org/abs/1606.03498). Instead of using batch
normalization on a minibatch, it fixes a reference subset of the data to use
for calculating normalization statistics.
To do this, we calculate the reference batch mean and mean square, and modify
those statistics for each example. We use mean square instead of variance,
since it is linear.
Note that if `center` or `scale` variables are created, they are shared
between all calls to this object.
The `__init__` API is intended to mimic `tf.layers.batch_normalization` as
closely as possible.
"""
def __init__(self,
reference_batch,
axis=-1,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
trainable=True,
name=None,
batch_axis=0):
"""Initialize virtual batch normalization object.
We precompute the 'mean' and 'mean squared' of the reference batch, so that
`__call__` is efficient. This means that the axis must be supplied when the
object is created, not when it is called.
We precompute 'square mean' instead of 'variance', because the square mean
can be easily adjusted on a per-example basis.
Args:
      reference_batch: A minibatch tensor. This will form the reference data
from which the normalization statistics are calculated. See
https://arxiv.org/abs/1606.03498 for more details.
axis: Integer, the axis that should be normalized (typically the features
axis). For instance, after a `Convolution2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False,
`beta` is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can
be disabled since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: String, the name of the ops.
batch_axis: The axis of the batch dimension. This dimension is treated
differently in `virtual batch normalization` vs `batch normalization`.
Raises:
ValueError: If `reference_batch` has unknown dimensions at graph
construction.
ValueError: If `batch_axis` is the same as `axis`.
"""
axis = _validate_init_input_and_get_axis(reference_batch, axis)
self._epsilon = epsilon
self._beta = 0
self._gamma = 1
self._batch_axis = _validate_init_input_and_get_axis(
reference_batch, batch_axis)
if axis == self._batch_axis:
raise ValueError('`axis` and `batch_axis` cannot be the same.')
with variable_scope.variable_scope(name, 'VBN',
values=[reference_batch]) as self._vs:
self._reference_batch = reference_batch
# Calculate important shapes:
# 1) Reduction axes for the reference batch
# 2) Broadcast shape, if necessary
# 3) Reduction axes for the virtual batchnormed batch
# 4) Shape for optional parameters
input_shape = self._reference_batch.shape
ndims = input_shape.ndims
reduction_axes = list(range(ndims))
del reduction_axes[axis]
self._broadcast_shape = [1] * len(input_shape)
self._broadcast_shape[axis] = input_shape[axis].value
self._example_reduction_axes = list(range(ndims))
del self._example_reduction_axes[max(axis, self._batch_axis)]
del self._example_reduction_axes[min(axis, self._batch_axis)]
params_shape = self._reference_batch.shape[axis]
# Determines whether broadcasting is needed. This is slightly different
# than in the `nn.batch_normalization` case, due to `batch_dim`.
self._needs_broadcasting = (
sorted(self._example_reduction_axes) != list(range(ndims))[:-2])
# Calculate the sufficient statistics for the reference batch in a way
# that can be easily modified by additional examples.
self._ref_mean, self._ref_mean_squares = _statistics(
self._reference_batch, reduction_axes)
self._ref_variance = (self._ref_mean_squares -
math_ops.square(self._ref_mean))
# Virtual batch normalization uses a weighted average between example
# statistics and the reference batch statistics.
ref_batch_size = _static_or_dynamic_batch_size(
self._reference_batch, self._batch_axis)
self._example_weight = 1. / (math_ops.to_float(ref_batch_size) + 1.)
self._ref_weight = 1. - self._example_weight
# Make the variables, if necessary.
if center:
self._beta = variable_scope.get_variable(
name='beta',
shape=(params_shape,),
initializer=beta_initializer,
regularizer=beta_regularizer,
trainable=trainable)
if scale:
self._gamma = variable_scope.get_variable(
name='gamma',
shape=(params_shape,),
initializer=gamma_initializer,
regularizer=gamma_regularizer,
trainable=trainable)
def _virtual_statistics(self, inputs, reduction_axes):
"""Compute the statistics needed for virtual batch normalization."""
cur_mean, cur_mean_sq = _statistics(inputs, reduction_axes)
vb_mean = (self._example_weight * cur_mean +
self._ref_weight * self._ref_mean)
vb_mean_sq = (self._example_weight * cur_mean_sq +
self._ref_weight * self._ref_mean_squares)
return (vb_mean, vb_mean_sq)
def _broadcast(self, v, broadcast_shape=None):
# The exact broadcast shape depends on the current batch, not the reference
# batch, unless we're calculating the batch normalization of the reference
# batch.
b_shape = broadcast_shape or self._broadcast_shape
if self._needs_broadcasting and v is not None:
return array_ops.reshape(v, b_shape)
return v
def reference_batch_normalization(self):
"""Return the reference batch, but batch normalized."""
with ops.name_scope(self._vs.name):
return nn.batch_normalization(self._reference_batch,
self._broadcast(self._ref_mean),
self._broadcast(self._ref_variance),
self._broadcast(self._beta),
self._broadcast(self._gamma),
self._epsilon)
def __call__(self, inputs):
"""Run virtual batch normalization on inputs.
Args:
inputs: Tensor input.
Returns:
A virtual batch normalized version of `inputs`.
Raises:
ValueError: If `inputs` shape isn't compatible with the reference batch.
"""
_validate_call_input([inputs, self._reference_batch], self._batch_axis)
with ops.name_scope(self._vs.name, values=[inputs, self._reference_batch]):
# Calculate the statistics on the current input on a per-example basis.
vb_mean, vb_mean_sq = self._virtual_statistics(
inputs, self._example_reduction_axes)
vb_variance = vb_mean_sq - math_ops.square(vb_mean)
# The exact broadcast shape of the input statistic Tensors depends on the
# current batch, not the reference batch. The parameter broadcast shape
# is independent of the shape of the input statistic Tensor dimensions.
b_shape = self._broadcast_shape[:] # deep copy
b_shape[self._batch_axis] = _static_or_dynamic_batch_size(
inputs, self._batch_axis)
return nn.batch_normalization(
inputs,
self._broadcast(vb_mean, b_shape),
self._broadcast(vb_variance, b_shape),
self._broadcast(self._beta, self._broadcast_shape),
self._broadcast(self._gamma, self._broadcast_shape),
self._epsilon)
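
# A minimal usage sketch for the class above; the shapes and names here are
# illustrative assumptions, not part of this module:
#
#   reference_batch = array_ops.placeholder(dtypes.float32, [64, 32, 32, 3])
#   minibatch = array_ops.placeholder(dtypes.float32, [16, 32, 32, 3])
#   vbn = VBN(reference_batch, axis=-1)
#   ref_normalized = vbn.reference_batch_normalization()
#   normalized = vbn(minibatch)  # mixes per-example and reference statistics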
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.utils.http import urlencode
from paypal.pro.forms import PaymentForm, ConfirmForm
from paypal.pro.models import PayPalNVP
from paypal.pro.helpers import PayPalWPP, TEST
from paypal.pro.signals import payment_was_successful, payment_was_flagged
# PayPal Edit IPN URL:
# https://www.sandbox.paypal.com/us/cgi-bin/webscr?cmd=_profile-ipn-notify
EXPRESS_ENDPOINT = "https://www.paypal.com/webscr?cmd=_express-checkout&%s"
SANDBOX_EXPRESS_ENDPOINT = "https://www.sandbox.paypal.com/webscr?cmd=_express-checkout&%s"
class PayPalPro(object):
"""
This class-based view takes care of PayPal WebsitePaymentsPro (WPP).
    PayPalPro has two separate flows - DirectPayment and ExpressPayFlow. In
    DirectPayment the user buys on your site. In ExpressPayFlow the user is
    directed to PayPal to confirm their purchase. PayPalPro implements both
    flows. To use it, create an instance with the following parameters:
item: a dictionary that holds information about the item being purchased.
For single item purchase (pay once):
Required Keys:
* amt: Float amount of the item.
Optional Keys:
* custom: You can set this to help you identify a transaction.
* invnum: Unique ID that identifies this transaction.
For recurring billing:
Required Keys:
* amt: Float amount for each billing cycle.
* billingperiod: String unit of measure for the billing cycle (Day|Week|SemiMonth|Month|Year)
* billingfrequency: Integer number of periods that make up a cycle.
* profilestartdate: The date to begin billing. "2008-08-05T17:00:00Z" UTC/GMT
* desc: Description of what you're billing for.
Optional Keys:
* trialbillingperiod: String unit of measure for trial cycle (Day|Week|SemiMonth|Month|Year)
* trialbillingfrequency: Integer # of periods in a cycle.
* trialamt: Float amount to bill for the trial period.
* trialtotalbillingcycles: Integer # of cycles for the trial payment period.
* failedinitamtaction: set to continue on failure (ContinueOnFailure / CancelOnFailure)
* maxfailedpayments: number of payments before profile is suspended.
* autobilloutamt: automatically bill outstanding amount.
* subscribername: Full name of the person who paid.
* profilereference: Unique reference or invoice number.
* taxamt: How much tax.
* initamt: Initial non-recurring payment due upon creation.
* currencycode: defaults to USD
* + a bunch of shipping fields
payment_form_cls: form class that will be used to display the payment form.
It should inherit from `paypal.pro.forms.PaymentForm` if you're adding more.
    payment_template: template used to ask the user for payment details. To comply with
      PayPal regulations it must include a link to PayPal Express Checkout.
confirm_form_cls: form class that will be used to display the confirmation form.
It should inherit from `paypal.pro.forms.ConfirmForm`. It is only used in the Express flow.
    success_url / fail_url: URLs the user is redirected to when the payment completes or fails.
"""
# ERRORS = should move errors into dict.
def __init__(self, item=None,
payment_form_cls=PaymentForm,
payment_template="pro/payment.html",
confirm_form_cls=ConfirmForm,
confirm_template="pro/confirm.html",
success_url="?success", fail_url=None, context=None):
        self.item = item
        self.is_recurring = False
        if item and 'billingperiod' in item:
            self.is_recurring = True
self.payment_form_cls = payment_form_cls
self.payment_template = payment_template
self.confirm_form_cls = confirm_form_cls
self.confirm_template = confirm_template
self.success_url = success_url
self.fail_url = fail_url
self.context = context or {}
def __call__(self, request):
"""Return the appropriate response for the state of the transaction."""
self.request = request
if request.method == "GET":
if self.should_redirect_to_express():
return self.redirect_to_express()
elif self.should_render_confirm_form():
return self.render_confirm_form()
elif self.should_render_payment_form():
return self.render_payment_form()
else:
if self.should_validate_confirm_form():
return self.validate_confirm_form()
elif self.should_validate_payment_form():
return self.validate_payment_form()
        # If nothing was returned, default to rendering the payment form.
return self.render_payment_form()
def should_redirect_to_express(self):
return 'express' in self.request.GET
def should_render_confirm_form(self):
return 'token' in self.request.GET and 'PayerID' in self.request.GET
def should_render_payment_form(self):
return True
def should_validate_confirm_form(self):
return 'token' in self.request.POST and 'PayerID' in self.request.POST
def should_validate_payment_form(self):
return True
def render_payment_form(self):
"""Display the DirectPayment for entering payment information."""
self.context['form'] = self.payment_form_cls()
return render_to_response(self.payment_template, self.context, RequestContext(self.request))
def validate_payment_form(self):
"""Try to validate and then process the DirectPayment form."""
form = self.payment_form_cls(self.request.POST)
if form.is_valid():
success = form.process(self.request, self.item)
if success:
payment_was_successful.send(sender=self.item)
return HttpResponseRedirect(self.success_url)
else:
self.context['errors'] = "There was an error processing your payment. Check your information and try again."
self.context['form'] = form
self.context.setdefault("errors", "Please correct the errors below and try again.")
return render_to_response(self.payment_template, self.context, RequestContext(self.request))
def get_endpoint(self):
if TEST:
return SANDBOX_EXPRESS_ENDPOINT
else:
return EXPRESS_ENDPOINT
def redirect_to_express(self):
"""
First step of ExpressCheckout. Redirect the request to PayPal using the
data returned from setExpressCheckout.
"""
wpp = PayPalWPP(self.request)
nvp_obj = wpp.setExpressCheckout(self.item)
if not nvp_obj.flag:
pp_params = dict(token=nvp_obj.token,
AMT=self.item['amt'],
RETURNURL=self.item['returnurl'],
CANCELURL=self.item['cancelurl'])
pp_url = self.get_endpoint() % urlencode(pp_params)
return HttpResponseRedirect(pp_url)
else:
self.context = {'errors': 'There was a problem contacting PayPal. Please try again later.'}
return self.render_payment_form()
def render_confirm_form(self):
"""
Second step of ExpressCheckout. Display an order confirmation form which
contains hidden fields with the token / PayerID from PayPal.
"""
initial = dict(token=self.request.GET['token'], PayerID=self.request.GET['PayerID'])
self.context['form'] = self.confirm_form_cls(initial=initial)
return render_to_response(self.confirm_template, self.context, RequestContext(self.request))
def validate_confirm_form(self):
"""
        Third and final step of ExpressCheckout. The user has pressed the confirmation
        button and we can send the final confirmation to PayPal using the data from the POSTed form.
"""
wpp = PayPalWPP(self.request)
pp_data = dict(token=self.request.POST['token'], payerid=self.request.POST['PayerID'])
self.item.update(pp_data)
if self.is_recurring:
success = wpp.createRecurringPaymentsProfile(self.item)
else:
success = wpp.doExpressCheckoutPayment(self.item)
if success:
payment_was_successful.send(sender=self.item)
return HttpResponseRedirect(self.success_url)
else:
self.context['errors'] = "There was a problem processing the payment. Check your information and try again."
return self.render_payment_form()
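
# A minimal wiring sketch for a single-item sale (the URL values and view
# name are illustrative; "returnurl"/"cancelurl" are required by the Express
# flow, see redirect_to_express above):
#
#   def buy_my_item(request):
#       item = {"amt": "10.00",
#               "invnum": "1234",
#               "returnurl": "http://example.com/return/",
#               "cancelurl": "http://example.com/cancel/"}
#       ppp = PayPalPro(item=item,
#                       payment_template="pro/payment.html",
#                       confirm_template="pro/confirm.html",
#                       success_url="/success/")
#       return ppp(request)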
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###
### Author: Chris Iatrou (ichrispa@core-vector.net)
### Version: rev 13
###
### This program was created for educational purposes and has been
### contributed to the open62541 project by the author. All licensing
### terms for this source are inherited from the terms and conditions
### specified by the open62541 project (see the project's readme
### file for more information on the LGPL terms and restrictions).
###
### This program is not meant to be used in a production environment. The
### author is not liable for any complications arising due to the use of
### this program.
###
from __future__ import print_function
import sys
from time import struct_time, strftime, strptime, mktime
from struct import pack as structpack
import logging
from ua_builtin_types import *
from ua_node_types import *
from ua_constants import *
logger = logging.getLogger(__name__)
def getNextElementNode(xmlvalue):
  if xmlvalue is None:
    return None
  xmlvalue = xmlvalue.nextSibling
  while xmlvalue is not None and xmlvalue.nodeType != xmlvalue.ELEMENT_NODE:
    xmlvalue = xmlvalue.nextSibling
  return xmlvalue
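
# Typical driver workflow for the namespace class below (a sketch; the
# filename is illustrative and error handling is omitted):
#
#   ns = opcua_namespace("namespace0")
#   ns.parseXML("Opc.Ua.NodeSet2.xml")  # instantiate nodes from the NodeSet
#   ns.linkOpenPointers()               # resolve symbolic NodeId targets
#   ns.sanitize()                       # flag nodes that fail sanity checks
#   ns.buildEncodingRules()             # derive DataType encodings
#   ns.allocateVariables()
#   ns.printDot("namespace.dot")        # optional graphviz dump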
###
### Namespace Organizer
###
class opcua_namespace():
""" Class holding and managing a set of OPCUA nodes.
This class handles parsing XML description of namespaces, instantiating
nodes, linking references, graphing the namespace and compiling a binary
representation.
Note that nodes assigned to this class are not restricted to having a
single namespace ID. This class represents the entire physical address
space of the binary representation and all nodes that are to be included
in that segment of memory.
"""
nodes = []
nodeids = {}
aliases = {}
__linkLater__ = []
__binaryIndirectPointers__ = []
name = ""
knownNodeTypes = ""
  namespaceIdentifiers = {} # dict of 'int':'string' giving each namespace an array-mappable name
def __init__(self, name):
self.nodes = []
    self.knownNodeTypes = ['variable', 'object', 'method', 'referencetype',
                           'objecttype', 'variabletype', 'methodtype',
                           'datatype', 'aliases']
self.name = name
self.nodeids = {}
self.aliases = {}
self.namespaceIdentifiers = {}
self.__binaryIndirectPointers__ = []
def addNamespace(self, numericId, stringURL):
self.namespaceIdentifiers[numericId] = stringURL
def linkLater(self, pointer):
""" Called by nodes or references who have parsed an XML reference to a
node represented by a string.
No return value
XML String representations of references have the form 'i=xy' or
'ns=1;s="This unique Node"'. Since during the parsing of this attribute
only a subset of nodes are known/parsed, this reference string cannot be
linked when encountered.
References register themselves with the namespace to have their target
attribute (string) parsed by linkOpenPointers() when all nodes are
        created, so that the target can be dereferenced and point to an actual node.
"""
self.__linkLater__.append(pointer)
def getUnlinkedPointers(self):
""" Return the list of references registered for linking during the next call
of linkOpenPointers()
"""
return self.__linkLater__
def unlinkedItemCount(self):
""" Returns the number of unlinked references that will be processed during
the next call of linkOpenPointers()
"""
return len(self.__linkLater__)
def buildAliasList(self, xmlelement):
""" Parses the <Alias> XML Element present in must XML NodeSet definitions.
No return value
        Contents of the Alias element are stored in a dictionary for further
dereferencing during pointer linkage (see linkOpenPointer()).
"""
if not xmlelement.tagName == "Aliases":
logger.error("XMLElement passed is not an Aliaslist")
return
for al in xmlelement.childNodes:
if al.nodeType == al.ELEMENT_NODE:
if al.hasAttribute("Alias"):
aliasst = al.getAttribute("Alias")
if sys.version_info[0] < 3:
aliasnd = unicode(al.firstChild.data)
else:
aliasnd = al.firstChild.data
if not aliasst in self.aliases:
self.aliases[aliasst] = aliasnd
logger.debug("Added new alias \"" + str(aliasst) + "\" == \"" + str(aliasnd) + "\"")
else:
if self.aliases[aliasst] != aliasnd:
logger.error("Alias definitions for " + aliasst + " differ. Have " + self.aliases[aliasst] + " but XML defines " + aliasnd + ". Keeping current definition.")
def getNodeByBrowseName(self, idstring):
""" Returns the first node in the nodelist whose browseName matches idstring.
"""
matches = []
for n in self.nodes:
if idstring==str(n.browseName()):
matches.append(n)
if len(matches) > 1:
logger.error("Found multiple nodes with same ID!?")
if len(matches) == 0:
return None
else:
return matches[0]
def getNodeByIDString(self, idstring):
""" Returns the first node in the nodelist whose id string representation
matches idstring.
"""
matches = []
for n in self.nodes:
if idstring==str(n.id()):
matches.append(n)
if len(matches) > 1:
logger.error("Found multiple nodes with same ID!?")
if len(matches) == 0:
return None
else:
return matches[0]
def createNode(self, ndtype, xmlelement):
""" createNode is instantiates a node described by xmlelement, its type being
defined by the string ndtype.
No return value
If the xmlelement is an <Alias>, the contents will be parsed and stored
for later dereferencing during pointer linking (see linkOpenPointers).
Recognized types are:
* UAVariable
* UAObject
* UAMethod
* UAView
* UAVariableType
* UAObjectType
* UAMethodType
* UAReferenceType
* UADataType
For every recognized type, an appropriate node class is added to the node
list of the namespace. The NodeId of the given node is created and parsing
of the node attributes and elements is delegated to the parseXML() and
parseXMLSubType() functions of the instantiated class.
        If the NodeID attribute is non-unique in the node list, the existing
        node is replaced by the new one and a message is logged.
"""
if not isinstance(xmlelement, dom.Element):
logger.error( "Error: Can not create node from invalid XMLElement")
return
# An ID is mandatory for everything but aliases!
id = None
for idname in ['NodeId', 'NodeID', 'nodeid']:
if xmlelement.hasAttribute(idname):
id = xmlelement.getAttribute(idname)
if ndtype == 'aliases':
self.buildAliasList(xmlelement)
return
elif id == None:
logger.info( "Error: XMLElement has no id, node will not be created!")
return
else:
id = opcua_node_id_t(id)
if str(id) in self.nodeids:
# Normal behavior: Do not allow duplicates, first one wins
#logger.error( "XMLElement with duplicate ID " + str(id) + " found, node will not be created!")
#return
# Open62541 behavior for header generation: Replace the duplicate with the new node
logger.info( "XMLElement with duplicate ID " + str(id) + " found, node will be replaced!")
nd = self.getNodeByIDString(str(id))
self.nodes.remove(nd)
self.nodeids.pop(str(nd.id()))
node = None
if (ndtype == 'variable'):
node = opcua_node_variable_t(id, self)
elif (ndtype == 'object'):
node = opcua_node_object_t(id, self)
elif (ndtype == 'method'):
node = opcua_node_method_t(id, self)
elif (ndtype == 'objecttype'):
node = opcua_node_objectType_t(id, self)
elif (ndtype == 'variabletype'):
node = opcua_node_variableType_t(id, self)
elif (ndtype == 'methodtype'):
node = opcua_node_methodType_t(id, self)
elif (ndtype == 'datatype'):
node = opcua_node_dataType_t(id, self)
elif (ndtype == 'referencetype'):
node = opcua_node_referenceType_t(id, self)
else:
logger.error( "No node constructor for type " + ndtype)
if node != None:
node.parseXML(xmlelement)
self.nodes.append(node)
self.nodeids[str(node.id())] = node
def removeNodeById(self, nodeId):
nd = self.getNodeByIDString(nodeId)
if nd == None:
return False
logger.debug("Removing nodeId " + str(nodeId))
self.nodes.remove(nd)
if nd.getInverseReferences() != None:
for ref in nd.getInverseReferences():
        src = ref.target()
src.removeReferenceToNode(nd)
return True
def registerBinaryIndirectPointer(self, node):
""" Appends a node to the list of nodes that should be contained in the
        first 765 bytes (255 pointer slots of 3 bytes each) in the binary
representation (indirect referencing space).
This function is reserved for references and dataType pointers.
"""
if not node in self.__binaryIndirectPointers__:
self.__binaryIndirectPointers__.append(node)
return self.__binaryIndirectPointers__.index(node)
def getBinaryIndirectPointerIndex(self, node):
""" Returns the slot/index of a pointer in the indirect referencing space
(first 765 Bytes) of the binary representation.
"""
if not node in self.__binaryIndirectPointers__:
return -1
return self.__binaryIndirectPointers__.index(node)
def parseXML(self, xmldoc):
""" Reads an XML Namespace definition and instantiates node.
No return value
        parseXML opens the file xmldoc using xml.dom.minidom and searches for
the first UANodeSet Element. For every Element encountered, createNode
is called to instantiate a node of the appropriate type.
"""
typedict = {}
UANodeSet = dom.parse(xmldoc).getElementsByTagName("UANodeSet")
if len(UANodeSet) == 0:
logger.error( "Error: No NodeSets found")
return
if len(UANodeSet) != 1:
logger.error( "Error: Found more than 1 Nodeset in XML File")
UANodeSet = UANodeSet[0]
for nd in UANodeSet.childNodes:
if nd.nodeType != nd.ELEMENT_NODE:
continue
ndType = nd.tagName.lower()
if ndType[:2] == "ua":
ndType = ndType[2:]
elif not ndType in self.knownNodeTypes:
logger.warn("XML Element or NodeType " + ndType + " is unknown and will be ignored")
continue
if not ndType in typedict:
typedict[ndType] = 1
else:
typedict[ndType] = typedict[ndType] + 1
self.createNode(ndType, nd)
logger.debug("Currently " + str(len(self.nodes)) + " nodes in address space. Type distribution for this run was: " + str(typedict))
def linkOpenPointers(self):
""" Substitutes symbolic NodeIds in references for actual node instances.
No return value
References that have registered themselves with linkLater() to have
their symbolic NodeId targets ("ns=2;i=32") substituted for an actual
node will be iterated by this function. For each reference encountered
in the list of unlinked/open references, the target string will be
evaluated and searched for in the node list of this namespace. If found,
the target attribute of the reference will be substituted for the
found node.
If a reference fails to get linked, it will remain in the list of
unlinked references. The individual items in this list can be
retrieved using getUnlinkedPointers().
"""
linked = []
logger.debug( str(self.unlinkedItemCount()) + " pointers need to get linked.")
for l in self.__linkLater__:
targetLinked = False
if not l.target() == None and not isinstance(l.target(), opcua_node_t):
if isinstance(l.target(),str) or isinstance(l.target(),unicode):
          # If it is not a node ID, it should be an alias. Try replacing it
          # with a proper node ID
if l.target() in self.aliases:
l.target(self.aliases[l.target()])
          # If the target is a node ID, try to look it up, hoping that no
          # node has been given more than one kind of id
if l.target()[:2] == "i=" or l.target()[:2] == "g=" or \
l.target()[:2] == "b=" or l.target()[:2] == "s=" or \
l.target()[:3] == "ns=" :
tgt = self.getNodeByIDString(str(l.target()))
if tgt == None:
logger.error("Failed to link pointer to target (node not found) " + l.target())
else:
l.target(tgt)
targetLinked = True
else:
logger.error("Failed to link pointer to target (target not Alias or Node) " + l.target())
else:
logger.error("Failed to link pointer to target (don't know dummy type + " + str(type(l.target())) + " +) " + str(l.target()))
else:
logger.error("Pointer has null target: " + str(l))
referenceLinked = False
if l.referenceType() is not None:
if l.referenceType() in self.aliases:
l.referenceType(self.aliases[l.referenceType()])
tgt = self.getNodeByIDString(str(l.referenceType()))
if tgt is None:
logger.error("Failed to link reference type to target (node not found) " + l.referenceType())
else:
l.referenceType(tgt)
referenceLinked = True
else:
referenceLinked = True
if referenceLinked and targetLinked:
linked.append(l)
# References marked as "not forward" must be inverted (removed from source node, assigned to target node and relinked)
logger.warn("Inverting reference direction for all references with isForward==False attribute (is this correct!?)")
for n in self.nodes:
for r in n.getReferences():
if not r.isForward():
tgt = r.target()
if isinstance(tgt, opcua_node_t):
nref = opcua_referencePointer_t(n, parentNode=tgt)
nref.referenceType(r.referenceType())
tgt.addReference(nref)
# Create inverse references for all nodes
logger.debug("Updating all referencedBy fields in nodes for inverse lookups.")
for n in self.nodes:
n.updateInverseReferences()
for l in linked:
self.__linkLater__.remove(l)
if len(self.__linkLater__) != 0:
logger.warn(str(len(self.__linkLater__)) + " references could not be linked.")
def sanitize(self):
remove = []
logger.debug("Sanitizing nodes and references...")
for n in self.nodes:
if not n.sanitize():
remove.append(n)
if len(remove) != 0:
logger.warn(str(len(remove)) + " nodes will be removed because they failed sanitation.")
# FIXME: Some variable ns=0 nodes fail because they don't have DataType fields...
# How should this be handled!?
logger.warn("Not actually removing nodes... it's unclear if this is valid or not")
def getRoot(self):
""" Returns the first node instance with the browseName "Root".
"""
return self.getNodeByBrowseName("Root")
def buildEncodingRules(self):
""" Calls buildEncoding() for all DataType nodes (opcua_node_dataType_t).
No return value
"""
stat = {True: 0, False: 0}
for n in self.nodes:
if isinstance(n, opcua_node_dataType_t):
n.buildEncoding()
stat[n.isEncodable()] = stat[n.isEncodable()] + 1
logger.debug("Type definitions built/passed: " + str(stat))
def allocateVariables(self):
for n in self.nodes:
if isinstance(n, opcua_node_variable_t):
n.allocateValue()
def printDot(self, filename="namespace.dot"):
""" Outputs a graphiz/dot description of all nodes in the namespace.
Output will written into filename to be parsed by dot/neato...
Note that for namespaces with more then 20 nodes the reference structure
will lead to a mostly illegible and huge graph. Use printDotGraphWalk()
for plotting specific portions of a large namespace.
"""
file=open(filename, 'w+')
file.write("digraph ns {\n")
for n in self.nodes:
file.write(n.printDot())
file.write("}\n")
file.close()
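# Usage sketch: write the graph, then render it with graphviz from a shell:
#   ns.printDot("namespace.dot")
#   $ dot -Tpng namespace.dot -o namespace.png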
def getSubTypesOf(self, tdNodes = None, currentNode = None, hasSubtypeRefNode = None):
# If this is a toplevel call, collect the following information as defaults
if tdNodes is None:
tdNodes = []
if currentNode is None:
currentNode = self.getNodeByBrowseName("HasTypeDefinition")
tdNodes.append(currentNode)
if len(tdNodes) < 1:
return []
if hasSubtypeRefNode is None:
hasSubtypeRefNode = self.getNodeByBrowseName("HasSubtype")
if hasSubtypeRefNode is None:
return tdNodes
# collect all subtypes of this node
for ref in currentNode.getReferences():
if ref.isForward() and ref.referenceType().id() == hasSubtypeRefNode.id():
tdNodes.append(ref.target())
self.getSubTypesOf(tdNodes=tdNodes, currentNode=ref.target(), hasSubtypeRefNode=hasSubtypeRefNode)
return tdNodes
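# Usage sketch (assumes a parsed and linked namespace held in "ns"): with no
# arguments this returns the "HasTypeDefinition" node followed by all of its
# transitive subtypes:
#   typeDefs = ns.getSubTypesOf()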
def printDotGraphWalk(self, depth=1, filename="out.dot", rootNode=None, followInverse = False, excludeNodeIds=[]):
""" Outputs a graphiz/dot description the nodes centered around rootNode.
References beginning from rootNode will be followed for depth steps. If
"followInverse = True" is passed, then inverse (not Forward) references
will also be followed.
Nodes can be excluded from the graph by passing a list of NodeIds as
string representation using excludeNodeIds (ex ["i=53", "ns=2;i=453"]).
Output is written into filename to be parsed by dot/neato/srfp...
"""
remaining = depth
processed = []
if rootNode is None or \
not isinstance(rootNode, opcua_node_t) or \
rootNode not in self.nodes:
root = self.getRoot()
else:
root = rootNode
file=open(filename, 'w+')
if root is None:
return
file.write("digraph ns {\n")
file.write(root.printDot())
refs=[]
if followInverse:
refs = root.getReferences() # + root.getInverseReferences()
else:
for ref in root.getReferences():
if ref.isForward():
refs.append(ref)
while remaining > 0:
tmp = []
for ref in refs:
if isinstance(ref.target(), opcua_node_t):
tgt = ref.target()
if not str(tgt.id()) in excludeNodeIds:
if not tgt in processed:
file.write(tgt.printDot())
processed.append(tgt)
if not ref.isForward() and followInverse:
tmp = tmp + tgt.getReferences() # + tgt.getInverseReferences()
elif ref.isForward():
tmp = tmp + tgt.getReferences()
refs = tmp
remaining = remaining - 1
file.write("}\n")
file.close()
def __reorder_getMinWeightNode__(self, nmatrix):
rcind = -1
rind = -1
minweight = -1
minweightnd = None
for row in nmatrix:
rcind += 1
if row[0] is None:
continue
w = sum(row[1:])
if minweight < 0:
rind = rcind
minweight = w
minweightnd = row[0]
elif w < minweight:
rind = rcind
minweight = w
minweightnd = row[0]
return (rind, minweightnd, minweight)
def reorderNodesMinDependencies(self):
# Create a matrix representation of all node dependencies
nmatrix = []
for n in range(0,len(self.nodes)):
nmatrix.append([None] + [0]*len(self.nodes))
typeRefs = []
tn = self.getNodeByBrowseName("HasTypeDefinition")
if tn is not None:
typeRefs.append(tn)
typeRefs = typeRefs + self.getSubTypesOf(currentNode=tn)
subTypeRefs = []
tn = self.getNodeByBrowseName("HasSubtype")
if tn is not None:
subTypeRefs.append(tn)
subTypeRefs = subTypeRefs + self.getSubTypesOf(currentNode=tn)
logger.debug("Building connectivity matrix for node order optimization.")
# Set column 0 to contain the node
for node in self.nodes:
nind = self.nodes.index(node)
nmatrix[nind][0] = node
# Determine the dependencies of all nodes
for node in self.nodes:
nind = self.nodes.index(node)
#print "Examining node " + str(nind) + " " + str(node)
for ref in node.getReferences():
if isinstance(ref.target(), opcua_node_t):
tind = self.nodes.index(ref.target())
# Typedefinition of this node has precedence over this node
if ref.referenceType() in typeRefs and ref.isForward():
nmatrix[nind][tind+1] += 1
# isSubTypeOf/typeDefinition of this node has precedence over this node
elif ref.referenceType() in subTypeRefs and not ref.isForward():
nmatrix[nind][tind+1] += 1
# Else the target depends on us
elif ref.isForward():
nmatrix[tind][nind+1] += 1
logger.debug("Using Djikstra topological sorting to determine printing order.")
reorder = []
while len(reorder) < len(self.nodes):
(nind, node, w) = self.__reorder_getMinWeightNode__(nmatrix)
#print str(100*float(len(reorder))/len(self.nodes)) + "% " + str(w) + " " + str(node) + " " + str(node.browseName())
reorder.append(node)
for ref in node.getReferences():
if isinstance(ref.target(), opcua_node_t):
tind = self.nodes.index(ref.target())
if ref.referenceType() in typeRefs and ref.isForward():
nmatrix[nind][tind+1] -= 1
elif ref.referenceType() in subTypeRefs and not ref.isForward():
nmatrix[nind][tind+1] -= 1
elif ref.isForward():
nmatrix[tind][nind+1] -= 1
nmatrix[nind][0] = None
self.nodes = reorder
logger.debug("Nodes reordered.")
return
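# A self-contained toy sketch of the same greedy idea used by
# reorderNodesMinDependencies(): repeatedly emit the node with the fewest
# unresolved dependencies, then drop it from everyone else's dependency set.
# The helper below is illustrative only and does not touch opcua_namespace.
def _demo_min_dependency_order(deps):
  """ deps maps name -> set of names it depends on; returns a print order. """
  pending = dict((n, set(d)) for n, d in deps.items())
  order = []
  while pending:
    # pick the node with the fewest unresolved dependencies (ideally zero)
    node = min(pending, key=lambda n: len(pending[n]))
    order.append(node)
    del pending[node]
    for remaining in pending.values():
      remaining.discard(node)
  return order
# _demo_min_dependency_order({"A": set(), "B": set(["A"]), "C": set(["A", "B"])})
# yields ['A', 'B', 'C'].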
###
### Testing
###
class testing:
def __init__(self):
self.namespace = opcua_namespace("testing")
logger.debug("Phase 1: Reading XML file nodessets")
self.namespace.parseXML("Opc.Ua.NodeSet2.xml")
#self.namespace.parseXML("Opc.Ua.NodeSet2.Part4.xml")
#self.namespace.parseXML("Opc.Ua.NodeSet2.Part5.xml")
#self.namespace.parseXML("Opc.Ua.SimulationNodeSet2.xml")
logger.debug("Phase 2: Linking address space references and datatypes")
self.namespace.linkOpenPointers()
self.namespace.sanitize()
logger.debug("Phase 3: Comprehending DataType encoding rules")
self.namespace.buildEncodingRules()
logger.debug("Phase 4: Allocating variable value data")
self.namespace.allocateVariables()
bin = self.namespace.buildBinary()
f = open("binary.base64","w+")
f.write(bin.encode("base64"))
f.close()
allnodes = self.namespace.nodes
ns = [self.namespace.getRoot()]
i = 0
#print "Starting depth search on " + str(len(allnodes)) + " nodes starting with from " + str(ns)
while len(ns) < len(allnodes):
i = i + 1
tmp = []
print("Iteration: " + str(i))
for n in ns:
tmp.append(n)
for r in n.getReferences():
if (not r.target() in tmp):
tmp.append(r.target())
print("...tmp, " + str(len(tmp)) + " nodes discovered")
ns = []
for n in tmp:
ns.append(n)
print("...done, " + str(len(ns)) + " nodes discovered")
logger.debug("Phase 5: Printing pretty graph")
self.namespace.printDotGraphWalk(depth=1, rootNode=self.namespace.getNodeByIDString("i=84"), followInverse=False, excludeNodeIds=["i=29", "i=22", "i=25"])
#self.namespace.printDot()
|
|
# This file is part of the ISIS IBEX application.
# Copyright (C) 2012-2016 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
from time import sleep, time
from typing import List
from BlockServer.epics.procserv_utils import ProcServWrapper
from BlockServer.alarm.load_alarm_config import AlarmConfigLoader
from server_common.utilities import print_and_log
from server_common.constants import IOCS_NOT_TO_STOP
class IocControl:
"""A class for starting, stopping and restarting IOCs"""
def __init__(self, prefix):
"""Constructor.
Args:
prefix (string): The PV prefix for the instrument
"""
self._proc = ProcServWrapper(prefix)
def start_ioc(self, ioc: str, restart_alarm_server: bool = True):
"""Start an IOC.
Args:
ioc (string): The name of the IOC
restart_alarm_server (bool): whether to also restart the alarm server
"""
try:
self._proc.start_ioc(ioc)
if ioc != "ALARM" and restart_alarm_server:
AlarmConfigLoader.restart_alarm_server(self)
except Exception as err:
print_and_log(f"Could not start IOC {ioc}: {err}", "MAJOR")
def restart_ioc(self, ioc: str, force: bool = False, restart_alarm_server: bool = True):
"""Restart an IOC.
Note: restarting an IOC automatically sets the IOC to auto-restart, so it is necessary to reapply the
previous auto-restart setting.
Args:
ioc (string): The name of the IOC
force (bool): Force it to restart even if it is an IOC not to stop
restart_alarm_server (bool): whether to also restart the alarm server
"""
# Check it is okay to stop it
if not force and ioc.startswith(IOCS_NOT_TO_STOP):
return
try:
self._proc.restart_ioc(ioc)
if ioc != "ALARM" and restart_alarm_server:
AlarmConfigLoader.restart_alarm_server(self)
except Exception as err:
print_and_log(f"Could not restart IOC {ioc}: {err}", "MAJOR")
def stop_ioc(self, ioc: str, force: bool = False):
"""Stop an IOC.
Args:
ioc (string): The name of the IOC
force (bool): Force it to stop even if it is an IOC not to stop
"""
# Check it is okay to stop it
if not force and ioc.startswith(IOCS_NOT_TO_STOP):
return
try:
self._proc.stop_ioc(ioc)
if ioc != "ALARM":
AlarmConfigLoader.restart_alarm_server(self)
except Exception as err:
print_and_log(f"Could not stop IOC {ioc}: {err}", "MAJOR")
def get_ioc_status(self, ioc: str):
"""Get the running status of an IOC.
Args:
ioc (string): The name of the IOC
Returns:
string : The status of the IOC (RUNNING or SHUTDOWN)
"""
return self._proc.get_ioc_status(ioc)
def ioc_restart_pending(self, ioc: str):
"""Tests if the IOC has a pending restart
Args:
ioc (string): The name of the IOC
Returns:
bool : Whether a restart is pending
"""
return self._proc.ioc_restart_pending(ioc)
def start_iocs(self, iocs: List[str]):
""" Start a number of IOCs.
Args:
iocs (list): The IOCs to start
"""
for ioc in iocs:
self.start_ioc(ioc)
def restart_iocs(self, iocs: List[str], reapply_auto: bool = False):
""" Restart a number of IOCs.
Args:
iocs (list): The IOCs to restart
reapply_auto (bool): Whether to reapply auto restart settings automatically
"""
auto = dict()
for ioc in iocs:
auto[ioc] = self.get_autorestart(ioc)
self.restart_ioc(ioc)
# Reapply auto-restart settings
if reapply_auto:
for ioc in iocs:
self.waitfor_running(ioc)
self.set_autorestart(ioc, auto[ioc])
def stop_iocs(self, iocs: List[str]):
""" Stop a number of IOCs.
Args:
iocs (list): The IOCs to stop
"""
for ioc in iocs:
self.stop_ioc(ioc)
def ioc_exists(self, ioc: str) -> bool:
"""Checks an IOC exists.
Args:
ioc (string): The name of the IOC
Returns:
bool : Whether the IOC exists
"""
try:
self.get_ioc_status(ioc)
return True
except Exception:
return False
def set_autorestart(self, ioc: str, enable: bool):
"""Used to set the auto-restart property.
Args:
ioc (string): The name of the IOC
enable (bool): Whether to enable auto-restart
"""
try:
if self.get_ioc_status(ioc) == "RUNNING":
# Get current auto-restart status
curr = self._proc.get_autorestart(ioc)
if curr != enable:
# If different to requested then change it
self._proc.toggle_autorestart(ioc)
return
print_and_log(f"Auto-restart for IOC {ioc} unchanged as value has not changed")
else:
print_and_log(f"Auto-restart for IOC {ioc} unchanged as IOC is not running")
except Exception as err:
print_and_log(f"Could not set auto-restart IOC {ioc}: {err}", "MAJOR")
def get_autorestart(self, ioc: str) -> bool:
"""Gets the current auto-restart setting of the specified IOC.
Args:
ioc (string): The name of the IOC
Returns:
bool : Whether auto-restart is enabled
"""
try:
return self._proc.get_autorestart(ioc)
except Exception as err:
print_and_log(f"Could not get auto-restart setting for IOC {ioc}: {err}", "MAJOR")
def waitfor_running(self, ioc: str, timeout: int = 5):
"""Waits for the IOC to start running.
Args:
ioc (string): The name of the IOC
timeout(int, optional): Maximum time to wait before returning
"""
if self.ioc_exists(ioc):
start = time()
while self.ioc_restart_pending(ioc) or self.get_ioc_status(ioc) != "RUNNING":
sleep(0.5)
if time() - start >= timeout:
print_and_log(f"Gave up waiting for IOC {ioc} to be running", "MAJOR")
return
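# A minimal usage sketch (hypothetical PV prefix and IOC name; adjust for a
# real instrument): start an IOC, wait until it reports RUNNING, then restart
# it while reapplying its auto-restart setting.
if __name__ == "__main__":
    ioc_control = IocControl("TE:NDW0000:")  # illustrative prefix only
    ioc_control.start_ioc("SIMPLE")          # illustrative IOC name
    ioc_control.waitfor_running("SIMPLE")
    ioc_control.restart_iocs(["SIMPLE"], reapply_auto=True)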
|
|
__name__ = "rfeed"
__version__ = (1, 0, 0)
__author__ = "Santiago L. Valdarrama - https://blog.svpino.com"
_generator = __name__ + " v" + ".".join(map(str, __version__))
_docs = "https://github.com/svpino/rfeed/blob/master/README.md"
import itertools
import sys
from xml.sax import saxutils
if sys.version_info[0] == 3:
basestring = str
from io import StringIO
else:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class Serializable:
""" Represents an object that can be serialized as part of the feed.
"""
def __init__(self):
""" Initializes the extension. In your implementation, make sure you always call this base class method
before adding your own code.
"""
self.handler = None
def publish(self, handler):
""" This method produces the XML representation of the object to be included in the feed. In your implementation,
make sure you always call this base class method before adding your own code.
Keyword arguments:
handler -- An xml.sax.saxutils.XMLGenerator instance that you can use to create the XML representation of the object.
"""
self.handler = handler
def _date(self, date):
""" Converts a datetime into an RFC 2822 formatted date.
Returns None if None is provided as an argument.
Keyword arguments:
date -- A datetime object in GMT format.
"""
# Alright, I admit it: this method looks hideous. The thing is that RFC 822 requires a specific format for dates, and strftime is
# locale dependent, so I can't use it to create the final date unless I force change the system locale.
#
# I looked into that (locale.setlocale, then restore), but I got the feeling that I was doing things that I was going to regret later.
# Maybe it's just me, but it doesn't feel right to force change the locale just to create a simple date.
#
# So, not having a better solution, I went ahead and used the original method from the PyRSS2Gen library.
if date is None:
return None
return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][date.weekday()], date.day,
["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][date.month-1], date.year, date.hour, date.minute, date.second)
def _write_element(self, name, value, attributes = {}):
def parse_cdata(string):
cdata_begin = string.find("<![CDATA[")
if cdata_begin != -1:
cdata_end = string[cdata_begin:].find("]]>")
if cdata_end != -1:
return {"begin": cdata_begin,
"end": cdata_begin + cdata_end + 3}
else:
return None
else:
return None
if value is not None or attributes != {}:
self.handler.startElement(name, attributes)
if value is not None:
str_value = value if isinstance(value, basestring) else str(value)
while len(str_value):
cdata_section = parse_cdata(str_value)
if cdata_section is not None:
self.handler.characters(str_value[:cdata_section["begin"]])
self.handler.ignorableWhitespace(
str_value[cdata_section["begin"]:cdata_section["end"]])
str_value = str_value[cdata_section["end"]:]
else:
self.handler.characters(str_value)
break
self.handler.endElement(name)
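# A small illustrative helper (not part of the public rfeed API): it shows
# that _write_element() escapes ordinary text through characters(), while
# CDATA sections pass through ignorableWhitespace() unescaped.
def _demo_write_element():
    output = StringIO()
    serializable = Serializable()
    serializable.publish(saxutils.XMLGenerator(output, 'UTF-8'))
    serializable._write_element("description", "a < b and <![CDATA[<b>bold</b>]]> after")
    # -> '<description>a &lt; b and <![CDATA[<b>bold</b>]]> after</description>'
    return output.getvalue()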
class Extension(Serializable):
def get_namespace(self):
""" Returns the namespace (if any) for this extension. The namespace information is added as an attribute in
the <rss> element of the feed. The return value should be a dictionary.
For example, here is the code for this method on the iTunes extension: return {"xmlns:itunes": "http://www.itunes.com/dtds/podcast-1.0.dtd"}
"""
pass
class Host(Serializable):
""" Represents an object that can be host to other extensions.
"""
def __init__(self, extensions = None):
Serializable.__init__(self)
self.extensions = [] if extensions is None else extensions
def add_extension(self, extension):
""" You can use this method to add new extensions to the feed.
To create new extensions, make sure you inherit from the Serializable or Extension class.
"""
if not isinstance(extension, Serializable):
raise TypeError("The provided extension should be a subclass of the Serializable class")
self.extensions.append(extension)
class Category(Serializable):
""" A Category object specifies one or more categories that the channel or item belongs to.
More information at http://cyber.law.harvard.edu/rss/rss.html#ltcategorygtSubelementOfLtitemgt
"""
def __init__(self, category, domain = None):
""" Keyword arguments:
category -- The name of the category
domain -- Optional. A string that identifies a categorization taxonomy.
"""
Serializable.__init__(self)
if category is None: raise ElementRequiredError("category")
self.category = category
self.domain = domain
def publish(self, handler):
Serializable.publish(self, handler)
self._write_element("category", self.category, { "domain": self.domain } if self.domain is not None else {})
class Cloud(Serializable):
""" A Cloud object specifies a web service that supports the rssCloud interface which can be implemented in HTTP-POST, XML-RPC or SOAP 1.1.
More information at http://cyber.law.harvard.edu/rss/rss.html#ltcloudgtSubelementOfLtchannelgt
"""
def __init__(self, domain, port, path, registerProcedure, protocol):
""" Keyword arguments:
domain -- The domain name or IP address of the cloud.
port -- TCP port that the cloud is running on.
path -- The location of its responder.
registerProcedure -- The name of the procedure to call to request notification.
protocol -- Indication of which protocol is to be used.
"""
Serializable.__init__(self)
if domain is None: raise ElementRequiredError("domain")
if port is None: raise ElementRequiredError("port")
if path is None: raise ElementRequiredError("path")
if registerProcedure is None: raise ElementRequiredError("registerProcedure")
if protocol is None: raise ElementRequiredError("protocol")
self.domain = domain
self.port = port
self.path = path
self.registerProcedure = registerProcedure
self.protocol = protocol
def publish(self, handler):
Serializable.publish(self, handler)
self._write_element("cloud", None, { "domain": self.domain, "port": str(self.port), "path": self.path, "registerProcedure": self.registerProcedure, "protocol": self.protocol })
class Image(Serializable):
""" An Image object specifies a GIF, JPEG or PNG image that can be displayed with the channel.
More information at http://cyber.law.harvard.edu/rss/rss.html#ltimagegtSubelementOfLtchannelgt
"""
def __init__(self, url, title, link, width = None, height = None, description = None):
""" Keyword arguments:
url -- The URL of the image that represents the channel.
title -- Describes the image. It's used in the ALT attribute of the HTML <img> tag when the channel is rendered in HTML.
link -- The URL of the site. When the channel is rendered the image is a link to the site.
width -- Optional. The width of the image in pixels.
height -- Optional. The height of the image in pixels.
description -- Optional. Contains text that is included in the TITLE attribute of the link formed around the image in the HTML rendering.
"""
Serializable.__init__(self)
if url is None: raise ElementRequiredError("url")
if title is None: raise ElementRequiredError("title")
if link is None: raise ElementRequiredError("link")
self.url = url
self.title = title
self.link = link
self.width = width
self.height = height
self.description = description
def publish(self, handler):
Serializable.publish(self, handler)
self.handler.startElement("image", {})
self._write_element("url", self.url)
self._write_element("title", self.title)
self._write_element("link", self.link)
self._write_element("width", self.width)
self._write_element("height", self.height)
self._write_element("description", self.description)
self.handler.endElement("image")
class TextInput(Serializable):
""" A TextInput object specifies a text input box that can be displayed with the channel.
More information at http://cyber.law.harvard.edu/rss/rss.html#lttextinputgtSubelementOfLtchannelgt
"""
def __init__(self, title, description, name, link):
""" Keyword arguments:
title -- The label of the submit button in the text input area.
description -- Explains the text input area.
name -- The name of the text object in the text input area.
link -- The URL of the CGI script that processes text input requests.
"""
Serializable.__init__(self)
if title is None: raise ElementRequiredError("title")
if description is None: raise ElementRequiredError("description")
if name is None: raise ElementRequiredError("name")
if link is None: raise ElementRequiredError("link")
self.title = title
self.description = description
self.name = name
self.link = link
def publish(self, handler):
Serializable.publish(self, handler)
self.handler.startElement("textInput", {})
self._write_element("title", self.title)
self._write_element("description", self.description)
self._write_element("name", self.name)
self._write_element("link", self.link)
self.handler.endElement("textInput")
class SkipHours(Serializable):
""" A SkipHours object is a hint for aggregators telling them which hours they can skip.
More information at http://cyber.law.harvard.edu/rss/skipHoursDays.html#skiphours
"""
def __init__(self, hours):
""" Keyword arguments:
hours -- A list containing up to 24 values between 0 and 23, representing a time in GMT.
"""
Serializable.__init__(self)
if hours is None: raise ElementRequiredError("hours")
self.hours = hours
def publish(self, handler):
Serializable.publish(self, handler)
if self.hours:
self.handler.startElement("skipHours", {})
for hour in self.hours:
self._write_element("hour", hour)
self.handler.endElement("skipHours")
class SkipDays(Serializable):
""" A SkipDays object is a hint for aggregators telling them which days they can skip.
More information at http://cyber.law.harvard.edu/rss/skipHoursDays.html#skipdays
"""
def __init__(self, days):
""" Keyword arguments:
days -- A list containing up to 7 values. Possible values are Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or Sunday.
"""
Serializable.__init__(self)
if days is None: raise ElementRequiredError("days")
self.days = days
def publish(self, handler):
Serializable.publish(self, handler)
if self.days:
self.handler.startElement("skipDays", {})
for day in self.days:
self._write_element("day", day)
self.handler.endElement("skipDays")
class Enclosure(Serializable):
""" An Enclosure object describes a media object that is attached to the item.
More information at http://cyber.law.harvard.edu/rss/rss.html#ltenclosuregtSubelementOfLtitemgt
"""
def __init__(self, url, length, type):
""" Keyword arguments:
url -- Indicates where the enclosure is located.
length -- Specifies how big the enclosure is in bytes.
type -- Specifies the standard MIME type of the enclosure.
"""
Serializable.__init__(self)
if url is None: raise ElementRequiredError("url")
if length is None: raise ElementRequiredError("length")
if type is None: raise ElementRequiredError("type")
self.url = url
self.length = length
self.type = type
def publish(self, handler):
Serializable.publish(self, handler)
self._write_element("enclosure", None, { "url": self.url, "length": str(self.length), "type": self.type })
class Guid(Serializable):
""" A Guid object represents a string that uniquely identifies the item.
More information at http://cyber.law.harvard.edu/rss/rss.html#ltguidgtSubelementOfLtitemgt
"""
def __init__(self, guid, isPermaLink = True):
""" Keyword arguments:
guid -- This is a string that uniquely identifies the item. When present, an aggregator may choose to use this string to determine if an item is new.
isPermaLink -- Indicates whether the guid is a url that points to the item.
"""
Serializable.__init__(self)
if guid is None: raise ElementRequiredError("guid")
self.guid = guid
self.isPermaLink = True if isPermaLink is None else isPermaLink
def publish(self, handler):
Serializable.publish(self, handler)
self._write_element("guid", self.guid, { "isPermaLink": "true" if self.isPermaLink else "false" })
class Source(Serializable):
""" A Source object represents the RSS channel that the item came from.
More information at http://cyber.law.harvard.edu/rss/rss.html#ltsourcegtSubelementOfLtitemgt
"""
def __init__(self, name, url):
""" Keyword arguments:
name -- The name of the RSS channel that the item came from.
url -- Links to the XMLization of the source.
"""
Serializable.__init__(self)
if name is None: raise ElementRequiredError("name")
if url is None: raise ElementRequiredError("url")
self.name = name
self.url = url
def publish(self, handler):
Serializable.publish(self, handler)
self._write_element("source", self.name, { "url": self.url })
class iTunesOwner(Serializable):
""" An iTunesOwner object contains contact information for the owner of the podcast intended to be used for administrative communication.
More information at https://www.apple.com/itunes/podcasts/specs.html#owner
"""
def __init__(self, name, email):
""" Keyword arguments
name -- The name of the owner.
email -- The email address of the owner.
"""
Serializable.__init__(self)
if name is None: raise ElementRequiredError("name")
if email is None: raise ElementRequiredError("email")
self.name = name
self.email = email
def publish(self, handler):
Serializable.publish(self, handler)
self.handler.startElement("itunes:owner", {})
self._write_element("itunes:name", self.name)
self._write_element("itunes:email", self.email)
self.handler.endElement("itunes:owner")
class iTunesCategory(Serializable):
""" An iTunesCategory object specified the browsing category of the feed.
More information at https://www.apple.com/itunes/podcasts/specs.html#category
"""
def __init__(self, name, subcategory = None):
""" Keyword arguments
name -- The name of the category
subcategory -- Optional. The name of the subcategory.
"""
Serializable.__init__(self)
if name is None: raise ElementRequiredError("name")
self.name = name
self.subcategory = subcategory
def publish(self, handler):
Serializable.publish(self, handler)
self.handler.startElement("itunes:category", { "text": self.name })
if self.subcategory is not None:
self._write_element("itunes:category", None, { "text": self.subcategory })
self.handler.endElement("itunes:category")
class iTunes(Extension):
""" Extension for iTunes metatags.
More information at https://www.apple.com/itunes/podcasts/specs.html
"""
def __init__(self, author = None, block = None, categories = None, image = None, explicit = None, complete = None, owner = None, subtitle = None,
summary = None, new_feed_url = None, type=None):
""" Keyword arguments:
author -- The author of the podcast. Visible under podcast title and in iTunes Store Browse.
block -- Whether the podcast should appear in the iTunes Store podcast directory.
categories -- The browsing categories for this podcast.
image -- The URL of the artwork of your podcast.
explicit -- Whether your podcast contains explicit material.
complete -- Whether your podcast has been completed and no further episodes will be posted in the future.
owner -- Contains contact information for the owner of the podcast.
subtitle -- A few words that represent the description of the podcast.
summary -- An extended summary of the podcast.
new_feed_url -- When changing the podcast RSS URL, this is the new URL where the podcast is located.
type -- The type of podcast.
"""
Extension.__init__(self)
self.author = author
self.block = True if (isinstance(block, basestring) and block.lower() == 'yes') else block
self.image = image
self.explicit = True if (isinstance(explicit, basestring) and explicit.lower() == 'yes') else explicit
self.complete = True if (isinstance(complete, basestring) and complete.lower() == 'yes') else complete
self.owner = owner
self.subtitle = subtitle
self.summary = summary
self.new_feed_url = new_feed_url
self.type = type
self.categories = [] if categories is None else categories
if isinstance(self.categories, iTunesCategory):
self.categories = [self.categories]
elif isinstance(self.categories, basestring):
self.categories = [iTunesCategory(self.categories)]
def get_namespace(self):
return {"xmlns:itunes": "http://www.itunes.com/dtds/podcast-1.0.dtd"}
def publish(self, handler):
Extension.publish(self, handler)
self._write_element("itunes:author", self.author)
if self.block is not None:
self._write_element("itunes:block", "yes" if self.block is True else "no")
if self.image is not None:
self._write_element("itunes:image", None, {"href" : self.image })
if self.explicit is not None:
self._write_element("itunes:explicit", "yes" if self.explicit is True else "clean")
if self.complete is not None:
self._write_element("itunes:complete", "yes" if self.complete is True else "no")
if self.owner is not None:
self.owner.publish(self.handler)
self._write_element("itunes:subtitle", self.subtitle)
self._write_element("itunes:summary", self.summary)
self._write_element("itunes:new-feed-url", self.new_feed_url)
self._write_element("itunes:type", self.type)
for category in self.categories:
if isinstance(category, basestring):
category = iTunesCategory(category)
category.publish(self.handler)
class iTunesItem(Serializable):
""" Extension for iTunes Item metatags.
More information at https://www.apple.com/itunes/podcasts/specs.html
"""
def __init__(self, author = None, block = None, image = None, duration = None, explicit = None, is_closed_captioned = None, order = None, subtitle = None, summary = None,
title=None, episode=None, episodeType=None, season=None):
""" Keyword arguments:
author -- The author of the episode.
block -- Whether the episode should appear in the iTunes Store podcast directory.
image -- The URL of the artwork of your podcast.
duration -- Specifies the duration of the podcast episode.
explicit -- Whether your episode contains explicit material.
is_closed_captioned -- Whether your episode has embedded closed captioning.
order -- Used to override the default ordering of episodes in the iTunes Store.
subtitle -- A few words that represent the description of the episode.
summary -- An extended summary of the episode.
title -- An episode title.
episode -- An episode number.
episodeType -- The episode type.
season -- The episode season number.
"""
Serializable.__init__(self)
self.author = author
self.block = True if (isinstance(block, basestring) and block.lower() == 'yes') else block
self.image = image
self.duration = duration
self.explicit = True if (isinstance(explicit, basestring) and explicit.lower() == 'yes') else explicit
self.is_closed_captioned = True if (isinstance(is_closed_captioned, basestring) and is_closed_captioned.lower() == 'yes') else is_closed_captioned
self.order = order
self.subtitle = subtitle
self.summary = summary
self.title = title
self.episode = episode
self.episodeType = episodeType
self.season = season
def publish(self, handler):
Serializable.publish(self, handler)
self._write_element("itunes:author", self.author)
if self.block is not None:
self._write_element("itunes:block", "yes" if self.block is True else "no")
if self.image is not None:
self._write_element("itunes:image", None, {"href" : self.image })
self._write_element("itunes:duration", self.duration)
if self.explicit is not None:
self._write_element("itunes:explicit", "yes" if self.explicit is True else "clean")
if self.is_closed_captioned is not None:
self._write_element("itunes:is_closed_captioned", "yes" if self.is_closed_captioned is True else "no")
if self.order is not None:
self._write_element("itunes:order", str(self.order))
self._write_element("itunes:subtitle", self.subtitle)
self._write_element("itunes:summary", self.summary)
self._write_element("itunes:title", self.title)
self._write_element("itunes:episode", self.episode)
self._write_element("itunes:episodeType", self.episodeType)
self._write_element("itunes:season", self.season)
class Item(Host):
""" An Item object may represent a "story" - much like a story in a newspaper or magazine; if so its description is a synopsis of the story, and the link points to the full story.
An item may also be complete in itself, if so, the description contains the text, and the link and title may be omitted. All elements of an item are optional, however at least one
of title or description must be present.
More information at http://cyber.law.harvard.edu/rss/rss.html#hrelementsOfLtitemgt
"""
def __init__(self, title = None, link = None, description = None, author = None,
creator = None, categories = None, comments = None, enclosure = None,
guid = None, pubDate = None, source = None, extensions = None):
""" Keyword arguments:
title -- Optional. The title of the item.
link -- Optional. The URL of the item.
description -- Optional. The item synopsis.
author -- Optional. Email address of the author of the item.
creator -- Optional. Identifies the person or entity who wrote an item.
categories -- Optional. Includes the item in one or more categories.
comments -- Optional. URL of a page for comments relating to the item.
enclosure -- Optional. Describes a media object that is attached to the item.
guid -- Optional. A string that uniquely identifies the item.
pubDate -- Optional. Indicates when the item was published.
source -- Optional. The RSS channel that the item came from.
extensions -- Optional. The list of extensions added to the item.
"""
Host.__init__(self, extensions)
if title is None and description is None:
raise ElementRequiredError("title", "description")
self.title = title
self.link = link
self.description = description
self.author = author
self.creator = creator
self.comments = comments
self.enclosure = enclosure
self.guid = guid
self.pubDate = pubDate
self.source = source
self.categories = [] if categories is None else categories
if isinstance(self.categories, Category):
self.categories = [self.categories]
elif isinstance(self.categories, basestring):
self.categories = [Category(self.categories)]
def publish(self, handler):
Serializable.publish(self, handler)
self.handler.startElement("item", {})
self._write_element("title", self.title)
self._write_element("link", self.link)
self._write_element("description", self.description)
self._write_element("author", self.author)
self._write_element("dc:creator", self.creator)
self._write_element("comments", self.comments)
self._write_element("pubDate", self._date(self.pubDate))
for category in self.categories:
if isinstance(category, basestring):
category = Category(category)
category.publish(self.handler)
if self.enclosure is not None:
self.enclosure.publish(self.handler)
if self.guid is not None:
self.guid.publish(self.handler)
if self.source is not None:
self.source.publish(self.handler)
for extension in self.extensions:
extension.publish(self.handler)
self.handler.endElement("item")
class Feed(Host):
def __init__(self, title, link, description, language = None, copyright = None, managingEditor = None, webMaster = None, pubDate = None,
lastBuildDate = None, categories = None, generator = None, docs = None, cloud = None, ttl = None, image = None, rating = None,
textInput = None, skipHours = None, skipDays = None, items = None, extensions = None):
""" Keyword arguments:
title -- The name of the channel.
link -- The URL to the HTML website corresponding to the channel.
description -- Phrase or sentence describing the channel.
language -- Optional. The language the channel is written in.
copyright -- Optional. Copyright notice for content in the channel.
managingEditor -- Optional. Email address for person responsible for editorial content.
webMaster -- Optional. Email address for person responsible for technical issues relating to channel.
pubDate -- Optional. The publication date for the content in the channel. This should be a datetime in GMT format.
lastBuildDate -- Optional. The last time the content of the channel changed. This should be a datetime in GMT format.
categories -- Optional. Specify one or more categories that the channel belongs to.
generator -- Optional. A string indicating the program used to generate the channel.
docs -- Optional. A URL that points to the documentation for the format used in the RSS file.
cloud -- Optional. Allows processes to register with a cloud to be notified of updates to the channel. This is a Cloud object.
ttl -- Optional. The number of minutes that indicates how long a channel can be cached before refreshing from the source. This should be an integer value.
image -- Optional. Specifies an image that can be displayed with the channel. This is an Image object.
rating -- Optional. The PICS rating for the channel. See http://www.w3.org/PICS/.
textInput -- Optional. Specifies a text input box that can be displayed with the channel.
skipHours -- Optional. A hint for aggregators telling them which hours they can skip.
skipDays -- Optional. A hint for aggregators telling them which days they can skip.
items -- Optional. The list of items included in this channel.
extensions -- Optional. The list of extensions added to the feed.
"""
Host.__init__(self, extensions)
if title is None: raise ElementRequiredError("title")
if link is None: raise ElementRequiredError("link")
if description is None: raise ElementRequiredError("description")
self.title = title
self.link = link
self.description = description
self.language = language
self.copyright = copyright
self.managingEditor = managingEditor
self.webMaster = webMaster
self.pubDate = pubDate
self.lastBuildDate = lastBuildDate
self.generator = _generator if generator is None else generator
self.docs = _docs if docs is None else docs
self.cloud = cloud
self.ttl = ttl
self.image = image
self.rating = rating
self.textInput = textInput
self.skipHours = skipHours
self.skipDays = skipDays
self.categories = [] if categories is None else categories
if isinstance(self.categories, Category):
self.categories = [self.categories]
elif isinstance(self.categories, basestring):
self.categories = [Category(self.categories)]
self.items = [] if items is None else items
def rss(self):
output = StringIO()
handler = saxutils.XMLGenerator(output, 'UTF-8')
handler.startDocument()
handler.startElement("rss", self._get_attributes())
self.publish(handler)
handler.endElement("rss")
handler.endDocument()
return output.getvalue()
def publish(self, handler):
Serializable.publish(self, handler)
handler.startElement("channel", {})
self._write_element("title", self.title)
self._write_element("link", self.link)
self._write_element("description", self.description)
self._write_element("language", self.language)
self._write_element("copyright", self.copyright)
self._write_element("managingEditor", self.managingEditor)
self._write_element("webMaster", self.webMaster)
self._write_element("pubDate", self._date(self.pubDate))
self._write_element("lastBuildDate", self._date(self.lastBuildDate))
self._write_element("generator", self.generator)
self._write_element("docs", self.docs)
self._write_element("ttl", self.ttl)
self._write_element("rating", self.rating)
for category in self.categories:
if isinstance(category, basestring):
category = Category(category)
category.publish(self.handler)
if self.cloud is not None:
self.cloud.publish(self.handler)
if self.image is not None:
self.image.publish(self.handler)
if self.textInput is not None:
self.textInput.publish(self.handler)
if self.skipHours is not None:
self.skipHours.publish(self.handler)
if self.skipDays is not None:
self.skipDays.publish(self.handler)
for extension in self.extensions:
extension.publish(self.handler)
for item in self.items:
item.publish(self.handler)
handler.endElement("channel")
def _get_attributes(self):
attributes = {"version": "2.0", "xmlns:dc" : "http://purl.org/dc/elements/1.1/"}
for extension in self.extensions:
if isinstance(extension, Extension):
namespace = extension.get_namespace()
if namespace is not None:
attributes = dict(itertools.chain(attributes.items(), namespace.items()))
return attributes
class ElementRequiredError(Exception):
def __init__(self, element1, element2 = None):
self.element1 = element1
self.element2 = element2
def __str__(self):
if self.element2 is not None:
return 'Either "' + self.element1 + '" or "' + self.element2 + '" must be defined'
return '"' + self.element1 + '" must be defined'
|
|
import modules.utils as utils
import numpy as np
import cv2
import scipy
import keras
from modules.logging import logger
import random
import tensorflow as tf
from keras import models
from keras import layers
from keras.layers import convolutional
from keras.layers import core
CLASS_LABELS = ['0-adult_male', '1-subadult_male', '2-adult_female', '3-juvenile', '4-pup', '5-non lion']
#each index is a min/max color for a class mark
C_MIN = [
np.array([0, 0, 160]),
np.array([200, 0, 200]),
np.array([10, 40, 75]),
np.array([150, 40, 0]),
np.array([25, 140, 40])
]
C_MAX = [
np.array([50, 50, 255]),
np.array([255, 55, 255]),
np.array([20, 55, 130]),
np.array([255, 80, 40]),
np.array([50, 255, 65])
]
#adapted from alexnet
def convnet_alexnet_lion_keras(image_dims):
# model = Sequential()
# model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))
NR_CLASSES = 6
input = layers.Input(shape=image_dims, name="Input")
conv_1 = convolutional.Convolution2D(96, 11, 11, border_mode='valid', name="conv_1", activation='relu', init='glorot_uniform')(input)
pool_1 = convolutional.MaxPooling2D(pool_size=(3, 3), name="pool_1")(conv_1)
zero_padding_1 = convolutional.ZeroPadding2D(padding=(1, 1), name="zero_padding_1")(pool_1)
conv_2 = convolutional.Convolution2D(256, 3, 3, border_mode='valid', name="conv_2", activation='relu', init='glorot_uniform')(zero_padding_1)
pool_2 = convolutional.MaxPooling2D(pool_size=(3, 3), name="pool_2")(conv_2)
zero_padding_2 = convolutional.ZeroPadding2D(padding=(1, 1), name="zero_padding_2")(pool_2)
conv_3 = convolutional.Convolution2D(384, 3, 3, border_mode='valid', name="conv_3", activation='relu', init='glorot_uniform')(zero_padding_2)
conv_4 = convolutional.Convolution2D(384, 3, 3, border_mode='valid', name="conv_4", activation='relu', init='glorot_uniform')(conv_3)
conv_5 = convolutional.Convolution2D(256, 3, 3, border_mode='valid', name="conv_5", activation='relu', init='glorot_uniform')(conv_4)
pool_3 = convolutional.MaxPooling2D(pool_size=(3, 3), name="pool_3")(conv_5)
flatten = core.Flatten(name="flatten")(pool_3)
fc_1 = core.Dense(4096, name="fc_1", activation='relu', init='glorot_uniform')(flatten)
fc_1 = core.Dropout(0.5, name="fc_1_dropout")(fc_1)
output = core.Dense(4096, name="Output", activation='relu', init='glorot_uniform')(fc_1)
output = core.Dropout(0.5, name="Output_dropout")(output)
fc_2 = core.Dense(NR_CLASSES, name="fc_2", activation='softmax', init='glorot_uniform')(output)
return models.Model([input], [fc_2])
def convnet_medium1_lion_keras(image_dims):
model = keras.models.Sequential()
model.add(core.Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))
model.add(convolutional.Conv2D(64, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(128, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(core.Flatten())
model.add(core.Dense(1024, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(1024, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(6, activation='softmax', init='glorot_uniform'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
return model
def convnet_medium2_lion_keras(image_dims):
model = keras.models.Sequential()
model.add(core.Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))
model.add(convolutional.Conv2D(128, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(core.Flatten())
model.add(core.Dense(1024, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(2048, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(6, activation='softmax', init='glorot_uniform'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
return model
def convnet_medium3_lion_keras(image_dims):
model = keras.models.Sequential()
model.add(core.Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))
model.add(convolutional.Conv2D(128, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(64, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(core.Flatten())
model.add(core.Dense(2048, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(2048, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(6, activation='softmax', init='glorot_uniform'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
return model
# Don't change: a good training run already exists for this net (72% acc).
def convnet_simple_lion_keras(image_dims):
model = keras.models.Sequential()
model.add(core.Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))
model.add(convolutional.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(core.Flatten())
model.add(core.Dense(512, activation='relu'))
model.add(core.Dropout(0.5))
model.add(core.Dense(1024, activation='relu'))
model.add(core.Dropout(0.5))
model.add(core.Dense(6, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
return model
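# A short usage sketch (the 84x84x3 patch size is an assumption; adjust to the
# exported patch dimensions): build the simple classifier and inspect its
# layers. Training would consume the patches produced by export_lions() below.
def _demo_build_simple_model():
    model = convnet_simple_lion_keras((84, 84, 3))
    model.summary()
    return model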
def convnet_medium1_boolean(image_dims):
model = keras.models.Sequential()
model.add(core.Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))
model.add(convolutional.Conv2D(64, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(128, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(core.Flatten())
model.add(core.Dense(1024, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(1024, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
return model
#adapted from alexnet
def convnet_alexnet_lion_tflearn(image_dims):
    # tflearn variant: import tflearn's layers locally so they don't shadow
    # the module-level keras "layers" import above
    from tflearn import layers
    from tflearn.data_augmentation import ImageAugmentation
    from tflearn.data_preprocessing import ImagePreprocessing
    #image augmentation
    img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_flip_updown()
img_aug.add_random_rotation(max_angle=360.)
img_aug.add_random_blur(sigma_max=5.)
#image pre-processing
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()
#AlexNet
network = layers.core.input_data(shape=[None, image_dims[0], image_dims[1], image_dims[2]], dtype=tf.float32, data_preprocessing=img_prep, data_augmentation=img_aug)
network = layers.conv.conv_2d(network, 96, 11, strides=4, activation='relu')
network = layers.conv.max_pool_2d(network, 3, strides=2)
network = layers.normalization.local_response_normalization(network)
network = layers.conv.conv_2d(network, 256, 5, activation='relu')
network = layers.conv.max_pool_2d(network, 3, strides=2)
network = layers.normalization.local_response_normalization(network)
network = layers.conv.conv_2d(network, 384, 3, activation='relu')
network = layers.conv.conv_2d(network, 384, 3, activation='relu')
network = layers.conv.conv_2d(network, 256, 3, activation='relu')
network = layers.conv.max_pool_2d(network, 3, strides=2)
network = layers.normalization.local_response_normalization(network)
network = layers.core.fully_connected(network, 4096, activation='tanh')
network = layers.core.dropout(network, 0.5)
network = layers.core.fully_connected(network, 4096, activation='tanh')
network = layers.core.dropout(network, 0.5)
network = layers.core.fully_connected(network, 5, activation='softmax')
network = layers.estimator.regression(network, optimizer='momentum',
loss='categorical_crossentropy', learning_rate=0.001)
return network
def find_class(image, point):
    # Sample a small window around the mark and pick the colour range with
    # the strongest mask response; returns -1 if no class colour matches.
    image = image[point[1]-3:point[1]+3, point[0]-3:point[0]+3]
    result = -1
    best = 0
    for col in range(5):
        cmsk = cv2.inRange(image, C_MIN[col], C_MAX[col])
        sm = np.sum(cmsk)
        if sm > best:
            best = sm
            result = col
    return result
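# A tiny illustrative check of find_class(): a patch painted in a colour
# inside the class-0 range (adult male, red-ish in BGR) classifies as 0.
def _demo_find_class():
    patch = np.full((20, 20, 3), (10, 10, 200), dtype=np.uint8)  # BGR pixel within C_MIN[0]..C_MAX[0]
    return find_class(patch, (10, 10))  # -> 0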
def export_lions(image_raw, image_dotted, target_x_ds, target_y_ds, image_dims, debug=False, min_distance_others=50, non_lion_distance=150, export_non_lion=True):
NR_CLASSES = 6
#BLACKOUT PORTIONS OF IMAGE IN RAW PICTURE
image_dotted_bw = cv2.cvtColor(image_dotted, cv2.COLOR_BGR2GRAY)
#utils.show_image(image_dotted_bw, size=8)
mask = cv2.threshold(image_dotted_bw, 5, 255, cv2.THRESH_BINARY)[1]
#utils.show_image(mask, size=8)
image_raw_bw = cv2.cvtColor(image_raw, cv2.COLOR_BGR2GRAY)
image_raw = cv2.bitwise_and(image_raw, image_raw, mask=mask)
#utils.show_image(image_raw, size=8, is_bgr=True)
#ISOLATE HUMAN MARKS ON DOTTED PICTURE
diff_color = cv2.absdiff(image_dotted, image_raw)
diff = cv2.cvtColor(diff_color, cv2.COLOR_BGR2GRAY)
kernel = np.ones((2,2),np.uint8)
diff = cv2.morphologyEx(diff, cv2.MORPH_OPEN, kernel)
ret,diff = cv2.threshold(diff,10,255,cv2.THRESH_TOZERO)
ret,diff = cv2.threshold(diff,0,255,cv2.THRESH_BINARY)
#debug data
debug_image = image_dotted.copy()
images = []
#find all dotted sea lions
count1 = 0
count_class = np.zeros(NR_CLASSES)
lion_positions = []
lion_classes = []
im2, contours, hierarchy = cv2.findContours(diff, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
x,y,w,h = cv2.boundingRect(c)
if(w>4 and h>4):
count1 = count1 + 1
center = (x+round(w/3),y+round(h/3))
clazz = find_class(image_dotted, center)
if(clazz==-1):
logger.warning('could not detect sea lion class at ' + str(center))
continue
lion_positions.append(center)
count_class[clazz] = count_class[clazz] + 1
lion_classes.append(clazz)
if(debug):
cv2.circle(debug_image,center,round(w/2),(255,0,0),1)
count_class_added = np.zeros(NR_CLASSES)
#add found sea lions to training dataset
#filter out lions that are too near each other to minimize noise on training set
count2 = 0
for i, lion_pos in enumerate(lion_positions):
lion_class = lion_classes[i]
is_far = True
if(min_distance_others>0):
is_far = utils.is_far_from_others(lion_pos, lion_positions, min_distance_others)
if(is_far):
#export patch to train dataset
count2 = count2 + 1
pw = round(image_dims[1]/2)
ph = image_dims[1] - pw
#trainX = image_raw[lion_pos[1]-pw:lion_pos[1]+ph,lion_pos[0]-pw:lion_pos[0]+ph]
trainX = utils.crop_image_fill(image_raw, (lion_pos[1]-pw,lion_pos[0]-pw), (lion_pos[1]+ph,lion_pos[0]+ph))
m = np.mean(trainX)
if(m>30 and m<225 and m!=127):
if(debug):
images.append(trainX)
cv2.circle(debug_image,lion_pos,round(w/2),(0,0,255),2)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(debug_image,str(lion_class),lion_pos, font, 1.1,(255,255,255),2,cv2.LINE_AA)
#normalize between 0-1
#trainX = trainX/255
trainY = keras.utils.np_utils.to_categorical([lion_class], NR_CLASSES)[0]
if target_x_ds is not None and target_y_ds is not None:
utils.add_sample_to_dataset(target_x_ds, target_y_ds, trainX, trainY)
count_class_added[lion_class] = count_class_added[lion_class] + 1
#identify non sea lion patches
count3 = 0
if(export_non_lion):
s = np.shape(image_raw)
for i in range(int(count2*1.1)):
patch_pos = (random.randint(image_dims[1]*2, s[1]-image_dims[1]*2), random.randint(image_dims[0]*2, s[0]-image_dims[0]*2))
is_far = utils.is_far_from_others(patch_pos, lion_positions, non_lion_distance)
if(is_far):
#export patch to train dataset
pw = round(image_dims[1]/2)
ph = image_dims[1] - pw
#trainX = image_raw[lion_pos[1]-pw:lion_pos[1]+ph,lion_pos[0]-pw:lion_pos[0]+ph]
trainX = utils.crop_image_fill(image_raw, (patch_pos[1]-pw,patch_pos[0]-pw), (patch_pos[1]+ph,patch_pos[0]+ph))
m = np.mean(trainX)
if(m>50 and m<200):
count3 = count3 + 1
if(debug):
images.append(trainX)
cv2.circle(debug_image,patch_pos,round(w/2),(0,255,0),3)
#normalize between 0-1
#trainX = trainX/255
trainY = keras.utils.np_utils.to_categorical([5], NR_CLASSES)[0]
if target_x_ds is not None and target_y_ds is not None:
utils.add_sample_to_dataset(target_x_ds, target_y_ds, trainX, trainY)
count_class[5] = count_class[5] + 1
count_class_added[5] = count_class_added[5] + 1
logger.info('sea lions found: ' + str(count1))
logger.info('sea lions added to dataset: ' + str(count2))
logger.info('non sea lions added to dataset: ' + str(count3))
if target_x_ds is not None and target_y_ds is not None:
logger.info('dataset size: ' + str(len(target_x_ds)))
if(debug):
utils.show_image(debug_image, size=40, is_bgr=True)
utils.show_images(images, cols=10, is_bgr=True, size=1.5)
return count_class, count_class_added, lion_positions, lion_classes
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from future.builtins import zip
from collections import defaultdict
from skbio.workflow import (Exists, NotExecuted, NotNone, Workflow, not_none,
requires, method)
from unittest import TestCase, main
def construct_iterator(**kwargs):
"""make an iterator for testing purposes"""
to_gen = []
for k in sorted(kwargs):
if k.startswith('iter'):
to_gen.append(kwargs[k])
if len(to_gen) == 1:
return (x for x in to_gen[0])
else:
return zip(*to_gen)
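# Example usage (illustrative): a single 'iter_*' kwarg yields its items
# directly, while multiple 'iter_*' kwargs are zipped together:
#   construct_iterator(iter_x=[1, 2])                -> 1, 2
#   construct_iterator(iter_x=[1, 2], iter_y=[3, 4]) -> (1, 3), (2, 4)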
class MockWorkflow(Workflow):
def initialize_state(self, item):
self.state[0] = None
self.state[1] = item
@method(priority=90)
@requires(option='A', values=True)
def wf_groupA(self):
self.methodA1()
self.methodA2()
@method()
@requires(option='B', values=True)
def wf_groupB(self):
self.methodB1()
self.methodB2()
@method(priority=10)
@requires(option='C', values=True)
def wf_groupC(self):
self.methodC1()
self.methodC2()
def methodA1(self):
name = 'A1'
self.stats[name] += 1
if self.state[-1] == 'fail %s' % name:
self.failed = True
self.state = [name, self.state[-1]]
def methodA2(self):
name = 'A2'
self.stats[name] += 1
if self.state[-1] == 'fail %s' % name:
self.failed = True
self.state = [name, self.state[-1]]
def methodB1(self):
name = 'B1'
self.stats[name] += 1
if self.state[-1] == 'fail %s' % name:
self.failed = True
self.state = 'failed'
else:
self.state = [name, self.state[-1]]
@requires(option='foo', values=[1, 2, 3])
def methodB2(self):
name = 'B2'
self.stats[name] += 1
if self.state[-1] == 'fail %s' % name:
self.failed = True
self.state = 'failed'
else:
self.state = [name, self.state[-1]]
def methodC1(self):
name = 'C1'
self.stats[name] += 1
if self.state[-1] == 'fail %s' % name:
self.failed = True
self.state = [name, self.state[-1]]
@requires(option='C2', values=[1, 2, 3])
def methodC2(self):
name = 'C2'
self.stats[name] += 1
if self.state[-1] == 'fail %s' % name:
self.failed = True
self.state = [name, self.state[-1]]
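# Illustrative sketch of how a Workflow subclass such as MockWorkflow is
# driven (mirroring the tests below): the instance is called with an
# iterable plus optional success/failure callbacks and yields one result
# per input item, e.g.
#   wf = MockWorkflow([None, None], options={'A': True},
#                     stats=defaultdict(int))
#   results = list(wf(iter([1, 2, 3]), lambda x: x.state[:], None))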
class WorkflowTests(TestCase):
def setUp(self):
opts = {'A': True, 'C': True}
self.obj_short = MockWorkflow([None, None], options=opts,
stats=defaultdict(int))
self.obj_debug = MockWorkflow([None, None], debug=True, options=opts,
stats=defaultdict(int))
self.obj_noshort = MockWorkflow([None, None], short_circuit=False,
options=opts,
stats=defaultdict(int))
def test_debug_trace(self):
gen = construct_iterator(**{'iter_x': [1, 2, 3, 4, 5]})
obj = self.obj_debug(gen)
exp = ['C1', 1]
obs = next(obj)
self.assertEqual(obs, exp)
exp_trace = set([('wf_groupA', 0),
('methodA1', 1),
('methodA2', 2),
('wf_groupC', 3),
('methodC1', 4)])
exp_pre_state = {('wf_groupA', 0): [None, 1],
('methodA1', 1): [None, 1],
('methodA2', 2): ['A1', 1],
('wf_groupC', 3): ['A2', 1],
('methodC1', 4): ['A2', 1]}
exp_post_state = {('wf_groupA', 0): ['A2', 1],
('methodA1', 1): ['A1', 1],
('methodA2', 2): ['A2', 1],
('wf_groupC', 3): ['C1', 1],
('methodC1', 4): ['C1', 1]}
obs_trace = self.obj_debug.debug_trace
obs_pre_state = self.obj_debug.debug_pre_state
obs_post_state = self.obj_debug.debug_post_state
self.assertEqual(obs_trace, exp_trace)
self.assertEqual(obs_pre_state, exp_pre_state)
self.assertEqual(obs_post_state, exp_post_state)
def test_init(self):
self.assertEqual(self.obj_short.options, {'A': True, 'C': True})
self.assertEqual(self.obj_short.stats, {})
self.assertTrue(self.obj_short.short_circuit)
self.assertEqual(self.obj_noshort.options, {'A': True, 'C': True})
self.assertEqual(self.obj_noshort.stats, {})
self.assertFalse(self.obj_noshort.short_circuit)
def test_init_reserved_attributes(self):
with self.assertRaises(AttributeError):
Workflow('foo', failed=True)
def test_all_wf_methods(self):
# note on priority: groupA:90, groupC:10, groupB:0 (default)
exp = [self.obj_short.wf_groupA, self.obj_short.wf_groupC,
self.obj_short.wf_groupB]
obs = self.obj_short._all_wf_methods()
self.assertEqual(obs, exp)
def test_call_AC_no_fail(self):
iter_ = construct_iterator(**{'iter_x': [1, 2, 3, 4, 5]})
# success function
def sf(x):
return x.state[:]
exp_stats = {'A1': 5, 'A2': 5, 'C1': 5}
# C2 isn't executed as its requirements aren't met in the options
exp_result = [['C1', 1], ['C1', 2], ['C1', 3], ['C1', 4], ['C1', 5]]
obs_result = list(self.obj_short(iter_, sf, None))
self.assertEqual(obs_result, exp_result)
self.assertEqual(self.obj_short.stats, exp_stats)
def test_call_AC_fail(self):
iter_ = construct_iterator(**{'iter_x': [1, 2, 'fail A2', 4, 5]})
# success function
def sf(x):
return x.state[:]
ff = sf # failed function
exp_stats = {'A1': 5, 'A2': 5, 'C1': 4, 'C2': 4}
self.obj_short.options['C2'] = 1
# pass in a failed callback to capture the result, and pause execution
gen = self.obj_short(iter_, sf, ff)
r1 = next(gen)
self.assertEqual(r1, ['C2', 1])
self.assertFalse(self.obj_short.failed)
r2 = next(gen)
self.assertEqual(r2, ['C2', 2])
self.assertFalse(self.obj_short.failed)
r3 = next(gen)
self.assertEqual(self.obj_short.state, ['A2', 'fail A2'])
self.assertTrue(self.obj_short.failed)
self.assertEqual(r3, ['A2', 'fail A2'])
r4 = next(gen)
self.assertEqual(r4, ['C2', 4])
self.assertFalse(self.obj_short.failed)
r5 = next(gen)
self.assertEqual(r5, ['C2', 5])
self.assertFalse(self.obj_short.failed)
self.assertEqual(self.obj_short.stats, exp_stats)
def test_call_AC_fail_noshort(self):
iter_ = construct_iterator(**{'iter_x': [1, 2, 'fail A2', 4, 5]})
# success function
def sf(x):
return x.state[:]
ff = sf # failed function
exp_stats = {'A1': 5, 'A2': 5, 'C1': 5}
# pass in a failed callback to capture the result, and pause execution
gen = self.obj_noshort(iter_, sf, ff)
r1 = next(gen)
self.assertEqual(r1, ['C1', 1])
self.assertFalse(self.obj_noshort.failed)
r2 = next(gen)
self.assertEqual(r2, ['C1', 2])
self.assertFalse(self.obj_noshort.failed)
next(gen)
self.assertEqual(self.obj_noshort.state, ['C1', 'fail A2'])
self.assertTrue(self.obj_noshort.failed)
r4 = next(gen)
self.assertEqual(r4, ['C1', 4])
self.assertFalse(self.obj_noshort.failed)
r5 = next(gen)
self.assertEqual(r5, ['C1', 5])
self.assertFalse(self.obj_noshort.failed)
self.assertEqual(self.obj_noshort.stats, exp_stats)
class MockWorkflowReqTest(Workflow):
def _allocate_state(self):
self.state = None
def initialize_state(self, item):
self.state = [None, item]
@method(priority=5)
@requires(state=lambda x: x[-1] < 3)
def wf_needs_data(self):
name = 'needs_data'
self.stats[name] += 1
if self.state[-1] == 'fail %s' % name:
self.failed = True
self.state = [name, self.state[-1]]
@method(priority=10)
def wf_always_run(self):
name = 'always_run'
self.stats[name] += 1
if self.state[-1] == 'fail %s' % name:
self.failed = True
self.state = [name, self.state[-1]]
@method(priority=20)
@requires(option='cannot_be_none', values=not_none)
def wf_run_if_not_none(self):
name = 'run_if_not_none'
self.stats[name] += 1
if self.state[-1] == 'fail %s' % name:
self.failed = True
self.state = [name, self.state[-1]]
class RequiresTests(TestCase):
def test_validdata(self):
obj = MockWorkflowReqTest([None, None], stats=defaultdict(int))
single_iter = construct_iterator(**{'iter_x': [1, 2, 3, 4, 5]})
exp_stats = {'needs_data': 2, 'always_run': 5}
exp_result = [['needs_data', 1], ['needs_data', 2], ['always_run', 3],
['always_run', 4], ['always_run', 5]]
obs_result = list(obj(single_iter))
self.assertEqual(obs_result, exp_result)
self.assertEqual(obj.stats, exp_stats)
def test_not_none_avoid(self):
obj = MockWorkflowReqTest([None, None], {'cannot_be_none': None},
stats=defaultdict(int))
single_iter = construct_iterator(**{'iter_x': [1, 2, 3, 4, 5]})
exp_stats = {'needs_data': 2, 'always_run': 5}
exp_result = [['needs_data', 1], ['needs_data', 2], ['always_run', 3],
['always_run', 4], ['always_run', 5]]
obs_result = list(obj(single_iter))
self.assertEqual(obs_result, exp_result)
self.assertEqual(obj.stats, exp_stats)
def test_not_none_execute(self):
obj = MockWorkflowReqTest([None, None],
options={'cannot_be_none': True}, debug=True,
stats=defaultdict(int))
single_iter = construct_iterator(**{'iter_x': [1, 2, 3, 4, 5]})
exp_stats = {'needs_data': 2, 'always_run': 5, 'run_if_not_none': 5}
exp_result = [['needs_data', 1], ['needs_data', 2], ['always_run', 3],
['always_run', 4], ['always_run', 5]]
obs_result = list(obj(single_iter))
self.assertEqual(obs_result, exp_result)
self.assertEqual(obj.stats, exp_stats)
def test_methodb1(self):
obj = MockWorkflow([None, None], stats=defaultdict(int))
obj.initialize_state('test')
obj.methodB1()
self.assertEqual(obj.state, ['B1', 'test'])
self.assertFalse(obj.failed)
        # methodB1 executes regardless of whether self.failed is set
obj.failed = True
obj.initialize_state('test 2')
obj.methodB1()
self.assertEqual(obj.state, ['B1', 'test 2'])
obj.failed = False
obj.state = [None, 'fail B1']
obj.methodB1()
self.assertEqual(obj.state, 'failed')
self.assertEqual(obj.stats, {'B1': 3})
def test_methodb2_accept(self):
        # methodB2 is set up to be valid when foo is in [1, 2, 3]; make sure
        # we can execute
obj = MockWorkflow([None, None], options={'foo': 1},
stats=defaultdict(int))
obj.initialize_state('test')
obj.methodB2()
self.assertEqual(obj.state, ['B2', 'test'])
self.assertEqual(obj.stats, {'B2': 1})
def test_methodb2_ignore(self):
        # methodB2 is set up to be valid when foo is in [1, 2, 3]; make sure
        # we do not execute
obj = MockWorkflow([None, None], options={'foo': 'bar'},
stats=defaultdict(int))
obj.methodB2()
self.assertEqual(obj.state, [None, None])
self.assertEqual(obj.stats, {})
class PriorityTests(TestCase):
def test_dec(self):
@method(priority=10)
def foo(x, y, z):
"""doc check"""
return x + y + z
self.assertEqual(foo.priority, 10)
self.assertEqual(foo.__name__, 'foo')
self.assertEqual(foo.__doc__, 'doc check')
class NotExecutedTests(TestCase):
def test_call(self):
ne = NotExecuted()
obs = ne('foo')
self.assertTrue(obs is ne)
self.assertEqual(obs.msg, 'foo')
class ExistsTests(TestCase):
def test_contains(self):
e = Exists()
self.assertTrue('foo' in e)
self.assertTrue(None in e)
class NotNoneTests(TestCase):
def test_contains(self):
nn = NotNone()
self.assertTrue('foo' in nn)
self.assertFalse(None in nn)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for google.cloud.forseti.enforcer.project_enforcer."""
from builtins import range
import copy
from datetime import datetime
import json
import unittest
from googleapiclient import errors
import httplib2
import unittest.mock as mock
from tests.enforcer import testing_constants as constants
from google.cloud.forseti.common.gcp_api import errors as api_errors
from google.cloud.forseti.common.gcp_api import repository_mixins
from google.cloud.forseti.enforcer import enforcer_log_pb2
from google.cloud.forseti.enforcer import project_enforcer
class ProjectEnforcerTest(constants.EnforcerTestCase):
"""Extended unit tests for ProjectEnforcer class."""
def setUp(self):
"""Set up."""
super(ProjectEnforcerTest, self).setUp()
self.enforcer = project_enforcer.ProjectEnforcer(
self.project, compute_client=self.gce_api_client, dry_run=True)
self.expected_proto = enforcer_log_pb2.ProjectResult(
timestamp_sec=constants.MOCK_MICROTIMESTAMP,
project_id=self.project,
networks=['test-network'])
self.expected_rules = copy.deepcopy(
list(constants.EXPECTED_FIREWALL_RULES.values()))
response_403 = httplib2.Response({
'status': '403',
'content-type': 'application/json'
})
response_403.reason = 'Failed'
self.error_403 = errors.HttpError(response_403, ''.encode(), uri='')
self.addCleanup(mock.patch.stopall)
def set_expected_audit_log(self,
added=None,
deleted=None,
updated=None,
unchanged=None):
"""Adds the GceFirewallEnforcementResult proto to the expected_proto."""
results = self.expected_proto.gce_firewall_enforcement
results.rules_modified_count = 0
if added:
results.rules_added.extend(added)
results.rules_modified_count += len(added)
if deleted:
results.rules_removed.extend(deleted)
results.rules_modified_count += len(deleted)
if updated:
results.rules_updated.extend(updated)
results.rules_modified_count += len(updated)
if unchanged:
results.rules_unchanged.extend(unchanged)
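    # Example (illustrative): set_expected_audit_log(added=['a'], deleted=['b'])
    # yields rules_added=['a'], rules_removed=['b'] and rules_modified_count=2.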
def validate_results(self, expected_proto, actual_proto,
expect_rules_before=False, expect_rules_after=False):
"""Check that the expected proto matches the actual proto.
Removes the rules_before and rules_after messages from the actual_proto
before comparing.
Args:
expected_proto: The expected response.
actual_proto: The actual responses.
expect_rules_before: If actual response should contain a
rules_before message.
expect_rules_after: If actual response should contain a
rules_after message.
"""
if expect_rules_before:
self.assertTrue(
actual_proto.gce_firewall_enforcement.HasField('rules_before'))
actual_proto.gce_firewall_enforcement.ClearField('rules_before')
if expect_rules_after:
self.assertTrue(
actual_proto.gce_firewall_enforcement.HasField('rules_after'))
actual_proto.gce_firewall_enforcement.ClearField('rules_after')
self.assertEqual(expected_proto, actual_proto)
def test_enforce_policy_no_changes(self):
"""Validate results when there are no firewall policies changed.
Setup:
* Set API calls to return the same networks and firewall rules listed
in the RAW_EXPECTED_JSON_POLICY.
Expected Results:
A ProjectResult proto with status=SUCCESS and all rules listed in
rules_unchanged.
"""
self.gce_api_client.get_firewall_rules.return_value = (
self.expected_rules)
self.expected_proto.status = project_enforcer.STATUS_SUCCESS
unchanged = get_rule_names(self.expected_rules)
self.set_expected_audit_log(unchanged=unchanged)
result = self.enforcer.enforce_firewall_policy(self.policy)
self.validate_results(self.expected_proto, result)
def test_enforce_policy_all_rules_changed(self):
"""Validate results when all firewall policies are changed.
Setup:
* Set API calls to return the different firewall rules from the new
policy on the first call, and the expected new firewall rules on the
second call.
Expected Results:
A ProjectResult proto showing status=SUCCESS, details on the rules
changed, all_rules_changed set to True, and a copy of the previous and
current firewall rules.
"""
self.gce_api_client.get_firewall_rules.side_effect = [
constants.DEFAULT_FIREWALL_API_RESPONSE,
self.expected_rules,
]
result = self.enforcer.enforce_firewall_policy(self.policy)
self.expected_proto.status = project_enforcer.STATUS_SUCCESS
self.expected_proto.gce_firewall_enforcement.all_rules_changed = True
added = get_rule_names(self.expected_rules)
deleted = get_rule_names(
constants.DEFAULT_FIREWALL_API_RESPONSE)
self.set_expected_audit_log(added=added, deleted=deleted)
self.validate_results(self.expected_proto, result,
expect_rules_before=True, expect_rules_after=True)
def test_enforce_policy_multiple_rules_changed(self):
"""Validate results when multiple firewall policies are changed.
Setup:
* Create a new set of rules that is a copy of the expected rules.
- Delete last rule so it will have to be re-added.
- Modify the first rule so it will have to be updated.
- Add a new rule from a different policy so it will have to be
deleted.
* Set API call to return the current firewall rules on the first call,
and the expected new firewall rules on the second call.
Expected Results:
A ProjectResult proto showing status=SUCCESS, details on the rules
changed, and a copy of the previous and current firewall rules.
"""
# Make a deep copy of the expected rules
current_fw_rules = copy.deepcopy(self.expected_rules)
# Delete the last rule, so it has to be re-added
current_fw_rules.pop()
# Make a change to the first rule so it has to be updated
current_fw_rules[0]['sourceRanges'].append('10.0.0.0/8')
# Add a new rule that will need to be deleted
current_fw_rules.append(
constants.DEFAULT_FIREWALL_API_RESPONSE[0])
self.gce_api_client.get_firewall_rules.side_effect = [
current_fw_rules,
self.expected_rules,
]
result = self.enforcer.enforce_firewall_policy(self.policy)
self.expected_proto.status = project_enforcer.STATUS_SUCCESS
added = get_rule_names(self.expected_rules[-1:]) # Expected rule added
deleted = get_rule_names(current_fw_rules[-1:]) # Current rule deleted
updated = get_rule_names(current_fw_rules[:1]) # First rule updated
unchanged = get_rule_names(current_fw_rules[1:2]) # Others unchanged
self.set_expected_audit_log(added, deleted, updated, unchanged)
self.validate_results(self.expected_proto, result,
expect_rules_before=True, expect_rules_after=True)
def test_enforce_policy_one_rule_updated(self):
"""Validate results when a firewall rule is changed.
Setup:
* Set API calls to return the different firewall rules from the new
policy on the first call, and the expected new firewall rules on the
second call.
Expected Results:
A ProjectResult proto showing status=SUCCESS, details on the rules
changed, and a copy of the previous and current firewall rules.
"""
# Make a deep copy of the expected rules
current_fw_rules = copy.deepcopy(self.expected_rules)
# Make a change to one of the rules
current_fw_rules[0]['sourceRanges'].append('10.0.0.0/8')
self.gce_api_client.get_firewall_rules.side_effect = [
current_fw_rules,
self.expected_rules,
]
result = self.enforcer.enforce_firewall_policy(self.policy)
self.expected_proto.status = project_enforcer.STATUS_SUCCESS
updated = get_rule_names(current_fw_rules[:1]) # First rule updated
unchanged = get_rule_names(current_fw_rules[1:]) # Others unchanged
self.set_expected_audit_log(updated=updated, unchanged=unchanged)
self.validate_results(self.expected_proto, result,
expect_rules_before=True, expect_rules_after=True)
def test_enforce_policy_all_rules_changed_with_retry(self):
"""Validate results when a rule is added while the enforcer is running.
Setup:
* Set API calls to return the different firewall rules from the new
policy on the first call, the expected rules with an additional rule
added on the second and third calls, and the expected new firewall
          rules on the fourth call.
* Set retry_on_dry_run to True so that code path will be tested.
Expected Results:
A ProjectResult proto showing status=SUCCESS, details on the rules
changed, all_rules_changed set to True, and a copy of the previous and
current firewall rules.
"""
extra_rules = copy.deepcopy(self.expected_rules)
extra_rules.extend(
constants.DEFAULT_FIREWALL_API_RESPONSE[:1])
self.gce_api_client.get_firewall_rules.side_effect = [
constants.DEFAULT_FIREWALL_API_RESPONSE,
extra_rules,
extra_rules,
self.expected_rules
]
result = self.enforcer.enforce_firewall_policy(
self.policy, retry_on_dry_run=True)
self.expected_proto.status = project_enforcer.STATUS_SUCCESS
self.expected_proto.gce_firewall_enforcement.all_rules_changed = True
added = get_rule_names(self.expected_rules)
deleted = get_rule_names(
constants.DEFAULT_FIREWALL_API_RESPONSE)
deleted.extend(get_rule_names(
constants.DEFAULT_FIREWALL_API_RESPONSE[:1]))
self.set_expected_audit_log(added=added, deleted=sorted(deleted))
self.validate_results(self.expected_proto, result,
expect_rules_before=True, expect_rules_after=True)
def test_enforce_policy_rules_changed_exceeds_maximum_retries(self):
"""Validate error status if rule is constantly readded while enforcing.
Setup:
* Set API calls to return the different firewall rules from the new
policy on the first call, the expected rules with an additional rule
added on all subsequent calls.
* Set retry_on_dry_run to True so that code path will be tested.
Expected Results:
A ProjectResult proto showing status=ERROR, details on the rules
changed, and a copy of the previous and current firewall rules.
"""
extra_rules = copy.deepcopy(self.expected_rules)
extra_rules.extend(
constants.DEFAULT_FIREWALL_API_RESPONSE[:1])
firewall_list = [
constants.DEFAULT_FIREWALL_API_RESPONSE,
extra_rules
]
maximum_retries = 3
# Return the same extra rule for each retry.
firewall_list.extend([extra_rules] * maximum_retries * 2)
self.gce_api_client.get_firewall_rules.side_effect = firewall_list
result = self.enforcer.enforce_firewall_policy(
self.policy, retry_on_dry_run=True, maximum_retries=maximum_retries)
self.expected_proto.status = project_enforcer.STATUS_ERROR
self.expected_proto.status_reason = (
'New firewall rules do not match the expected rules enforced by '
'the policy')
added = get_rule_names(self.expected_rules)
deleted = get_rule_names(
constants.DEFAULT_FIREWALL_API_RESPONSE)
# Rule is deleted 3 times, but always comes back.
for _ in range(maximum_retries):
deleted.extend(get_rule_names(
constants.DEFAULT_FIREWALL_API_RESPONSE[:1]))
unchanged = get_rule_names(
constants.DEFAULT_FIREWALL_API_RESPONSE[:1])
self.set_expected_audit_log(added=added, deleted=sorted(deleted),
unchanged=unchanged)
self.validate_results(self.expected_proto, result,
expect_rules_before=True, expect_rules_after=True)
def test_enforce_policy_rules_changed_no_retry_if_skipped(self):
"""Retry is not attempted when prechange callback returns false.
Setup:
* Set API calls to return the different firewall rules from the new
policy on all calls.
* Set retry_on_dry_run arg to True so that code path will be tested.
* Set prechange_callback arg to a function that always returns false.
Expected Results:
A ProjectResult proto showing status=SUCCESS, with no rules changed.
"""
self.gce_api_client.get_firewall_rules.return_value = (
constants.DEFAULT_FIREWALL_API_RESPONSE)
prechange_callback_func = lambda *unused_args: False
result = self.enforcer.enforce_firewall_policy(
self.policy, prechange_callback=prechange_callback_func,
retry_on_dry_run=True)
self.expected_proto.status = project_enforcer.STATUS_SUCCESS
unchanged = get_rule_names(constants.DEFAULT_FIREWALL_API_RESPONSE)
self.set_expected_audit_log(added=[], deleted=[], unchanged=unchanged)
self.validate_results(self.expected_proto, result)
def test_enforce_policy_one_network(self):
"""Validate that running on a single network only changes that network.
Setup:
* Set API calls to return default rules on two networks for the first
call and the expected rules on the test network for the second
call.
Expected Results:
The rules on the test network are changed, but the rules on the
default network remain the same.
"""
current_fw_rules_network1 = copy.deepcopy(
constants.DEFAULT_FIREWALL_API_RESPONSE)
current_fw_rules_network2 = json.loads(
json.dumps(constants.DEFAULT_FIREWALL_API_RESPONSE).replace(
'test-network', 'default'))
expected_fw_rules_network1 = copy.deepcopy(
constants.EXPECTED_FIREWALL_API_RESPONSE)
expected_fw_rules_network2 = copy.deepcopy(current_fw_rules_network2)
self.gce_api_client.get_firewall_rules.side_effect = [
current_fw_rules_network1 + current_fw_rules_network2,
expected_fw_rules_network1 + expected_fw_rules_network2
]
result = self.enforcer.enforce_firewall_policy(
self.policy, networks=[constants.TEST_NETWORK])
self.expected_proto.status = project_enforcer.STATUS_SUCCESS
added = get_rule_names(expected_fw_rules_network1)
deleted = get_rule_names(current_fw_rules_network1)
unchanged = get_rule_names(current_fw_rules_network2)
self.set_expected_audit_log(
added=added, deleted=deleted, unchanged=unchanged)
self.validate_results(self.expected_proto, result,
expect_rules_before=True, expect_rules_after=True)
def test_project_enforcer_empty_firewall_policy_exception(self):
"""Verifies that an empty firewall policy raises exception.
Setup:
* Set the firewall policy to an empty list.
* Set the current firewall rules.
* Enforce the expected policy which will raise an exception.
Expected Result:
A ProjectResult proto showing status=ERROR and a reason string.
"""
firewall_policy = []
self.gce_api_client.get_firewall_rules.return_value = (
self.expected_rules)
result = self.enforcer.enforce_firewall_policy(firewall_policy)
self.expected_proto.status = project_enforcer.STATUS_ERROR
self.expected_proto.status_reason = (
'error enforcing firewall for project: No rules defined in the '
'expected rules.')
unchanged = get_rule_names(self.expected_rules)
self.set_expected_audit_log(unchanged=unchanged)
self.validate_results(self.expected_proto, result)
def test_project_enforcer_empty_firewall_policy_allowed(self):
"""Verifies that an empty firewall policy deletes all rules if allowed.
Setup:
* Set the firewall policy to an empty list.
* Set the current firewall rules.
* Set allow_empty_ruleset to True.
* Enforce the expected policy which deletes all rules.
Expected Result:
A ProjectResult proto showing status=SUCCESS and the number of rules
changed in an audit_log, and a copy of the previous and current
firewall rules.
"""
firewall_policy = []
self.gce_api_client.get_firewall_rules.side_effect = [
self.expected_rules,
[]
]
result = self.enforcer.enforce_firewall_policy(
firewall_policy, allow_empty_ruleset=True)
self.expected_proto.status = project_enforcer.STATUS_SUCCESS
self.expected_proto.gce_firewall_enforcement.all_rules_changed = True
deleted = get_rule_names(self.expected_rules)
self.set_expected_audit_log(deleted=deleted)
self.validate_results(self.expected_proto, result,
expect_rules_before=True, expect_rules_after=True)
@mock.patch('google.cloud.forseti.enforcer.gce_firewall_enforcer.LOGGER', autospec=True)
def test_enforce_policy_firewall_enforcer_error(self, mock_logger):
"""Verifies that a firewall enforcer error returns a status=ERROR proto.
Setup:
* Switch the dry run response to a firewall change to be an error.
* Set the current firewall rules to something different from the
policy.
* Enforce the expected policy which will force a firewall change.
Expected Result:
A ProjectResult proto showing status=ERROR and a reason string.
"""
# Make a deep copy of the expected rules
current_fw_rules = copy.deepcopy(self.expected_rules)
# Make a change to one of the rules
current_fw_rules[0]['sourceRanges'].append('10.0.0.0/8')
self.gce_api_client.get_firewall_rules.return_value = current_fw_rules
with mock.patch.object(
repository_mixins, '_create_fake_operation') as mock_dry_run:
mock_dry_run.return_value = {
'status': 'DONE',
'name': 'test-net-allow-all-tcp',
'error': {
'errors': [{
'code': 'ERROR'
}]
}
}
result = self.enforcer.enforce_firewall_policy(self.policy)
self.expected_proto.status = project_enforcer.STATUS_ERROR
unchanged = get_rule_names(self.expected_rules)
self.set_expected_audit_log(unchanged=unchanged)
# Match first part of error reason string
self.assertStartsWith(result.status_reason,
'error enforcing firewall for project')
# Copy reason string into expected proto. The reason includes a long
# error message, which would be ugly to replicate in the test.
self.expected_proto.status_reason = result.status_reason
self.validate_results(self.expected_proto, result)
self.assertTrue(mock_logger.error.called)
@mock.patch('google.cloud.forseti.enforcer.gce_firewall_enforcer.LOGGER', autospec=True)
def test_enforce_policy_failure_during_enforcement(self, mock_logger):
"""Forces an error in the middle of enforcing a policy.
Setup:
* Create a new set of rules that is a copy of the expected rules.
- Delete last rule so it will have to be re-added.
- Modify the first rule so it will have to be updated.
- Add a new rule from a different policy so it will have to be
deleted.
* Mock the update function so it returns an error. Updates are always
done after inserts and deletes.
* Set API call to return the current firewall rules on the first call,
and the partially updated firewall rules on the second call.
Expected Results:
A ProjectResult proto showing status=ERROR, the correct reason string,
the number of rules changed in an audit_log, and a copy of the
previous and current firewall rules.
"""
# Make a change to the first rule so it should be updated. The update
# will fail so this rule will still not match the policy.
self.expected_rules[0]['sourceRanges'].append('10.0.0.0/8')
# Start with the rules as they exist after enforce_firewall_policy is
# run and modify them.
current_fw_rules = copy.deepcopy(self.expected_rules)
# Delete the last rule, so it has to be re-added
current_fw_rules.pop()
# Add a new rule that will need to be deleted
current_fw_rules.append(
constants.DEFAULT_FIREWALL_API_RESPONSE[0])
self.gce_api_client.get_firewall_rules.side_effect = [
current_fw_rules,
self.expected_rules,
]
with mock.patch.object(self.gce_api_client,
'patch_firewall_rule') as mock_updater:
mock_updater.return_value = {
'status': 'DONE',
'name': 'test-net-allow-corp-internal-0',
'error': {
'errors': [{
'code': 'ERROR'
}]
}
}
result = self.enforcer.enforce_firewall_policy(self.policy)
self.expected_proto.status = project_enforcer.STATUS_ERROR
added = get_rule_names(self.expected_rules[-1:]) # expected rule added
deleted = get_rule_names(current_fw_rules[-1:]) # current rule deleted
unchanged = get_rule_names(current_fw_rules[0:2]) # others unchanged
self.set_expected_audit_log(
added=added, deleted=deleted, unchanged=unchanged)
# Match first part of error reason string
self.assertStartsWith(result.status_reason,
'error enforcing firewall for project')
# Copy reason string into expected proto. The reason includes a long
# error message, which would be ugly to replicate in the test.
self.expected_proto.status_reason = result.status_reason
self.validate_results(self.expected_proto, result,
expect_rules_before=True, expect_rules_after=True)
self.assertTrue(mock_logger.error.called)
@mock.patch('google.cloud.forseti.enforcer.project_enforcer.LOGGER', autospec=True)
def test_enforce_policy_error_fetching_updated_rules(self, mock_logger):
"""Forces an error when requesting firewall rules after enforcement.
Setup:
* Create a new set of rules that is a copy of the expected rules.
- Modify the first rule so it will have to be updated.
* Set API call to return the current firewall rules on the first call,
and an error on the second call.
Expected Results:
A ProjectResult proto showing status=ERROR, the correct reason string,
the number of rules changed in an audit_log, and a copy of the
previous firewall rules only.
"""
# Make a deep copy of the expected rules
current_fw_rules = copy.deepcopy(self.expected_rules)
# Make a change to one of the rules
current_fw_rules[0]['sourceRanges'].append('10.0.0.0/8')
err = api_errors.ApiExecutionError(self.project, self.error_403)
self.gce_api_client.get_firewall_rules.side_effect = [
current_fw_rules,
err,
err,
]
result = self.enforcer.enforce_firewall_policy(self.policy)
self.expected_proto.status = project_enforcer.STATUS_ERROR
updated = get_rule_names(current_fw_rules[:1]) # First rule updated
self.set_expected_audit_log(updated=updated)
# Match first part of error reason string
self.assertStartsWith(result.status_reason,
'error getting current firewall rules from API:')
# Copy reason string into expected proto. The reason includes a long
# error message, which would be ugly to replicate in the test.
self.expected_proto.status_reason = result.status_reason
# Verify rules after json is an empty string
self.assertEqual(
self.expected_proto.gce_firewall_enforcement.rules_after.json, '')
self.validate_results(self.expected_proto, result,
expect_rules_before=True,
expect_rules_after=False)
self.assertTrue(mock_logger.error.called)
@mock.patch('google.cloud.forseti.enforcer.project_enforcer.LOGGER', autospec=True)
def test_enforce_policy_error_listing_networks(self, mock_logger):
"""Forces an error when listing project networks.
Setup:
* Set the networks.list API call to return an error.
* Set the firewalls.list API call to return the current firewall
rules.
Expected Results:
A ProjectResult proto showing status=ERROR and the correct reason
string.
"""
err = api_errors.ApiExecutionError(self.project, self.error_403)
self.gce_api_client.get_networks.side_effect = err
self.gce_api_client.get_firewall_rules.return_value = (
self.expected_rules)
result = self.enforcer.enforce_firewall_policy(self.policy)
self.expected_proto.status = project_enforcer.STATUS_ERROR
self.expected_proto.status_reason = (
'error getting current networks from API: <HttpError 403 '
'"Failed">')
self.expected_proto.ClearField('networks')
self.validate_results(self.expected_proto, result)
self.assertTrue(mock_logger.exception.called)
def test_enforce_policy_error_listing_firewalls(self):
"""Forces an error when listing project firewall rules.
Setup:
* Set the firewalls.list API call to return an error.
Expected Results:
A ProjectResult proto showing status=ERROR and the correct reason
string.
"""
err = api_errors.ApiExecutionError(self.project, self.error_403)
self.gce_api_client.get_firewall_rules.side_effect = err
result = self.enforcer.enforce_firewall_policy(self.policy)
self.expected_proto.status = project_enforcer.STATUS_ERROR
# Match first part of error reason string
self.assertStartsWith(result.status_reason,
'error getting current firewall rules from API:')
# Copy reason string into expected proto. The reason includes a long
# error message, which would be ugly to replicate in the test.
self.expected_proto.status_reason = result.status_reason
self.validate_results(self.expected_proto, result)
def test_enforce_policy_error_adding_rules(self):
"""Forces an error when adding the expected firewall policy rules.
Setup:
* Set the first firewall policy rule to have a very long name.
Expected Results:
A ProjectResult proto showing status=ERROR and the correct reason
string.
"""
# Set the first firewall policy rule to have a very long name
self.policy[0]['name'] = 'long-name-' + 'x' * 54
self.gce_api_client.get_firewall_rules.return_value = (
self.expected_rules)
result = self.enforcer.enforce_firewall_policy(self.policy)
self.expected_proto.status = project_enforcer.STATUS_ERROR
# Match first part of error reason string
self.assertStartsWith(
result.status_reason,
'error adding the expected firewall rules from the '
'policy:')
# Copy reason string into expected proto. The reason includes a long
# error message, which would be ugly to replicate in the test.
self.expected_proto.status_reason = result.status_reason
self.validate_results(self.expected_proto, result)
def test_enforce_policy_firewall_enforcer_deleted_403(self):
"""Verifies that a deleted project returns status=PROJECT_DELETED.
Setup:
* Switch the list_firewalls response to be a 403 error with the reason
string set to pending deletion.
Expected Result:
A ProjectResult proto showing status=PROJECT_DELETED and the correct
reason string.
"""
deleted_403 = httplib2.Response({
'status': '403',
'content-type': 'application/json'
})
deleted_403.reason = ('Project has been scheduled for deletion and '
'cannot be used for API calls. Visit '
'https://console.developers.google.com/iam-admin/'
'projects?pendingDeletion=true to undelete the '
'project.')
error_deleted_403 = errors.HttpError(deleted_403, ''.encode(), uri='')
err = api_errors.ApiExecutionError(self.project, error_deleted_403)
self.gce_api_client.get_networks.side_effect = err
result = self.enforcer.enforce_firewall_policy(self.policy)
self.expected_proto.status = project_enforcer.STATUS_DELETED
self.expected_proto.ClearField('networks')
# Match first part of error reason string
self.assertStartsWith(result.status_reason,
'Project scheduled for deletion')
# Copy reason string into expected proto. The reason includes a long
# error message, which would be ugly to replicate in the test.
self.expected_proto.status_reason = result.status_reason
self.validate_results(self.expected_proto, result)
def test_enforce_policy_firewall_enforcer_deleted_400(self):
"""Verifies that a deleted project returns a status=PROJECT_DELETED.
Setup:
* Switch the ListFirewalls response to be a 400 error with the reason
string set to unknown project.
Expected Result:
A ProjectResult proto showing status=PROJECT_DELETED and the correct
reason string.
"""
deleted_400 = httplib2.Response({
'status': '400',
'content-type': 'application/json'
})
deleted_400.reason = 'Invalid value for project: %s' % self.project
error_deleted_400 = errors.HttpError(deleted_400, ''.encode(), uri='')
err = api_errors.ApiExecutionError(self.project, error_deleted_400)
self.gce_api_client.get_firewall_rules.side_effect = err
result = self.enforcer.enforce_firewall_policy(self.policy)
self.expected_proto.status = project_enforcer.STATUS_DELETED
# Match first part of error reason string
self.assertStartsWith(result.status_reason,
'Project scheduled for deletion')
# Copy reason string into expected proto. The reason includes a long
# error message, which would be ugly to replicate in the test.
self.expected_proto.status_reason = result.status_reason
self.validate_results(self.expected_proto, result)
@mock.patch('google.cloud.forseti.enforcer.project_enforcer.LOGGER', autospec=True)
def test_enforce_policy_firewall_enforcer_gce_api_disabled(self, mock_logger):
"""Project returns a status=PROJECT_DELETED if GCE API is disabled.
Setup:
* Switch the ListFirewalls response to be a 403 error with the reason
string set to GCE API disabled.
Expected Result:
A ProjectResult proto showing status=PROJECT_DELETED and the correct
reason string.
"""
api_disabled_403 = httplib2.Response(
{'status': '403',
'content-type': 'application/json'})
api_disabled_403.reason = (
'Access Not Configured. Compute Engine API has not been used in '
'project 1 before or it is disabled. Enable it by visiting '
'https://console.developers.google.com/apis/api/compute_component/'
            'overview?project=1 then retry. If you enabled this API recently, '
'wait a few minutes for the action to propagate to our systems and '
'retry.')
error_api_disabled_403 = errors.HttpError(api_disabled_403, ''.encode(), uri='')
err = api_errors.ApiNotEnabledError(
'https://console.developers.google.com/apis/api/compute_component/',
error_api_disabled_403)
self.gce_api_client.get_firewall_rules.side_effect = err
result = self.enforcer.enforce_firewall_policy(self.policy)
self.expected_proto.status = project_enforcer.STATUS_DELETED
# Match first part of error reason string
self.assertStartsWith(result.status_reason,
'Project has GCE API disabled')
# Copy reason string into expected proto. The reason includes a long
# error message, which would be ugly to replicate in the test.
self.expected_proto.status_reason = result.status_reason
self.validate_results(self.expected_proto, result)
self.assertTrue(mock_logger.error.called)
def get_rule_names(rules):
"""Returns a sorted list of rule names from the rules list."""
return sorted([r['name'] for r in rules])
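# Example (illustrative): get_rule_names([{'name': 'b'}, {'name': 'a'}])
# returns ['a', 'b'].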
if __name__ == '__main__':
unittest.main()
|
|
## @file
# process rule section generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
from struct import *
from . import Section
from .GenFdsGlobalVariable import GenFdsGlobalVariable
import subprocess
from .Ffs import SectionSuffix
import Common.LongFilePathOs as os
from CommonDataClass.FdfClass import EfiSectionClassObject
from Common import EdkLogger
from Common.BuildToolError import *
from Common.Misc import PeImageClass
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.LongFilePathSupport import CopyLongFilePath
from Common.DataType import *
## generate rule section
#
#
class EfiSection (EfiSectionClassObject):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
EfiSectionClassObject.__init__(self)
## GenSection() method
#
# Generate rule section
#
# @param self The object pointer
# @param OutputPath Where to place output file
# @param ModuleName Which module this section belongs to
# @param SecNum Index of section
# @param KeyStringList Filter for inputs of section generation
# @param FfsInf FfsInfStatement object that contains this section data
# @param Dict dictionary contains macro and its value
# @retval tuple (Generated file name list, section alignment)
#
    def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf = None, Dict = None, IsMakefile = False) :
        # use None as the default to avoid sharing one mutable dict across calls
        if Dict is None:
            Dict = {}
if self.FileName is not None and self.FileName.startswith('PCD('):
self.FileName = GenFdsGlobalVariable.GetPcdValue(self.FileName)
"""Prepare the parameter of GenSection"""
if FfsInf is not None :
InfFileName = FfsInf.InfFileName
SectionType = FfsInf.__ExtendMacro__(self.SectionType)
Filename = FfsInf.__ExtendMacro__(self.FileName)
BuildNum = FfsInf.__ExtendMacro__(self.BuildNum)
StringData = FfsInf.__ExtendMacro__(self.StringData)
ModuleNameStr = FfsInf.__ExtendMacro__('$(MODULE_NAME)')
NoStrip = True
if FfsInf.ModuleType in (SUP_MODULE_SEC, SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM) and SectionType in (BINARY_FILE_TYPE_TE, BINARY_FILE_TYPE_PE32):
if FfsInf.KeepReloc is not None:
NoStrip = FfsInf.KeepReloc
elif FfsInf.KeepRelocFromRule is not None:
NoStrip = FfsInf.KeepRelocFromRule
elif self.KeepReloc is not None:
NoStrip = self.KeepReloc
elif FfsInf.ShadowFromInfFile is not None:
NoStrip = FfsInf.ShadowFromInfFile
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "Module %s apply rule for None!" %ModuleName)
"""If the file name was pointed out, add it in FileList"""
FileList = []
if Filename is not None:
Filename = GenFdsGlobalVariable.MacroExtend(Filename, Dict)
# check if the path is absolute or relative
if os.path.isabs(Filename):
Filename = os.path.normpath(Filename)
else:
Filename = os.path.normpath(os.path.join(FfsInf.EfiOutputPath, Filename))
if not self.Optional:
FileList.append(Filename)
elif os.path.exists(Filename):
FileList.append(Filename)
elif IsMakefile:
SuffixMap = FfsInf.GetFinalTargetSuffixMap()
if '.depex' in SuffixMap:
FileList.append(Filename)
else:
FileList, IsSect = Section.Section.GetFileList(FfsInf, self.FileType, self.FileExtension, Dict, IsMakefile=IsMakefile)
if IsSect :
return FileList, self.Alignment
Index = 0
Align = self.Alignment
""" If Section type is 'VERSION'"""
OutputFileList = []
if SectionType == 'VERSION':
InfOverrideVerString = False
if FfsInf.Version is not None:
#StringData = FfsInf.Version
BuildNum = FfsInf.Version
InfOverrideVerString = True
if InfOverrideVerString:
#VerTuple = ('-n', '"' + StringData + '"')
if BuildNum is not None and BuildNum != '':
BuildNumTuple = ('-j', BuildNum)
else:
BuildNumTuple = tuple()
Num = SecNum
OutputFile = os.path.join( OutputPath, ModuleName + SUP_MODULE_SEC + str(Num) + SectionSuffix.get(SectionType))
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_VERSION',
#Ui=StringData,
Ver=BuildNum,
IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
elif FileList != []:
for File in FileList:
Index = Index + 1
Num = '%s.%d' %(SecNum, Index)
OutputFile = os.path.join(OutputPath, ModuleName + SUP_MODULE_SEC + Num + SectionSuffix.get(SectionType))
f = open(File, 'r')
VerString = f.read()
f.close()
BuildNum = VerString
if BuildNum is not None and BuildNum != '':
BuildNumTuple = ('-j', BuildNum)
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_VERSION',
#Ui=VerString,
Ver=BuildNum,
IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
else:
BuildNum = StringData
if BuildNum is not None and BuildNum != '':
BuildNumTuple = ('-j', BuildNum)
else:
BuildNumTuple = tuple()
BuildNumString = ' ' + ' '.join(BuildNumTuple)
#if VerString == '' and
if BuildNumString == '':
                if self.Optional:
                    GenFdsGlobalVariable.VerboseLogger("Optional Section doesn't exist!")
                    return [], None
                else:
                    EdkLogger.error("GenFds", GENFDS_ERROR, "File: %s misses Version Section value" %InfFileName)
Num = SecNum
OutputFile = os.path.join( OutputPath, ModuleName + SUP_MODULE_SEC + str(Num) + SectionSuffix.get(SectionType))
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_VERSION',
#Ui=VerString,
Ver=BuildNum,
IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
#
# If Section Type is BINARY_FILE_TYPE_UI
#
elif SectionType == BINARY_FILE_TYPE_UI:
InfOverrideUiString = False
if FfsInf.Ui is not None:
StringData = FfsInf.Ui
InfOverrideUiString = True
if InfOverrideUiString:
Num = SecNum
if IsMakefile and StringData == ModuleNameStr:
StringData = "$(MODULE_NAME)"
OutputFile = os.path.join( OutputPath, ModuleName + SUP_MODULE_SEC + str(Num) + SectionSuffix.get(SectionType))
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_USER_INTERFACE',
Ui=StringData, IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
elif FileList != []:
for File in FileList:
Index = Index + 1
Num = '%s.%d' %(SecNum, Index)
OutputFile = os.path.join(OutputPath, ModuleName + SUP_MODULE_SEC + Num + SectionSuffix.get(SectionType))
f = open(File, 'r')
UiString = f.read()
f.close()
if IsMakefile and UiString == ModuleNameStr:
UiString = "$(MODULE_NAME)"
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_USER_INTERFACE',
Ui=UiString, IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
else:
if StringData is not None and len(StringData) > 0:
UiTuple = ('-n', '"' + StringData + '"')
else:
UiTuple = tuple()
            if self.Optional:
                GenFdsGlobalVariable.VerboseLogger("Optional Section doesn't exist!")
                #return a list for consistency with the other section branches
                return [], None
            else:
                EdkLogger.error("GenFds", GENFDS_ERROR, "File: %s misses UI Section value" %InfFileName)
Num = SecNum
if IsMakefile and StringData == ModuleNameStr:
StringData = "$(MODULE_NAME)"
OutputFile = os.path.join( OutputPath, ModuleName + SUP_MODULE_SEC + str(Num) + SectionSuffix.get(SectionType))
GenFdsGlobalVariable.GenerateSection(OutputFile, [], 'EFI_SECTION_USER_INTERFACE',
Ui=StringData, IsMakefile=IsMakefile)
OutputFileList.append(OutputFile)
else:
"""If File List is empty"""
if FileList == [] :
                if self.Optional:
                    GenFdsGlobalVariable.VerboseLogger("Optional Section doesn't exist!")
return [], None
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "Output file for %s section could not be found for %s" % (SectionType, InfFileName))
else:
"""Convert the File to Section file one by one """
for File in FileList:
""" Copy Map file to FFS output path """
Index = Index + 1
Num = '%s.%d' %(SecNum, Index)
OutputFile = os.path.join( OutputPath, ModuleName + SUP_MODULE_SEC + Num + SectionSuffix.get(SectionType))
File = GenFdsGlobalVariable.MacroExtend(File, Dict)
#Get PE Section alignment when align is set to AUTO
if self.Alignment == 'Auto' and (SectionType == BINARY_FILE_TYPE_PE32 or SectionType == BINARY_FILE_TYPE_TE):
ImageObj = PeImageClass (File)
if ImageObj.SectionAlignment < 0x400:
Align = str (ImageObj.SectionAlignment)
elif ImageObj.SectionAlignment < 0x100000:
                        #integer division keeps the alignment string integral under Python 3
                        Align = str (ImageObj.SectionAlignment // 0x400) + 'K'
else:
                        Align = str (ImageObj.SectionAlignment // 0x100000) + 'M'
                if File.endswith('.efi'):
MapFile = File.replace('.efi', '.map')
CopyMapFile = os.path.join(OutputPath, ModuleName + '.map')
if IsMakefile:
if GenFdsGlobalVariable.CopyList == []:
GenFdsGlobalVariable.CopyList = [(MapFile, CopyMapFile)]
else:
GenFdsGlobalVariable.CopyList.append((MapFile, CopyMapFile))
else:
if os.path.exists(MapFile):
if not os.path.exists(CopyMapFile) or \
(os.path.getmtime(MapFile) > os.path.getmtime(CopyMapFile)):
CopyLongFilePath(MapFile, CopyMapFile)
if not NoStrip:
FileBeforeStrip = os.path.join(OutputPath, ModuleName + '.efi')
if IsMakefile:
if GenFdsGlobalVariable.CopyList == []:
GenFdsGlobalVariable.CopyList = [(File, FileBeforeStrip)]
else:
GenFdsGlobalVariable.CopyList.append((File, FileBeforeStrip))
else:
if not os.path.exists(FileBeforeStrip) or \
(os.path.getmtime(File) > os.path.getmtime(FileBeforeStrip)):
CopyLongFilePath(File, FileBeforeStrip)
StrippedFile = os.path.join(OutputPath, ModuleName + '.stripped')
GenFdsGlobalVariable.GenerateFirmwareImage(
StrippedFile,
[File],
Strip=True,
IsMakefile = IsMakefile
)
File = StrippedFile
"""For TE Section call GenFw to generate TE image"""
if SectionType == BINARY_FILE_TYPE_TE:
TeFile = os.path.join( OutputPath, ModuleName + 'Te.raw')
GenFdsGlobalVariable.GenerateFirmwareImage(
TeFile,
[File],
Type='te',
IsMakefile = IsMakefile
)
File = TeFile
"""Call GenSection"""
GenFdsGlobalVariable.GenerateSection(OutputFile,
[File],
Section.Section.SectionType.get (SectionType),
IsMakefile=IsMakefile
)
OutputFileList.append(OutputFile)
return OutputFileList, Align
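# Illustrative mapping produced by the AUTO-alignment branch above, assuming
# integer division:
#   SectionAlignment 0x200    -> '512' (below 0x400: raw byte count)
#   SectionAlignment 0x1000   -> '4K'  (0x1000 // 0x400 == 4)
#   SectionAlignment 0x200000 -> '2M'  (0x200000 // 0x100000 == 2)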
|
|
import unittest
from unittest import mock
from betfairlightweight.resources.baseresource import BaseResource
from betfairlightweight.streaming.cache import (
OrderBookCache,
OrderBookRunner,
UnmatchedOrder,
MarketBookCache,
RunnerBookCache,
Available,
RaceCache,
)
from tests.tools import create_mock_json
class TestAvailable(unittest.TestCase):
def setUp(self):
self.prices = [[1, 1.02, 34.45], [0, 1.01, 12]]
self.available = Available(self.prices, 2)
def test_init(self):
self.assertEqual(
self.available.order_book,
{
0: [0, 1.01, 12, {"price": 1.01, "size": 12}],
1: [1, 1.02, 34.45, {"price": 1.02, "size": 34.45}],
},
)
self.assertEqual(self.available.deletion_select, 2)
self.assertFalse(self.available.reverse)
self.assertEqual(
self.available.serialised,
[{"price": 1.01, "size": 12}, {"price": 1.02, "size": 34.45}],
)
def test_serialise(self):
# [price, size]
current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]]
available = Available(current, 1)
available.serialise()
self.assertEqual(
available.serialised,
[
{"price": 1.02, "size": 1157.21},
{"price": 13, "size": 28.01},
{"price": 27, "size": 0.95},
],
)
# [position, price, size]
current = [[2, 27, 0.95], [1, 13, 28.01], [0, 1.02, 1157.21]]
available = Available(current, 2)
available.serialise()
self.assertEqual(
available.serialised,
[
{"price": 1.02, "size": 1157.21},
{"price": 13, "size": 28.01},
{"price": 27, "size": 0.95},
],
)
@mock.patch("betfairlightweight.streaming.cache.Available.serialise")
def test_clear(self, mock_serialise):
self.available.clear()
assert self.available.order_book == {}
mock_serialise.assert_called()
def test__sort_order_book(self):
self.available.order_book = {1.01: [2], 100: [3], 13: [5]}
self.available._sort_order_book()
self.assertEqual(list(self.available.order_book.keys()), [1.01, 13, 100])
# reverse
self.available.reverse = True
self.available._sort_order_book()
self.assertEqual(list(self.available.order_book.keys()), [100, 13, 1.01])
@mock.patch("betfairlightweight.streaming.cache.Available.serialise")
def test_update(self, mock_serialise):
book_update = [[27, 2]] # [price, size]
current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]]
expected = {
27: [27, 2, {"price": 27, "size": 2}],
13: [13, 28.01, {"price": 13, "size": 28.01}],
1.02: [1.02, 1157.21, {"price": 1.02, "size": 1157.21}],
}
available = Available(current, 1)
available.update(book_update, True)
mock_serialise.assert_called()
self.assertEqual(available.order_book, expected)
@mock.patch("betfairlightweight.streaming.cache.Available._sort_order_book")
@mock.patch("betfairlightweight.streaming.cache.Available.serialise")
def test_update_false(self, mock_serialise, mock__sort_order_book):
book_update = [[1, 1.02, 2]]
expected = {
0: [0, 1.01, 12, {"price": 1.01, "size": 12}],
1: [1, 1.02, 2, {"price": 1.02, "size": 2}],
}
self.available.update(book_update, False)
self.assertEqual(self.available.order_book, expected)
mock_serialise.assert_not_called()
mock__sort_order_book.assert_not_called()
@mock.patch("betfairlightweight.streaming.cache.Available._sort_order_book")
@mock.patch("betfairlightweight.streaming.cache.Available.serialise")
def test_update_new(self, mock_serialise, mock__sort_order_book):
book_update = [[30, 6.9]] # [price, size]
current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]]
expected = {
27: [27, 0.95, {"price": 27, "size": 0.95}],
13: [13, 28.01, {"price": 13, "size": 28.01}],
1.02: [1.02, 1157.21, {"price": 1.02, "size": 1157.21}],
30: [30, 6.9, {"price": 30, "size": 6.9}],
}
available = Available(current, 1)
available.update(book_update, True)
mock_serialise.assert_called()
mock__sort_order_book.assert_called()
self.assertEqual(available.order_book, expected)
@mock.patch("betfairlightweight.streaming.cache.Available.serialise")
def test_update_del(self, mock_serialise):
book_update = [[27, 0]] # [price, size]
current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]]
expected = {
13: [13, 28.01, {"price": 13, "size": 28.01}],
1.02: [1.02, 1157.21, {"price": 1.02, "size": 1157.21}],
}
available = Available(current, 1)
available.update(book_update, True)
mock_serialise.assert_called()
self.assertEqual(available.order_book, expected)
@mock.patch("betfairlightweight.streaming.cache.Available._sort_order_book")
@mock.patch("betfairlightweight.streaming.cache.Available.serialise")
def test_update_available_new_update(self, mock_serialise, mock__sort_order_book):
# [price, size]
book_update = [[30, 6.9]]
current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]]
expected = {
27: [27, 0.95, {"price": 27, "size": 0.95}],
13: [13, 28.01, {"price": 13, "size": 28.01}],
1.02: [1.02, 1157.21, {"price": 1.02, "size": 1157.21}],
30: [30, 6.9, {"price": 30, "size": 6.9}],
}
available = Available(current, 1)
available.update(book_update, True)
assert available.order_book == expected
book_update = [[30, 6.9], [1.01, 12]]
current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]]
expected = {
27: [27, 0.95, {"price": 27, "size": 0.95}],
13: [13, 28.01, {"price": 13, "size": 28.01}],
1.02: [1.02, 1157.21, {"price": 1.02, "size": 1157.21}],
1.01: [1.01, 12, {"price": 1.01, "size": 12}],
30: [30, 6.9, {"price": 30, "size": 6.9}],
}
available = Available(current, 1)
available.update(book_update, True)
assert available.order_book == expected
# [position, price, size]
book_update = [[0, 36, 0.57]]
current = []
expected = {0: [0, 36, 0.57, {"price": 36, "size": 0.57}]}
available = Available(current, 2)
available.update(book_update, True)
assert available.order_book == expected
@mock.patch("betfairlightweight.streaming.cache.Available._sort_order_book")
@mock.patch("betfairlightweight.streaming.cache.Available.serialise")
def test_update_available_new_replace(self, mock_serialise, mock__sort_order_book):
# [price, size]
book_update = [[27, 6.9]]
current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]]
expected = {
27: [27, 6.9, {"price": 27, "size": 6.9}],
13: [13, 28.01, {"price": 13, "size": 28.01}],
1.02: [1.02, 1157.21, {"price": 1.02, "size": 1157.21}],
}
available = Available(current, 1)
available.update(book_update, True)
assert available.order_book == expected
# [position, price, size]
book_update = [[0, 36, 0.57]]
current = [[0, 36, 10.57], [1, 38, 3.57]]
expected = {
0: [0, 36, 0.57, {"price": 36, "size": 0.57}],
1: [1, 38, 3.57, {"price": 38, "size": 3.57}],
}
available = Available(current, 2)
available.update(book_update, True)
assert available.order_book == expected
# tests handling of betfair bug, http://forum.bdp.betfair.com/showthread.php?t=3351
book_update = [[2, 0, 0], [1, 1.01, 9835.74], [0, 1.02, 1126.22]]
current = [[1, 1.01, 9835.74], [0, 1.02, 1126.22]]
expected = {
0: [0, 1.02, 1126.22, {"price": 1.02, "size": 1126.22}],
1: [1, 1.01, 9835.74, {"price": 1.01, "size": 9835.74}],
}
available = Available(current, 2)
available.update(book_update, True)
assert available.order_book == expected
@mock.patch("betfairlightweight.streaming.cache.Available._sort_order_book")
@mock.patch("betfairlightweight.streaming.cache.Available.serialise")
def test_update_available_new_remove(self, mock_serialise, mock__sort_order_book):
# [price, size]
book_update = [[27, 0]]
current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]]
expected = {
1.02: [1.02, 1157.21, {"price": 1.02, "size": 1157.21}],
13: [13, 28.01, {"price": 13, "size": 28.01}],
}
available = Available(current, 1)
available.update(book_update, True)
assert available.order_book == expected
# [position, price, size]
book_update = [[0, 36, 0], [1, 38, 0], [0, 38, 3.57]]
current = [[0, 36, 10.57], [1, 38, 3.57]]
expected = {0: [0, 38, 3.57, {"price": 38, "size": 3.57}]}
available = Available(current, 2)
available.update(book_update, True)
assert available.order_book == expected
@mock.patch("betfairlightweight.streaming.cache.Available._sort_order_book")
@mock.patch("betfairlightweight.streaming.cache.Available.serialise")
def test_refresh(self, mock_serialise, mock__sort_order_book):
self.available.refresh()
mock_serialise.assert_called()
mock__sort_order_book.assert_called()
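# Illustrative summary of the two ladder layouts exercised above: with
# deletion_select=1 the order_book is keyed by price ([price, size]
# updates), while with deletion_select=2 it is keyed by ladder position
# ([position, price, size] updates).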
class TestMarketBookCache(unittest.TestCase):
def setUp(self):
self.market_book_cache = MarketBookCache("1.2345", 12345, True, False, False)
def test_init(self):
self.assertTrue(self.market_book_cache.active)
self.assertEqual(self.market_book_cache.market_id, "1.2345")
self.assertEqual(self.market_book_cache.publish_time, 12345)
self.assertTrue(self.market_book_cache.lightweight)
self.assertFalse(self.market_book_cache.calculate_market_tv)
self.assertFalse(self.market_book_cache.cumulative_runner_tv)
self.assertEqual(self.market_book_cache.total_matched, 0)
self.assertEqual(self.market_book_cache.market_definition, {})
self.assertIsNone(self.market_book_cache._market_definition_resource)
self.assertIsNone(self.market_book_cache._definition_bet_delay)
self.assertIsNone(self.market_book_cache._definition_version)
self.assertIsNone(self.market_book_cache._definition_complete)
self.assertIsNone(self.market_book_cache._definition_runners_voidable)
self.assertIsNone(self.market_book_cache._definition_status)
self.assertIsNone(self.market_book_cache._definition_bsp_reconciled)
self.assertIsNone(self.market_book_cache._definition_cross_matching)
self.assertIsNone(self.market_book_cache._definition_in_play)
self.assertIsNone(self.market_book_cache._definition_number_of_winners)
self.assertIsNone(self.market_book_cache._definition_number_of_active_runners)
self.assertIsNone(self.market_book_cache._definition_price_ladder_definition)
self.assertIsNone(self.market_book_cache._definition_key_line_description)
self.assertIsNone(self.market_book_cache.streaming_update)
self.assertEqual(self.market_book_cache.runners, [])
self.assertEqual(self.market_book_cache.runner_dict, {})
self.assertEqual(self.market_book_cache._number_of_runners, 0)
@mock.patch("betfairlightweight.streaming.cache.MarketBookCache.strip_datetime")
def test_update_cache_md(self, mock_strip_datetime):
publish_time = mock.Mock()
market_change = create_mock_json("tests/resources/streaming_mcm_UPDATE_md.json")
book_data = market_change.json().get("mc")
for book in book_data:
self.market_book_cache.update_cache(book, publish_time, True)
self.assertTrue(self.market_book_cache.active)
assert self.market_book_cache.market_definition == book.get(
"marketDefinition"
)
self.assertEqual(self.market_book_cache.streaming_update, book)
@mock.patch("betfairlightweight.streaming.cache.MarketBookCache.strip_datetime")
def test_update_cache_tv(self, mock_strip_datetime):
publish_time = mock.Mock()
market_change = create_mock_json("tests/resources/streaming_mcm_UPDATE_tv.json")
book_data = market_change.json().get("mc")
for book in book_data:
self.market_book_cache.update_cache(book, publish_time, True)
self.assertTrue(self.market_book_cache.active)
assert self.market_book_cache.total_matched == book.get("tv")
self.assertEqual(self.market_book_cache.streaming_update, book)
def test_update_multiple_rc(self):
# update data with multiple rc entries for the same selection
data = {
"rc": [
{"atb": [[1.01, 200]], "id": 13536143},
{"atl": [[1000.0, 200]], "id": 13536143},
]
}
market_book_cache = MarketBookCache("1.123", 123, True, False, False)
market_book_cache.update_cache(data, 123, True)
self.assertTrue(market_book_cache.active)
assert len(market_book_cache.runners) == len(market_book_cache.runner_dict)
# @mock.patch('betfairlightweight.resources.streamingresources.MarketBookCache.strip_datetime')
# def test_update_cache_rc(self, mock_strip_datetime):
# publish_time = mock.Mock()
# market_change = create_mock_json('tests/resources/streaming_mcm_UPDATE.json')
# book_data = market_change.json().get('mc')
#
# for book in book_data:
# self.market_book_cache.update_cache(book, publish_time)
# mock_strip_datetime.assert_called_with(publish_time)
#
# assert self.market_book_cache.total_matched == book.get('tv')
def test_refresh_cache(self):
mock_runner = mock.Mock()
self.market_book_cache.runners = [mock_runner]
self.market_book_cache.refresh_cache()
mock_runner.traded.refresh.assert_called()
mock_runner.available_to_back.refresh.assert_called()
mock_runner.available_to_lay.refresh.assert_called()
mock_runner.best_available_to_back.refresh.assert_called()
mock_runner.best_available_to_lay.refresh.assert_called()
mock_runner.best_display_available_to_back.refresh.assert_called()
mock_runner.best_display_available_to_lay.refresh.assert_called()
mock_runner.starting_price_back.refresh.assert_called()
mock_runner.starting_price_lay.refresh.assert_called()
mock_runner.serialise.assert_called()
@mock.patch(
"betfairlightweight.streaming.cache.MarketBookCache.serialise",
new_callable=mock.PropertyMock,
return_value={},
)
@mock.patch("betfairlightweight.streaming.cache.MarketDefinition")
@mock.patch("betfairlightweight.streaming.cache.MarketBook")
def test_create_resource(
self, mock_market_book, mock_market_definition, mock_serialise
):
# lightweight
market_book = self.market_book_cache.create_resource(1234, snap=True)
assert market_book == {
"streaming_unique_id": 1234,
"streaming_snap": True,
}
assert market_book == mock_serialise()
# not lightweight
self.market_book_cache.lightweight = False
market_book = self.market_book_cache.create_resource(1234, snap=True)
assert market_book == mock_market_book()
@mock.patch(
"betfairlightweight.streaming.cache.MarketBookCache.serialise",
new_callable=mock.PropertyMock,
return_value={},
)
@mock.patch("betfairlightweight.streaming.cache.MarketDefinition")
@mock.patch("betfairlightweight.streaming.cache.MarketBook")
def test_create_resource_snap(self, *_):
market_book = self.market_book_cache.create_resource(1234, True)
assert market_book == {
"streaming_unique_id": 1234,
"streaming_snap": True,
}
@mock.patch("betfairlightweight.streaming.cache.MarketBook")
def test_create_resource_resource(self, mock_market_book):
self.market_book_cache.lightweight = False
self.assertEqual(
self.market_book_cache.create_resource(1, False), mock_market_book()
)
# todo
@mock.patch("betfairlightweight.streaming.cache.MarketDefinition")
@mock.patch("betfairlightweight.streaming.cache.MarketBookCache._add_new_runner")
def test__process_market_definition(
self, mock__add_new_runner, mock_market_definition_cls
):
self.market_book_cache.lightweight = False
mock_market_definition = {"runners": [{"id": 12}]}
self.market_book_cache._process_market_definition(mock_market_definition)
self.assertEqual(
self.market_book_cache.market_definition, mock_market_definition
)
self.assertEqual(self.market_book_cache.runner_dict, {})
mock__add_new_runner.assert_called_with(id=12, hc=0, definition={"id": 12})
mock__add_new_runner().serialise.assert_called()
mock_market_definition = {"runners": [{"id": 34, "hc": 1}]}
self.market_book_cache._process_market_definition(mock_market_definition)
self.assertEqual(
self.market_book_cache.market_definition, mock_market_definition
)
self.assertEqual(self.market_book_cache.runner_dict, {})
mock__add_new_runner.assert_called_with(
id=34, hc=1, definition={"id": 34, "hc": 1}
)
mock__add_new_runner().serialise.assert_called()
mock_market_definition_cls.assert_called_with(**mock_market_definition)
self.assertEqual(
self.market_book_cache._market_definition_resource,
mock_market_definition_cls(),
)
@mock.patch("betfairlightweight.streaming.cache.MarketDefinition")
def test__process_market_definition_caches(self, mock_market_definition_cls):
mock_market_definition = {
"betDelay": 1,
"version": 234,
"complete": True,
"runnersVoidable": False,
"status": "ACTIVE",
"bspReconciled": True,
"crossMatching": False,
"inPlay": True,
"numberOfWinners": 5,
"numberOfActiveRunners": 6,
"priceLadderDefinition": "",
}
self.market_book_cache._process_market_definition(mock_market_definition)
self.assertEqual(self.market_book_cache._definition_bet_delay, 1)
self.assertEqual(self.market_book_cache._definition_version, 234)
self.assertEqual(self.market_book_cache._definition_complete, True)
self.assertEqual(self.market_book_cache._definition_runners_voidable, False)
self.assertEqual(self.market_book_cache._definition_status, "ACTIVE")
self.assertEqual(self.market_book_cache._definition_bsp_reconciled, True)
self.assertEqual(self.market_book_cache._definition_cross_matching, False)
self.assertEqual(self.market_book_cache._definition_in_play, True)
self.assertEqual(self.market_book_cache._definition_number_of_winners, 5)
self.assertEqual(self.market_book_cache._definition_number_of_active_runners, 6)
self.assertEqual(self.market_book_cache._definition_price_ladder_definition, "")
self.assertEqual(self.market_book_cache._definition_key_line_description, None)
@mock.patch("betfairlightweight.streaming.cache.RunnerBookCache")
def test__add_new_runner(self, mock_runner_book_cache):
self.assertEqual(self.market_book_cache.runner_dict, {})
self.market_book_cache._add_new_runner(id=1, hc=2, definition={1: 2})
mock_runner_book_cache.assert_called_with(
lightweight=True, id=1, hc=2, definition={1: 2}
)
self.assertEqual(
self.market_book_cache.runner_dict,
{
(
mock_runner_book_cache().selection_id,
mock_runner_book_cache().handicap,
): mock_runner_book_cache()
},
)
self.assertEqual(self.market_book_cache.runners, [mock_runner_book_cache()])
self.assertEqual(self.market_book_cache._number_of_runners, 1)
def test_closed(self):
self.assertFalse(self.market_book_cache.closed)
self.market_book_cache._definition_status = "CLOSED"
self.assertTrue(self.market_book_cache.closed)
class TestMarketBookCacheCumulative(unittest.TestCase):
def setUp(self):
self.market_book_cache = MarketBookCache("1.2345", 12345, True, False, True)
def test_update_cache_runner_tv(self):
market_change = {"rc": [{"tv": 123, "id": 13536143}]}
self.market_book_cache.update_cache(market_change, 123, True)
self.assertEqual(self.market_book_cache.runners[0].total_matched, 123)
market_change = {"rc": [{"tv": 123, "trd": [], "id": 13536143}]}
self.market_book_cache.update_cache(market_change, 123, True)
self.assertEqual(self.market_book_cache.runners[0].total_matched, 0)
market_change = {"rc": [{"tv": 123, "trd": [[12, 2]], "id": 13536143}]}
self.market_book_cache.update_cache(market_change, 123, True)
self.assertEqual(self.market_book_cache.runners[0].total_matched, 2)
class TestMarketBookCacheCalculate(unittest.TestCase):
def setUp(self):
self.market_book_cache = MarketBookCache("1.2345", 12345, True, True, False)
def test_update_cache_market_tv(self):
market_change = {"rc": [{"tv": 123, "id": 13536143}]}
self.market_book_cache.update_cache(market_change, 123, True)
self.assertEqual(self.market_book_cache.total_matched, 0)
market_change = {"rc": [{"tv": 123, "trd": [[12, 2]], "id": 13536143}]}
self.market_book_cache.update_cache(market_change, 123, True)
self.assertEqual(self.market_book_cache.total_matched, 2)
class TestRunnerBookCache(unittest.TestCase):
def setUp(self):
self.runner_book = RunnerBookCache(lightweight=True, **{"id": 123})
def test_init(self):
self.assertEqual(self.runner_book.selection_id, 123)
self.assertTrue(self.runner_book.lightweight)
self.assertEqual(self.runner_book.serialised, {})
self.assertIsNone(self.runner_book.resource)
def test_update_definition(self):
definition = {
"status": "ACTIVE",
"bsp": 12,
"adjustmentFactor": 23.1,
}
self.runner_book.update_definition(definition)
self.assertEqual(self.runner_book.definition, definition)
self.assertEqual(self.runner_book._definition_status, "ACTIVE")
self.assertEqual(self.runner_book._definition_bsp, 12)
self.assertEqual(self.runner_book._definition_adjustment_factor, 23.1)
self.assertIsNone(self.runner_book._definition_removal_date)
def test_update_traded(self):
self.mock_traded = mock.Mock()
self.runner_book.traded = self.mock_traded
self.runner_book.update_traded([], True)
self.mock_traded.clear.assert_called_with()
self.runner_book.update_traded([1, 2], True)
self.mock_traded.update.assert_called_with([1, 2], True)
self.runner_book.update_traded([1, 2], False)
self.mock_traded.update.assert_called_with([1, 2], False)
def test_serialise_back(self):
mock_available_to_back = mock.Mock()
mock_available_to_back.order_book = True
mock_best_available_to_back = mock.Mock()
mock_best_available_to_back.prices = True
mock_best_display_available_to_back = mock.Mock()
mock_best_display_available_to_back.order_book = True
self.runner_book.available_to_back = mock_available_to_back
assert (
self.runner_book.serialise_available_to_back()
== mock_available_to_back.serialised
)
mock_available_to_back.order_book = False
self.runner_book.best_available_to_back = mock_best_available_to_back
assert (
self.runner_book.serialise_available_to_back()
== mock_best_available_to_back.serialised
)
mock_best_available_to_back.order_book = False
self.runner_book.best_display_available_to_back = (
mock_best_display_available_to_back
)
assert (
self.runner_book.serialise_available_to_back()
== mock_best_display_available_to_back.serialised
)
def test_serialise_lay(self):
mock_available_to_lay = mock.Mock()
mock_available_to_lay.order_book = True
mock_best_available_to_lay = mock.Mock()
mock_best_available_to_lay.prices = True
mock_best_display_available_to_lay = mock.Mock()
mock_best_display_available_to_lay.order_book = True
self.runner_book.available_to_lay = mock_available_to_lay
assert (
self.runner_book.serialise_available_to_lay()
== mock_available_to_lay.serialised
)
mock_available_to_lay.order_book = False
self.runner_book.best_available_to_lay = mock_best_available_to_lay
assert (
self.runner_book.serialise_available_to_lay()
== mock_best_available_to_lay.serialised
)
mock_best_available_to_lay.order_book = False
self.runner_book.best_display_available_to_lay = (
mock_best_display_available_to_lay
)
assert (
self.runner_book.serialise_available_to_lay()
== mock_best_display_available_to_lay.serialised
)
def test_serialise(self):
self.runner_book._definition_status = "ACTIVE"
self.runner_book._definition_bsp = 12
self.runner_book._definition_adjustment_factor = 23.1
self.runner_book.serialise()
self.assertEqual(
self.runner_book.serialised,
{
"adjustmentFactor": 23.1,
"ex": {"availableToBack": [], "availableToLay": [], "tradedVolume": []},
"handicap": 0,
"lastPriceTraded": None,
"removalDate": None,
"selectionId": 123,
"sp": {
"actualSP": 12,
"backStakeTaken": [],
"farPrice": None,
"layLiabilityTaken": [],
"nearPrice": None,
},
"status": "ACTIVE",
"totalMatched": 0,
},
)
def test_empty_serialise(self):
self.runner_book.serialise()
ex = self.runner_book.serialised["ex"]
# all empty lists
assert all(not ex[a] for a in ex.keys())
sp = self.runner_book.serialised["sp"]
# all 'None' or empty lists
assert all(not sp[a] for a in sp.keys())
@mock.patch("betfairlightweight.streaming.cache.RunnerBook")
def test_serialise_resource(self, mock_runner_book):
self.runner_book.lightweight = False
self.runner_book.serialise()
mock_runner_book.assert_called_with(**self.runner_book.serialised)
self.assertEqual(self.runner_book.resource, mock_runner_book())
class TestOrderBookCache(unittest.TestCase):
def setUp(self):
self.order_book_cache = OrderBookCache("1.123", 123, True)
self.runner = mock.Mock()
self.runner.selection_id = 10895629
self.runner.handicap = 0
self.runner.serialise_orders = mock.Mock(return_value=[])
self.runner.unmatched_orders = [1]
self.order_book_cache.runners = {(10895629, 0): self.runner}
def test_init(self):
self.assertTrue(self.order_book_cache.active)
self.assertEqual(self.order_book_cache.market_id, "1.123")
self.assertEqual(self.order_book_cache.publish_time, 123)
self.assertTrue(self.order_book_cache.lightweight)
def test_full_image(self):
self.order_book_cache.runners = {}
mock_response = create_mock_json(
"tests/resources/streaming_ocm_FULL_IMAGE.json"
)
for order_book in mock_response.json().get("oc"):
self.order_book_cache.update_cache(order_book, 1234)
self.assertEqual(self.order_book_cache.streaming_update, order_book)
self.assertEqual(len(self.order_book_cache.runners), 5)
for k, v in self.order_book_cache.runners.items():
self.assertEqual(len(v.unmatched_orders), 1)
def test_update_cache(self):
mock_response = create_mock_json("tests/resources/streaming_ocm_UPDATE.json")
for order_book in mock_response.json().get("oc"):
self.order_book_cache.update_cache(order_book, 1234)
self.assertEqual(self.order_book_cache.streaming_update, order_book)
for order_changes in order_book.get("orc"):
# self.runner.matched_lays.update.assert_called_with(order_changes.get('ml', []))
# self.runner.matched_backs.update.assert_called_with(order_book.get('mb', []))
self.runner.update_unmatched.assert_called_with(
1234, order_changes.get("uo", [])
)
@mock.patch("betfairlightweight.streaming.cache.OrderBookRunner")
def test_update_cache_new(self, mock_order_book_runner):
self.order_book_cache.runners = {(108956, 0): self.runner}
mock_response = create_mock_json("tests/resources/streaming_ocm_UPDATE.json")
for order_book in mock_response.json().get("oc"):
self.order_book_cache.update_cache(order_book, 1234)
self.assertEqual(self.order_book_cache.streaming_update, order_book)
for order_changes in order_book.get("orc"):
mock_order_book_runner.assert_called_with(
self.order_book_cache.market_id, 1234, **order_changes
)
def test_update_cache_closed(self):
mock_response = create_mock_json("tests/resources/streaming_ocm_SUB_IMAGE.json")
for order_book in mock_response.json().get("oc"):
self.order_book_cache.update_cache(order_book, 1234)
self.assertEqual(self.order_book_cache.streaming_update, order_book)
self.assertTrue(self.order_book_cache.closed)
@mock.patch(
"betfairlightweight.streaming.cache.OrderBookCache.serialise",
return_value={},
)
@mock.patch("betfairlightweight.streaming.cache.CurrentOrders")
def test_create_resource(self, mock_current_orders, mock_serialise):
# lightweight
current_orders = self.order_book_cache.create_resource(123, True)
assert current_orders == mock_serialise()
assert current_orders == {
"streaming_unique_id": 123,
"streaming_snap": True,
}
# not lightweight
self.order_book_cache.lightweight = False
current_orders = self.order_book_cache.create_resource(123, True)
assert current_orders == mock_current_orders()
def test_serialise(self):
mock_runner_one = mock.Mock()
mock_runner_one.serialise_orders.return_value = [1]
mock_runner_one.serialise_matches.return_value = 6
mock_runner_two = mock.Mock()
mock_runner_two.serialise_orders.return_value = [2, 3]
mock_runner_two.serialise_matches.return_value = 4
self.order_book_cache.runners = {
(123, 0): mock_runner_one,
(123, 1): mock_runner_two,
}
self.assertEqual(
self.order_book_cache.serialise(None),
{
"currentOrders": [1, 2, 3],
"matches": [6, 4],
"moreAvailable": False,
"streaming_update": None,
},
)
class TestOrderBookRunner(unittest.TestCase):
def setUp(self):
uo = [
{
"id": 1,
"p": "a",
"s": "a",
"side": "L",
"ot": "L",
"pd": "a",
"sm": "a",
"sr": "a",
"sl": "a",
"sc": "a",
"sv": "a",
"rfo": "a",
"rfs": "a",
"status": "E",
},
{
"id": 2,
"p": "b",
"s": "a",
"side": "L",
"ot": "L",
"pd": "a",
"sm": "a",
"sr": "a",
"sl": "a",
"sc": "a",
"sv": "a",
"rfo": "a",
"rfs": "a",
"status": "EC",
},
]
self.order_book_runner = OrderBookRunner(
"1.123", 123, **{"id": 1, "ml": [], "mb": [], "uo": uo}
)
def test_init(self):
self.assertEqual(self.order_book_runner.market_id, "1.123")
self.assertEqual(self.order_book_runner.selection_id, 1)
self.assertEqual(self.order_book_runner.handicap, 0)
self.assertIsNone(self.order_book_runner.full_image)
self.assertIsNone(self.order_book_runner.strategy_matches)
self.assertEqual(len(self.order_book_runner.unmatched_orders), 2)
def test_update_unmatched(self):
unmatched_orders = [
{
"id": 2,
"p": "b",
"s": "a",
"side": "L",
"ot": "L",
"pd": "a",
"sm": "a",
"sr": "a",
"sl": "a",
"sc": "a",
"sv": "a",
"rfo": "a",
"rfs": "a",
"status": "EC",
}
]
self.order_book_runner.update_unmatched(123, unmatched_orders)
self.assertEqual(self.order_book_runner.unmatched_orders[1].publish_time, 123)
self.assertEqual(self.order_book_runner.unmatched_orders[1].status, "E")
self.assertEqual(self.order_book_runner.unmatched_orders[2].status, "EC")
self.assertEqual(
self.order_book_runner.unmatched_orders[2].serialised,
{
"averagePriceMatched": 0.0,
"betId": 2,
"bspLiability": None,
"cancelledDate": None,
"customerOrderRef": "a",
"customerStrategyRef": "a",
"handicap": 0,
"lapseStatusReasonCode": None,
"lapsedDate": None,
"marketId": "1.123",
"matchedDate": None,
"orderType": "LIMIT",
"persistenceType": None,
"placedDate": None,
"priceSize": {"price": "b", "size": "a"},
"regulatorAuthCode": None,
"regulatorCode": None,
"selectionId": 1,
"side": "LAY",
"sizeCancelled": "a",
"sizeLapsed": "a",
"sizeMatched": "a",
"sizeRemaining": "a",
"sizeVoided": "a",
"status": "EXECUTION_COMPLETE",
},
)
def test_serialise_orders(self):
mock_order = mock.Mock(id=123, publish_time=123)
mock_order_two = mock.Mock(id=456, publish_time=456)
unmatched_orders = {
mock_order.id: mock_order,
mock_order_two.id: mock_order_two,
}
self.order_book_runner.unmatched_orders = unmatched_orders
def mock_serialise(*args, **kwargs):
unmatched_orders[789] = "SYM"
return
mock_order_two.serialise = mock_serialise
        assert len(self.order_book_runner.serialise_orders(None)) == 2
        assert len(self.order_book_runner.serialise_orders(123)) == 1
def test_serialise_matches(self):
self.assertEqual(
self.order_book_runner.serialise_matches(),
{"matchedBacks": [], "matchedLays": [], "selectionId": 1},
)
class TestUnmatchedOrder(unittest.TestCase):
def setUp(self):
order = {
"id": 1,
"p": 2,
"s": 3,
"side": "L",
"status": "E",
"pt": "L",
"ot": "L",
"pd": 8,
"sm": 9,
"sr": 10,
"sl": 11,
"sc": 12,
"sv": 13,
"rfo": 14,
"rfs": 15,
"ld": 16,
"lsrc": 17,
"error": "test",
"md": 4,
"cd": 18,
}
self.unmatched_order = UnmatchedOrder(123, **order)
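    # Abbreviated stream fields, as exercised by the asserts below: p=price,
    # s=size, pt=persistence type, ot=order type, pd=placed date,
    # sm=size matched, sr=size remaining, sl=size lapsed, sc=size cancelled,
    # sv=size voided, rfo=reference order, rfs=reference strategy,
    # ld=lapsed date, lsrc=lapse status reason code, md=matched date,
    # cd=cancelled date.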
def test_init(self):
assert self.unmatched_order.publish_time == 123
assert self.unmatched_order.bet_id == 1
assert self.unmatched_order.price == 2
assert self.unmatched_order.size == 3
assert self.unmatched_order.side == "L"
assert self.unmatched_order.status == "E"
assert self.unmatched_order.persistence_type == "L"
assert self.unmatched_order.order_type == "L"
assert self.unmatched_order.placed_date == BaseResource.strip_datetime(8)
assert self.unmatched_order.size_matched == 9
assert self.unmatched_order.size_remaining == 10
assert self.unmatched_order.size_lapsed == 11
assert self.unmatched_order.size_cancelled == 12
assert self.unmatched_order.size_voided == 13
assert self.unmatched_order.reference_order == 14
assert self.unmatched_order.reference_strategy == 15
assert self.unmatched_order.lapsed_date == BaseResource.strip_datetime(16)
assert self.unmatched_order.lapse_status_reason_code == 17
assert self.unmatched_order.cancelled_date == BaseResource.strip_datetime(18)
assert self.unmatched_order.serialised == {}
def test_serialise(self):
self.unmatched_order.serialise("1.23", 12345, 0)
self.assertEqual(
self.unmatched_order.serialised,
{
"sizeLapsed": 11,
"persistenceType": "LAPSE",
"sizeRemaining": 10,
"placedDate": "1970-01-01T00:00:00.008000Z",
"sizeVoided": 13,
"sizeCancelled": 12,
"betId": 1,
"customerOrderRef": 14,
"orderType": "LIMIT",
"marketId": "1.23",
"side": "LAY",
"selectionId": 12345,
"bspLiability": None,
"sizeMatched": 9,
"handicap": 0.0,
"averagePriceMatched": 0.0,
"status": "EXECUTABLE",
"customerStrategyRef": 15,
"regulatorCode": None,
"regulatorAuthCode": None,
"priceSize": {"price": 2, "size": 3},
"matchedDate": "1970-01-01T00:00:00.004000Z",
"lapsedDate": "1970-01-01T00:00:00.016000Z",
"lapseStatusReasonCode": 17,
"cancelledDate": "1970-01-01T00:00:00.018000Z",
},
)
class TestRaceCache(unittest.TestCase):
def setUp(self):
self.market_id = "1.12"
self.publish_time = 123
self.race_id = "456"
self.race_cache = RaceCache(
self.market_id, self.publish_time, self.race_id, True
)
def test_init(self):
self.assertTrue(self.race_cache.active)
self.assertEqual(self.race_cache.market_id, self.market_id)
self.assertEqual(self.race_cache.publish_time, self.publish_time)
self.assertEqual(self.race_cache.race_id, self.race_id)
self.assertTrue(self.race_cache.lightweight)
self.assertIsNone(self.race_cache.rpc)
self.assertEqual(self.race_cache.rrc, {})
self.assertIsNone(self.race_cache.streaming_update)
def test_update_rpm(self):
update = {"rpc": 1234}
publish_time = 1518626764
self.race_cache.update_cache(update, publish_time)
assert self.race_cache._datetime_updated is not None
assert self.race_cache.publish_time == publish_time
assert self.race_cache.rpc == 1234
def test_update_rrc(self):
update = {"rrc": [{"id": 1}]}
publish_time = 1518626764
self.race_cache.update_cache(update, publish_time)
assert self.race_cache._datetime_updated is not None
assert self.race_cache.publish_time == publish_time
assert self.race_cache.rrc == {1: {"id": 1}}
@mock.patch("betfairlightweight.streaming.cache.RaceCache.serialise")
def test_create_resource_lightweight(self, mock_serialise):
assert self.race_cache.create_resource(12, True) == mock_serialise
# @mock.patch('betfairlightweight.streaming.cache.Race')
# @mock.patch('betfairlightweight.streaming.cache.RaceCache.serialise')
# def test_create_resource(self, mock_serialise, mock_race):
# # print(self.race_cache.create_resource(12, {}, False))
# self.assertIsInstance(self.race_cache.create_resource(12, {}, False), mock_race)
def test_serialise(self):
self.race_cache.rpc = {"test": 123}
self.race_cache.rrc = {1: {"test": "me"}}
self.race_cache.publish_time = 12
assert self.race_cache.serialise == {
"pt": 12,
"mid": self.market_id,
"id": self.race_id,
"rpc": {"test": 123},
"rrc": [{"test": "me"}],
"streaming_update": None,
}
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from api.models import (Photo, AlbumAuto, AlbumUser, Face, Person, AlbumDate,
                        AlbumPlace, AlbumThing, LongRunningJob,
                        get_or_create_person)
from django.db.models import Count
from django.db.models import Q
from django.db.models import Prefetch
from django.http import HttpResponse
from django.http import HttpResponseForbidden
from rest_framework import viewsets
from api.serializers import PhotoSerializer
from api.serializers import PhotoEditSerializer
from api.serializers import PhotoHashListSerializer
from api.serializers import PhotoSuperSimpleSerializer
from api.serializers import PhotoSimpleSerializer
from api.serializers import FaceSerializer
from api.serializers import FaceListSerializer
from api.serializers import PersonSerializer
from api.serializers import AlbumAutoSerializer
from api.serializers import AlbumPersonSerializer
from api.serializers import AlbumDateSerializer
from api.serializers import AlbumThingSerializer
from api.serializers import AlbumPlaceSerializer
from api.serializers import AlbumUserSerializer
from api.serializers import AlbumUserEditSerializer
from api.serializers import AlbumAutoListSerializer
from api.serializers import AlbumPersonListSerializer
from api.serializers import AlbumDateListSerializer
from api.serializers import AlbumDateListWithPhotoHashSerializer
from api.serializers import AlbumThingListSerializer
from api.serializers import AlbumPlaceListSerializer
from api.serializers import AlbumUserListSerializer
from api.serializers import LongRunningJobSerializer
from api.serializers_serpy import AlbumDateListWithPhotoHashSerializer as AlbumDateListWithPhotoHashSerializerSerpy
from api.serializers_serpy import PhotoSuperSimpleSerializer as PhotoSuperSimpleSerializerSerpy
from api.face_classify import train_faces, cluster_faces
from api.social_graph import build_social_graph, build_ego_graph
from api.drf_optimize import OptimizeRelatedModelViewSetMetaclass
from django.utils import six
from api.api_util import (get_count_stats,
                          get_location_clusters,
                          get_photo_country_counts,
                          get_photo_month_counts,
                          get_searchterms_wordcloud,
                          get_search_term_examples,
                          get_location_sunburst,
                          get_location_timeline)
from api.directory_watcher import scan_photos, long_running_job
from api.autoalbum import generate_event_albums, regenerate_event_titles
from api.flags import is_photos_being_added
from api.flags import is_auto_albums_being_processed
from rest_framework.pagination import PageNumberPagination
from rest_framework import filters
import random
import numpy as np
import base64
import datetime
from django.core.cache import cache
from django.utils.encoding import force_text
from rest_framework_extensions.cache.mixins import CacheResponseMixin
from rest_framework_extensions.cache.decorators import cache_response
from rest_framework_extensions.key_constructor.constructors import (
DefaultKeyConstructor
)
from rest_framework_extensions.key_constructor.bits import (
KeyBitBase,
RetrieveSqlQueryKeyBit,
ListSqlQueryKeyBit,
PaginationKeyBit
)
import ipdb
from tqdm import tqdm
import time
from django_rq import job
import django_rq
# CACHE_TTL = 60 * 60 * 24 * 30  # 1 month
CACHE_TTL = 60 * 60 * 24  # 1 day
CACHE_TTL_VIZ = 60 * 60  # 1 hour
# Caching setup, straight out of https://chibisov.github.io/drf-extensions/docs/#caching
class UpdatedAtKeyBit(KeyBitBase):
def get_data(self, **kwargs):
key = 'api_updated_at_timestamp'
value = cache.get(key, None)
if not value:
value = datetime.datetime.utcnow()
cache.set(key, value=value)
print('key not found, key: %s, value: %s'%(key,value))
else:
print('key found, key: %s, value: %s'%(key,value))
return force_text(value)
class CustomObjectKeyConstructor(DefaultKeyConstructor):
retrieve_sql = RetrieveSqlQueryKeyBit()
updated_at = UpdatedAtKeyBit()
class CustomListKeyConstructor(DefaultKeyConstructor):
list_sql = ListSqlQueryKeyBit()
pagination = PaginationKeyBit()
updated_at = UpdatedAtKeyBit()
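# Hedged sketch (assumption: the signal wiring is not in this file): every key
# built with UpdatedAtKeyBit embeds 'api_updated_at_timestamp', so bumping that
# cache entry on writes invalidates all cached list/retrieve responses at once.
# Connect it to the models you care about, e.g.:
#   from django.db.models.signals import post_save, post_delete
#   post_save.connect(bump_api_updated_at, sender=Photo)
#   post_delete.connect(bump_api_updated_at, sender=Photo)
def bump_api_updated_at(sender=None, instance=None, *args, **kwargs):
    cache.set('api_updated_at_timestamp', datetime.datetime.utcnow())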
class HugeResultsSetPagination(PageNumberPagination):
page_size = 50000
page_size_query_param = 'page_size'
max_page_size = 100000
class StandardResultsSetPagination(PageNumberPagination):
page_size = 10000
page_size_query_param = 'page_size'
max_page_size = 100000
class SmallResultsSetPagination(PageNumberPagination):
page_size = 100
page_size_query_param = 'page_size'
max_page_size = 200
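# Usage note: clients may override the default page size per request, capped
# by max_page_size, e.g. GET /api/photos/?page=2&page_size=500 (the endpoint
# path here is illustrative).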
def get_current_job():
job_detail = None
running_job = LongRunningJob.objects.filter(finished=False).order_by('-started_at').first()
if running_job:
job_detail = LongRunningJobSerializer(running_job).data
return job_detail
def queue_can_accept_job():
    default_queue_stat = [q for q in django_rq.utils.get_statistics()['queues'] if q['name'] == 'default'][0]
    started_jobs = default_queue_stat['started_jobs']
    running_jobs = default_queue_stat['jobs']
    return started_jobs + running_jobs == 0
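# Example guard (sketch) before enqueueing work:
#   if queue_can_accept_job():
#       res = scan_photos.delay()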
# Create your views here.
# @six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class PhotoViewSet(viewsets.ModelViewSet):
queryset = Photo.objects.all().order_by('-exif_timestamp')
serializer_class = PhotoSerializer
pagination_class = HugeResultsSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = (['search_captions','search_location','faces__person__name','exif_timestamp','image_path'])
# search_fields = (['faces__person__name','faces__person__name'])
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(PhotoViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(PhotoViewSet, self).list(*args, **kwargs)
# @six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class PhotoEditViewSet(viewsets.ModelViewSet):
queryset = Photo.objects.all()
serializer_class = PhotoEditSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(PhotoEditViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(PhotoEditViewSet, self).list(*args, **kwargs)
class PhotoHashListViewSet(viewsets.ModelViewSet):
queryset = Photo.objects.all().order_by('-exif_timestamp')
serializer_class = PhotoHashListSerializer
pagination_class = HugeResultsSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = (['search_captions','search_location','faces__person__name','exif_timestamp','image_path'])
# search_fields = (['faces__person__name','faces__person__name'])
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(PhotoHashListViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(PhotoHashListViewSet, self).list(*args, **kwargs)
class PhotoSimpleListViewSet(viewsets.ModelViewSet):
queryset = Photo.objects.all().order_by('-exif_timestamp')
serializer_class = PhotoSimpleSerializer
pagination_class = HugeResultsSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = (['search_captions','search_location','faces__person__name','exif_timestamp','image_path'])
# search_fields = (['faces__person__name','faces__person__name'])
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(PhotoSimpleListViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(PhotoSimpleListViewSet, self).list(*args, **kwargs)
class PhotoSuperSimpleSearchListViewSet(viewsets.ModelViewSet):
queryset = Photo.objects.all().order_by('-exif_timestamp')
serializer_class = PhotoSuperSimpleSerializer
pagination_class = HugeResultsSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = (['search_captions','search_location','faces__person__name','exif_timestamp','image_path'])
# search_fields = (['faces__person__name','faces__person__name'])
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(PhotoSuperSimpleSearchListViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
# def list(self,request):
# queryset = Photo.objects.raw("SELECT image_hash, favorited, hidden, exif_timestamp from api_photo order by exif_timestamp desc")
# serializer = PhotoSuperSimpleSerializer(queryset,many=True)
# return Response({'results':serializer.data})
return super(PhotoSuperSimpleSearchListViewSet, self).list(*args, **kwargs)
class PhotoSuperSimpleListViewSet(viewsets.ModelViewSet):
queryset = Photo.objects.all().order_by('-exif_timestamp')
# serializer_class = PhotoSuperSimpleSerializer
serializer_class = PhotoSuperSimpleSerializerSerpy
pagination_class = HugeResultsSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = (['search_captions','search_location','faces__person__name','exif_timestamp','image_path'])
# search_fields = (['faces__person__name','faces__person__name'])
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(PhotoSuperSimpleListViewSet, self).retrieve(*args, **kwargs)
# def list(self, *args, **kwargs):
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self,request):
# queryset = Photo.objects.raw("SELECT image_hash, favorited, hidden, exif_timestamp from api_photo order by exif_timestamp desc")
queryset = Photo.objects.all().only('image_hash','exif_timestamp','favorited','hidden').order_by('exif_timestamp')
serializer = PhotoSuperSimpleSerializer(queryset,many=True)
return Response({'results':serializer.data})
# return super(PhotoSuperSimpleListViewSet, self).list(*args, **kwargs)
class FavoritePhotoListViewset(viewsets.ModelViewSet):
queryset = Photo.objects.filter(favorited=True).only('image_hash','exif_timestamp','favorited','hidden').order_by('-exif_timestamp')
serializer_class = PhotoSuperSimpleSerializerSerpy
pagination_class = HugeResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(FavoritePhotoListViewset, self).retrieve(*args, **kwargs)
# def list(self, *args, **kwargs):
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self,request):
# queryset = Photo.objects.raw("SELECT image_hash, favorited, hidden, exif_timestamp from api_photo order by exif_timestamp desc")
queryset = Photo.objects.filter(favorited=True).only('image_hash','exif_timestamp','favorited','hidden').order_by('exif_timestamp')
serializer = PhotoSuperSimpleSerializer(queryset,many=True)
return Response({'results':serializer.data})
# return super(PhotoSuperSimpleListViewSet, self).list(*args, **kwargs)
class HiddenPhotoListViewset(viewsets.ModelViewSet):
queryset = Photo.objects.filter(hidden=True).only('image_hash','exif_timestamp','favorited','hidden').order_by('-exif_timestamp')
serializer_class = PhotoSuperSimpleSerializerSerpy
pagination_class = HugeResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(HiddenPhotoListViewset, self).retrieve(*args, **kwargs)
# def list(self, *args, **kwargs):
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self,request):
# queryset = Photo.objects.raw("SELECT image_hash, favorited, hidden, exif_timestamp from api_photo order by exif_timestamp desc")
queryset = Photo.objects.filter(hidden=True).only('image_hash','exif_timestamp','favorited','hidden').order_by('exif_timestamp')
serializer = PhotoSuperSimpleSerializer(queryset,many=True)
return Response({'results':serializer.data})
# return super(PhotoSuperSimpleListViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class NoTimestampPhotoHashListViewSet(viewsets.ModelViewSet):
queryset = Photo.objects.filter(exif_timestamp=None).order_by('image_path')
serializer_class = PhotoSuperSimpleSerializerSerpy
pagination_class = HugeResultsSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = (['search_captions','search_location','faces__person__name'])
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(NoTimestampPhotoHashListViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(NoTimestampPhotoHashListViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class FaceListViewSet(viewsets.ModelViewSet):
queryset = Face.objects.all().select_related('person').order_by('id')
serializer_class = FaceListSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(FaceListViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(FaceListViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class FaceInferredListViewSet(viewsets.ModelViewSet):
queryset = Face.objects.filter(person_label_is_inferred=True).select_related('person').order_by('id')
serializer_class = FaceListSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(FaceInferredListViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(FaceInferredListViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class FaceLabeledListViewSet(viewsets.ModelViewSet):
queryset = Face.objects.filter(Q(person_label_is_inferred=False) | Q(person__name='unknown')).select_related('person').order_by('id')
serializer_class = FaceListSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(FaceLabeledListViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(FaceLabeledListViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class FaceViewSet(viewsets.ModelViewSet):
queryset = Face.objects.all().prefetch_related('person').order_by('id')
serializer_class = FaceSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(FaceViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(FaceViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class FaceInferredViewSet(viewsets.ModelViewSet):
queryset = Face.objects.filter(person_label_is_inferred=True).order_by('id')
serializer_class = FaceSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(FaceInferredViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(FaceInferredViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class FaceLabeledViewSet(viewsets.ModelViewSet):
queryset = Face.objects.filter(person_label_is_inferred=False).order_by('id')
serializer_class = FaceSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(FaceLabeledViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(FaceLabeledViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class PersonViewSet(viewsets.ModelViewSet):
queryset = Person.objects.all().order_by('name')
serializer_class = PersonSerializer
pagination_class = StandardResultsSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = (['name'])
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(PersonViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(PersonViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumAutoViewSet(viewsets.ModelViewSet):
queryset = AlbumAuto.objects.all().order_by('-timestamp')
serializer_class = AlbumAutoSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumAutoViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(AlbumAutoViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumAutoListViewSet(viewsets.ModelViewSet):
queryset = AlbumAuto.objects.all().prefetch_related('photos').order_by('-timestamp')
serializer_class = AlbumAutoListSerializer
pagination_class = StandardResultsSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = (['photos__search_captions','photos__search_location','photos__faces__person__name'])
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumAutoListViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(AlbumAutoListViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumPersonViewSet(viewsets.ModelViewSet):
# queryset = Person.objects.all().prefetch_related('faces__photo').order_by('name')
queryset = Person.objects.all().prefetch_related(Prefetch('faces__photo', queryset=Photo.objects.all().order_by('-exif_timestamp').only('image_hash','exif_timestamp','favorited','hidden'))).order_by('name')
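    # The trimmed Prefetch avoids per-person N+1 photo queries while pulling
    # only the columns the serializer needs.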
serializer_class = AlbumPersonSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumPersonViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(AlbumPersonViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumPersonListViewSet(viewsets.ModelViewSet):
queryset = Person.objects.all().order_by('name')
serializer_class = AlbumPersonListSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumPersonListViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(AlbumPersonListViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumDateViewSet(viewsets.ModelViewSet):
queryset = AlbumDate.objects.all().order_by('-date')
serializer_class = AlbumDateSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumDateViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(AlbumDateViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumDateListViewSet(viewsets.ModelViewSet):
queryset = AlbumDate.objects.all().order_by('-date')
serializer_class = AlbumDateListSerializer
pagination_class = StandardResultsSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = (['photos__search_captions','photos__search_location','photos__faces__person__name'])
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumDateListViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(AlbumDateListViewSet, self).list(*args, **kwargs)
# @six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumDateListWithPhotoHashViewSet(viewsets.ReadOnlyModelViewSet):
# queryset = AlbumDate.objects.exclude(date=None).prefetch_related('photos').order_by('-date')
queryset = AlbumDate.objects.all().exclude(date=None).order_by('-date').prefetch_related(
Prefetch('photos', queryset=Photo.objects.all().order_by('-exif_timestamp').only('image_hash','exif_timestamp','favorited','hidden')))
# serializer_class = AlbumDateListWithPhotoHashSerializer
serializer_class = AlbumDateListWithPhotoHashSerializerSerpy
pagination_class = StandardResultsSetPagination
filter_backends = (filters.SearchFilter,)
ordering_fields = ('photos__exif_timestamp',)
search_fields = (['photos__search_captions','photos__search_location','photos__faces__person__name'])
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumDateListWithPhotoHashViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
# ipdb.set_trace()
return super(AlbumDateListWithPhotoHashViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumThingViewSet(viewsets.ModelViewSet):
queryset = AlbumThing.objects.all().order_by('title')
serializer_class = AlbumThingSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumThingViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(AlbumThingViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumThingListViewSet(viewsets.ModelViewSet):
# max_photo_count = AlbumThing.objects.annotate(photo_count=Count('photos')).order_by('-photo_count').first().photos.count()
# photo_count_thres = int(0.6 * max_photo_count)
# queryset = AlbumThing.objects.annotate(photo_count=Count('photos')).filter(photo_count__gte=3).order_by('-photo_count')[:400]
queryset = AlbumThing.objects.all() \
.annotate(photo_count=Count('photos')) \
.order_by('-title') \
.prefetch_related(
Prefetch(
'cover_photos',
queryset=Photo.objects.all() \
.only('image_hash')))
serializer_class = AlbumThingListSerializer
pagination_class = StandardResultsSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = (['title'])
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumThingListViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(AlbumThingListViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumPlaceViewSet(viewsets.ModelViewSet):
queryset = AlbumPlace.objects.all().order_by('title')
serializer_class = AlbumPlaceSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumPlaceViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(AlbumPlaceViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumPlaceListViewSet(viewsets.ModelViewSet):
# queryset = AlbumPlace.objects.annotate(photo_count=Count('photos')).filter(photo_count__gte=3).order_by('-photo_count')[:400]
# queryset = AlbumPlace.objects.all()
queryset = AlbumPlace.objects.all() \
.annotate(photo_count=Count('photos')) \
.order_by('-title') \
.prefetch_related(
Prefetch(
'cover_photos',
queryset=Photo.objects.all() \
.only('image_hash')))
serializer_class = AlbumPlaceListSerializer
pagination_class = StandardResultsSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = (['title'])
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumPlaceListViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(AlbumPlaceListViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumUserEditViewSet(viewsets.ModelViewSet):
queryset = AlbumUser.objects.all().order_by('title')
serializer_class = AlbumUserEditSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumUserEditViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(AlbumUserEditViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumUserViewSet(viewsets.ModelViewSet):
queryset = AlbumUser.objects.all().order_by('title')
serializer_class = AlbumUserSerializer
pagination_class = StandardResultsSetPagination
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumUserViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(AlbumUserViewSet, self).list(*args, **kwargs)
@six.add_metaclass(OptimizeRelatedModelViewSetMetaclass)
class AlbumUserListViewSet(viewsets.ModelViewSet):
queryset = AlbumUser.objects.all() \
.annotate(photo_count=Count('photos')) \
.order_by('-created_on') \
.prefetch_related(
Prefetch(
'cover_photos',
queryset=Photo.objects.all() \
.only('image_hash')))
serializer_class = AlbumUserListSerializer
pagination_class = StandardResultsSetPagination
filter_backends = (filters.SearchFilter,)
search_fields = (['title'])
@cache_response(CACHE_TTL,key_func=CustomObjectKeyConstructor())
def retrieve(self, *args, **kwargs):
return super(AlbumUserListViewSet, self).retrieve(*args, **kwargs)
@cache_response(CACHE_TTL,key_func=CustomListKeyConstructor())
def list(self, *args, **kwargs):
return super(AlbumUserListViewSet, self).list(*args, **kwargs)
class LongRunningJobViewSet(viewsets.ModelViewSet):
queryset = LongRunningJob.objects.all().order_by('-started_at')
serializer_class = LongRunningJobSerializer
# API Views
# Views that do things I don't know how to make serializers do
class SetPhotosFavorite(APIView):
def post(self, request, format=None):
data = dict(request.data)
print(data)
val_favorite = data['favorite']
image_hashes = data['image_hashes']
out = []
for image_hash in image_hashes:
photo = Photo.objects.get(image_hash=image_hash)
photo.favorited = val_favorite
photo.save()
out.append(PhotoSerializer(photo).data)
return Response({'status':True,'results':out})
class SetPhotosHidden(APIView):
def post(self, request, format=None):
data = dict(request.data)
print(data)
val_hidden = data['hidden']
image_hashes = data['image_hashes']
out = []
for image_hash in image_hashes:
photo = Photo.objects.get(image_hash=image_hash)
photo.hidden = val_hidden
photo.save()
out.append(PhotoSerializer(photo).data)
return Response({'status':True,'results':out})
class SetFacePersonLabel(APIView):
def post(self,request,format=None):
data = dict(request.data)
person = get_or_create_person(name=data['person_name'])
faces = Face.objects.in_bulk(data['face_ids'])
out = []
for face in faces.values():
face.person = person
face.person_label_is_inferred = False
face.person_label_probability = 1.
face.save()
out.append(FaceListSerializer(face).data)
return Response({'status':True, 'results':out})
# ipdb.set_trace()
class DeleteFaces(APIView):
    def post(self, request, format=None):
        data = dict(request.data)
        faces = Face.objects.in_bulk(data['face_ids'])
        for face in faces.values():
            face.delete()
        return Response({'status': True, 'results': data['face_ids']})
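    # Note: face.delete() issues one query per row; a single-query alternative
    # (sketch) would be Face.objects.filter(id__in=data['face_ids']).delete()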
# Utility views
class SearchTermExamples(APIView):
# @cache_response(CACHE_TTL_VIZ)
def get(self, request, format=None):
search_term_examples = get_search_term_examples()
return Response({"results":search_term_examples})
class RegenerateAutoAlbumTitles(APIView):
def get(self,request,format=None):
if get_current_job() is None:
# res = scan_photos.delay()
res = regenerate_event_titles.delay()
return Response({'status':True,'job_id':res.id})
else:
return Response({
'status':False,
'message':'there are jobs being run',
'running_jobs':[job for job in django_rq.get_queue().get_job_ids()]})
class FaceToLabelView(APIView):
def get(self, request, format=None):
# return a single face for labeling
qs = Face.objects.filter(person_label_probability__gt=0).filter(person_label_probability__lt=1).order_by('person_label_probability')
if qs.count() > 0:
face_to_label = qs[0]
data = FaceListSerializer(face_to_label).data
# dirty hack to make the serializer image field to return full url
if request.is_secure():
protocol = 'https://'
else:
protocol = 'http://'
image = protocol + request.META['HTTP_HOST'] + data['image']
data['image'] = image
return Response(data)
faces_all = Face.objects.all()
unlabeled_faces = []
labeled_faces = []
for face in faces_all:
            if face.person_label_is_inferred is not False:
                unlabeled_faces.append(face)
            else:
                labeled_faces.append(face)
labeled_face_encodings = []
for face in labeled_faces:
face_encoding = np.frombuffer(bytes.fromhex(face.encoding))
labeled_face_encodings.append(face_encoding)
labeled_face_encodings = np.array(labeled_face_encodings)
labeled_faces_mean = labeled_face_encodings.mean(0)
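        # Heuristic: pick the unlabeled face whose encoding lies farthest from
        # the mean of all labeled encodings - a crude uncertainty-sampling
        # scheme for choosing which face to ask the user about next.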
distances_to_labeled_faces_mean = []
for face in unlabeled_faces:
face_encoding = np.frombuffer(bytes.fromhex(face.encoding))
distance = np.linalg.norm(labeled_faces_mean-face_encoding)
distances_to_labeled_faces_mean.append(distance)
try:
most_unsure_face_idx = np.argmax(distances_to_labeled_faces_mean)
face_to_label = unlabeled_faces[most_unsure_face_idx]
data = FaceListSerializer(face_to_label).data
# dirty hack to make the serializer image field to return full url
if request.is_secure():
protocol = 'https://'
else:
protocol = 'http://'
image = protocol + request.META['HTTP_HOST'] + data['image']
data['image'] = image
        except Exception:
data = {'results':[]}
return Response(data)
class ClusterFaceView(APIView):
# @cache_response(CACHE_TTL_VIZ)
def get(self, request, format=None):
res = cluster_faces()
return Response(res)
class TrainFaceView(APIView):
def get(self, request, format=None):
if get_current_job() is None:
res = train_faces.delay()
return Response({'status':True,'job_id':res.id})
else:
return Response({
'status':False,
'message':'there are jobs being run',
                'running_jobs': django_rq.get_queue().get_job_ids()})
class SocialGraphView(APIView):
# @cache_response(CACHE_TTL_VIZ)
def get(self, request, format=None):
res = build_social_graph()
return Response(res)
class EgoGraphView(APIView):
# @cache_response(CACHE_TTL_VIZ)
def get(self, request, format=None):
res = build_ego_graph(request.GET['person_id'])
return Response(res)
class AutoAlbumGenerateView(APIView):
def get(self, request, format=None):
        if get_current_job() is None:
            res = generate_event_albums.delay()
return Response({'status':True,'job_id':res.id})
else:
return Response({
'status':False,
'message':'there are jobs being run',
                'running_jobs': django_rq.get_queue().get_job_ids()})
class StatsView(APIView):
    def get(self, request, format=None):
res = get_count_stats()
return Response(res)
class LocationClustersView(APIView):
# @cache_response(CACHE_TTL_VIZ)
    def get(self, request, format=None):
res = get_location_clusters()
return Response(res)
class LocationSunburst(APIView):
# @cache_response(CACHE_TTL_VIZ)
    def get(self, request, format=None):
res = get_location_sunburst()
return Response(res)
class LocationTimeline(APIView):
# @cache_response(CACHE_TTL_VIZ)
    def get(self, request, format=None):
res = get_location_timeline()
return Response(res)
class PhotoMonthCountsView(APIView):
# @cache_response(CACHE_TTL_VIZ)
    def get(self, request, format=None):
res = get_photo_month_counts()
return Response(res)
class PhotoCountryCountsView(APIView):
# @cache_response(CACHE_TTL_VIZ)
    def get(self, request, format=None):
res = get_photo_country_counts()
return Response(res)
class SearchTermWordCloudView(APIView):
# @cache_response(CACHE_TTL_VIZ)
    def get(self, request, format=None):
res = get_searchterms_wordcloud()
return Response(res)
class ScanPhotosView(APIView):
    def get(self, request, format=None):
if get_current_job() is None:
res = scan_photos.delay()
return Response({'status':True,'job_id':res.id})
else:
return Response({
'status':False,
'message':'there are jobs being run',
                'running_jobs': django_rq.get_queue().get_job_ids()})
# watchers
class IsPhotosBeingAddedView(APIView):
    def get(self, request, format=None):
res = is_photos_being_added()
return Response(res)
class IsAutoAlbumsBeingProcessed(APIView):
    def get(self, request, format=None):
res = is_auto_albums_being_processed()
return Response(res)
class QueueAvailabilityView(APIView):
    def get(self, request, format=None):
job_detail = None
running_job = LongRunningJob.objects.filter(finished=False).order_by('-started_at').first()
if running_job:
job_detail = LongRunningJobSerializer(running_job).data
return Response({
'status':True,
'queue_can_accept_job':job_detail is None,
'job_detail':job_detail})
class RQJobStatView(APIView):
    def get(self, request, format=None):
        job_id = request.query_params['job_id']
        is_job_finished = django_rq.get_queue().fetch_job(job_id).is_finished
        return Response({'status':True,'finished':is_job_finished})
def media_access(request, path):
    """
    When trying to access:
    myproject.com/media/uploads/passport.png
    If access is authorized, the request will be redirected to
    myproject.com/protected/media/uploads/passport.png
    This special URL will be handled by nginx with the help of X-Accel-Redirect.
    """
    access_granted = False
    user = request.user
if user.is_authenticated:
if user.is_staff:
# If admin, everything is granted
access_granted = True
else:
# For simple user, only their documents can be accessed
user_documents = [
user.identity_document,
# add here more allowed documents
]
for doc in user_documents:
if path == doc.name:
access_granted = True
if access_granted:
response = HttpResponse()
# Content-type will be detected by nginx
del response['Content-Type']
response['X-Accel-Redirect'] = '/media/' + path
return response
else:
return HttpResponseForbidden('Not authorized to access this media.')
|
|
#!/usr/bin/env python
import boto
import sys
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import json
import time
import command
import os
#from boto.exception import S3ResponseError
def get_s3_conn(key_id, key_secret, token):
s3 = S3Connection(aws_access_key_id=key_id,
aws_secret_access_key=key_secret,
security_token=token)
return s3
################################################################
# 1. Get the bucket from the connection (no validation round-trip)
# 2. Create a key at the given prefix and attach the provided metadata
# 3. Upload the local source file to the key
# Return the upload duration in seconds
################################################################
def upload_s3_keys(s3conn, source, bucket_name, prefix, meta):
start = time.time()
bucket = s3conn.get_bucket(bucket_name, validate=False)
k = Key(bucket)
k.key = prefix
for m in meta:
k.set_metadata(m, meta[m])
k.set_contents_from_filename(source)
return time.time() - start
################################################################
# Upload via the aws s3 CLI, which is faster for large files.
# Note: the meta argument is currently ignored on this path.
# Return the duration reported by command.execute_wait
################################################################
def fast_upload_s3_keys(s3conn, source, bucket_name, prefix, meta):
cmd = "aws s3 cp --region us-east-1 {0} s3://{1}/{2}".format(source,
bucket_name,
prefix)
# execute_wait(app, cmd, walltime, job_id)
duration = command.execute_wait(None, cmd, None, None)
return duration
def smart_upload_s3_keys(s3conn, source, bucket_name, prefix, meta):
    # Use the aws s3 cli only if the file is larger than 10MB
    if os.stat(source).st_size > 10*1024*1024:
        print "File size > 10MB. Using aws s3 cli"
        duration = fast_upload_s3_keys(s3conn, source, bucket_name, prefix, meta)
    else:
        print "File size <= 10MB. Using upload_s3_keys"
        duration = upload_s3_keys(s3conn, source, bucket_name, prefix, meta)
    return duration
# Download a key from the bucket
def download_s3_keys(s3conn, bucket_name, prefix, target):
try:
bucket = s3conn.get_bucket(bucket_name, validate=False)
key = bucket.get_key(prefix)
except Exception, e :
print "ERROR: Failed to download data : ", e
raise
print "filename", key
key.get_contents_to_filename(target)
return key
# Download a key from the bucket
def fast_download_s3_keys(creds, bucket_name, prefix, target):
env_vars = "export AWS_ACCESS_KEY_ID={0};export AWS_SECRET_ACCESS_KEY={1};export AWS_SECURITY_TOKEN={2};export AWS_DEFAULT_REGION={3}".format(creds["AccessKeyId"], creds["SecretAccessKey"], creds["SessionToken"], "us-east-1")
cmd = "{3};aws s3 cp --region us-east-1 s3://{1}/{2} {0} ".format(target,
bucket_name,
prefix,
env_vars)
duration = command.execute_wait(None, cmd, None, None)
return duration
# Download a key from the bucket
def smart_download_s3_keys(s3conn, bucket_name, prefix, target, creds):
start = time.time()
try:
bucket = s3conn.get_bucket(bucket_name, validate=False)
key = bucket.get_key(prefix)
        if key.size > 10*1024*1024 :
            print "File > 10MB: downloading with s3 cli"
            duration = fast_download_s3_keys(creds, bucket_name, prefix, target)
        else:
            print "File <= 10MB: using get_contents_to_filename"
key.get_contents_to_filename(target)
duration = time.time() - start
except boto.exception.S3ResponseError, e :
print "ERROR: Caught S3ResponseError: ", e
return -1
    except Exception, e:
        print "ERROR: Could not access the bucket: ", e
raise
return duration
def generate_signed_url(s3conn, bucket_name, prefix, duration):
bucket = s3conn.get_bucket(bucket_name, validate=False)
try:
key = bucket.get_key(prefix)
return key.generate_url(duration, method='GET')
    except Exception:
return None
def test_uploads(app):
upload_s3_keys(app.config["s3.conn"],
"web_server.log",
"klab-jobs",
"outputs/test/webserver.log",
{"Owner":"Yadu"})
print fast_upload_s3_keys(app.config["s3.conn"],
"web_server.log",
"klab-jobs",
"outputs/test/webserver.log",
{"Owner":"Yadu"})
def list_s3_path(app, bucket_name, prefix):
s3conn = app.config["s3.conn"]
keys = None
try:
bucket = s3conn.get_bucket(bucket_name)
keys = bucket.get_all_keys(prefix=prefix)
except Exception, e:
print "Caught exception with message {1}".format(e, e.error_message)
return keys
def test_list(app, bucket_name, prefix):
print "Bucket : ", bucket_name
try:
bucket = app.config['s3.conn'].get_bucket(bucket_name, validate=False)
keys = bucket.get_all_keys(prefix=prefix)
for key in keys:
print key.name, key._storage_class, key.size
except Exception, e:
print "Caught exception with message {1}".format(e, e.error_message)
#for key in keys:
# print key, key.size, key.last_modified
"""
Update_object is used to update the metadata of an object
to indirectly update it's last-modified/create-date.
This is used to influence the behavior of the life-cycle policies.
"""
def update_object(app, bucket_name, key_name, s3conn=None):
test_list(app, bucket_name, 'uploads/amzn1.account.AEKWXVYINCBBNY5MPRMOYND6CWWA')
try:
if not s3conn:
s3conn = app.config["s3.conn"]
bucket = s3conn.get_bucket(bucket_name, validate=False)
key = bucket.get_key(key_name)
#key.metadata.update({'last_accessed' : str(time.strftime('%Y-%m-%d %H:%M:%S'))})
#nkey = key.copy(key.bucket.name, key.name, key.metadata, preserve_acl=True)
#nkey.metadata = key.metadata
#key.set_metadata('last_accessed', str(time.strftime('%Y-%m-%d %H:%M:%S')) )
key.set_remote_metadata({'last_accessed': str(time.strftime('%Y-%m-%d %H:%M:%S')) },
{},
preserve_acl=True)
except Exception, e :
print "ERROR: Failed to get data/update metadata : ", e
raise
if __name__ == "__main__":
import config_manager as cm
app = cm.load_configs("production.conf")
import sts
import s3_utils as s3
bucket="klab-jobs"
# key="uploads/amzn1.account.AEKWXVYINCBBNY5MPRMOYND6CWWA/LDRD_virtualenv.tar.gz"
key="uploads/amzn1.account.AEKWXVYINCBBNY5MPRMOYND6CWWA/dummy"
#update_object(app, bucket, key)
test_list(app, bucket, key)
test_list(app, 'klabswing-archive', '')
exit(0)
rolestring = 'arn:aws:iam::968994658855:role/wos_read_access' # Left out due to security concerns
    if not rolestring :
        print "Fill out rolestring to continue tests"
        exit(1)
creds = sts.get_temp_creds(rolestring)
s3conn = get_s3_conn( creds["AccessKeyId"],
creds["SecretAccessKey"],
creds["SessionToken"] )
bucket_name = "klab-webofscience"
prefix = 'raw_zipfiles/1976_DSSHPSH.zip'
target = '/tmp/1976_DSSHPSH.zip'
print "Listing items:"
bucket = s3conn.get_bucket(bucket_name)
print "getting keys:"
keys = bucket.get_all_keys(prefix="raw_zipfiles")
for key in keys:
        print key, key.size, key.last_modified
exit(0)
#test_uploads(app)
#test_list(app)
#smart_download_s3_keys(s3conn,
# bucket_name,
# prefix,
# target, creds)
#print fast_download_s3_keys(creds, bucket_name, prefix, target)
print download_s3_keys(s3conn, bucket_name, prefix, target)
|
|
import json
import sys
from ..exceptions import JSONRPCInvalidRequestException
from ..jsonrpc2 import (
JSONRPC20Request,
JSONRPC20BatchRequest,
JSONRPC20Response,
JSONRPC20BatchResponse,
)
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestJSONRPC20Request(unittest.TestCase):
""" Test JSONRPC20Request functionality."""
def setUp(self):
self.request_params = {
"method": "add",
"params": [1, 2],
"_id": 1,
}
def test_correct_init(self):
""" Test object is created."""
JSONRPC20Request(**self.request_params)
def test_validation_incorrect_no_parameters(self):
with self.assertRaises(ValueError):
JSONRPC20Request()
def test_method_validation_str(self):
self.request_params.update({"method": "add"})
JSONRPC20Request(**self.request_params)
def test_method_validation_not_str(self):
self.request_params.update({"method": []})
with self.assertRaises(ValueError):
JSONRPC20Request(**self.request_params)
self.request_params.update({"method": {}})
with self.assertRaises(ValueError):
JSONRPC20Request(**self.request_params)
def test_method_validation_str_rpc_prefix(self):
""" Test method SHOULD NOT starts with rpc. """
self.request_params.update({"method": "rpc."})
with self.assertRaises(ValueError):
JSONRPC20Request(**self.request_params)
self.request_params.update({"method": "rpc.test"})
with self.assertRaises(ValueError):
JSONRPC20Request(**self.request_params)
self.request_params.update({"method": "rpccorrect"})
JSONRPC20Request(**self.request_params)
self.request_params.update({"method": "rpc"})
JSONRPC20Request(**self.request_params)
def test_params_validation_list(self):
self.request_params.update({"params": []})
JSONRPC20Request(**self.request_params)
self.request_params.update({"params": [0]})
JSONRPC20Request(**self.request_params)
def test_params_validation_tuple(self):
self.request_params.update({"params": ()})
JSONRPC20Request(**self.request_params)
self.request_params.update({"params": tuple([0])})
JSONRPC20Request(**self.request_params)
def test_params_validation_dict(self):
self.request_params.update({"params": {}})
JSONRPC20Request(**self.request_params)
self.request_params.update({"params": {"a": 0}})
JSONRPC20Request(**self.request_params)
def test_params_validation_none(self):
self.request_params.update({"params": None})
JSONRPC20Request(**self.request_params)
def test_params_validation_incorrect(self):
self.request_params.update({"params": "str"})
with self.assertRaises(ValueError):
JSONRPC20Request(**self.request_params)
def test_request_args(self):
self.assertEqual(JSONRPC20Request("add").args, ())
self.assertEqual(JSONRPC20Request("add", []).args, ())
self.assertEqual(JSONRPC20Request("add", {"a": 1}).args, ())
self.assertEqual(JSONRPC20Request("add", [1, 2]).args, (1, 2))
def test_request_kwargs(self):
self.assertEqual(JSONRPC20Request("add").kwargs, {})
self.assertEqual(JSONRPC20Request("add", [1, 2]).kwargs, {})
self.assertEqual(JSONRPC20Request("add", {}).kwargs, {})
self.assertEqual(JSONRPC20Request("add", {"a": 1}).kwargs, {"a": 1})
def test_id_validation_string(self):
self.request_params.update({"_id": "id"})
JSONRPC20Request(**self.request_params)
def test_id_validation_int(self):
self.request_params.update({"_id": 0})
JSONRPC20Request(**self.request_params)
def test_id_validation_null(self):
self.request_params.update({"_id": "null"})
JSONRPC20Request(**self.request_params)
def test_id_validation_none(self):
self.request_params.update({"_id": None})
JSONRPC20Request(**self.request_params)
def test_id_validation_float(self):
self.request_params.update({"_id": 0.1})
with self.assertRaises(ValueError):
JSONRPC20Request(**self.request_params)
def test_id_validation_incorrect(self):
self.request_params.update({"_id": []})
with self.assertRaises(ValueError):
JSONRPC20Request(**self.request_params)
self.request_params.update({"_id": ()})
with self.assertRaises(ValueError):
JSONRPC20Request(**self.request_params)
def test_data_method_1(self):
r = JSONRPC20Request("add")
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
"id": None,
})
def test_data_method_2(self):
r = JSONRPC20Request(method="add")
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
"id": None,
})
def test_data_method_3(self):
r = JSONRPC20Request("add", None)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
"id": None,
})
def test_data_params_1(self):
r = JSONRPC20Request("add", params=None, _id=None)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
"id": None,
})
def test_data_params_2(self):
r = JSONRPC20Request("add", [])
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
"params": [],
"id": None,
})
def test_data_params_3(self):
r = JSONRPC20Request("add", ())
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
"params": [],
"id": None,
})
def test_data_params_4(self):
r = JSONRPC20Request("add", (1, 2))
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
"params": [1, 2],
"id": None,
})
def test_data_params_5(self):
r = JSONRPC20Request("add", {"a": 0})
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
"params": {"a": 0},
"id": None,
})
def test_data_id_1(self):
r = JSONRPC20Request("add", _id="null")
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
"id": "null",
})
def test_data_id_1_notification(self):
r = JSONRPC20Request("add", _id="null", is_notification=True)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
})
def test_data_id_2(self):
r = JSONRPC20Request("add", _id=None)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
"id": None,
})
def test_data_id_2_notification(self):
r = JSONRPC20Request("add", _id=None, is_notification=True)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
})
def test_data_id_3(self):
r = JSONRPC20Request("add", _id="id")
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
"id": "id",
})
def test_data_id_3_notification(self):
r = JSONRPC20Request("add", _id="id", is_notification=True)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
})
def test_data_id_4(self):
r = JSONRPC20Request("add", _id=0)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
"id": 0,
})
def test_data_id_4_notification(self):
r = JSONRPC20Request("add", _id=0, is_notification=True)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"method": "add",
})
def test_is_notification(self):
r = JSONRPC20Request("add")
self.assertFalse(r.is_notification)
r = JSONRPC20Request("add", _id=None)
self.assertFalse(r.is_notification)
r = JSONRPC20Request("add", _id="null")
self.assertFalse(r.is_notification)
r = JSONRPC20Request("add", _id=0)
self.assertFalse(r.is_notification)
r = JSONRPC20Request("add", is_notification=True)
self.assertTrue(r.is_notification)
r = JSONRPC20Request("add", is_notification=True, _id=None)
self.assertTrue(r.is_notification)
self.assertNotIn("id", r.data)
r = JSONRPC20Request("add", is_notification=True, _id=0)
self.assertTrue(r.is_notification)
self.assertNotIn("id", r.data)
    def test_set_unset_notification_keep_id(self):
        r = JSONRPC20Request("add", is_notification=True, _id=0)
        self.assertTrue(r.is_notification)
        self.assertNotIn("id", r.data)
        r.is_notification = False
        self.assertFalse(r.is_notification)
        self.assertIn("id", r.data)
        self.assertEqual(r.data["id"], 0)
    def test_serialize_method_1(self):
        r = JSONRPC20Request("add")
        self.assertEqual({
            "jsonrpc": "2.0",
            "method": "add",
            "id": None,
        }, json.loads(r.json))
    def test_serialize_method_2(self):
        r = JSONRPC20Request(method="add")
        self.assertEqual({
            "jsonrpc": "2.0",
            "method": "add",
            "id": None,
        }, json.loads(r.json))
    def test_serialize_method_3(self):
        r = JSONRPC20Request("add", None)
        self.assertEqual({
            "jsonrpc": "2.0",
            "method": "add",
            "id": None,
        }, json.loads(r.json))
    def test_serialize_params_1(self):
        r = JSONRPC20Request("add", params=None, _id=None)
        self.assertEqual({
            "jsonrpc": "2.0",
            "method": "add",
            "id": None,
        }, json.loads(r.json))
    def test_serialize_params_2(self):
        r = JSONRPC20Request("add", [])
        self.assertEqual({
            "jsonrpc": "2.0",
            "method": "add",
            "params": [],
            "id": None,
        }, json.loads(r.json))
    def test_serialize_params_3(self):
        r = JSONRPC20Request("add", ())
        self.assertEqual({
            "jsonrpc": "2.0",
            "method": "add",
            "params": [],
            "id": None,
        }, json.loads(r.json))
    def test_serialize_params_4(self):
        r = JSONRPC20Request("add", (1, 2))
        self.assertEqual({
            "jsonrpc": "2.0",
            "method": "add",
            "params": [1, 2],
            "id": None,
        }, json.loads(r.json))
    def test_serialize_params_5(self):
        r = JSONRPC20Request("add", {"a": 0})
        self.assertEqual({
            "jsonrpc": "2.0",
            "method": "add",
            "params": {"a": 0},
            "id": None,
        }, json.loads(r.json))
    def test_serialize_id_1(self):
        r = JSONRPC20Request("add", _id="null")
        self.assertEqual({
            "jsonrpc": "2.0",
            "method": "add",
            "id": "null",
        }, json.loads(r.json))
    def test_serialize_id_2(self):
        r = JSONRPC20Request("add", _id=None)
        self.assertEqual({
            "jsonrpc": "2.0",
            "method": "add",
            "id": None,
        }, json.loads(r.json))
    def test_serialize_id_3(self):
        r = JSONRPC20Request("add", _id="id")
        self.assertEqual({
            "jsonrpc": "2.0",
            "method": "add",
            "id": "id",
        }, json.loads(r.json))
    def test_serialize_id_4(self):
        r = JSONRPC20Request("add", _id=0)
        self.assertEqual({
            "jsonrpc": "2.0",
            "method": "add",
            "id": 0,
        }, json.loads(r.json))
def test_from_json_request_no_id(self):
str_json = json.dumps({
"method": "add",
"params": [1, 2],
"jsonrpc": "2.0",
})
request = JSONRPC20Request.from_json(str_json)
self.assertTrue(isinstance(request, JSONRPC20Request))
self.assertEqual(request.method, "add")
self.assertEqual(request.params, [1, 2])
self.assertEqual(request._id, None)
self.assertTrue(request.is_notification)
def test_from_json_request_no_params(self):
str_json = json.dumps({
"method": "add",
"jsonrpc": "2.0",
})
request = JSONRPC20Request.from_json(str_json)
self.assertTrue(isinstance(request, JSONRPC20Request))
self.assertEqual(request.method, "add")
self.assertEqual(request.params, None)
self.assertEqual(request._id, None)
self.assertTrue(request.is_notification)
def test_from_json_request_null_id(self):
str_json = json.dumps({
"method": "add",
"jsonrpc": "2.0",
"id": None,
})
request = JSONRPC20Request.from_json(str_json)
self.assertTrue(isinstance(request, JSONRPC20Request))
self.assertEqual(request.method, "add")
self.assertEqual(request.params, None)
self.assertEqual(request._id, None)
self.assertFalse(request.is_notification)
def test_from_json_request(self):
str_json = json.dumps({
"method": "add",
"params": [0, 1],
"jsonrpc": "2.0",
"id": "id",
})
request = JSONRPC20Request.from_json(str_json)
self.assertTrue(isinstance(request, JSONRPC20Request))
self.assertEqual(request.method, "add")
self.assertEqual(request.params, [0, 1])
self.assertEqual(request._id, "id")
self.assertFalse(request.is_notification)
def test_from_json_invalid_request_jsonrpc(self):
str_json = json.dumps({
"method": "add",
})
with self.assertRaises(JSONRPCInvalidRequestException):
JSONRPC20Request.from_json(str_json)
def test_from_json_invalid_request_method(self):
str_json = json.dumps({
"jsonrpc": "2.0",
})
with self.assertRaises(JSONRPCInvalidRequestException):
JSONRPC20Request.from_json(str_json)
def test_from_json_invalid_request_extra_data(self):
str_json = json.dumps({
"jsonrpc": "2.0",
"method": "add",
"is_notification": True,
})
with self.assertRaises(JSONRPCInvalidRequestException):
JSONRPC20Request.from_json(str_json)
def test_data_setter(self):
request = JSONRPC20Request(**self.request_params)
with self.assertRaises(ValueError):
request.data = []
with self.assertRaises(ValueError):
request.data = ""
with self.assertRaises(ValueError):
request.data = None
class TestJSONRPC20BatchRequest(unittest.TestCase):
""" Test JSONRPC20BatchRequest functionality."""
    def test_batch_request(self):
        request = JSONRPC20BatchRequest(
            JSONRPC20Request("divide", {"num": 1, "denom": 2}, _id=1),
            JSONRPC20Request("divide", {"num": 3, "denom": 2}, _id=2),
        )
        self.assertEqual(json.loads(request.json), [
            {"method": "divide", "params": {"num": 1, "denom": 2}, "id": 1,
             "jsonrpc": "2.0"},
            {"method": "divide", "params": {"num": 3, "denom": 2}, "id": 2,
             "jsonrpc": "2.0"},
        ])
def test_from_json_batch(self):
str_json = json.dumps([
{"method": "add", "params": [1, 2], "jsonrpc": "2.0"},
{"method": "mul", "params": [1, 2], "jsonrpc": "2.0"},
])
requests = JSONRPC20BatchRequest.from_json(str_json)
self.assertTrue(isinstance(requests, JSONRPC20BatchRequest))
for r in requests:
self.assertTrue(isinstance(r, JSONRPC20Request))
self.assertTrue(r.method in ["add", "mul"])
self.assertEqual(r.params, [1, 2])
self.assertEqual(r._id, None)
self.assertTrue(r.is_notification)
def test_from_json_batch_one(self):
str_json = json.dumps([
{"method": "add", "params": [1, 2], "jsonrpc": "2.0", "id": None},
])
requests = JSONRPC20Request.from_json(str_json)
self.assertTrue(isinstance(requests, JSONRPC20BatchRequest))
requests = list(requests)
self.assertEqual(len(requests), 1)
r = requests[0]
self.assertTrue(isinstance(r, JSONRPC20Request))
self.assertEqual(r.method, "add")
self.assertEqual(r.params, [1, 2])
self.assertEqual(r._id, None)
self.assertFalse(r.is_notification)
    def test_response_iterator(self):
        requests = JSONRPC20BatchRequest(
            JSONRPC20Request("divide", {"num": 1, "denom": 2}, _id=1),
            JSONRPC20Request("divide", {"num": 3, "denom": 2}, _id=2),
        )
        for request in requests:
            self.assertTrue(isinstance(request, JSONRPC20Request))
            self.assertEqual(request.method, "divide")
class TestJSONRPC20Response(unittest.TestCase):
""" Test JSONRPC20Response functionality."""
def setUp(self):
self.response_success_params = {
"result": "",
"_id": 1,
}
self.response_error_params = {
"error": {
"code": 1,
"message": "error",
},
"_id": 1,
}
def test_correct_init(self):
""" Test object is created."""
JSONRPC20Response(**self.response_success_params)
def test_validation_incorrect_no_parameters(self):
with self.assertRaises(ValueError):
JSONRPC20Response()
def test_validation_incorrect_result_and_error(self):
response = JSONRPC20Response(error={"code": 1, "message": ""})
with self.assertRaises(ValueError):
response.result = ""
def test_validation_error_correct(self):
JSONRPC20Response(**self.response_error_params)
def test_validation_error_incorrect(self):
self.response_error_params["error"].update({"code": "str"})
with self.assertRaises(ValueError):
JSONRPC20Response(**self.response_error_params)
def test_validation_error_incorrect_no_code(self):
del self.response_error_params["error"]["code"]
with self.assertRaises(ValueError):
JSONRPC20Response(**self.response_error_params)
def test_validation_error_incorrect_no_message(self):
del self.response_error_params["error"]["message"]
with self.assertRaises(ValueError):
JSONRPC20Response(**self.response_error_params)
def test_validation_error_incorrect_message_not_str(self):
self.response_error_params["error"].update({"message": 0})
with self.assertRaises(ValueError):
JSONRPC20Response(**self.response_error_params)
def test_validation_id(self):
response = JSONRPC20Response(**self.response_success_params)
self.assertEqual(response._id, self.response_success_params["_id"])
def test_validation_id_incorrect_type(self):
response = JSONRPC20Response(**self.response_success_params)
with self.assertRaises(ValueError):
response._id = []
with self.assertRaises(ValueError):
response._id = {}
with self.assertRaises(ValueError):
response._id = 0.1
def test_data_result(self):
r = JSONRPC20Response(result="")
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"result": "",
"id": None,
})
def test_data_result_id_none(self):
r = JSONRPC20Response(result="", _id=None)
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"result": "",
"id": None,
})
def test_data_result_id(self):
r = JSONRPC20Response(result="", _id=0)
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"result": "",
"id": 0,
})
def test_data_error(self):
r = JSONRPC20Response(error={"code": 0, "message": ""})
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"error": {
"code": 0,
"message": "",
},
"id": None,
})
def test_data_error_id_none(self):
r = JSONRPC20Response(error={"code": 0, "message": ""}, _id=None)
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"error": {
"code": 0,
"message": "",
},
"id": None,
})
def test_data_error_id(self):
r = JSONRPC20Response(error={"code": 0, "message": ""}, _id=0)
self.assertEqual(json.loads(r.json), r.data)
self.assertEqual(r.data, {
"jsonrpc": "2.0",
"error": {
"code": 0,
"message": "",
},
"id": 0,
})
def test_data_setter(self):
response = JSONRPC20Response(**self.response_success_params)
with self.assertRaises(ValueError):
response.data = []
with self.assertRaises(ValueError):
response.data = ""
with self.assertRaises(ValueError):
response.data = None
class TestJSONRPC20BatchResponse(unittest.TestCase):
""" Test JSONRPC20BatchResponse functionality."""
def test_batch_response(self):
response = JSONRPC20BatchResponse(
JSONRPC20Response(result="result", _id=1),
JSONRPC20Response(error={"code": 0, "message": ""}, _id=2),
)
self.assertEqual(json.loads(response.json), [
{"result": "result", "id": 1, "jsonrpc": "2.0"},
{"error": {"code": 0, "message": ""}, "id": 2, "jsonrpc": "2.0"},
])
def test_response_iterator(self):
responses = JSONRPC20BatchResponse(
JSONRPC20Response(result="result", _id=1),
JSONRPC20Response(result="result", _id=2),
)
for response in responses:
self.assertTrue(isinstance(response, JSONRPC20Response))
self.assertEqual(response.result, "result")
def test_batch_response_data(self):
response = JSONRPC20BatchResponse(
JSONRPC20Response(result="result", _id=1),
JSONRPC20Response(result="result", _id=2),
JSONRPC20Response(result="result"),
)
self.assertEqual(response.data, [
{"id": 1, "jsonrpc": "2.0", "result": "result"},
{"id": 2, "jsonrpc": "2.0", "result": "result"},
{"id": None, "jsonrpc": "2.0", "result": "result"},
])
|
|
'''
This script serves as a Kivy Garden page generator. It looks for all existing
Kivy-Garden flowers via the GitHub API, creates a nice grid with their screenshots
(with a JavaScript onerror fallback image where none is available) and links to
their GitHub repository.
Usage:
Render html output:
python generate.py
Rebuild html from cache (without using GitHub API):
python generate.py --rebuild
Clean everything (even cache):
python generate.py --clean
'''
from __future__ import print_function
from os.path import sep, join, dirname, abspath, exists
from distutils.dir_util import copy_tree
from os import mkdir, remove
from shutil import rmtree
from re import findall
import requests
import json
import sys
api = 'https://api.github.com/users/kivy-garden/'
raw = 'https://raw.githubusercontent.com/kivy-garden/'
root = join(dirname(abspath(__file__)), 'source')
dest = join(dirname(abspath(__file__)), 'build')
exclude = ['garden', 'kivy-garden.github.io']
try:
if sys.argv[1] == '--clean':
rmtree(dest)
exit()
elif sys.argv[1] == '--rebuild':
rmtree(join(dest, 'html'))
except IndexError:
pass
# Templates
emptysquare = (
'<td><div class="emptysquare"><a href="$url$">'
'<img src="$scr$" onerror=\'this.src = "stylesheets/flowerscr.png"\' />'
'<span>$text$</span></a></div></td>'
)
square = (
'<td><div class="square"><a href="$url$">'
'<img src="$scr$" onerror=\'this.src = "stylesheets/flowerscr.png"\' />'
'<span>$text$</span></a></div></td>'
)
template = '''
<tr>
$0$
$1$
$2$
$3$
$4$
</tr>
'''
# check folders
if exists(dest):
if exists(join(dest, 'html')):
print('Build already exists! Exiting...')
exit()
else:
mkdir(join(dest, 'html'))
else:
mkdir(dest)
mkdir(join(dest, 'temp'))
mkdir(join(dest, 'html'))
gallery = '' # for html output
flowers = [] # for catching all flowers
page = 1
while True:
url = api + 'repos?callback=getPages&page=' + str(page)
leftstrip = 13 # strip this: /**/getPages(
# if not cached, get data from repository
temp_page = 'temp{}.txt'.format(str(page))
if not exists(join(dest, 'temp', temp_page)):
print('Cached data not available, getting data from repo...\n\t', url)
r = requests.get(url)
content = json.loads(r.content[leftstrip:-1])
# cache it https://developer.github.com/v3/search/#rate-limit
with open(join(dest, 'temp', temp_page), 'w') as f:
f.write(json.dumps(content))
else:
print('Cached data available...')
with open(join(dest, 'temp', temp_page)) as f:
content = json.loads(f.read())
# get pages
    links = content['meta']['Link']
    # default to the current page when no 'last' relation is present
    last = int(page)
    for link in links:
        if 'last' in link[1]['rel']:
            last = int(findall(r'getPages&page=(\d+)', link[0])[0])
# get values from data
data = content['data']
    for d in data:
        # strip the leading 'garden' prefix; note that lstrip('garden')
        # would also eat leading g/a/r/d/e/n characters of the name itself
        name = d['name']
        if name.startswith('garden'):
            name = name[len('garden'):]
        if name.startswith('.') or name.startswith('_'):
            name = name[1:]
# ensure non-empty and allowed name
if name and name not in exclude:
print('Flower -> {}'.format(name))
flower = {}
flower['name'] = name
flower['url'] = d['html_url']
flower['scr'] = flower['url'] + '/raw/master/screenshot.png'
flowers.append(flower)
if page < last:
page += 1
else:
print('Flowers gathered...')
break
flowers = sorted(flowers, key=lambda k: k['name'])
pagination = 4 # X + 1 rows per page (append on 0)
pages = []
round = 0
start = 0
while True:
tpl = template
# get even or odd row
if round % 2:
_rows = [emptysquare, square, emptysquare, square, emptysquare]
else:
_rows = [square, emptysquare, square, emptysquare, square]
# get row values
first_five = flowers[start:start + 5]
start += 5
# fill up the template
rows = []
for i, row in enumerate(_rows):
try:
row = row.replace('$url$', first_five[i]['url'])
row = row.replace('$scr$', first_five[i]['scr'])
row = row.replace('$text$', first_five[i]['name'])
rows.append(row)
except IndexError:
pass
for i, row in enumerate(_rows):
try:
tpl = tpl.replace('${}$'.format(str(i)), rows[i])
except IndexError:
tpl = tpl.replace('${}$'.format(str(i)), '')
round += 1
if pagination:
gallery += tpl
pagination -= 1
else:
gallery += tpl
pages.append(gallery)
gallery = ''
pagination = 4
if len(first_five) < 5:
pages.append(gallery)
break
# write pages
for i, page in enumerate(pages):
with open(join(root, 'gallery.template.html')) as f:
content = f.read()
if i != 0:
file = join(dest, 'html', 'gallery{}.html'.format(str(i + 1)))
else:
file = join(dest, 'html', 'gallery.html')
with open(file, 'w') as f:
content = content.replace('$CONTENT$', page)
if i == 1:
content = content.replace('$PREV$', 'gallery.html')
content = content.replace('<!--$P$', '').replace('$P$-->', '')
elif i > 0:
content = content.replace(
'$PREV$', 'gallery{}.html'.format(str(i))
)
content = content.replace('<!--$P$', '').replace('$P$-->', '')
if i != len(pages) - 1:
content = content.replace(
'$NEXT$', 'gallery{}.html'.format(str(i + 2))
)
content = content.replace('<!--$N$', '').replace('$N$-->', '')
f.write(content)
# copy garden source
copy_tree(root, join(dest, 'html'))
remove(join(dest, 'html', 'gallery.template.html'))
print('Build complete')
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
__docformat__ = 'restructuredtext'
__version__ = '$Id: __init__.py 1194 2007-08-24 07:55:11Z Alex.Holkner $'
from ctypes import *
import unicodedata
import warnings
from pyglet.window import WindowException, NoSuchDisplayException, \
MouseCursorException, Platform, Display, Screen, MouseCursor, \
DefaultMouseCursor, ImageMouseCursor, BaseWindow
from pyglet.window import event
from pyglet.window import key
from pyglet.window import mouse
from pyglet.event import EventDispatcher
from pyglet import gl
from pyglet.gl import gl_info
from pyglet.gl import glu_info
from pyglet.gl import glx
from pyglet.gl import glxext_arb
from pyglet.gl import glxext_mesa
from pyglet.gl import glx_info
import pyglet.window.xlib.xlib
from pyglet.window.xlib import cursorfont
try:
    import pyglet.window.xlib.xinerama
    _have_xinerama = True
except ImportError:
    _have_xinerama = False
class mwmhints_t(Structure):
_fields_ = [
('flags', c_uint32),
('functions', c_uint32),
('decorations', c_uint32),
('input_mode', c_int32),
('status', c_uint32)
]
# Do we have the November 2000 UTF8 extension?
_have_utf8 = hasattr(xlib._lib, 'Xutf8TextListToTextProperty')
# symbol,ctrl -> motion mapping
_motion_map = {
(key.UP, False): key.MOTION_UP,
(key.RIGHT, False): key.MOTION_RIGHT,
(key.DOWN, False): key.MOTION_DOWN,
(key.LEFT, False): key.MOTION_LEFT,
(key.RIGHT, True): key.MOTION_NEXT_WORD,
(key.LEFT, True): key.MOTION_PREVIOUS_WORD,
(key.HOME, False): key.MOTION_BEGINNING_OF_LINE,
(key.END, False): key.MOTION_END_OF_LINE,
(key.PAGEUP, False): key.MOTION_PREVIOUS_PAGE,
(key.PAGEDOWN, False): key.MOTION_NEXT_PAGE,
(key.PAGEUP, True): key.MOTION_BEGINNING_OF_FILE,
(key.PAGEDOWN, True): key.MOTION_END_OF_FILE,
(key.BACKSPACE, False): key.MOTION_BACKSPACE,
(key.DELETE, False): key.MOTION_DELETE,
}
class XlibException(WindowException):
'''An X11-specific exception. This exception is probably a programming
error in pyglet.'''
pass
class XlibMouseCursor(MouseCursor):
drawable = False
def __init__(self, cursor):
self.cursor = cursor
class XlibPlatform(Platform):
def __init__(self):
self._displays = {}
def get_display(self, name):
if name not in self._displays:
self._displays[name] = XlibDisplayDevice(name)
return self._displays[name]
def get_default_display(self):
return self.get_display('')
class XlibDisplayDevice(Display):
_display = None # POINTER(xlib.Display)
def __init__(self, name):
super(XlibDisplayDevice, self).__init__()
self._display = xlib.XOpenDisplay(name)
if not self._display:
raise NoSuchDisplayException('Cannot connect to "%s"' % name)
self.info = glx_info.GLXInfo(self._display)
# Also set the default GLX display for future info queries
glx_info.set_display(self._display.contents)
def get_screens(self):
x_screen = xlib.XDefaultScreen(self._display)
if _have_xinerama and xinerama.XineramaIsActive(self._display):
number = c_int()
infos = xinerama.XineramaQueryScreens(self._display,
byref(number))
infos = cast(infos,
POINTER(xinerama.XineramaScreenInfo * number.value)).contents
result = []
for info in infos:
result.append(XlibScreen(self,
x_screen,
info.x_org,
info.y_org,
info.width,
info.height,
True))
xlib.XFree(infos)
return result
else:
# No xinerama
screen_count = xlib.XScreenCount(self._display)
result = []
for i in range(screen_count):
screen = xlib.XScreenOfDisplay(self._display, i)
result.append(XlibScreen(self,
i,
0, 0,
screen.contents.width,
screen.contents.height,
False))
# Move default screen to be first in list.
s = result.pop(x_screen)
result.insert(0, s)
return result
class XlibScreen(Screen):
def __init__(self, display, x_screen_id, x, y, width, height, xinerama):
super(XlibScreen, self).__init__(x, y, width, height)
self.display = display
self._x_screen_id = x_screen_id
self._xinerama = xinerama
def get_matching_configs(self, template):
x_display = self.display._display
have_13 = self.display.info.have_version(1, 3)
if have_13:
config_class = XlibGLConfig13
else:
if 'ATI' in self.display.info.get_client_vendor():
config_class = XlibGLConfig10ATI
else:
config_class = XlibGLConfig10
# Construct array of attributes
attrs = []
for name, value in template.get_gl_attributes():
attr = config_class.attribute_ids.get(name, None)
if attr and value is not None:
attrs.extend([attr, int(value)])
if have_13:
attrs.extend([glx.GLX_X_RENDERABLE, True])
else:
attrs.extend([glx.GLX_RGBA, True])
if len(attrs):
attrs.extend([0, 0])
attrib_list = (c_int * len(attrs))(*attrs)
else:
attrib_list = None
if have_13:
elements = c_int()
configs = glx.glXChooseFBConfig(x_display, self._x_screen_id,
attrib_list, byref(elements))
if not configs:
return []
configs = cast(configs,
POINTER(glx.GLXFBConfig * elements.value)).contents
result = [config_class(self, c) for c in configs]
# Can't free array until all XlibGLConfig13's are GC'd. Too much
# hassle, live with leak. XXX
#xlib.XFree(configs)
return result
else:
return [config_class(self, attrib_list)]
def __repr__(self):
return 'XlibScreen(screen=%d, x=%d, y=%d, ' \
'width=%d, height=%d, xinerama=%d)' % \
(self._x_screen_id, self.x, self.y, self.width, self.height,
self._xinerama)
class XlibGLConfig(gl.Config):
attribute_ids = {
'buffer_size': glx.GLX_BUFFER_SIZE,
'level': glx.GLX_LEVEL, # Not supported
'double_buffer': glx.GLX_DOUBLEBUFFER,
'stereo': glx.GLX_STEREO,
'aux_buffers': glx.GLX_AUX_BUFFERS,
'red_size': glx.GLX_RED_SIZE,
'green_size': glx.GLX_GREEN_SIZE,
'blue_size': glx.GLX_BLUE_SIZE,
'alpha_size': glx.GLX_ALPHA_SIZE,
'depth_size': glx.GLX_DEPTH_SIZE,
'stencil_size': glx.GLX_STENCIL_SIZE,
'accum_red_size': glx.GLX_ACCUM_RED_SIZE,
'accum_green_size': glx.GLX_ACCUM_GREEN_SIZE,
'accum_blue_size': glx.GLX_ACCUM_BLUE_SIZE,
'accum_alpha_size': glx.GLX_ACCUM_ALPHA_SIZE,
}
def create_context(self, share):
context = self._create_glx_context(share)
if context == glx.GLX_BAD_CONTEXT:
raise gl.ContextException('Invalid context share')
elif context == glx.GLXBadFBConfig:
raise gl.ContextException('Invalid GL configuration')
elif context < 0:
raise gl.ContextException('Could not create GL context')
return XlibGLContext(self, context, share)
def _create_glx_context(self, share):
raise NotImplementedError('abstract')
def is_complete(self):
return True
def get_visual_info(self):
raise NotImplementedError('abstract')
class XlibGLConfig10(XlibGLConfig):
def __init__(self, screen, attrib_list):
self.screen = screen
self._display = screen.display._display
self._visual_info = glx.glXChooseVisual(self._display,
screen._x_screen_id, attrib_list)
if not self._visual_info:
raise gl.ContextException('No conforming visual exists')
for name, attr in self.attribute_ids.items():
value = c_int()
result = glx.glXGetConfig(self._display,
self._visual_info, attr, byref(value))
if result >= 0:
setattr(self, name, value.value)
self.sample_buffers = 0
self.samples = 0
def get_visual_info(self):
return self._visual_info.contents
def _create_glx_context(self, share):
if share:
return glx.glXCreateContext(self._display, self._visual_info,
share._context, True)
else:
return glx.glXCreateContext(self._display, self._visual_info,
None, True)
class XlibGLConfig10ATI(XlibGLConfig10):
attribute_ids = XlibGLConfig.attribute_ids.copy()
del attribute_ids['stereo']
stereo = False
class XlibGLConfig13(XlibGLConfig):
attribute_ids = XlibGLConfig.attribute_ids.copy()
attribute_ids.update({
'sample_buffers': glx.GLX_SAMPLE_BUFFERS,
'samples': glx.GLX_SAMPLES,
# Not supported in current pyglet API:
'render_type': glx.GLX_RENDER_TYPE,
'config_caveat': glx.GLX_CONFIG_CAVEAT,
'transparent_type': glx.GLX_TRANSPARENT_TYPE,
'transparent_index_value': glx.GLX_TRANSPARENT_INDEX_VALUE,
'transparent_red_value': glx.GLX_TRANSPARENT_RED_VALUE,
'transparent_green_value': glx.GLX_TRANSPARENT_GREEN_VALUE,
'transparent_blue_value': glx.GLX_TRANSPARENT_BLUE_VALUE,
'transparent_alpha_value': glx.GLX_TRANSPARENT_ALPHA_VALUE,
# Used internally
'x_renderable': glx.GLX_X_RENDERABLE,
})
def __init__(self, screen, fbconfig):
super(XlibGLConfig13, self).__init__()
self.screen = screen
self._display = screen.display._display
self._fbconfig = fbconfig
for name, attr in self.attribute_ids.items():
value = c_int()
result = glx.glXGetFBConfigAttrib(
self._display, self._fbconfig, attr, byref(value))
if result >= 0:
                setattr(self, name, value.value)
def get_visual_info(self):
return glx.glXGetVisualFromFBConfig(
self._display, self._fbconfig).contents
def _create_glx_context(self, share):
if share:
return glx.glXCreateNewContext(self._display, self._fbconfig,
glx.GLX_RGBA_TYPE, share._context, True)
else:
return glx.glXCreateNewContext(self._display, self._fbconfig,
glx.GLX_RGBA_TYPE, None, True)
class XlibGLContext(gl.Context):
def __init__(self, config, context, share):
super(XlibGLContext, self).__init__(share)
self.config = config
self._context = context
self._x_display = config.screen.display._display
def destroy(self):
super(XlibGLContext, self).destroy()
glx.glXDestroyContext(self._x_display, self._context)
def is_direct(self):
return glx.glXIsDirect(self._x_display, self._context)
_xlib_event_handler_names = set()
def XlibEventHandler(ev):
def handler_wrapper(f):
_xlib_event_handler_names.add(f.__name__)
if not hasattr(f, '_xlib_handler'):
f._xlib_handler = []
f._xlib_handler.append(ev)
return f
return handler_wrapper
class XlibWindow(BaseWindow):
_x_display = None # X display connection
_x_screen_id = None # X screen index
_glx_context = None # GLX context handle
_glx_window = None # GLX window handle
_window = None # Xlib window handle
_minimum_size = None
_maximum_size = None
_x = 0
_y = 0 # Last known window position
_width = 0
_height = 0 # Last known window size
_mouse_exclusive_client = None # x,y of "real" mouse during exclusive
_mouse_buttons = [False] * 6 # State of each xlib button
_keyboard_exclusive = False
_active = True
_applied_mouse_exclusive = False
_applied_keyboard_exclusive = False
_mapped = False
_lost_context = False
_lost_context_state = False
_default_event_mask = (0x1ffffff
& ~xlib.PointerMotionHintMask
& ~xlib.ResizeRedirectMask)
def _recreate(self, changes):
# If flipping to/from fullscreen and using override_redirect (we
# always are, _NET_WM_FULLSCREEN doesn't work), need to recreate the
# window.
#
# A possible improvement could be to just hide the top window,
# destroy the GLX window, and reshow it again when leaving fullscreen.
# This would prevent the floating window from being moved by the
# WM.
if 'fullscreen' in changes or 'resizable' in changes:
# clear out the GLX context
self.switch_to()
gl.glFlush()
glx.glXMakeCurrent(self._x_display, 0, None)
if self._glx_window:
glx.glXDestroyWindow(self._x_display, self._glx_window)
xlib.XDestroyWindow(self._x_display, self._window)
self._glx_window = None
self._window = None
self._mapped = False
# TODO: detect state loss only by examining context share.
if 'context' in changes:
self._lost_context = True
self._lost_context_state = True
self._create()
def _create(self):
# Bind event handlers
self._event_handlers = {}
for func_name in _xlib_event_handler_names:
if not hasattr(self, func_name):
continue
func = getattr(self, func_name)
for message in func._xlib_handler:
self._event_handlers[message] = func
# Unmap existing window if necessary while we fiddle with it.
if self._window and self._mapped:
self._unmap()
self.context.window = self
self._x_display = self.config._display
self._x_screen_id = self.screen._x_screen_id
self._glx_context = self.context._context
self._glx_1_3 = self.display.info.have_version(1, 3)
self._have_SGI_video_sync = \
self.display.info.have_extension('GLX_SGI_video_sync')
self._have_SGI_swap_control = \
self.display.info.have_extension('GLX_SGI_swap_control')
self._have_MESA_swap_control = \
self.display.info.have_extension('GLX_MESA_swap_control')
# In order of preference:
# 1. GLX_MESA_swap_control (more likely to work where video_sync will
# not)
# 2. GLX_SGI_video_sync (does not work on Intel 945GM, but that has
# MESA)
# 3. GLX_SGI_swap_control (cannot be disabled once enabled).
self._use_video_sync = (self._have_SGI_video_sync and
not self._have_MESA_swap_control)
# Create X window if not already existing.
if not self._window:
root = xlib.XRootWindow(self._x_display, self._x_screen_id)
visual_info = self.config.get_visual_info()
visual = visual_info.visual
visual_id = xlib.XVisualIDFromVisual(visual)
default_visual = xlib.XDefaultVisual(
self._x_display, self._x_screen_id)
default_visual_id = xlib.XVisualIDFromVisual(default_visual)
window_attributes = xlib.XSetWindowAttributes()
if visual_id != default_visual_id:
window_attributes.colormap = xlib.XCreateColormap(
self._x_display, root, visual, xlib.AllocNone)
else:
window_attributes.colormap = xlib.XDefaultColormap(
self._x_display, self._x_screen_id)
self._window = xlib.XCreateWindow(self._x_display, root,
0, 0, self._width, self._height, 0, visual_info.depth,
xlib.InputOutput, visual, xlib.CWColormap,
byref(window_attributes))
# Setting null background pixmap disables drawing the background,
# preventing flicker while resizing (in theory).
xlib.XSetWindowBackgroundPixmap(self._x_display, self._window, 0)
# Enable WM_DELETE_WINDOW message
wm_delete_window = xlib.XInternAtom(self._x_display,
'WM_DELETE_WINDOW', False)
wm_delete_window = c_ulong(wm_delete_window)
xlib.XSetWMProtocols(self._x_display, self._window,
byref(wm_delete_window), 1)
# Set window attributes
attributes = xlib.XSetWindowAttributes()
attributes_mask = 0
# Bypass the window manager in fullscreen. This is the only reliable
# technique (over _NET_WM_STATE_FULLSCREEN, Motif, KDE and Gnome
# hints) that is pretty much guaranteed to work. Unfortunately
# we run into window activation and focus problems that require
        # attention. Search for "override_redirect" for all occurrences.
attributes.override_redirect = self._fullscreen
attributes_mask |= xlib.CWOverrideRedirect
if self._fullscreen:
xlib.XMoveResizeWindow(self._x_display, self._window,
self.screen.x, self.screen.y,
self.screen.width, self.screen.height)
else:
xlib.XResizeWindow(self._x_display, self._window,
self._width, self._height)
xlib.XChangeWindowAttributes(self._x_display, self._window,
attributes_mask, byref(attributes))
# Set style
styles = {
self.WINDOW_STYLE_DEFAULT: '_NET_WM_WINDOW_TYPE_NORMAL',
self.WINDOW_STYLE_DIALOG: '_NET_WM_WINDOW_TYPE_DIALOG',
self.WINDOW_STYLE_TOOL: '_NET_WM_WINDOW_TYPE_UTILITY',
self.WINDOW_STYLE_BORDERLESS: '_NET_WM_WINDOW_TYPE_SPLASH',
# XXX BORDERLESS is inhibiting task-bar entry (Gnome)
}
if self._style in styles:
self._set_atoms_property('_NET_WM_WINDOW_TYPE',
(styles[self._style],))
# Set resizeable
if not self._resizable:
self.set_minimum_size(self._width, self._height)
self.set_maximum_size(self._width, self._height)
# Set caption
self.set_caption(self._caption)
self.switch_to()
if self._visible:
self.set_visible(True)
def _map(self):
if self._mapped:
return
# Map the window, wait for map event before continuing.
xlib.XSelectInput(
self._x_display, self._window, xlib.StructureNotifyMask)
xlib.XMapRaised(self._x_display, self._window)
e = xlib.XEvent()
while True:
xlib.XNextEvent(self._x_display, e)
if e.type == xlib.MapNotify:
break
xlib.XSelectInput(
self._x_display, self._window, self._default_event_mask)
self._mapped = True
if self._fullscreen:
self.activate()
self.dispatch_event('on_resize', self._width, self._height)
self.dispatch_event('on_show')
self.dispatch_event('on_expose')
def _unmap(self):
if not self._mapped:
return
xlib.XSelectInput(
self._x_display, self._window, xlib.StructureNotifyMask)
xlib.XUnmapWindow(self._x_display, self._window)
e = xlib.XEvent()
while True:
xlib.XNextEvent(self._x_display, e)
if e.type == xlib.UnmapNotify:
break
xlib.XSelectInput(
self._x_display, self._window, self._default_event_mask)
self._mapped = False
def _get_root(self):
attributes = xlib.XWindowAttributes()
xlib.XGetWindowAttributes(self._x_display, self._window,
byref(attributes))
return attributes.root
def close(self):
# clear out the GLX context. Can fail if current context already
# destroyed (on exit, say).
try:
gl.glFlush()
except gl.GLException:
pass
glx.glXMakeCurrent(self._x_display, 0, None)
self._unmap()
if self._glx_window:
glx.glXDestroyWindow(self._x_display, self._glx_window)
if self._window:
xlib.XDestroyWindow(self._x_display, self._window)
self._window = None
self._glx_window = None
def switch_to(self):
if self._glx_1_3:
if not self._glx_window:
self._glx_window = glx.glXCreateWindow(self._x_display,
self._config._fbconfig, self._window, None)
glx.glXMakeContextCurrent(self._x_display,
self._glx_window, self._glx_window, self._glx_context)
else:
glx.glXMakeCurrent(self._x_display, self._window, self._glx_context)
self.set_vsync(self._vsync)
self._context.set_current()
gl_info.set_active_context()
glu_info.set_active_context()
def flip(self):
self.draw_mouse_cursor()
if self._vsync and self._have_SGI_video_sync and self._use_video_sync:
count = c_uint()
glxext_arb.glXGetVideoSyncSGI(byref(count))
glxext_arb.glXWaitVideoSyncSGI(
2, (count.value + 1) % 2, byref(count))
if self._glx_1_3:
if not self._glx_window:
self._glx_window = glx.glXCreateWindow(self._x_display,
self._config._fbconfig, self._window, None)
glx.glXSwapBuffers(self._x_display, self._glx_window)
else:
glx.glXSwapBuffers(self._x_display, self._window)
def set_vsync(self, vsync):
self._vsync = vsync
if not self._use_video_sync:
            interval = 1 if vsync else 0
if self._have_MESA_swap_control:
glxext_mesa.glXSwapIntervalMESA(interval)
elif self._have_SGI_swap_control and interval:
# SGI_swap_control interval cannot be set to 0
glxext_arb.glXSwapIntervalSGI(interval)
def set_caption(self, caption):
self._caption = caption
self._set_text_property('WM_NAME', caption, allow_utf8=False)
self._set_text_property('WM_ICON_NAME', caption, allow_utf8=False)
self._set_text_property('_NET_WM_NAME', caption)
self._set_text_property('_NET_WM_ICON_NAME', caption)
def get_caption(self):
return self._caption
def set_size(self, width, height):
if self._fullscreen:
raise WindowException('Cannot set size of fullscreen window.')
self._width = width
self._height = height
if not self._resizable:
self.set_minimum_size(width, height)
self.set_maximum_size(width, height)
xlib.XResizeWindow(self._x_display, self._window, width, height)
self.dispatch_event('on_resize', width, height)
def get_size(self):
# XGetGeometry and XWindowAttributes seem to always return the
# original size of the window, which is wrong after the user
# has resized it.
# XXX this is probably fixed now, with fix of resize.
return self._width, self._height
def set_location(self, x, y):
# Assume the window manager has reparented our top-level window
# only once, in which case attributes.x/y give the offset from
# the frame to the content window. Better solution would be
# to use _NET_FRAME_EXTENTS, where supported.
attributes = xlib.XWindowAttributes()
xlib.XGetWindowAttributes(self._x_display, self._window,
byref(attributes))
# XXX at least under KDE's WM these attrs are both 0
x -= attributes.x
y -= attributes.y
xlib.XMoveWindow(self._x_display, self._window, x, y)
def get_location(self):
child = xlib.Window()
x = c_int()
y = c_int()
xlib.XTranslateCoordinates(self._x_display,
self._window,
self._get_root(),
0, 0,
byref(x),
byref(y),
byref(child))
return x.value, y.value
def activate(self):
xlib.XSetInputFocus(self._x_display, self._window,
xlib.RevertToParent, xlib.CurrentTime)
def set_visible(self, visible=True):
if visible:
self._map()
else:
self._unmap()
self._visible = visible
def set_minimum_size(self, width, height):
self._minimum_size = width, height
self._set_wm_normal_hints()
def set_maximum_size(self, width, height):
self._maximum_size = width, height
self._set_wm_normal_hints()
def minimize(self):
xlib.XIconifyWindow(self._x_display, self._window, self._x_screen_id)
def maximize(self):
self._set_wm_state('_NET_WM_STATE_MAXIMIZED_HORZ',
'_NET_WM_STATE_MAXIMIZED_VERT')
def set_mouse_platform_visible(self, platform_visible=None):
if platform_visible is None:
platform_visible = self._mouse_visible and \
not self._mouse_cursor.drawable
if not platform_visible:
            # Hide pointer by creating an empty cursor from a blank pixmap
            black = xlib.XColor()
bmp = xlib.XCreateBitmapFromData(self._x_display, self._window,
c_buffer(8), 8, 8)
cursor = xlib.XCreatePixmapCursor(self._x_display, bmp, bmp,
black, black, 0, 0)
xlib.XDefineCursor(self._x_display, self._window, cursor)
xlib.XFreeCursor(self._x_display, cursor)
xlib.XFreePixmap(self._x_display, bmp)
else:
# Restore cursor
if isinstance(self._mouse_cursor, XlibMouseCursor):
xlib.XDefineCursor(self._x_display, self._window,
self._mouse_cursor.cursor)
else:
xlib.XUndefineCursor(self._x_display, self._window)
def _update_exclusivity(self):
mouse_exclusive = self._active and self._mouse_exclusive
keyboard_exclusive = self._active and self._keyboard_exclusive
if mouse_exclusive != self._applied_mouse_exclusive:
if mouse_exclusive:
self.set_mouse_platform_visible(False)
# Restrict to client area
xlib.XGrabPointer(self._x_display, self._window,
True,
0,
xlib.GrabModeAsync,
xlib.GrabModeAsync,
self._window,
0,
xlib.CurrentTime)
# Move pointer to center of window
x = self._width / 2
y = self._height / 2
self._mouse_exclusive_client = x, y
xlib.XWarpPointer(self._x_display,
0, # src window
self._window, # dst window
0, 0, # src x, y
0, 0, # src w, h
x, y)
else:
# Unclip
xlib.XUngrabPointer(self._x_display, xlib.CurrentTime)
self.set_mouse_platform_visible()
self._applied_mouse_exclusive = mouse_exclusive
if keyboard_exclusive != self._applied_keyboard_exclusive:
if keyboard_exclusive:
xlib.XGrabKeyboard(self._x_display,
self._window,
False,
xlib.GrabModeAsync,
xlib.GrabModeAsync,
xlib.CurrentTime)
else:
xlib.XUngrabKeyboard(self._x_display, xlib.CurrentTime)
self._applied_keyboard_exclusive = keyboard_exclusive
def set_exclusive_mouse(self, exclusive=True):
if exclusive == self._mouse_exclusive:
return
self._mouse_exclusive = exclusive
self._update_exclusivity()
def set_exclusive_keyboard(self, exclusive=True):
if exclusive == self._keyboard_exclusive:
return
self._keyboard_exclusive = exclusive
self._update_exclusivity()
def get_system_mouse_cursor(self, name):
if name == self.CURSOR_DEFAULT:
return DefaultMouseCursor()
# NQR means default shape is not pretty... surely there is another
# cursor font?
cursor_shapes = {
self.CURSOR_CROSSHAIR: cursorfont.XC_crosshair,
self.CURSOR_HAND: cursorfont.XC_hand2,
self.CURSOR_HELP: cursorfont.XC_question_arrow, # NQR
self.CURSOR_NO: cursorfont.XC_pirate, # NQR
self.CURSOR_SIZE: cursorfont.XC_fleur,
self.CURSOR_SIZE_UP: cursorfont.XC_top_side,
self.CURSOR_SIZE_UP_RIGHT: cursorfont.XC_top_right_corner,
self.CURSOR_SIZE_RIGHT: cursorfont.XC_right_side,
self.CURSOR_SIZE_DOWN_RIGHT: cursorfont.XC_bottom_right_corner,
self.CURSOR_SIZE_DOWN: cursorfont.XC_bottom_side,
self.CURSOR_SIZE_DOWN_LEFT: cursorfont.XC_bottom_left_corner,
self.CURSOR_SIZE_LEFT: cursorfont.XC_left_side,
self.CURSOR_SIZE_UP_LEFT: cursorfont.XC_top_left_corner,
self.CURSOR_SIZE_UP_DOWN: cursorfont.XC_sb_v_double_arrow,
self.CURSOR_SIZE_LEFT_RIGHT: cursorfont.XC_sb_h_double_arrow,
self.CURSOR_TEXT: cursorfont.XC_xterm,
self.CURSOR_WAIT: cursorfont.XC_watch,
self.CURSOR_WAIT_ARROW: cursorfont.XC_watch, # NQR
}
if name not in cursor_shapes:
raise MouseCursorException('Unknown cursor name "%s"' % name)
cursor = xlib.XCreateFontCursor(self._x_display, cursor_shapes[name])
return XlibMouseCursor(cursor)
def set_icon(self, *images):
# Careful! XChangeProperty takes an array of long when data type
# is 32-bit (but long can be 64 bit!), so pad high bytes of format if
# necessary.
import sys
format = {
('little', 4): 'BGRA',
('little', 8): 'BGRAAAAA',
('big', 4): 'ARGB',
('big', 8): 'AAAAARGB'
}[(sys.byteorder, sizeof(c_ulong))]
data = ''
for image in images:
image = image.image_data
image.format = format
image.pitch = -(image.width * len(image.format))
s = c_buffer(sizeof(c_ulong) * 2)
memmove(s, cast((c_ulong * 2)(image.width, image.height),
POINTER(c_ubyte)), len(s))
data += s.raw + image.data
buffer = (c_ubyte * len(data))()
memmove(buffer, data, len(data))
atom = xlib.XInternAtom(self._x_display, '_NET_WM_ICON', False)
XA_CARDINAL = 6 # Xatom.h:14
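# XChangeProperty's element count is in 32-bit units, which Xlib stores as
# C longs; data was assembled above in c_ulong-sized chunks, hence the
# division by sizeof(c_ulong) rather than by 4.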
xlib.XChangeProperty(self._x_display, self._window, atom, XA_CARDINAL,
32, xlib.PropModeReplace, buffer, len(data)/sizeof(c_ulong))
# Private utility
def _set_wm_normal_hints(self):
hints = xlib.XAllocSizeHints().contents
if self._minimum_size:
hints.flags |= xlib.PMinSize
hints.min_width, hints.min_height = self._minimum_size
if self._maximum_size:
hints.flags |= xlib.PMaxSize
hints.max_width, hints.max_height = self._maximum_size
xlib.XSetWMNormalHints(self._x_display, self._window, byref(hints))
def _set_text_property(self, name, value, allow_utf8=True):
atom = xlib.XInternAtom(self._x_display, name, True)
if not atom:
raise XlibException('Undefined atom "%s"' % name)
assert type(value) in (str, unicode)
property = xlib.XTextProperty()
if _have_utf8 and allow_utf8:
buf = create_string_buffer(value.encode('utf8'))
result = xlib.Xutf8TextListToTextProperty(self._x_display,
cast(pointer(buf), c_char_p), 1, xlib.XStdICCTextStyle,
byref(property))
if result < 0:
raise XlibException('Could not create UTF8 text property')
else:
buf = create_string_buffer(value.encode('ascii', 'ignore'))
result = xlib.XStringListToTextProperty(
cast(pointer(buf), c_char_p), 1, byref(property))
if result < 0:
raise XlibException('Could not create text property')
xlib.XSetTextProperty(self._x_display,
self._window, byref(property), atom)
# XXX <rj> Xlib doesn't like us freeing this
#xlib.XFree(property.value)
def _set_atoms_property(self, name, values, mode=xlib.PropModeReplace):
name_atom = xlib.XInternAtom(self._x_display, name, False)
atoms = []
for value in values:
atoms.append(xlib.XInternAtom(self._x_display, value, False))
atom_type = xlib.XInternAtom(self._x_display, 'ATOM', False)
if len(atoms):
atoms_ar = (xlib.Atom * len(atoms))(*atoms)
xlib.XChangeProperty(self._x_display, self._window,
name_atom, atom_type, 32, mode,
cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
else:
xlib.XDeleteProperty(self._x_display, self._window, name_atom)
def _set_wm_state(self, *states):
# Set property
net_wm_state = xlib.XInternAtom(self._x_display, '_NET_WM_STATE', False)
atoms = []
for state in states:
atoms.append(xlib.XInternAtom(self._x_display, state, False))
atom_type = xlib.XInternAtom(self._x_display, 'ATOM', False)
if len(atoms):
atoms_ar = (xlib.Atom * len(atoms))(*atoms)
xlib.XChangeProperty(self._x_display, self._window,
net_wm_state, atom_type, 32, xlib.PropModePrepend,
cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
else:
xlib.XDeleteProperty(self._x_display, self._window, net_wm_state)
# Nudge the WM
e = xlib.XEvent()
e.xclient.type = xlib.ClientMessage
e.xclient.message_type = net_wm_state
e.xclient.display = cast(self._x_display, POINTER(xlib.Display))
e.xclient.window = self._window
e.xclient.format = 32
e.xclient.data.l[0] = xlib.PropModePrepend
for i, atom in enumerate(atoms):
e.xclient.data.l[i + 1] = atom
xlib.XSendEvent(self._x_display, self._get_root(),
False, xlib.SubstructureRedirectMask, byref(e))
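# Hedged usage sketch: other EWMH states could in principle be requested
# the same way, e.g. self._set_wm_state('_NET_WM_STATE_FULLSCREEN'),
# subject to window manager support.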
# Event handling
def dispatch_events(self):
while self._event_queue:
EventDispatcher.dispatch_event(self, *self._event_queue.pop(0))
# Dispatch any context-related events
if self._lost_context:
self._lost_context = False
EventDispatcher.dispatch_event(self, 'on_context_lost')
if self._lost_context_state:
self._lost_context_state = False
EventDispatcher.dispatch_event(self, 'on_context_state_lost')
self._allow_dispatch_event = True
e = xlib.XEvent()
# Cache these in case window is closed from an event handler
_x_display = self._x_display
_window = self._window
# Check for the events specific to this window
while xlib.XCheckWindowEvent(_x_display, _window,
0x1ffffff, byref(e)):
event_handler = self._event_handlers.get(e.type)
if event_handler:
event_handler(e)
# Now check generic events for this display and manually filter
# them to see whether they're for this window. sigh.
# Store off the events we need to push back so we don't confuse
# XCheckTypedEvent
push_back = []
while xlib.XCheckTypedEvent(_x_display,
xlib.ClientMessage, byref(e)):
if e.xclient.window != _window:
push_back.append(e)
e = xlib.XEvent()
else:
event_handler = self._event_handlers.get(e.type)
if event_handler:
event_handler(e)
for e in push_back:
xlib.XPutBackEvent(_x_display, byref(e))
self._allow_dispatch_event = False
@staticmethod
def _translate_modifiers(state):
modifiers = 0
if state & xlib.ShiftMask:
modifiers |= key.MOD_SHIFT
if state & xlib.ControlMask:
modifiers |= key.MOD_CTRL
if state & xlib.LockMask:
modifiers |= key.MOD_CAPSLOCK
if state & xlib.Mod1Mask:
modifiers |= key.MOD_ALT
if state & xlib.Mod2Mask:
modifiers |= key.MOD_NUMLOCK
if state & xlib.Mod4Mask:
modifiers |= key.MOD_WINDOWS
return modifiers
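# For example, an X state of (ShiftMask | Mod1Mask) translates to
# key.MOD_SHIFT | key.MOD_ALT.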
# Event handlers
def _event_symbol(self, event):
# pyglet key symbols are identical to X11 keysyms, so there is no
# need to map the keysym.
symbol = xlib.XKeycodeToKeysym(self._x_display, event.xkey.keycode, 0)
if symbol not in key._key_names:
symbol = key.user_key(event.xkey.keycode)
return symbol
def _event_text(self, event):
if event.type == xlib.KeyPress:
buffer = create_string_buffer(16)
# TODO lookup UTF8
count = xlib.XLookupString(event.xkey,
buffer,
len(buffer),
None,
None)
if count:
text = unicode(buffer.value[:count])
if unicodedata.category(text) != 'Cc' or text == '\r':
return text
return None
def _event_text_motion(self, symbol, modifiers):
if modifiers & key.MOD_ALT:
return None
ctrl = modifiers & key.MOD_CTRL != 0
return _motion_map.get((symbol, ctrl), None)
@XlibEventHandler(xlib.KeyPress)
@XlibEventHandler(xlib.KeyRelease)
def _event_key(self, ev):
if ev.type == xlib.KeyRelease:
# Look in the queue for a matching KeyPress with same timestamp,
# indicating an auto-repeat rather than actual key event.
saved = []
while True:
auto_event = xlib.XEvent()
result = xlib.XCheckWindowEvent(self._x_display,
self._window, xlib.KeyPress|xlib.KeyRelease,
byref(auto_event))
if not result:
break
saved.append(auto_event)
if auto_event.type == xlib.KeyRelease:
# just save this off for restoration back to the queue
continue
if ev.xkey.keycode == auto_event.xkey.keycode:
# Found a key repeat: dispatch EVENT_TEXT* event
symbol = self._event_symbol(ev)
modifiers = self._translate_modifiers(ev.xkey.state)
text = self._event_text(auto_event)
motion = self._event_text_motion(symbol, modifiers)
if motion:
if modifiers & key.MOD_SHIFT:
self.dispatch_event(
'on_text_motion_select', motion)
else:
self.dispatch_event('on_text_motion', motion)
elif text:
self.dispatch_event('on_text', text)
# Pop the matching KeyPress off so it isn't pushed back onto the queue.
saved.pop()
for auto_event in reversed(saved):
xlib.XPutBackEvent(self._x_display, byref(auto_event))
return
# Whoops, put the events back, it's for real.
for auto_event in reversed(saved):
xlib.XPutBackEvent(self._x_display, byref(auto_event))
symbol = self._event_symbol(ev)
modifiers = self._translate_modifiers(ev.xkey.state)
text = self._event_text(ev)
motion = self._event_text_motion(symbol, modifiers)
if ev.type == xlib.KeyPress:
self.dispatch_event('on_key_press', symbol, modifiers)
if motion:
if modifiers & key.MOD_SHIFT:
self.dispatch_event('on_text_motion_select', motion)
else:
self.dispatch_event('on_text_motion', motion)
elif text:
self.dispatch_event('on_text', text)
elif ev.type == xlib.KeyRelease:
self.dispatch_event('on_key_release', symbol, modifiers)
@XlibEventHandler(xlib.MotionNotify)
def _event_motionnotify(self, ev):
x = ev.xmotion.x
y = self.height - ev.xmotion.y
dx = x - self._mouse_x
dy = y - self._mouse_y
if self._applied_mouse_exclusive and \
(ev.xmotion.x, ev.xmotion.y) == self._mouse_exclusive_client:
# Ignore events caused by XWarpPointer
self._mouse_x = x
self._mouse_y = y
return
if self._applied_mouse_exclusive:
# Reset pointer position
ex, ey = self._mouse_exclusive_client
xlib.XWarpPointer(self._x_display,
0,
self._window,
0, 0,
0, 0,
ex, ey)
self._mouse_x = x
self._mouse_y = y
self._mouse_in_window = True
buttons = 0
if ev.xmotion.state & xlib.Button1MotionMask:
buttons |= mouse.LEFT
if ev.xmotion.state & xlib.Button2MotionMask:
buttons |= mouse.MIDDLE
if ev.xmotion.state & xlib.Button3MotionMask:
buttons |= mouse.RIGHT
if buttons:
# Drag event
modifiers = self._translate_modifiers(ev.xmotion.state)
self.dispatch_event('on_mouse_drag',
x, y, dx, dy, buttons, modifiers)
else:
# Motion event
self.dispatch_event('on_mouse_motion', x, y, dx, dy)
@XlibEventHandler(xlib.ClientMessage)
def _event_clientmessage(self, ev):
wm_delete_window = xlib.XInternAtom(ev.xclient.display,
'WM_DELETE_WINDOW', False)
if ev.xclient.data.l[0] == wm_delete_window:
self.dispatch_event('on_close')
@XlibEventHandler(xlib.ButtonPress)
@XlibEventHandler(xlib.ButtonRelease)
def _event_button(self, ev):
x = ev.xbutton.x
y = self.height - ev.xbutton.y
button = 1 << (ev.xbutton.button - 1) # 1, 2, 3 -> 1, 2, 4
modifiers = self._translate_modifiers(ev.xbutton.state)
if ev.type == xlib.ButtonPress:
if ev.xbutton.button == 4:
self.dispatch_event('on_mouse_scroll', x, y, 0, 1)
elif ev.xbutton.button == 5:
self.dispatch_event('on_mouse_scroll', x, y, 0, -1)
else:
self._mouse_buttons[ev.xbutton.button] = True
self.dispatch_event('on_mouse_press',
x, y, button, modifiers)
else:
if ev.xbutton.button < 4:
self._mouse_buttons[ev.xbutton.button] = False
self.dispatch_event('on_mouse_release',
x, y, button, modifiers)
@XlibEventHandler(xlib.Expose)
def _event_expose(self, ev):
# Ignore all expose events except the last one. We could be told
# about exposure rects - but I don't see the point since we're
# working with OpenGL and we'll just redraw the whole scene.
if ev.xexpose.count > 0: return
self.dispatch_event('on_expose')
@XlibEventHandler(xlib.EnterNotify)
def _event_enternotify(self, ev):
# figure active mouse buttons
# XXX ignore modifier state?
state = ev.xcrossing.state
self._mouse_buttons[1] = state & xlib.Button1Mask
self._mouse_buttons[2] = state & xlib.Button2Mask
self._mouse_buttons[3] = state & xlib.Button3Mask
self._mouse_buttons[4] = state & xlib.Button4Mask
self._mouse_buttons[5] = state & xlib.Button5Mask
# mouse position
x = self._mouse_x = ev.xcrossing.x
y = self._mouse_y = self.height - ev.xcrossing.y
self._mouse_in_window = True
# XXX there may be more we could do here
self.dispatch_event('on_mouse_enter', x, y)
@XlibEventHandler(xlib.LeaveNotify)
def _event_leavenotify(self, ev):
x = self._mouse_x = ev.xcrossing.x
y = self._mouse_y = self.height - ev.xcrossing.y
self._mouse_in_window = False
self.dispatch_event('on_mouse_leave', x, y)
@XlibEventHandler(xlib.ConfigureNotify)
def _event_configurenotify(self, ev):
w, h = ev.xconfigure.width, ev.xconfigure.height
x, y = ev.xconfigure.x, ev.xconfigure.y
if self._width != w or self._height != h:
self._width = w
self._height = h
self.dispatch_event('on_resize', w, h)
self.dispatch_event('on_expose')
if self._x != x or self._y != y:
self.dispatch_event('on_move', x, y)
self._x = x
self._y = y
@XlibEventHandler(xlib.FocusIn)
def _event_focusin(self, ev):
self._active = True
self._update_exclusivity()
self.dispatch_event('on_activate')
@XlibEventHandler(xlib.FocusOut)
def _event_focusout(self, ev):
self._active = False
self._update_exclusivity()
self.dispatch_event('on_deactivate')
@XlibEventHandler(xlib.MapNotify)
def _event_mapnotify(self, ev):
self._mapped = True
self.dispatch_event('on_show')
@XlibEventHandler(xlib.UnmapNotify)
def _event_unmapnotify(self, ev):
self._mapped = False
self.dispatch_event('on_hide')
|
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_firewall_internet_service
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_firewall_internet_service.Connection')
return connection_class_mock
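# Note: FortiOSHandler is constructed at import time with the fixture
# *function* object rather than a mock instance; this works only because
# every test below patches the handler's set/delete/schema methods on the
# class, so the instance is a pass-through.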
fos_instance = FortiOSHandler(connection_mock)
def test_firewall_internet_service_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_internet_service': {
'database': 'isdb',
'direction': 'src',
'icon_id': '5',
'id': '6',
'name': 'default_name_7',
'offset': '8',
'reputation': '9',
'sld_id': '10'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_internet_service.fortios_firewall(input_data, fos_instance)
expected_data = {
'database': 'isdb',
'direction': 'src',
'icon-id': '5',
'id': '6',
'name': 'default_name_7',
'offset': '8',
'reputation': '9',
'sld-id': '10'
}
set_method_mock.assert_called_with('firewall', 'internet-service', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_firewall_internet_service_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_internet_service': {
'database': 'isdb',
'direction': 'src',
'icon_id': '5',
'id': '6',
'name': 'default_name_7',
'offset': '8',
'reputation': '9',
'sld_id': '10'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_internet_service.fortios_firewall(input_data, fos_instance)
expected_data = {
'database': 'isdb',
'direction': 'src',
'icon-id': '5',
'id': '6',
'name': 'default_name_7',
'offset': '8',
'reputation': '9',
'sld-id': '10'
}
set_method_mock.assert_called_with('firewall', 'internet-service', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_firewall_internet_service_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'firewall_internet_service': {
'database': 'isdb',
'direction': 'src',
'icon_id': '5',
'id': '6',
'name': 'default_name_7',
'offset': '8',
'reputation': '9',
'sld_id': '10'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_internet_service.fortios_firewall(input_data, fos_instance)
delete_method_mock.assert_called_with('firewall', 'internet-service', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_firewall_internet_service_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'firewall_internet_service': {
'database': 'isdb',
'direction': 'src',
'icon_id': '5',
'id': '6',
'name': 'default_name_7',
'offset': '8',
'reputation': '9',
'sld_id': '10'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_internet_service.fortios_firewall(input_data, fos_instance)
delete_method_mock.assert_called_with('firewall', 'internet-service', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_firewall_internet_service_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_internet_service': {
'database': 'isdb',
'direction': 'src',
'icon_id': '5',
'id': '6',
'name': 'default_name_7',
'offset': '8',
'reputation': '9',
'sld_id': '10'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_internet_service.fortios_firewall(input_data, fos_instance)
expected_data = {
'database': 'isdb',
'direction': 'src',
'icon-id': '5',
'id': '6',
'name': 'default_name_7',
'offset': '8',
'reputation': '9',
'sld-id': '10'
}
set_method_mock.assert_called_with('firewall', 'internet-service', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_firewall_internet_service_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_internet_service': {
'random_attribute_not_valid': 'tag',
'database': 'isdb',
'direction': 'src',
'icon_id': '5',
'id': '6',
'name': 'default_name_7',
'offset': '8',
'reputation': '9',
'sld_id': '10'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_internet_service.fortios_firewall(input_data, fos_instance)
expected_data = {
'database': 'isdb',
'direction': 'src',
'icon-id': '5',
'id': '6',
'name': 'default_name_7',
'offset': '8',
'reputation': '9',
'sld-id': '10'
}
set_method_mock.assert_called_with('firewall', 'internet-service', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/type/postal_address.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/type/postal_address.proto",
package="google.type",
syntax="proto3",
serialized_options=b"\n\017com.google.typeB\022PostalAddressProtoP\001ZFgoogle.golang.org/genproto/googleapis/type/postaladdress;postaladdress\370\001\001\242\002\003GTP",
serialized_pb=b'\n google/type/postal_address.proto\x12\x0bgoogle.type"\xfd\x01\n\rPostalAddress\x12\x10\n\x08revision\x18\x01 \x01(\x05\x12\x13\n\x0bregion_code\x18\x02 \x01(\t\x12\x15\n\rlanguage_code\x18\x03 \x01(\t\x12\x13\n\x0bpostal_code\x18\x04 \x01(\t\x12\x14\n\x0csorting_code\x18\x05 \x01(\t\x12\x1b\n\x13\x61\x64ministrative_area\x18\x06 \x01(\t\x12\x10\n\x08locality\x18\x07 \x01(\t\x12\x13\n\x0bsublocality\x18\x08 \x01(\t\x12\x15\n\raddress_lines\x18\t \x03(\t\x12\x12\n\nrecipients\x18\n \x03(\t\x12\x14\n\x0corganization\x18\x0b \x01(\tBx\n\x0f\x63om.google.typeB\x12PostalAddressProtoP\x01ZFgoogle.golang.org/genproto/googleapis/type/postaladdress;postaladdress\xf8\x01\x01\xa2\x02\x03GTPb\x06proto3',
)
_POSTALADDRESS = _descriptor.Descriptor(
name="PostalAddress",
full_name="google.type.PostalAddress",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="revision",
full_name="google.type.PostalAddress.revision",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="region_code",
full_name="google.type.PostalAddress.region_code",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="language_code",
full_name="google.type.PostalAddress.language_code",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="postal_code",
full_name="google.type.PostalAddress.postal_code",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="sorting_code",
full_name="google.type.PostalAddress.sorting_code",
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="administrative_area",
full_name="google.type.PostalAddress.administrative_area",
index=5,
number=6,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="locality",
full_name="google.type.PostalAddress.locality",
index=6,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="sublocality",
full_name="google.type.PostalAddress.sublocality",
index=7,
number=8,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="address_lines",
full_name="google.type.PostalAddress.address_lines",
index=8,
number=9,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="recipients",
full_name="google.type.PostalAddress.recipients",
index=9,
number=10,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="organization",
full_name="google.type.PostalAddress.organization",
index=10,
number=11,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=50,
serialized_end=303,
)
DESCRIPTOR.message_types_by_name["PostalAddress"] = _POSTALADDRESS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PostalAddress = _reflection.GeneratedProtocolMessageType(
"PostalAddress",
(_message.Message,),
{
"DESCRIPTOR": _POSTALADDRESS,
"__module__": "google.type.postal_address_pb2"
# @@protoc_insertion_point(class_scope:google.type.PostalAddress)
},
)
_sym_db.RegisterMessage(PostalAddress)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
|
from django import forms
from django.db.models import Count
from dcim.models import Site, Device, Interface, Rack, IFACE_FF_VIRTUAL
from tenancy.forms import bulkedit_tenant_choices
from tenancy.models import Tenant
from utilities.forms import (
APISelect, BootstrapMixin, BulkImportForm, CommentField, CSVDataField, Livesearch, SmallTextarea, SlugField,
)
from .models import Circuit, CircuitType, Provider
#
# Providers
#
class ProviderForm(forms.ModelForm, BootstrapMixin):
slug = SlugField()
comments = CommentField()
class Meta:
model = Provider
fields = ['name', 'slug', 'asn', 'account', 'portal_url', 'noc_contact', 'admin_contact', 'comments']
widgets = {
'noc_contact': SmallTextarea(attrs={'rows': 5}),
'admin_contact': SmallTextarea(attrs={'rows': 5}),
}
help_texts = {
'name': "Full name of the provider",
'asn': "BGP autonomous system number (if applicable)",
'portal_url': "URL of the provider's customer support portal",
'noc_contact': "NOC email address and phone number",
'admin_contact': "Administrative contact email address and phone number",
}
class ProviderFromCSVForm(forms.ModelForm):
class Meta:
model = Provider
fields = ['name', 'slug', 'asn', 'account', 'portal_url']
class ProviderImportForm(BulkImportForm, BootstrapMixin):
csv = CSVDataField(csv_form=ProviderFromCSVForm)
class ProviderBulkEditForm(forms.Form, BootstrapMixin):
pk = forms.ModelMultipleChoiceField(queryset=Provider.objects.all(), widget=forms.MultipleHiddenInput)
asn = forms.IntegerField(required=False, label='ASN')
account = forms.CharField(max_length=30, required=False, label='Account number')
portal_url = forms.URLField(required=False, label='Portal')
noc_contact = forms.CharField(required=False, widget=SmallTextarea, label='NOC contact')
admin_contact = forms.CharField(required=False, widget=SmallTextarea, label='Admin contact')
comments = CommentField()
def provider_site_choices():
site_choices = Site.objects.all()
return [(s.slug, s.name) for s in site_choices]
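# Passing the callable itself as `choices` (see ProviderFilterForm below)
# defers evaluation until the form is rendered, so newly created sites
# show up without reloading the module.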
class ProviderFilterForm(forms.Form, BootstrapMixin):
site = forms.MultipleChoiceField(required=False, choices=provider_site_choices,
widget=forms.SelectMultiple(attrs={'size': 8}))
#
# Circuit types
#
class CircuitTypeForm(forms.ModelForm, BootstrapMixin):
slug = SlugField()
class Meta:
model = CircuitType
fields = ['name', 'slug']
#
# Circuits
#
class CircuitForm(forms.ModelForm, BootstrapMixin):
site = forms.ModelChoiceField(queryset=Site.objects.all(), widget=forms.Select(attrs={'filter-for': 'rack'}))
rack = forms.ModelChoiceField(queryset=Rack.objects.all(), required=False, label='Rack',
widget=APISelect(api_url='/api/dcim/racks/?site_id={{site}}',
attrs={'filter-for': 'device'}))
device = forms.ModelChoiceField(queryset=Device.objects.all(), required=False, label='Device',
widget=APISelect(api_url='/api/dcim/devices/?rack_id={{rack}}',
attrs={'filter-for': 'interface'}))
livesearch = forms.CharField(required=False, label='Device', widget=Livesearch(
query_key='q', query_url='dcim-api:device_list', field_to_update='device')
)
interface = forms.ModelChoiceField(queryset=Interface.objects.all(), required=False, label='Interface',
widget=APISelect(api_url='/api/dcim/devices/{{device}}/interfaces/?type=physical',
disabled_indicator='is_connected'))
comments = CommentField()
class Meta:
model = Circuit
fields = [
'cid', 'type', 'provider', 'tenant', 'site', 'rack', 'device', 'livesearch', 'interface', 'install_date',
'port_speed', 'upstream_speed', 'commit_rate', 'xconnect_id', 'pp_info', 'comments'
]
help_texts = {
'cid': "Unique circuit ID",
'install_date': "Format: YYYY-MM-DD",
'port_speed': "Physical circuit speed",
'commit_rate': "Committed rate",
'xconnect_id': "ID of the local cross-connect",
'pp_info': "Patch panel ID and port number(s)"
}
def __init__(self, *args, **kwargs):
super(CircuitForm, self).__init__(*args, **kwargs)
# If this circuit has been assigned to an interface, initialize rack and device
if self.instance.interface:
self.initial['rack'] = self.instance.interface.device.rack
self.initial['device'] = self.instance.interface.device
# Limit rack choices
if self.is_bound:
self.fields['rack'].queryset = Rack.objects.filter(site__pk=self.data['site'])
elif self.initial.get('site'):
self.fields['rack'].queryset = Rack.objects.filter(site=self.initial['site'])
else:
self.fields['rack'].choices = []
# Limit device choices
if self.is_bound and self.data.get('rack'):
self.fields['device'].queryset = Device.objects.filter(rack=self.data['rack'])
elif self.initial.get('rack'):
self.fields['device'].queryset = Device.objects.filter(rack=self.initial['rack'])
else:
self.fields['device'].choices = []
# Limit interface choices
if self.is_bound and self.data.get('device'):
interfaces = Interface.objects.filter(device=self.data['device'])\
.exclude(form_factor=IFACE_FF_VIRTUAL).select_related('circuit', 'connected_as_a', 'connected_as_b')
self.fields['interface'].widget.attrs['initial'] = self.data.get('interface')
elif self.initial.get('device'):
interfaces = Interface.objects.filter(device=self.initial['device'])\
.exclude(form_factor=IFACE_FF_VIRTUAL).select_related('circuit', 'connected_as_a', 'connected_as_b')
self.fields['interface'].widget.attrs['initial'] = self.initial.get('interface')
else:
interfaces = []
self.fields['interface'].choices = [
(iface.id, {
'label': iface.name,
'disabled': iface.is_connected and iface.id != self.fields['interface'].widget.attrs.get('initial'),
}) for iface in interfaces
]
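# Note: each interface choice pairs the id with a dict rather than a plain
# label string; this relies on the project's select widget knowing how to
# render the 'label' and 'disabled' keys, and marks connected interfaces
# (other than the currently assigned one) as unselectable.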
class CircuitFromCSVForm(forms.ModelForm):
provider = forms.ModelChoiceField(Provider.objects.all(), to_field_name='name',
error_messages={'invalid_choice': 'Provider not found.'})
type = forms.ModelChoiceField(CircuitType.objects.all(), to_field_name='name',
error_messages={'invalid_choice': 'Invalid circuit type.'})
tenant = forms.ModelChoiceField(Tenant.objects.all(), to_field_name='name', required=False,
error_messages={'invalid_choice': 'Tenant not found.'})
site = forms.ModelChoiceField(Site.objects.all(), to_field_name='name',
error_messages={'invalid_choice': 'Site not found.'})
class Meta:
model = Circuit
fields = ['cid', 'provider', 'type', 'tenant', 'site', 'install_date', 'port_speed', 'upstream_speed',
'commit_rate', 'xconnect_id', 'pp_info']
class CircuitImportForm(BulkImportForm, BootstrapMixin):
csv = CSVDataField(csv_form=CircuitFromCSVForm)
class CircuitBulkEditForm(forms.Form, BootstrapMixin):
pk = forms.ModelMultipleChoiceField(queryset=Circuit.objects.all(), widget=forms.MultipleHiddenInput)
type = forms.ModelChoiceField(queryset=CircuitType.objects.all(), required=False)
provider = forms.ModelChoiceField(queryset=Provider.objects.all(), required=False)
tenant = forms.TypedChoiceField(choices=bulkedit_tenant_choices, coerce=int, required=False, label='Tenant')
port_speed = forms.IntegerField(required=False, label='Port speed (Kbps)')
commit_rate = forms.IntegerField(required=False, label='Commit rate (Kbps)')
comments = CommentField()
def circuit_type_choices():
type_choices = CircuitType.objects.annotate(circuit_count=Count('circuits'))
return [(t.slug, u'{} ({})'.format(t.name, t.circuit_count)) for t in type_choices]
def circuit_provider_choices():
provider_choices = Provider.objects.annotate(circuit_count=Count('circuits'))
return [(p.slug, u'{} ({})'.format(p.name, p.circuit_count)) for p in provider_choices]
def circuit_tenant_choices():
tenant_choices = Tenant.objects.annotate(circuit_count=Count('circuits'))
return [(t.slug, u'{} ({})'.format(t.name, t.circuit_count)) for t in tenant_choices]
def circuit_site_choices():
site_choices = Site.objects.annotate(circuit_count=Count('circuits'))
return [(s.slug, u'{} ({})'.format(s.name, s.circuit_count)) for s in site_choices]
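# As with the provider form above, these callables are re-evaluated each
# time the filter form is rendered; annotate(Count('circuits')) appends a
# circuit count to each label without issuing one query per object.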
class CircuitFilterForm(forms.Form, BootstrapMixin):
type = forms.MultipleChoiceField(required=False, choices=circuit_type_choices)
provider = forms.MultipleChoiceField(required=False, choices=circuit_provider_choices,
widget=forms.SelectMultiple(attrs={'size': 8}))
tenant = forms.MultipleChoiceField(required=False, choices=circuit_tenant_choices,
widget=forms.SelectMultiple(attrs={'size': 8}))
site = forms.MultipleChoiceField(required=False, choices=circuit_site_choices,
widget=forms.SelectMultiple(attrs={'size': 8}))
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._mql_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtins for the MqlLexer.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
types = (
'AccountBalance',
'AccountCompany',
'AccountCredit',
'AccountCurrency',
'AccountEquity',
'AccountFreeMarginCheck',
'AccountFreeMarginMode',
'AccountFreeMargin',
'AccountInfoDouble',
'AccountInfoInteger',
'AccountInfoString',
'AccountLeverage',
'AccountMargin',
'AccountName',
'AccountNumber',
'AccountProfit',
'AccountServer',
'AccountStopoutLevel',
'AccountStopoutMode',
'Alert',
'ArrayBsearch',
'ArrayCompare',
'ArrayCopyRates',
'ArrayCopySeries',
'ArrayCopy',
'ArrayDimension',
'ArrayFill',
'ArrayFree',
'ArrayGetAsSeries',
'ArrayInitialize',
'ArrayIsDynamic',
'ArrayIsSeries',
'ArrayMaximum',
'ArrayMinimum',
'ArrayRange',
'ArrayResize',
'ArraySetAsSeries',
'ArraySize',
'ArraySort',
'CharArrayToString',
'CharToString',
'CharToStr',
'CheckPointer',
'ColorToARGB',
'ColorToString',
'Comment',
'CopyClose',
'CopyHigh',
'CopyLow',
'CopyOpen',
'CopyRates',
'CopyRealVolume',
'CopySpread',
'CopyTickVolume',
'CopyTime',
'DayOfWeek',
'DayOfYear',
'Day',
'DebugBreak',
'Digits',
'DoubleToString',
'DoubleToStr',
'EnumToString',
'EventChartCustom',
'EventKillTimer',
'EventSetMillisecondTimer',
'EventSetTimer',
'ExpertRemove',
'FileClose',
'FileCopy',
'FileDelete',
'FileFindClose',
'FileFindFirst',
'FileFindNext',
'FileFlush',
'FileGetInteger',
'FileIsEnding',
'FileIsExist',
'FileIsLineEnding',
'FileMove',
'FileOpenHistory',
'FileOpen',
'FileReadArray',
'FileReadBool',
'FileReadDatetime',
'FileReadDouble',
'FileReadFloat',
'FileReadInteger',
'FileReadLong',
'FileReadNumber',
'FileReadString',
'FileReadStruct',
'FileSeek',
'FileSize',
'FileTell',
'FileWriteArray',
'FileWriteDouble',
'FileWriteFloat',
'FileWriteInteger',
'FileWriteLong',
'FileWriteString',
'FileWriteStruct',
'FileWrite',
'FolderClean',
'FolderCreate',
'FolderDelete',
'GetLastError',
'GetPointer',
'GetTickCount',
'GlobalVariableCheck',
'GlobalVariableDel',
'GlobalVariableGet',
'GlobalVariableName',
'GlobalVariableSetOnCondition',
'GlobalVariableSet',
'GlobalVariableTemp',
'GlobalVariableTime',
'GlobalVariablesDeleteAll',
'GlobalVariablesFlush',
'GlobalVariablesTotal',
'HideTestIndicators',
'Hour',
'IndicatorBuffers',
'IndicatorCounted',
'IndicatorDigits',
'IndicatorSetDouble',
'IndicatorSetInteger',
'IndicatorSetString',
'IndicatorShortName',
'IntegerToString',
'IsConnected',
'IsDemo',
'IsDllsAllowed',
'IsExpertEnabled',
'IsLibrariesAllowed',
'IsOptimization',
'IsStopped',
'IsTesting',
'IsTradeAllowed',
'IsTradeContextBusy',
'IsVisualMode',
'MQLInfoInteger',
'MQLInfoString',
'MarketInfo',
'MathAbs',
'MathArccos',
'MathArcsin',
'MathArctan',
'MathCeil',
'MathCos',
'MathExp',
'MathFloor',
'MathIsValidNumber',
'MathLog',
'MathMax',
'MathMin',
'MathMod',
'MathPow',
'MathRand',
'MathRound',
'MathSin',
'MathSqrt',
'MathSrand',
'MathTan',
'MessageBox',
'Minute',
'Month',
'NormalizeDouble',
'ObjectCreate',
'ObjectDelete',
'ObjectDescription',
'ObjectFind',
'ObjectGetDouble',
'ObjectGetFiboDescription',
'ObjectGetInteger',
'ObjectGetShiftByValue',
'ObjectGetString',
'ObjectGetTimeByValue',
'ObjectGetValueByShift',
'ObjectGetValueByTime',
'ObjectGet',
'ObjectMove',
'ObjectName',
'ObjectSetDouble',
'ObjectSetFiboDescription',
'ObjectSetInteger',
'ObjectSetString',
'ObjectSetText',
'ObjectSet',
'ObjectType',
'ObjectsDeleteAll',
'ObjectsTotal',
'OrderCloseBy',
'OrderClosePrice',
'OrderCloseTime',
'OrderClose',
'OrderComment',
'OrderCommission',
'OrderDelete',
'OrderExpiration',
'OrderLots',
'OrderMagicNumber',
'OrderModify',
'OrderOpenPrice',
'OrderOpenTime',
'OrderPrint',
'OrderProfit',
'OrderSelect',
'OrderSend',
'OrderStopLoss',
'OrderSwap',
'OrderSymbol',
'OrderTakeProfit',
'OrderTicket',
'OrderType',
'OrdersHistoryTotal',
'OrdersTotal',
'PeriodSeconds',
'Period',
'PlaySound',
'Point',
'PrintFormat',
'Print',
'RefreshRates',
'ResetLastError',
'ResourceCreate',
'ResourceFree',
'ResourceReadImage',
'ResourceSave',
'Seconds',
'SendFTP',
'SendMail',
'SendNotification',
'SeriesInfoInteger',
'SetIndexArrow',
'SetIndexBuffer',
'SetIndexDrawBegin',
'SetIndexEmptyValue',
'SetIndexLabel',
'SetIndexShift',
'SetIndexStyle',
'SetLevelStyle',
'SetLevelValue',
'ShortArrayToString',
'ShortToString',
'Sleep',
'StrToDouble',
'StrToInteger',
'StrToTime',
'StringAdd',
'StringBufferLen',
'StringCompare',
'StringConcatenate',
'StringFill',
'StringFind',
'StringFormat',
'StringGetCharacter',
'StringGetChar',
'StringInit',
'StringLen',
'StringReplace',
'StringSetCharacter',
'StringSetChar',
'StringSplit',
'StringSubstr',
'StringToCharArray',
'StringToColor',
'StringToDouble',
'StringToInteger',
'StringToLower',
'StringToShortArray',
'StringToTime',
'StringToUpper',
'StringTrimLeft',
'StringTrimRight',
'StructToTime',
'SymbolInfoDouble',
'SymbolInfoInteger',
'SymbolInfoSessionQuote',
'SymbolInfoSessionTrade',
'SymbolInfoString',
'SymbolInfoTick',
'SymbolIsSynchronized',
'SymbolName',
'SymbolSelect',
'SymbolsTotal',
'Symbol',
'TerminalClose',
'TerminalCompany',
'TerminalName',
'TerminalPath',
'TesterStatistics',
'TextGetSize',
'TextOut',
'TextSetFont',
'TimeCurrent',
'TimeDayOfWeek',
'TimeDayOfYear',
'TimeDaylightSavings',
'TimeDay',
'TimeGMTOffset',
'TimeGMT',
'TimeHour',
'TimeLocal',
'TimeMinute',
'TimeMonth',
'TimeSeconds',
'TimeToString',
'TimeToStruct',
'TimeToStr',
'TimeTradeServer',
'TimeYear',
'UninitializeReason',
'WindowBarsPerChart',
'WindowExpertName',
'WindowFind',
'WindowFirstVisibleBar',
'WindowHandle',
'WindowIsVisible',
'WindowOnDropped',
'WindowPriceMax',
'WindowPriceMin',
'WindowPriceOnDropped',
'WindowRedraw',
'WindowScreenShot',
'WindowTimeOnDropped',
'WindowXOnDropped',
'WindowYOnDropped',
'WindowsTotal',
'Year',
'ZeroMemory',
'iAC',
'iADX',
'iAD',
'iAO',
'iATR',
'iAlligator',
'iBWMFI',
'iBandsOnArray',
'iBands',
'iBarShift',
'iBars',
'iBearsPower',
'iBullsPower',
'iCCIOnArray',
'iCCI',
'iClose',
'iCustom',
'iDeMarker',
'iEnvelopesOnArray',
'iEnvelopes',
'iForce',
'iFractals',
'iGator',
'iHighest',
'iHigh',
'iIchimoku',
'iLowest',
'iLow',
'iMACD',
'iMAOnArray',
'iMA',
'iMFI',
'iMomentumOnArray',
'iMomentum',
'iOBV',
'iOpen',
'iOsMA',
'iRSIOnArray',
'iRSI',
'iRVI',
'iSAR',
'iStdDevOnArray',
'iStdDev',
'iStochastic',
'iTime',
'iVolume',
'iWPR',
)
constants = (
'ACCOUNT_BALANCE',
'ACCOUNT_COMPANY',
'ACCOUNT_CREDIT',
'ACCOUNT_CURRENCY',
'ACCOUNT_EQUITY',
'ACCOUNT_FREEMARGIN',
'ACCOUNT_LEVERAGE',
'ACCOUNT_LIMIT_ORDERS',
'ACCOUNT_LOGIN',
'ACCOUNT_MARGIN',
'ACCOUNT_MARGIN_LEVEL',
'ACCOUNT_MARGIN_SO_CALL',
'ACCOUNT_MARGIN_SO_MODE',
'ACCOUNT_MARGIN_SO_SO',
'ACCOUNT_NAME',
'ACCOUNT_PROFIT',
'ACCOUNT_SERVER',
'ACCOUNT_STOPOUT_MODE_MONEY',
'ACCOUNT_STOPOUT_MODE_PERCENT',
'ACCOUNT_TRADE_ALLOWED',
'ACCOUNT_TRADE_EXPERT',
'ACCOUNT_TRADE_MODE',
'ACCOUNT_TRADE_MODE_CONTEST',
'ACCOUNT_TRADE_MODE_DEMO',
'ACCOUNT_TRADE_MODE_REAL',
'ALIGN_CENTER',
'ALIGN_LEFT',
'ALIGN_RIGHT',
'ANCHOR_BOTTOM',
'ANCHOR_CENTER',
'ANCHOR_LEFT',
'ANCHOR_LEFT_LOWER',
'ANCHOR_LEFT_UPPER',
'ANCHOR_LOWER',
'ANCHOR_RIGHT',
'ANCHOR_RIGHT_LOWER',
'ANCHOR_RIGHT_UPPER',
'ANCHOR_TOP',
'ANCHOR_UPPER',
'BORDER_FLAT',
'BORDER_RAISED',
'BORDER_SUNKEN',
'CHARTEVENT_CHART_CHANGE',
'CHARTEVENT_CLICK',
'CHARTEVENT_CUSTOM',
'CHARTEVENT_CUSTOM_LAST',
'CHARTEVENT_KEYDOWN',
'CHARTEVENT_MOUSE_MOVE',
'CHARTEVENT_OBJECT_CHANGE',
'CHARTEVENT_OBJECT_CLICK',
'CHARTEVENT_OBJECT_CREATE',
'CHARTEVENT_OBJECT_DELETE',
'CHARTEVENT_OBJECT_DRAG',
'CHARTEVENT_OBJECT_ENDEDIT',
'CHARTS_MAX',
'CHART_AUTOSCROLL',
'CHART_BARS',
'CHART_BEGIN',
'CHART_BRING_TO_TOP',
'CHART_CANDLES',
'CHART_COLOR_ASK',
'CHART_COLOR_BACKGROUND',
'CHART_COLOR_BID',
'CHART_COLOR_CANDLE_BEAR',
'CHART_COLOR_CANDLE_BULL',
'CHART_COLOR_CHART_DOWN',
'CHART_COLOR_CHART_LINE',
'CHART_COLOR_CHART_UP',
'CHART_COLOR_FOREGROUND',
'CHART_COLOR_GRID',
'CHART_COLOR_LAST',
'CHART_COLOR_STOP_LEVEL',
'CHART_COLOR_VOLUME',
'CHART_COMMENT',
'CHART_CURRENT_POS',
'CHART_DRAG_TRADE_LEVELS',
'CHART_END',
'CHART_EVENT_MOUSE_MOVE',
'CHART_EVENT_OBJECT_CREATE',
'CHART_EVENT_OBJECT_DELETE',
'CHART_FIRST_VISIBLE_BAR',
'CHART_FIXED_MAX',
'CHART_FIXED_MIN',
'CHART_FIXED_POSITION',
'CHART_FOREGROUND',
'CHART_HEIGHT_IN_PIXELS',
'CHART_IS_OBJECT',
'CHART_LINE',
'CHART_MODE',
'CHART_MOUSE_SCROLL',
'CHART_POINTS_PER_BAR',
'CHART_PRICE_MAX',
'CHART_PRICE_MIN',
'CHART_SCALEFIX',
'CHART_SCALEFIX_11',
'CHART_SCALE',
'CHART_SCALE_PT_PER_BAR',
'CHART_SHIFT',
'CHART_SHIFT_SIZE',
'CHART_SHOW_ASK_LINE',
'CHART_SHOW_BID_LINE',
'CHART_SHOW_DATE_SCALE',
'CHART_SHOW_GRID',
'CHART_SHOW_LAST_LINE',
'CHART_SHOW_OBJECT_DESCR',
'CHART_SHOW_OHLC',
'CHART_SHOW_PERIOD_SEP',
'CHART_SHOW_PRICE_SCALE',
'CHART_SHOW_TRADE_LEVELS',
'CHART_SHOW_VOLUMES',
'CHART_VISIBLE_BARS',
'CHART_VOLUME_HIDE',
'CHART_VOLUME_REAL',
'CHART_VOLUME_TICK',
'CHART_WIDTH_IN_BARS',
'CHART_WIDTH_IN_PIXELS',
'CHART_WINDOWS_TOTAL',
'CHART_WINDOW_HANDLE',
'CHART_WINDOW_IS_VISIBLE',
'CHART_WINDOW_YDISTANCE',
'CHAR_MAX',
'CHAR_MIN',
'CLR_NONE',
'CORNER_LEFT_LOWER',
'CORNER_LEFT_UPPER',
'CORNER_RIGHT_LOWER',
'CORNER_RIGHT_UPPER',
'CP_ACP',
'CP_MACCP',
'CP_OEMCP',
'CP_SYMBOL',
'CP_THREAD_ACP',
'CP_UTF7',
'CP_UTF8',
'DBL_DIG',
'DBL_EPSILON',
'DBL_MANT_DIG',
'DBL_MAX',
'DBL_MAX_10_EXP',
'DBL_MAX_EXP',
'DBL_MIN',
'DBL_MIN_10_EXP',
'DBL_MIN_EXP',
'DRAW_ARROW',
'DRAW_FILLING',
'DRAW_HISTOGRAM',
'DRAW_LINE',
'DRAW_NONE',
'DRAW_SECTION',
'DRAW_ZIGZAG',
'EMPTY',
'EMPTY_VALUE',
'ERR_ACCOUNT_DISABLED',
'ERR_BROKER_BUSY',
'ERR_COMMON_ERROR',
'ERR_INVALID_ACCOUNT',
'ERR_INVALID_PRICE',
'ERR_INVALID_STOPS',
'ERR_INVALID_TRADE_PARAMETERS',
'ERR_INVALID_TRADE_VOLUME',
'ERR_LONG_POSITIONS_ONLY_ALLOWED',
'ERR_MALFUNCTIONAL_TRADE',
'ERR_MARKET_CLOSED',
'ERR_NOT_ENOUGH_MONEY',
'ERR_NOT_ENOUGH_RIGHTS',
'ERR_NO_CONNECTION',
'ERR_NO_ERROR',
'ERR_NO_RESULT',
'ERR_OFF_QUOTES',
'ERR_OLD_VERSION',
'ERR_ORDER_LOCKED',
'ERR_PRICE_CHANGED',
'ERR_REQUOTE',
'ERR_SERVER_BUSY',
'ERR_TOO_FREQUENT_REQUESTS',
'ERR_TOO_MANY_REQUESTS',
'ERR_TRADE_CONTEXT_BUSY',
'ERR_TRADE_DISABLED',
'ERR_TRADE_EXPIRATION_DENIED',
'ERR_TRADE_HEDGE_PROHIBITED',
'ERR_TRADE_MODIFY_DENIED',
'ERR_TRADE_PROHIBITED_BY_FIFO',
'ERR_TRADE_TIMEOUT',
'ERR_TRADE_TOO_MANY_ORDERS',
'FILE_ACCESS_DATE',
'FILE_ANSI',
'FILE_BIN',
'FILE_COMMON',
'FILE_CREATE_DATE',
'FILE_CSV',
'FILE_END',
'FILE_EXISTS',
'FILE_IS_ANSI',
'FILE_IS_BINARY',
'FILE_IS_COMMON',
'FILE_IS_CSV',
'FILE_IS_READABLE',
'FILE_IS_TEXT',
'FILE_IS_WRITABLE',
'FILE_LINE_END',
'FILE_MODIFY_DATE',
'FILE_POSITION',
'FILE_READ',
'FILE_REWRITE',
'FILE_SHARE_READ',
'FILE_SHARE_WRITE',
'FILE_SIZE',
'FILE_TXT',
'FILE_UNICODE',
'FILE_WRITE',
'FLT_DIG',
'FLT_EPSILON',
'FLT_MANT_DIG',
'FLT_MAX',
'FLT_MAX_10_EXP',
'FLT_MAX_EXP',
'FLT_MIN',
'FLT_MIN_10_EXP',
'FLT_MIN_EXP',
'FRIDAY',
'GANN_DOWN_TREND',
'GANN_UP_TREND',
'IDABORT',
'IDCANCEL',
'IDCONTINUE',
'IDIGNORE',
'IDNO',
'IDOK',
'IDRETRY',
'IDTRYAGAIN',
'IDYES',
'INDICATOR_CALCULATIONS',
'INDICATOR_COLOR_INDEX',
'INDICATOR_DATA',
'INDICATOR_DIGITS',
'INDICATOR_HEIGHT',
'INDICATOR_LEVELCOLOR',
'INDICATOR_LEVELSTYLE',
'INDICATOR_LEVELS',
'INDICATOR_LEVELTEXT',
'INDICATOR_LEVELVALUE',
'INDICATOR_LEVELWIDTH',
'INDICATOR_MAXIMUM',
'INDICATOR_MINIMUM',
'INDICATOR_SHORTNAME',
'INT_MAX',
'INT_MIN',
'INVALID_HANDLE',
'IS_DEBUG_MODE',
'IS_PROFILE_MODE',
'LICENSE_DEMO',
'LICENSE_FREE',
'LICENSE_FULL',
'LICENSE_TIME',
'LONG_MAX',
'LONG_MIN',
'MB_ABORTRETRYIGNORE',
'MB_CANCELTRYCONTINUE',
'MB_DEFBUTTON1',
'MB_DEFBUTTON2',
'MB_DEFBUTTON3',
'MB_DEFBUTTON4',
'MB_ICONASTERISK',
'MB_ICONERROR',
'MB_ICONEXCLAMATION',
'MB_ICONHAND',
'MB_ICONINFORMATION',
'MB_ICONQUESTION',
'MB_ICONSTOP',
'MB_ICONWARNING',
'MB_OKCANCEL',
'MB_OK',
'MB_RETRYCANCEL',
'MB_YESNOCANCEL',
'MB_YESNO',
'MODE_ASK',
'MODE_BID',
'MODE_CHINKOUSPAN',
'MODE_CLOSE',
'MODE_DIGITS',
'MODE_EMA',
'MODE_EXPIRATION',
'MODE_FREEZELEVEL',
'MODE_GATORJAW',
'MODE_GATORLIPS',
'MODE_GATORTEETH',
'MODE_HIGH',
'MODE_KIJUNSEN',
'MODE_LOTSIZE',
'MODE_LOTSTEP',
'MODE_LOWER',
'MODE_LOW',
'MODE_LWMA',
'MODE_MAIN',
'MODE_MARGINCALCMODE',
'MODE_MARGINHEDGED',
'MODE_MARGININIT',
'MODE_MARGINMAINTENANCE',
'MODE_MARGINREQUIRED',
'MODE_MAXLOT',
'MODE_MINLOT',
'MODE_MINUSDI',
'MODE_OPEN',
'MODE_PLUSDI',
'MODE_POINT',
'MODE_PROFITCALCMODE',
'MODE_SENKOUSPANA',
'MODE_SENKOUSPANB',
'MODE_SIGNAL',
'MODE_SMA',
'MODE_SMMA',
'MODE_SPREAD',
'MODE_STARTING',
'MODE_STOPLEVEL',
'MODE_SWAPLONG',
'MODE_SWAPSHORT',
'MODE_SWAPTYPE',
'MODE_TENKANSEN',
'MODE_TICKSIZE',
'MODE_TICKVALUE',
'MODE_TIME',
'MODE_TRADEALLOWED',
'MODE_UPPER',
'MODE_VOLUME',
'MONDAY',
'MQL_DEBUG',
'MQL_DLLS_ALLOWED',
'MQL_FRAME_MODE',
'MQL_LICENSE_TYPE',
'MQL_OPTIMIZATION',
'MQL_PROFILER',
'MQL_PROGRAM_NAME',
'MQL_PROGRAM_PATH',
'MQL_PROGRAM_TYPE',
'MQL_TESTER',
'MQL_TRADE_ALLOWED',
'MQL_VISUAL_MODE',
'M_1_PI',
'M_2_PI',
'M_2_SQRTPI',
'M_E',
'M_LN2',
'M_LN10',
'M_LOG2E',
'M_LOG10E',
'M_PI',
'M_PI_2',
'M_PI_4',
'M_SQRT1_2',
'M_SQRT2',
'NULL',
'OBJPROP_ALIGN',
'OBJPROP_ANCHOR',
'OBJPROP_ANGLE',
'OBJPROP_ARROWCODE',
'OBJPROP_BACK',
'OBJPROP_BGCOLOR',
'OBJPROP_BMPFILE',
'OBJPROP_BORDER_COLOR',
'OBJPROP_BORDER_TYPE',
'OBJPROP_CHART_ID',
'OBJPROP_CHART_SCALE',
'OBJPROP_COLOR',
'OBJPROP_CORNER',
'OBJPROP_CREATETIME',
'OBJPROP_DATE_SCALE',
'OBJPROP_DEVIATION',
'OBJPROP_DRAWLINES',
'OBJPROP_ELLIPSE',
'OBJPROP_FIBOLEVELS',
'OBJPROP_FILL',
'OBJPROP_FIRSTLEVEL',
'OBJPROP_FONTSIZE',
'OBJPROP_FONT',
'OBJPROP_HIDDEN',
'OBJPROP_LEVELCOLOR',
'OBJPROP_LEVELSTYLE',
'OBJPROP_LEVELS',
'OBJPROP_LEVELTEXT',
'OBJPROP_LEVELVALUE',
'OBJPROP_LEVELWIDTH',
'OBJPROP_NAME',
'OBJPROP_PERIOD',
'OBJPROP_PRICE1',
'OBJPROP_PRICE2',
'OBJPROP_PRICE3',
'OBJPROP_PRICE',
'OBJPROP_PRICE_SCALE',
'OBJPROP_RAY',
'OBJPROP_RAY_RIGHT',
'OBJPROP_READONLY',
'OBJPROP_SCALE',
'OBJPROP_SELECTABLE',
'OBJPROP_SELECTED',
'OBJPROP_STATE',
'OBJPROP_STYLE',
'OBJPROP_SYMBOL',
'OBJPROP_TEXT',
'OBJPROP_TIME1',
'OBJPROP_TIME2',
'OBJPROP_TIME3',
'OBJPROP_TIMEFRAMES',
'OBJPROP_TIME',
'OBJPROP_TOOLTIP',
'OBJPROP_TYPE',
'OBJPROP_WIDTH',
'OBJPROP_XDISTANCE',
'OBJPROP_XOFFSET',
'OBJPROP_XSIZE',
'OBJPROP_YDISTANCE',
'OBJPROP_YOFFSET',
'OBJPROP_YSIZE',
'OBJPROP_ZORDER',
'OBJ_ALL_PERIODS',
'OBJ_ARROW',
'OBJ_ARROW_BUY',
'OBJ_ARROW_CHECK',
'OBJ_ARROW_DOWN',
'OBJ_ARROW_LEFT_PRICE',
'OBJ_ARROW_RIGHT_PRICE',
'OBJ_ARROW_SELL',
'OBJ_ARROW_STOP',
'OBJ_ARROW_THUMB_DOWN',
'OBJ_ARROW_THUMB_UP',
'OBJ_ARROW_UP',
'OBJ_BITMAP',
'OBJ_BITMAP_LABEL',
'OBJ_BUTTON',
'OBJ_CHANNEL',
'OBJ_CYCLES',
'OBJ_EDIT',
'OBJ_ELLIPSE',
'OBJ_EVENT',
'OBJ_EXPANSION',
'OBJ_FIBOARC',
'OBJ_FIBOCHANNEL',
'OBJ_FIBOFAN',
'OBJ_FIBOTIMES',
'OBJ_FIBO',
'OBJ_GANNFAN',
'OBJ_GANNGRID',
'OBJ_GANNLINE',
'OBJ_HLINE',
'OBJ_LABEL',
'OBJ_NO_PERIODS',
'OBJ_PERIOD_D1',
'OBJ_PERIOD_H1',
'OBJ_PERIOD_H4',
'OBJ_PERIOD_M1',
'OBJ_PERIOD_M5',
'OBJ_PERIOD_M15',
'OBJ_PERIOD_M30',
'OBJ_PERIOD_MN1',
'OBJ_PERIOD_W1',
'OBJ_PITCHFORK',
'OBJ_RECTANGLE',
'OBJ_RECTANGLE_LABEL',
'OBJ_REGRESSION',
'OBJ_STDDEVCHANNEL',
'OBJ_TEXT',
'OBJ_TRENDBYANGLE',
'OBJ_TREND',
'OBJ_TRIANGLE',
'OBJ_VLINE',
'OP_BUYLIMIT',
'OP_BUYSTOP',
'OP_BUY',
'OP_SELLLIMIT',
'OP_SELLSTOP',
'OP_SELL',
'PERIOD_CURRENT',
'PERIOD_D1',
'PERIOD_H1',
'PERIOD_H2',
'PERIOD_H3',
'PERIOD_H4',
'PERIOD_H6',
'PERIOD_H8',
'PERIOD_H12',
'PERIOD_M1',
'PERIOD_M2',
'PERIOD_M3',
'PERIOD_M4',
'PERIOD_M5',
'PERIOD_M6',
'PERIOD_M10',
'PERIOD_M12',
'PERIOD_M15',
'PERIOD_M20',
'PERIOD_M30',
'PERIOD_MN1',
'PERIOD_W1',
'POINTER_AUTOMATIC',
'POINTER_DYNAMIC',
'POINTER_INVALID',
'PRICE_CLOSE',
'PRICE_HIGH',
'PRICE_LOW',
'PRICE_MEDIAN',
'PRICE_OPEN',
'PRICE_TYPICAL',
'PRICE_WEIGHTED',
'PROGRAM_EXPERT',
'PROGRAM_INDICATOR',
'PROGRAM_SCRIPT',
'REASON_ACCOUNT',
'REASON_CHARTCHANGE',
'REASON_CHARTCLOSE',
'REASON_CLOSE',
'REASON_INITFAILED',
'REASON_PARAMETERS',
'REASON_PROGRAM',
'REASON_RECOMPILE',
'REASON_REMOVE',
'REASON_TEMPLATE',
'SATURDAY',
'SEEK_CUR',
'SEEK_END',
'SEEK_SET',
'SERIES_BARS_COUNT',
'SERIES_FIRSTDATE',
'SERIES_LASTBAR_DATE',
'SERIES_SERVER_FIRSTDATE',
'SERIES_SYNCHRONIZED',
'SERIES_TERMINAL_FIRSTDATE',
'SHORT_MAX',
'SHORT_MIN',
'STAT_BALANCEDD_PERCENT',
'STAT_BALANCEMIN',
'STAT_BALANCE_DDREL_PERCENT',
'STAT_BALANCE_DD',
'STAT_BALANCE_DD_RELATIVE',
'STAT_CONLOSSMAX',
'STAT_CONLOSSMAX_TRADES',
'STAT_CONPROFITMAX',
'STAT_CONPROFITMAX_TRADES',
'STAT_CUSTOM_ONTESTER',
'STAT_DEALS',
'STAT_EQUITYDD_PERCENT',
'STAT_EQUITYMIN',
'STAT_EQUITY_DDREL_PERCENT',
'STAT_EQUITY_DD',
'STAT_EQUITY_DD_RELATIVE',
'STAT_EXPECTED_PAYOFF',
'STAT_GROSS_LOSS',
'STAT_GROSS_PROFIT',
'STAT_INITIAL_DEPOSIT',
'STAT_LONG_TRADES',
'STAT_LOSSTRADES_AVGCON',
'STAT_LOSS_TRADES',
'STAT_MAX_CONLOSSES',
'STAT_MAX_CONLOSS_TRADES',
'STAT_MAX_CONPROFIT_TRADES',
'STAT_MAX_CONWINS',
'STAT_MAX_LOSSTRADE',
'STAT_MAX_PROFITTRADE',
'STAT_MIN_MARGINLEVEL',
'STAT_PROFITTRADES_AVGCON',
'STAT_PROFIT',
'STAT_PROFIT_FACTOR',
'STAT_PROFIT_LONGTRADES',
'STAT_PROFIT_SHORTTRADES',
'STAT_PROFIT_TRADES',
'STAT_RECOVERY_FACTOR',
'STAT_SHARPE_RATIO',
'STAT_SHORT_TRADES',
'STAT_TRADES',
'STAT_WITHDRAWAL',
'STO_CLOSECLOSE',
'STO_LOWHIGH',
'STYLE_DASHDOTDOT',
'STYLE_DASHDOT',
'STYLE_DASH',
'STYLE_DOT',
'STYLE_SOLID',
'SUNDAY',
'SYMBOL_ARROWDOWN',
'SYMBOL_ARROWUP',
'SYMBOL_CHECKSIGN',
'SYMBOL_LEFTPRICE',
'SYMBOL_RIGHTPRICE',
'SYMBOL_STOPSIGN',
'SYMBOL_THUMBSDOWN',
'SYMBOL_THUMBSUP',
'TERMINAL_BUILD',
'TERMINAL_CODEPAGE',
'TERMINAL_COMMONDATA_PATH',
'TERMINAL_COMPANY',
'TERMINAL_CONNECTED',
'TERMINAL_CPU_CORES',
'TERMINAL_DATA_PATH',
'TERMINAL_DISK_SPACE',
'TERMINAL_DLLS_ALLOWED',
'TERMINAL_EMAIL_ENABLED',
'TERMINAL_FTP_ENABLED',
'TERMINAL_LANGUAGE',
'TERMINAL_MAXBARS',
'TERMINAL_MEMORY_AVAILABLE',
'TERMINAL_MEMORY_PHYSICAL',
'TERMINAL_MEMORY_TOTAL',
'TERMINAL_MEMORY_USED',
'TERMINAL_NAME',
'TERMINAL_OPENCL_SUPPORT',
'TERMINAL_PATH',
'TERMINAL_TRADE_ALLOWED',
'TERMINAL_X64',
'THURSDAY',
'TRADE_ACTION_DEAL',
'TRADE_ACTION_MODIFY',
'TRADE_ACTION_PENDING',
'TRADE_ACTION_REMOVE',
'TRADE_ACTION_SLTP',
'TUESDAY',
'UCHAR_MAX',
'UINT_MAX',
'ULONG_MAX',
'USHORT_MAX',
'VOLUME_REAL',
'VOLUME_TICK',
'WEDNESDAY',
'WHOLE_ARRAY',
'WRONG_VALUE',
'clrNONE',
'__DATETIME__',
'__DATE__',
'__FILE__',
'__FUNCSIG__',
'__FUNCTION__',
'__LINE__',
'__MQL4BUILD__',
'__MQLBUILD__',
'__PATH__',
)
colors = (
'AliceBlue',
'AntiqueWhite',
'Aquamarine',
'Aqua',
'Beige',
'Bisque',
'Black',
'BlanchedAlmond',
'BlueViolet',
'Blue',
'Brown',
'BurlyWood',
'CadetBlue',
'Chartreuse',
'Chocolate',
'Coral',
'CornflowerBlue',
'Cornsilk',
'Crimson',
'DarkBlue',
'DarkGoldenrod',
'DarkGray',
'DarkGreen',
'DarkKhaki',
'DarkOliveGreen',
'DarkOrange',
'DarkOrchid',
'DarkSalmon',
'DarkSeaGreen',
'DarkSlateBlue',
'DarkSlateGray',
'DarkTurquoise',
'DarkViolet',
'DeepPink',
'DeepSkyBlue',
'DimGray',
'DodgerBlue',
'FireBrick',
'ForestGreen',
'Gainsboro',
'Goldenrod',
'Gold',
'Gray',
'GreenYellow',
'Green',
'Honeydew',
'HotPink',
'IndianRed',
'Indigo',
'Ivory',
'Khaki',
'LavenderBlush',
'Lavender',
'LawnGreen',
'LemonChiffon',
'LightBlue',
'LightCoral',
'LightCyan',
'LightGoldenrod',
'LightGray',
'LightGreen',
'LightPink',
'LightSalmon',
'LightSeaGreen',
'LightSkyBlue',
'LightSlateGray',
'LightSteelBlue',
'LightYellow',
'LimeGreen',
'Lime',
'Linen',
'Magenta',
'Maroon',
'MediumAquamarine',
'MediumBlue',
'MediumOrchid',
'MediumPurple',
'MediumSeaGreen',
'MediumSlateBlue',
'MediumSpringGreen',
'MediumTurquoise',
'MediumVioletRed',
'MidnightBlue',
'MintCream',
'MistyRose',
'Moccasin',
'NavajoWhite',
'Navy',
'OldLace',
'OliveDrab',
'Olive',
'OrangeRed',
'Orange',
'Orchid',
'PaleGoldenrod',
'PaleGreen',
'PaleTurquoise',
'PaleVioletRed',
'PapayaWhip',
'PeachPuff',
'Peru',
'Pink',
'Plum',
'PowderBlue',
'Purple',
'Red',
'RosyBrown',
'RoyalBlue',
'SaddleBrown',
'Salmon',
'SandyBrown',
'SeaGreen',
'Seashell',
'Sienna',
'Silver',
'SkyBlue',
'SlateBlue',
'SlateGray',
'Snow',
'SpringGreen',
'SteelBlue',
'Tan',
'Teal',
'Thistle',
'Tomato',
'Turquoise',
'Violet',
'Wheat',
'WhiteSmoke',
'White',
'YellowGreen',
'Yellow',
)
keywords = (
'input', '_Digits', '_Point', '_LastError', '_Period', '_RandomSeed',
'_StopFlag', '_Symbol', '_UninitReason', 'Ask', 'Bars', 'Bid',
'Close', 'Digits', 'High', 'Low', 'Open', 'Point', 'Time',
'Volume',
)
c_types = (
'void', 'char', 'uchar', 'bool', 'short', 'ushort', 'int', 'uint',
'color', 'long', 'ulong', 'datetime', 'float', 'double',
'string',
)
|
|
#!BPY
'''
Nothing fancy here: this just wraps the existing OBJ exporter in Blender 2.49
so the OBJ export and the conversion to three.js happen in one go.
'''
"""
Name: 'three.js(slim) (.js)...'
Blender: 249
Group: 'Export'
Tooltip: 'Save a three.js File'
"""
__author__ = "Campbell Barton, Jiri Hnidek, Paolo Ciccone,George Profenza,AlteredQualia"
__url__ = ['http://wiki.blender.org/index.php/Scripts/Manual/Export/wavefront_obj', 'www.blender.org', 'blenderartists.org','AlteredQualia http://alteredqualia.com','tomaterial.blogspot.com']
__version__ = "1.22"
__bpydoc__ = """\
This script is an exporter to the OBJ file format.
Usage:
Select the objects you wish to export and run this script from "File->Export" menu.
Selecting the default options from the popup box will be good in most cases.
All objects that can be represented as a mesh (mesh, curve, metaball, surface, text3d)
will be exported as mesh data.
"""
# ***** BEGIN GPL LICENSE BLOCK *****
#
# Script copyright (C) Campbell J Barton 2007-2009
# - V1.22- bspline import/export added (funded by PolyDimensions GmbH)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ***** END GPL LICENSE BLOCK *****
# --------------------------------------------------------------------------
import Blender
from Blender import Mesh, Scene, Window, sys, Image, Draw
import BPyMesh
import BPyObject
import BPySys
import BPyMessages
import re,os,subprocess
# Returns a tuple - path,extension.
# 'hello.obj' > ('hello', '.obj')
def splitExt(path):
dotidx = path.rfind('.')
if dotidx == -1:
return path, ''
else:
return path[:dotidx], path[dotidx:]
def fixName(name):
if name == None:
return 'None'
else:
return name.replace(' ', '_')
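# A quick sketch of how the two helpers above behave (illustrative only):
#
#   splitExt('hello.obj')  -> ('hello', '.obj')
#   splitExt('hello')      -> ('hello', '')
#   fixName('My Mesh')     -> 'My_Mesh'
#   fixName(None)          -> 'None'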
# A Dict of Materials
# (material.name, image.name):matname_imagename # matname_imagename has gaps removed.
MTL_DICT = {}
def write_mtl(filename):
world = Blender.World.GetCurrent()
if world:
worldAmb = world.getAmb()
else:
worldAmb = (0,0,0) # Default value
file = open(filename, "w")
file.write('# Blender3D MTL File: %s\n' % Blender.Get('filename').split('\\')[-1].split('/')[-1])
file.write('# Material Count: %i\n' % len(MTL_DICT))
# Write material/image combinations we have used.
for key, (mtl_mat_name, mat, img) in MTL_DICT.iteritems():
# Get the Blender data for the material and the image.
# Having an image named None will cause a bug, so don't do it :)
file.write('newmtl %s\n' % mtl_mat_name) # Define a new material: matname_imgname
if mat:
file.write('Ns %.6f\n' % ((mat.getHardness()-1) * 1.9607843137254901) ) # Hardness: convert Blender's 1-511 range to MTL's 0-1000
file.write('Ka %.6f %.6f %.6f\n' % tuple([c*mat.amb for c in worldAmb]) ) # Ambient, uses mirror colour,
file.write('Kd %.6f %.6f %.6f\n' % tuple([c*mat.ref for c in mat.rgbCol]) ) # Diffuse
file.write('Ks %.6f %.6f %.6f\n' % tuple([c*mat.spec for c in mat.specCol]) ) # Specular
file.write('Ni %.6f\n' % mat.IOR) # Refraction index
file.write('d %.6f\n' % mat.alpha) # Alpha (obj uses 'd' for dissolve)
# 0 to disable lighting, 1 for ambient & diffuse only (specular color set to black), 2 for full lighting.
if mat.getMode() & Blender.Material.Modes['SHADELESS']:
file.write('illum 0\n') # ignore lighting
elif mat.getSpec() == 0:
file.write('illum 1\n') # no specular.
else:
file.write('illum 2\n') # light normally
else:
#write a dummy material here?
file.write('Ns 0\n')
file.write('Ka %.6f %.6f %.6f\n' % tuple([c for c in worldAmb]) ) # Ambient, uses mirror colour,
file.write('Kd 0.8 0.8 0.8\n')
file.write('Ks 0.8 0.8 0.8\n')
file.write('d 1\n') # No alpha
file.write('illum 2\n') # light normally
# Write images!
if img: # We have an image on the face!
file.write('map_Kd %s\n' % img.filename.split('\\')[-1].split('/')[-1]) # Diffuse mapping image
elif mat: # No face image; if we have a material, search for an MTex image.
for mtex in mat.getTextures():
if mtex and mtex.tex.type == Blender.Texture.Types.IMAGE:
try:
filename = mtex.tex.image.filename.split('\\')[-1].split('/')[-1]
file.write('map_Kd %s\n' % filename) # Diffuse mapping image
break
except:
# Texture has no image even though it's an image type; best to ignore.
pass
file.write('\n\n')
file.close()
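# A sample of the MTL output write_mtl() produces for one material/image pair
# (values illustrative only):
#
#   newmtl Material_Tex
#   Ns 96.078431
#   Ka 0.000000 0.000000 0.000000
#   Kd 0.640000 0.640000 0.640000
#   Ks 0.500000 0.500000 0.500000
#   Ni 1.000000
#   d 1.000000
#   illum 2
#   map_Kd Tex.png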
def copy_file(source, dest):
file = open(source, 'rb')
data = file.read()
file.close()
file = open(dest, 'wb')
file.write(data)
file.close()
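# copy_file() is a straight byte-for-byte copy. A minimal stdlib equivalent
# (illustrative sketch; the exporter keeps its own implementation):
#
#   import shutil
#   shutil.copyfile(source, dest)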
def copy_images(dest_dir):
if dest_dir[-1] != sys.sep:
dest_dir += sys.sep
# Get unique image names
uniqueImages = {}
for matname, mat, image in MTL_DICT.itervalues(): # Only use image name
# Get Texface images
if image:
uniqueImages[image] = image # Should use sets here; wait until Python 2.4 is the default.
# Get MTex images
if mat:
for mtex in mat.getTextures():
if mtex and mtex.tex.type == Blender.Texture.Types.IMAGE:
image_tex = mtex.tex.image
if image_tex:
try:
uniqueImages[image_tex] = image_tex
except:
pass
# Now copy images
copyCount = 0
for bImage in uniqueImages.itervalues():
image_path = sys.expandpath(bImage.filename)
if sys.exists(image_path):
# Make a name for the target path.
dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1]
if not sys.exists(dest_image_path): # Image isn't already there
print '\tCopying "%s" > "%s"' % (image_path, dest_image_path)
copy_file(image_path, dest_image_path)
copyCount+=1
print '\tCopied %d images' % copyCount
def test_nurbs_compat(ob):
if ob.type != 'Curve':
return False
for nu in ob.data:
if (not nu.knotsV) and nu.type != 1: # not a surface and not bezier
return True
return False
def write_nurb(file, ob, ob_mat):
tot_verts = 0
cu = ob.data
# use negative indices
Vector = Blender.Mathutils.Vector
for nu in cu:
if nu.type==0: DEG_ORDER_U = 1
else: DEG_ORDER_U = nu.orderU-1 # Tested to be correct
if nu.type==1:
print "\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported"
continue
if nu.knotsV:
print "\tWarning, surface:", ob.name, "only poly and nurbs curves supported"
continue
if len(nu) <= DEG_ORDER_U:
print "\tWarning, orderU is lower then vert count, skipping:", ob.name
continue
pt_num = 0
do_closed = (nu.flagU & 1)
do_endpoints = (do_closed==0) and (nu.flagU & 2)
for pt in nu:
pt = Vector(pt[0], pt[1], pt[2]) * ob_mat
file.write('v %.6f %.6f %.6f\n' % (pt[0], pt[1], pt[2]))
pt_num += 1
tot_verts += pt_num
file.write('g %s\n' % (fixName(ob.name))) # fixName(ob.getData(1)) could use the data name too
file.write('cstype bspline\n') # not ideal, hard coded
file.write('deg %d\n' % DEG_ORDER_U) # not used for curves but most files have it still
curve_ls = [-(i+1) for i in xrange(pt_num)]
# 'curv' keyword
if do_closed:
if DEG_ORDER_U == 1:
pt_num += 1
curve_ls.append(-1)
else:
pt_num += DEG_ORDER_U
curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U]
file.write('curv 0.0 1.0 %s\n' % (' '.join( [str(i) for i in curve_ls] ))) # Blender has no U and V values for the curve
# 'parm' keyword
tot_parm = (DEG_ORDER_U + 1) + pt_num
tot_parm_div = float(tot_parm-1)
parm_ls = [(i/tot_parm_div) for i in xrange(tot_parm)]
if do_endpoints: # end points, force param
for i in xrange(DEG_ORDER_U+1):
parm_ls[i] = 0.0
parm_ls[-(1+i)] = 1.0
file.write('parm u %s\n' % ' '.join( [str(i) for i in parm_ls] ))
file.write('end\n')
return tot_verts
def write(filename, objects,\
EXPORT_TRI=False, EXPORT_EDGES=False, EXPORT_NORMALS=False, EXPORT_NORMALS_HQ=False,\
EXPORT_UV=True, EXPORT_MTL=True, EXPORT_COPY_IMAGES=False,\
EXPORT_APPLY_MODIFIERS=True, EXPORT_ROTX90=True, EXPORT_BLEN_OBS=True,\
EXPORT_GROUP_BY_OB=False, EXPORT_GROUP_BY_MAT=False, EXPORT_KEEP_VERT_ORDER=False,\
EXPORT_POLYGROUPS=False, EXPORT_CURVE_AS_NURBS=True):
'''
Basic write function. The context and options must already be set.
This can be called externally, e.g.:
write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options.
'''
def veckey3d(v):
return round(v.x, 6), round(v.y, 6), round(v.z, 6)
def veckey2d(v):
return round(v.x, 6), round(v.y, 6)
def findVertexGroupName(face, vWeightMap):
"""
Searches the vertex weight map to see which group is assigned to a given face.
We use a frequency system to pick the name, because a given vertex can
belong to two or more groups at the same time. To find the right name for the
face, we list all the candidate vertex group names with their accumulated
weight, sort by weight in ascending order, and take the last element: the
group sharing the highest total weight among the face's vertices.
"""
weightDict = {}
for vert in face:
vWeights = vWeightMap[vert.index]
for vGroupName, weight in vWeights:
weightDict[vGroupName] = weightDict.get(vGroupName, 0) + weight
if weightDict:
alist = [(weight,vGroupName) for vGroupName, weight in weightDict.iteritems()] # sort least to greatest amount of weight
alist.sort()
return(alist[-1][1]) # highest value last
else:
return '(null)'
print 'OBJ Export path: "%s"' % filename
temp_mesh_name = '~tmp-mesh'
time1 = sys.time()
scn = Scene.GetCurrent()
file = open(filename, "w")
# Write Header
file.write('# Blender3D v%s OBJ File: %s\n' % (Blender.Get('version'), Blender.Get('filename').split('/')[-1].split('\\')[-1] ))
file.write('# www.blender3d.org\n')
# Tell the obj file what material file to use.
if EXPORT_MTL:
mtlfilename = '%s.mtl' % '.'.join(filename.split('.')[:-1])
file.write('mtllib %s\n' % ( mtlfilename.split('\\')[-1].split('/')[-1] ))
# Get the container mesh. - used for applying modifiers and non mesh objects.
containerMesh = meshName = tempMesh = None
for meshName in Blender.NMesh.GetNames():
if meshName.startswith(temp_mesh_name):
tempMesh = Mesh.Get(meshName)
if not tempMesh.users:
containerMesh = tempMesh
if not containerMesh:
containerMesh = Mesh.New(temp_mesh_name)
if EXPORT_ROTX90:
mat_xrot90= Blender.Mathutils.RotationMatrix(-90, 4, 'x')
del meshName
del tempMesh
# Initialize totals, these are updated each object
totverts = totuvco = totno = 1
face_vert_index = 1
globalNormals = {}
# Get all meshes
for ob_main in objects:
for ob, ob_mat in BPyObject.getDerivedObjects(ob_main):
# Nurbs curve support
if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob):
if EXPORT_ROTX90:
ob_mat = ob_mat * mat_xrot90
totverts += write_nurb(file, ob, ob_mat)
continue
# end nurbs
# Will work for non meshes now! :)
# getMeshFromObject(ob, container_mesh=None, apply_modifiers=True, vgroups=True, scn=None)
me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, EXPORT_POLYGROUPS, scn)
if not me:
continue
if EXPORT_UV:
faceuv= me.faceUV
else:
faceuv = False
# We have a valid mesh
if EXPORT_TRI and me.faces:
# Add a dummy object to it.
has_quads = False
for f in me.faces:
if len(f) == 4:
has_quads = True
break
if has_quads:
oldmode = Mesh.Mode()
Mesh.Mode(Mesh.SelectModes['FACE'])
me.sel = True
tempob = scn.objects.new(me)
me.quadToTriangle(0) # more=0 shortest length
oldmode = Mesh.Mode(oldmode)
scn.objects.unlink(tempob)
Mesh.Mode(oldmode)
# Make our own list so it can be sorted to reduce context switching
faces = [ f for f in me.faces ]
if EXPORT_EDGES:
edges = me.edges
else:
edges = []
if not (len(faces)+len(edges)+len(me.verts)): # Make sure there is something to write
continue # don't bother with this mesh.
if EXPORT_ROTX90:
me.transform(ob_mat*mat_xrot90)
else:
me.transform(ob_mat)
# High Quality Normals
if EXPORT_NORMALS and faces:
if EXPORT_NORMALS_HQ:
BPyMesh.meshCalcNormals(me)
else:
# transforming normals is incorrect
# when the matrix is scaled,
# better to recalculate them
me.calcNormals()
# # Crash Blender
#materials = me.getMaterials(1) # 1 == will return None in the list.
materials = me.materials
materialNames = []
materialItems = materials[:]
if materials:
for mat in materials:
if mat: # !=None
materialNames.append(mat.name)
else:
materialNames.append(None)
# Can't use a list comprehension because some materials are None.
# materialNames = map(lambda mat: mat.name, materials) # Blender bug: doesn't account for null materials, still broken.
# Null materials are possible and will mess up indices,
# but at least it will export; wait until Blender gets fixed.
materialNames.extend((16-len(materialNames)) * [None])
materialItems.extend((16-len(materialItems)) * [None])
# Sort by Material, then images
# so we dont over context switch in the obj file.
if EXPORT_KEEP_VERT_ORDER:
pass
elif faceuv:
try: faces.sort(key = lambda a: (a.mat, a.image, a.smooth))
except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth)))
elif len(materials) > 1:
try: faces.sort(key = lambda a: (a.mat, a.smooth))
except: faces.sort(lambda a,b: cmp((a.mat, a.smooth), (b.mat, b.smooth)))
else:
# no materials
try: faces.sort(key = lambda a: a.smooth)
except: faces.sort(lambda a,b: cmp(a.smooth, b.smooth))
# Set the default mat to no material and no image.
contextMat = (0, 0) # Can never be this, so we will label a new material the first chance we get.
contextSmooth = None # Will either be true or false, set bad to force initialization switch.
if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
name1 = ob.name
name2 = ob.getData(1)
if name1 == name2:
obnamestring = fixName(name1)
else:
obnamestring = '%s_%s' % (fixName(name1), fixName(name2))
if EXPORT_BLEN_OBS:
file.write('o %s\n' % obnamestring) # Write Object name
else: # if EXPORT_GROUP_BY_OB:
file.write('g %s\n' % obnamestring)
# Vert
for v in me.verts:
file.write('v %.6f %.6f %.6f\n' % tuple(v.co))
# UV
if faceuv:
uv_face_mapping = [[0,0,0,0] for f in faces] # a bit of a waste for tri's :/
uv_dict = {} # could use a set() here
for f_index, f in enumerate(faces):
for uv_index, uv in enumerate(f.uv):
uvkey = veckey2d(uv)
try:
uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]
except:
uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)
file.write('vt %.6f %.6f\n' % tuple(uv))
uv_unique_count = len(uv_dict)
del uv, uvkey, uv_dict, f_index, uv_index
# Only need uv_unique_count and uv_face_mapping
# NORMAL, Smooth/Non smoothed.
if EXPORT_NORMALS:
for f in faces:
if f.smooth:
for v in f:
noKey = veckey3d(v.no)
if not globalNormals.has_key( noKey ):
globalNormals[noKey] = totno
totno +=1
file.write('vn %.6f %.6f %.6f\n' % noKey)
else:
# Hard, 1 normal from the face.
noKey = veckey3d(f.no)
if not globalNormals.has_key( noKey ):
globalNormals[noKey] = totno
totno +=1
file.write('vn %.6f %.6f %.6f\n' % noKey)
if not faceuv:
f_image = None
if EXPORT_POLYGROUPS:
# Retrieve the list of vertex groups
vertGroupNames = me.getVertGroupNames()
currentVGroup = ''
# Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to
vgroupsMap = [[] for _i in xrange(len(me.verts))]
for vertexGroupName in vertGroupNames:
for vIdx, vWeight in me.getVertsFromGroup(vertexGroupName, 1):
vgroupsMap[vIdx].append((vertexGroupName, vWeight))
for f_index, f in enumerate(faces):
f_v= f.v
f_smooth= f.smooth
f_mat = min(f.mat, len(materialNames)-1)
if faceuv:
f_image = f.image
f_uv= f.uv
# MAKE KEY
if faceuv and f_image: # Object is always true.
key = materialNames[f_mat], f_image.name
else:
key = materialNames[f_mat], None # No image, use None instead.
# Write the vertex group
if EXPORT_POLYGROUPS:
if vertGroupNames:
# find which vertex group the face belongs to
theVGroup = findVertexGroupName(f,vgroupsMap)
if theVGroup != currentVGroup:
currentVGroup = theVGroup
file.write('g %s\n' % theVGroup)
# CHECK FOR CONTEXT SWITCH
if key == contextMat:
pass # Context already switched; don't do anything
else:
if key[0] == None and key[1] == None:
# Write a null material, since we know the context has changed.
if EXPORT_GROUP_BY_MAT:
file.write('g %s_%s\n' % (fixName(ob.name), fixName(ob.getData(1))) ) # can be mat_image or (null)
file.write('usemtl (null)\n') # mat, image
else:
mat_data= MTL_DICT.get(key)
if not mat_data:
# First add to global dict so we can export to mtl
# Then write mtl
# Make a new name from the material and image names,
# converting any spaces to underscores with fixName.
# If there is no image, don't bother adding it to the name.
if key[1] == None:
mat_data = MTL_DICT[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image
else:
mat_data = MTL_DICT[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image
if EXPORT_GROUP_BY_MAT:
file.write('g %s_%s_%s\n' % (fixName(ob.name), fixName(ob.getData(1)), mat_data[0]) ) # can be mat_image or (null)
file.write('usemtl %s\n' % mat_data[0]) # can be mat_image or (null)
contextMat = key
if f_smooth != contextSmooth:
if f_smooth: # smoothing turned on
file.write('s 1\n')
contextSmooth = f_smooth
else: # smoothing turned off
file.write('s off\n')
contextSmooth = f_smooth
file.write('f')
if faceuv:
if EXPORT_NORMALS:
if f_smooth: # Smoothed, use vertex normals
for vi, v in enumerate(f_v):
file.write( ' %d/%d/%d' % (\
v.index+totverts,\
totuvco + uv_face_mapping[f_index][vi],\
globalNormals[ veckey3d(v.no) ])) # vert, uv, normal
else: # No smoothing, face normals
no = globalNormals[ veckey3d(f.no) ]
for vi, v in enumerate(f_v):
file.write( ' %d/%d/%d' % (\
v.index+totverts,\
totuvco + uv_face_mapping[f_index][vi],\
no)) # vert, uv, normal
else: # No Normals
for vi, v in enumerate(f_v):
file.write( ' %d/%d' % (\
v.index+totverts,\
totuvco + uv_face_mapping[f_index][vi])) # vert, uv
face_vert_index += len(f_v)
else: # No UV's
if EXPORT_NORMALS:
if f_smooth: # Smoothed, use vertex normals
for v in f_v:
file.write( ' %d//%d' % (\
v.index+totverts,\
globalNormals[ veckey3d(v.no) ]))
else: # No smoothing, face normals
no = globalNormals[ veckey3d(f.no) ]
for v in f_v:
file.write( ' %d//%d' % (\
v.index+totverts,\
no))
else: # No Normals
for v in f_v:
file.write( ' %d' % (\
v.index+totverts))
file.write('\n')
# Write edges.
if EXPORT_EDGES:
LOOSE= Mesh.EdgeFlags.LOOSE
for ed in edges:
if ed.flag & LOOSE:
file.write('f %d %d\n' % (ed.v1.index+totverts, ed.v2.index+totverts))
# Make the indices global rather than per mesh
totverts += len(me.verts)
if faceuv:
totuvco += uv_unique_count
me.verts= None
file.close()
# Now we have all our materials, save them
if EXPORT_MTL:
write_mtl(mtlfilename)
if EXPORT_COPY_IMAGES:
dest_dir = filename
# Remove chars until we are just the path.
while dest_dir and dest_dir[-1] not in '\\/':
dest_dir = dest_dir[:-1]
if dest_dir:
copy_images(dest_dir)
else:
print '\tError: "%s" could not be used as a base for an image path.' % filename
print "Export time: %.2f" % (sys.time() - time1)
convert = ['python', Blender.Get('scriptsdir')+'/convert_obj_threejs_slim.py', '-i', filename, '-o', filename.replace('.obj','.js')]
try:
p = subprocess.Popen(convert, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate() # consume output and wait for exit; avoids busy-polling and pipe-buffer deadlock
except OSError: # Popen raises OSError when the interpreter or script can't be run
print 'Error doing conversion!'
print 'done'
os.remove(filename)
os.remove(filename.replace('.obj','.mtl'))
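# A minimal sketch of driving write() directly from another script, as its
# docstring suggests (the output path below is a placeholder). Note that
# write() also runs the three.js conversion and then deletes the
# intermediate .obj/.mtl files:
#
#   import Blender
#   objs = Blender.Object.GetSelected()
#   write('/tmp/scene.obj', objs, EXPORT_TRI=True, EXPORT_NORMALS=True)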
def write_ui(filename):
if not filename.lower().endswith('.obj'):
filename += '.obj'
if not BPyMessages.Warning_SaveOver(filename):
return
global EXPORT_APPLY_MODIFIERS, EXPORT_ROTX90, EXPORT_TRI, EXPORT_EDGES,\
EXPORT_NORMALS, EXPORT_NORMALS_HQ, EXPORT_UV,\
EXPORT_MTL, EXPORT_SEL_ONLY, EXPORT_ALL_SCENES,\
EXPORT_ANIMATION, EXPORT_COPY_IMAGES, EXPORT_BLEN_OBS,\
EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER,\
EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS
EXPORT_APPLY_MODIFIERS = Draw.Create(0)
EXPORT_ROTX90 = Draw.Create(1)
EXPORT_TRI = Draw.Create(0)
EXPORT_EDGES = Draw.Create(1)
EXPORT_NORMALS = Draw.Create(0)
EXPORT_NORMALS_HQ = Draw.Create(0)
EXPORT_UV = Draw.Create(1)
EXPORT_MTL = Draw.Create(1)
EXPORT_SEL_ONLY = Draw.Create(1)
EXPORT_ALL_SCENES = Draw.Create(0)
EXPORT_ANIMATION = Draw.Create(0)
EXPORT_COPY_IMAGES = Draw.Create(0)
EXPORT_BLEN_OBS = Draw.Create(0)
EXPORT_GROUP_BY_OB = Draw.Create(0)
EXPORT_GROUP_BY_MAT = Draw.Create(0)
EXPORT_KEEP_VERT_ORDER = Draw.Create(1)
EXPORT_POLYGROUPS = Draw.Create(0)
EXPORT_CURVE_AS_NURBS = Draw.Create(1)
# Old UI
'''
# removed: too many options are bad!
# Get USER Options
pup_block = [\
('Context...'),\
('Selection Only', EXPORT_SEL_ONLY, 'Only export objects in visible selection. Else export whole scene.'),\
('All Scenes', EXPORT_ALL_SCENES, 'Each scene as a separate OBJ file.'),\
('Animation', EXPORT_ANIMATION, 'Each frame as a numbered OBJ file.'),\
('Object Prefs...'),\
('Apply Modifiers', EXPORT_APPLY_MODIFIERS, 'Use transformed mesh data from each object. May break vert order for morph targets.'),\
('Rotate X90', EXPORT_ROTX90 , 'Rotate on export so Blenders UP is translated into OBJs UP'),\
('Keep Vert Order', EXPORT_KEEP_VERT_ORDER, 'Keep vert and face order, disables some other options.'),\
('Extra Data...'),\
('Edges', EXPORT_EDGES, 'Edges not connected to faces.'),\
('Normals', EXPORT_NORMALS, 'Export vertex normal data (Ignored on import).'),\
('High Quality Normals', EXPORT_NORMALS_HQ, 'Calculate high quality normals for rendering.'),\
('UVs', EXPORT_UV, 'Export texface UV coords.'),\
('Materials', EXPORT_MTL, 'Write a separate MTL file with the OBJ.'),\
('Copy Images', EXPORT_COPY_IMAGES, 'Copy image files to the export directory, never overwrite.'),\
('Triangulate', EXPORT_TRI, 'Triangulate quads.'),\
('Grouping...'),\
('Objects', EXPORT_BLEN_OBS, 'Export blender objects as "OBJ objects".'),\
('Object Groups', EXPORT_GROUP_BY_OB, 'Export blender objects as "OBJ Groups".'),\
('Material Groups', EXPORT_GROUP_BY_MAT, 'Group by materials.'),\
]
if not Draw.PupBlock('Export...', pup_block):
return
'''
# BEGIN ALTERNATIVE UI *******************
if True:
EVENT_NONE = 0
EVENT_EXIT = 1
EVENT_REDRAW = 2
EVENT_EXPORT = 3
GLOBALS = {}
GLOBALS['EVENT'] = EVENT_REDRAW
#GLOBALS['MOUSE'] = Window.GetMouseCoords()
GLOBALS['MOUSE'] = [i/2 for i in Window.GetScreenSize()]
def obj_ui_set_event(e,v):
GLOBALS['EVENT'] = e
def do_split(e,v):
global EXPORT_BLEN_OBS, EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_APPLY_MODIFIERS, EXPORT_KEEP_VERT_ORDER, EXPORT_POLYGROUPS
if EXPORT_BLEN_OBS.val or EXPORT_GROUP_BY_OB.val or EXPORT_GROUP_BY_MAT.val or EXPORT_APPLY_MODIFIERS.val:
EXPORT_KEEP_VERT_ORDER.val = 0
else:
EXPORT_KEEP_VERT_ORDER.val = 1
def do_vertorder(e,v):
global EXPORT_BLEN_OBS, EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_APPLY_MODIFIERS, EXPORT_KEEP_VERT_ORDER
if EXPORT_KEEP_VERT_ORDER.val:
EXPORT_BLEN_OBS.val = EXPORT_GROUP_BY_OB.val = EXPORT_GROUP_BY_MAT.val = EXPORT_APPLY_MODIFIERS.val = 0
else:
if not (EXPORT_BLEN_OBS.val or EXPORT_GROUP_BY_OB.val or EXPORT_GROUP_BY_MAT.val or EXPORT_APPLY_MODIFIERS.val):
EXPORT_KEEP_VERT_ORDER.val = 1
def do_help(e,v):
url = __url__[0]
print 'Trying to open web browser with documentation at this address...'
print '\t' + url
try:
import webbrowser
webbrowser.open(url)
except:
print '...could not open a browser window.'
def obj_ui():
ui_x, ui_y = GLOBALS['MOUSE']
# Center based on overall pup size
ui_x -= 165
ui_y -= 140
global EXPORT_APPLY_MODIFIERS, EXPORT_ROTX90, EXPORT_TRI, EXPORT_EDGES,\
EXPORT_NORMALS, EXPORT_NORMALS_HQ, EXPORT_UV,\
EXPORT_MTL, EXPORT_SEL_ONLY, EXPORT_ALL_SCENES,\
EXPORT_ANIMATION, EXPORT_COPY_IMAGES, EXPORT_BLEN_OBS,\
EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER,\
EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS
Draw.Label('Context...', ui_x+9, ui_y+239, 220, 20)
Draw.BeginAlign()
EXPORT_SEL_ONLY = Draw.Toggle('Selection Only', EVENT_NONE, ui_x+9, ui_y+219, 110, 20, EXPORT_SEL_ONLY.val, 'Only export objects in visible selection. Else export whole scene.')
EXPORT_ALL_SCENES = Draw.Toggle('All Scenes', EVENT_NONE, ui_x+119, ui_y+219, 110, 20, EXPORT_ALL_SCENES.val, 'Each scene as a separate OBJ file.')
EXPORT_ANIMATION = Draw.Toggle('Animation', EVENT_NONE, ui_x+229, ui_y+219, 110, 20, EXPORT_ANIMATION.val, 'Each frame as a numbered OBJ file.')
Draw.EndAlign()
Draw.Label('Output Options...', ui_x+9, ui_y+189, 220, 20)
Draw.BeginAlign()
EXPORT_APPLY_MODIFIERS = Draw.Toggle('Apply Modifiers', EVENT_REDRAW, ui_x+9, ui_y+170, 110, 20, EXPORT_APPLY_MODIFIERS.val, 'Use transformed mesh data from each object. May break vert order for morph targets.', do_split)
EXPORT_ROTX90 = Draw.Toggle('Rotate X90', EVENT_NONE, ui_x+119, ui_y+170, 110, 20, EXPORT_ROTX90.val, 'Rotate on export so Blenders UP is translated into OBJs UP')
EXPORT_COPY_IMAGES = Draw.Toggle('Copy Images', EVENT_NONE, ui_x+229, ui_y+170, 110, 20, EXPORT_COPY_IMAGES.val, 'Copy image files to the export directory, never overwrite.')
Draw.EndAlign()
Draw.Label('Export...', ui_x+9, ui_y+139, 220, 20)
Draw.BeginAlign()
EXPORT_EDGES = Draw.Toggle('Edges', EVENT_NONE, ui_x+9, ui_y+120, 50, 20, EXPORT_EDGES.val, 'Edges not connected to faces.')
EXPORT_TRI = Draw.Toggle('Triangulate', EVENT_NONE, ui_x+59, ui_y+120, 70, 20, EXPORT_TRI.val, 'Triangulate quads.')
Draw.EndAlign()
Draw.BeginAlign()
EXPORT_MTL = Draw.Toggle('Materials', EVENT_NONE, ui_x+139, ui_y+120, 70, 20, EXPORT_MTL.val, 'Write a separate MTL file with the OBJ.')
EXPORT_UV = Draw.Toggle('UVs', EVENT_NONE, ui_x+209, ui_y+120, 31, 20, EXPORT_UV.val, 'Export texface UV coords.')
Draw.EndAlign()
Draw.BeginAlign()
EXPORT_NORMALS = Draw.Toggle('Normals', EVENT_NONE, ui_x+250, ui_y+120, 59, 20, EXPORT_NORMALS.val, 'Export vertex normal data (Ignored on import).')
EXPORT_NORMALS_HQ = Draw.Toggle('HQ', EVENT_NONE, ui_x+309, ui_y+120, 31, 20, EXPORT_NORMALS_HQ.val, 'Calculate high quality normals for rendering.')
Draw.EndAlign()
EXPORT_POLYGROUPS = Draw.Toggle('Polygroups', EVENT_REDRAW, ui_x+9, ui_y+95, 120, 20, EXPORT_POLYGROUPS.val, 'Export vertex groups as OBJ groups (one group per face approximation).')
EXPORT_CURVE_AS_NURBS = Draw.Toggle('Nurbs', EVENT_NONE, ui_x+139, ui_y+95, 100, 20, EXPORT_CURVE_AS_NURBS.val, 'Export 3D nurbs curves and polylines as OBJ curves, (bezier not supported).')
Draw.Label('Blender Objects as OBJ:', ui_x+9, ui_y+59, 220, 20)
Draw.BeginAlign()
EXPORT_BLEN_OBS = Draw.Toggle('Objects', EVENT_REDRAW, ui_x+9, ui_y+39, 60, 20, EXPORT_BLEN_OBS.val, 'Export blender objects as "OBJ objects".', do_split)
EXPORT_GROUP_BY_OB = Draw.Toggle('Groups', EVENT_REDRAW, ui_x+69, ui_y+39, 60, 20, EXPORT_GROUP_BY_OB.val, 'Export blender objects as "OBJ Groups".', do_split)
EXPORT_GROUP_BY_MAT = Draw.Toggle('Material Groups', EVENT_REDRAW, ui_x+129, ui_y+39, 100, 20, EXPORT_GROUP_BY_MAT.val, 'Group by materials.', do_split)
Draw.EndAlign()
EXPORT_KEEP_VERT_ORDER = Draw.Toggle('Keep Vert Order', EVENT_REDRAW, ui_x+239, ui_y+39, 100, 20, EXPORT_KEEP_VERT_ORDER.val, 'Keep vert and face order, disables some other options. Use for morph targets.', do_vertorder)
Draw.BeginAlign()
Draw.PushButton('Online Help', EVENT_REDRAW, ui_x+9, ui_y+9, 110, 20, 'Load the wiki page for this script', do_help)
Draw.PushButton('Cancel', EVENT_EXIT, ui_x+119, ui_y+9, 110, 20, '', obj_ui_set_event)
Draw.PushButton('Export', EVENT_EXPORT, ui_x+229, ui_y+9, 110, 20, 'Export with these settings', obj_ui_set_event)
Draw.EndAlign()
# hack so the toggle buttons redraw. this is not nice at all
while GLOBALS['EVENT'] not in (EVENT_EXIT, EVENT_EXPORT):
Draw.UIBlock(obj_ui, 0)
if GLOBALS['EVENT'] != EVENT_EXPORT:
return
# END ALTERNATIVE UI *********************
if EXPORT_KEEP_VERT_ORDER.val:
EXPORT_BLEN_OBS.val = False
EXPORT_GROUP_BY_OB.val = False
EXPORT_GROUP_BY_MAT.val = False
EXPORT_APPLY_MODIFIERS.val = False
Window.EditMode(0)
Window.WaitCursor(1)
EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS.val
EXPORT_ROTX90 = EXPORT_ROTX90.val
EXPORT_TRI = EXPORT_TRI.val
EXPORT_EDGES = EXPORT_EDGES.val
EXPORT_NORMALS = EXPORT_NORMALS.val
EXPORT_NORMALS_HQ = EXPORT_NORMALS_HQ.val
EXPORT_UV = EXPORT_UV.val
EXPORT_MTL = EXPORT_MTL.val
EXPORT_SEL_ONLY = EXPORT_SEL_ONLY.val
EXPORT_ALL_SCENES = EXPORT_ALL_SCENES.val
EXPORT_ANIMATION = EXPORT_ANIMATION.val
EXPORT_COPY_IMAGES = EXPORT_COPY_IMAGES.val
EXPORT_BLEN_OBS = EXPORT_BLEN_OBS.val
EXPORT_GROUP_BY_OB = EXPORT_GROUP_BY_OB.val
EXPORT_GROUP_BY_MAT = EXPORT_GROUP_BY_MAT.val
EXPORT_KEEP_VERT_ORDER = EXPORT_KEEP_VERT_ORDER.val
EXPORT_POLYGROUPS = EXPORT_POLYGROUPS.val
EXPORT_CURVE_AS_NURBS = EXPORT_CURVE_AS_NURBS.val
base_name, ext = splitExt(filename)
context_name = [base_name, '', '', ext] # basename, scene_name, framenumber, extension
# Use the options to export the data using write()
# def write(filename, objects, EXPORT_EDGES=False, EXPORT_NORMALS=False, EXPORT_MTL=True, EXPORT_COPY_IMAGES=False, EXPORT_APPLY_MODIFIERS=True):
orig_scene = Scene.GetCurrent()
if EXPORT_ALL_SCENES:
export_scenes = Scene.Get()
else:
export_scenes = [orig_scene]
# Export all scenes.
for scn in export_scenes:
scn.makeCurrent() # If already current, this is not slow.
context = scn.getRenderingContext()
orig_frame = Blender.Get('curframe')
if EXPORT_ALL_SCENES: # Add scene name into the context_name
context_name[1] = '_%s' % BPySys.cleanName(scn.name) # WARNING: it's possible this could cause a name collision; we could fix it if we were feeling paranoid.
# Export an animation?
if EXPORT_ANIMATION:
scene_frames = xrange(context.startFrame(), context.endFrame()+1) # up to and including the end frame.
else:
scene_frames = [orig_frame] # Don't export an animation.
# Loop through all frames in the scene and export.
for frame in scene_frames:
if EXPORT_ANIMATION: # Add frame to the filename.
context_name[2] = '_%.6d' % frame
Blender.Set('curframe', frame)
if EXPORT_SEL_ONLY:
export_objects = scn.objects.context
else:
export_objects = scn.objects
full_path= ''.join(context_name)
# Note: a bit of a problem here, this can overwrite files when exporting frames; not too bad.
# EXPORT THE FILE.
write(full_path, export_objects,\
EXPORT_TRI, EXPORT_EDGES, EXPORT_NORMALS,\
EXPORT_NORMALS_HQ, EXPORT_UV, EXPORT_MTL,\
EXPORT_COPY_IMAGES, EXPORT_APPLY_MODIFIERS,\
EXPORT_ROTX90, EXPORT_BLEN_OBS,\
EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER,\
EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS)
Blender.Set('curframe', orig_frame)
# Restore old active scene.
orig_scene.makeCurrent()
Window.WaitCursor(0)
if __name__ == '__main__':
Window.FileSelector(write_ui, 'Export three.js (slim)', sys.makename(ext='.obj'))
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import conversion_action
from google.ads.googleads.v8.services.types import conversion_action_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import (
ConversionActionServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import ConversionActionServiceGrpcTransport
class ConversionActionServiceClientMeta(type):
"""Metaclass for the ConversionActionService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ConversionActionServiceTransport]]
_transport_registry["grpc"] = ConversionActionServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[ConversionActionServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ConversionActionServiceClient(
metaclass=ConversionActionServiceClientMeta
):
"""Service to manage conversion actions."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
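# For example (per the docstring above; endpoints illustrative):
#   "googleads.googleapis.com"          -> "googleads.mtls.googleapis.com"
#   "googleads.sandbox.googleapis.com"  -> "googleads.mtls.sandbox.googleapis.com"
#   "googleads.mtls.googleapis.com"     -> returned unchanged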
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConversionActionServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConversionActionServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ConversionActionServiceTransport:
"""Return the transport used by the client instance.
Returns:
ConversionActionServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def conversion_action_path(
customer_id: str, conversion_action_id: str,
) -> str:
"""Return a fully-qualified conversion_action string."""
return "customers/{customer_id}/conversionActions/{conversion_action_id}".format(
customer_id=customer_id, conversion_action_id=conversion_action_id,
)
@staticmethod
def parse_conversion_action_path(path: str) -> Dict[str, str]:
"""Parse a conversion_action path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/conversionActions/(?P<conversion_action_id>.+?)$",
path,
)
return m.groupdict() if m else {}
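# A quick round-trip example for the two helpers above (IDs are placeholders):
#
#   path = ConversionActionServiceClient.conversion_action_path(
#       "1234567890", "987654321"
#   )
#   # -> "customers/1234567890/conversionActions/987654321"
#   ConversionActionServiceClient.parse_conversion_action_path(path)
#   # -> {"customer_id": "1234567890", "conversion_action_id": "987654321"}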
@staticmethod
def customer_path(customer_id: str,) -> str:
"""Return a fully-qualified customer string."""
return "customers/{customer_id}".format(customer_id=customer_id,)
@staticmethod
def parse_customer_path(path: str) -> Dict[str, str]:
"""Parse a customer path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ConversionActionServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the conversion action service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ConversionActionServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ConversionActionServiceTransport):
# transport is a ConversionActionServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = ConversionActionServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_conversion_action(
self,
request: conversion_action_service.GetConversionActionRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> conversion_action.ConversionAction:
r"""Returns the requested conversion action.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetConversionActionRequest`):
The request object. Request message for
[ConversionActionService.GetConversionAction][google.ads.googleads.v8.services.ConversionActionService.GetConversionAction].
resource_name (:class:`str`):
Required. The resource name of the
conversion action to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.ConversionAction:
A conversion action.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a conversion_action_service.GetConversionActionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, conversion_action_service.GetConversionActionRequest
):
request = conversion_action_service.GetConversionActionRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_conversion_action
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
def mutate_conversion_actions(
self,
request: conversion_action_service.MutateConversionActionsRequest = None,
*,
customer_id: str = None,
operations: Sequence[
conversion_action_service.ConversionActionOperation
] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> conversion_action_service.MutateConversionActionsResponse:
r"""Creates, updates or removes conversion actions. Operation
statuses are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `ConversionActionError <>`__
`CurrencyCodeError <>`__ `DatabaseError <>`__ `FieldError <>`__
`FieldMaskError <>`__ `HeaderError <>`__ `InternalError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`ResourceCountLimitExceededError <>`__ `StringLengthError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.MutateConversionActionsRequest`):
The request object. Request message for
[ConversionActionService.MutateConversionActions][google.ads.googleads.v8.services.ConversionActionService.MutateConversionActions].
customer_id (:class:`str`):
Required. The ID of the customer
whose conversion actions are being
modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v8.services.types.ConversionActionOperation]`):
Required. The list of operations to
perform on individual conversion
actions.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.services.types.MutateConversionActionsResponse:
Response message for
[ConversionActionService.MutateConversionActions][google.ads.googleads.v8.services.ConversionActionService.MutateConversionActions].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a conversion_action_service.MutateConversionActionsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, conversion_action_service.MutateConversionActionsRequest
):
request = conversion_action_service.MutateConversionActionsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_conversion_actions
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("ConversionActionServiceClient",)
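# A minimal usage sketch (illustrative only; the credentials file name and the
# IDs below are hypothetical placeholders):
#
#   client = ConversionActionServiceClient.from_service_account_file(
#       "service-account.json"
#   )
#   name = client.conversion_action_path("1234567890", "987654321")
#   action = client.get_conversion_action(resource_name=name)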
|
|
""" Created by Max 10/4/2017 """
from __future__ import division
import random
import math
from typing import Dict, Tuple, List
class CrossValidation:
def __init__(self, folds, learner):
"""
Constructor
:param folds: num folds
:param learner: the k-NN algorithm to use.
"""
self.folds = folds
self.learner = learner
def cross_validation_regression(self, dataset):
"""
Runs cross validation using the k-NN regression learner.
Creates the folds for CV.
For each fold, creates the test and training sets.
Calculates the MSE for the data sets.
Stores the results.
Averages the MSE over the number of folds and calculates the SD.
Returns the values.
:param dataset: the training data set to use.
:return: average MSE, Standard deviation, the predictions, and the actuals for all cv runs
"""
random.shuffle(dataset)
fold_length = int(math.floor(len(dataset)/self.folds))
cross_validation_dataset = []
for i in range(0, len(dataset), fold_length):
cross_validation_dataset.append(dataset[i:i+fold_length])
# run cross validation
mse_list = []
predictions = []
actuals = []
for i in range(self.folds):
# construct training set.
test_set = cross_validation_dataset[i]
training_set = cross_validation_dataset[:i] + cross_validation_dataset[i+1:]
training_set = [item for sublist in training_set for item in sublist]
# Get the MSE
mse = self.calculate_mse(self.learner, training_set, test_set)
# Store results
mse_list.append(mse[0])
predictions.append(mse[1])
actuals.append(mse[2])
average_mse = sum(mse_list) / len(mse_list)
sd = self.calc_standard_deviation(average_mse, mse_list)
return average_mse, sd, predictions, actuals
def cross_validation_classification(self, dataset, pruning=False):
"""
Runs cross validation using the DT classification learner.
Creates the folds for CV, using stratified data for each fold.
For each fold, creates the test and training sets.
Calculates the error rate for the sets.
Stores the results.
Averages the error rate over the number of folds and calculates the SD.
Returns the values.
:param dataset: the training data set to use.
:param pruning: boolean, if we are using pruning or not.
:return: average Error rate , Standard deviation, the predictions, and the actuals for all cv runs
"""
random.shuffle(dataset)
fold_length = int(math.floor(len(dataset)/self.folds))
cross_validation_dataset = self.get_stratified_data(dataset, fold_length, self.folds)
# run cross validation
error_list = []
predictions = []
actuals = []
models = []
for i in range(self.folds):
# construct training set.
test_set = cross_validation_dataset[i]
training_set = cross_validation_dataset[:i] + cross_validation_dataset[i+1:]
training_set = [item for sublist in training_set for item in sublist]
# calculate the error rate for the test set with the training set
model = self.learner.learn(training_set)
models.append(model)
error_rate = self.calculate_error_rate(self.learner, model, test_set)
# Store results
error_list.append(error_rate[0])
predictions.append(error_rate[1])
actuals.append(error_rate[2])
average_error_rate = sum(error_list) / len(error_list)
sd = self.calc_standard_deviation(average_error_rate, error_list)
return average_error_rate, sd, models, predictions, actuals
def calc_standard_deviation(self, average, list_of_values):
"""
Calculates the SD of the Cross validation.
:param average: average error for CV float
:param list_of_values: list of errors for CV
:return: sd of CV
"""
sd = 0
for x in list_of_values:
sd += (x - average) ** 2
sd /= len(list_of_values)
sd = math.sqrt(sd)
return sd
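# calc_standard_deviation computes the population standard deviation:
#
#   sd = sqrt((1/N) * sum_i (x_i - average)**2)
#
# where N is the number of folds and x_i is the error of fold i.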
def calculate_mse(self, learner, training_data, test_data):
"""
Helper function for calculating MSE, and tracking actual and predicted values.
Calculate the squared error for each pair of points, and sum over all the squared errors.
Then divide by the number of squared errors.
:param learner: the k-NN regression class
:param training_data: the data set for the model
:param test_data: the query points to get predictions for.
:return: (MSE, list of predictions, list of corresponding actuals)
"""
squared_error = []
predictions = learner.test(training_data, test_data)
actual = []
for prediction, test_item in zip(predictions, test_data):
actual.append(test_item[-1])
squared_error.append(self.get_squared_error(prediction, test_item))
squared_error_sum = sum(squared_error)
mse = squared_error_sum / len(squared_error)
return mse, predictions, actual
def get_squared_error(self, predicted_value, item):
"""
Calculates the squared error of predicted points, and actual items
:param predicted_value: the predicted value (float)
:param item: a single query data point; its last element is the actual value.
:return: Squared error
"""
actual_value = item[-1]
squared_error = (predicted_value - actual_value)**2
return squared_error
def calculate_error_rate(self, learner, model, test_data):
"""
Calculates the error rate for classification.
Tracks the actuals and the predictions.
:param learner: a classifier
:param model: model of the learner
:param test_data: query points to classify
:return: error_rate, list of predictions, the actual values.
"""
predictions = learner.classify(model, test_data)
actuals = []
num_errors = 0
for prediction, test_item in zip(predictions, test_data):
if type(prediction) is tuple:
actual_prediction = prediction[0]
else:
actual_prediction = prediction[0][0]
actuals.append(test_item[-1])
if actual_prediction != test_item[-1]:
num_errors += 1
error_rate = num_errors / len(predictions)
return (error_rate, predictions, actuals)
def get_stratified_data(self, dataset, fold_length, num_folds):
"""
Creates the cross validation folds with stratified data, i.e. data that matches the distribution of the overall
data set.
Segments the data,
calculates the distribution of classes, and
creates the folds according to that distribution, without replacement.
:param dataset: list of list of data points with labels
:param fold_length: number of points in each fold
:param num_folds: number of folds to build.
:return: a list of list of datapoints, where each inner list is a fold in the CV
"""
# for all data
unique_labels = {}
labeled_datapoints = {}
# build dict of listed segmented datapoints
for datapoint in dataset:
label = datapoint[-1]
if label in unique_labels:
unique_labels[label] += 1
labeled_datapoints[label].append(datapoint)
else:
unique_labels[label] = 1
labeled_datapoints[label] = [datapoint]
# Calculate the class distribution
distribution = {}
for key in unique_labels:
distribution[key] = unique_labels[key] / len(dataset)
fold_data_set = []
for x in range(num_folds):
single_fold = self.build_single_fold(distribution, labeled_datapoints, fold_length)
fold_data_set.append(single_fold)
return fold_data_set
def build_single_fold(self, distribution, labeled_datapoints, fold_length):
"""
Builds a single CV fold according to a distribution without replacement.
:param distribution: dict of dist of the classes
:param labeled_datapoints: dict of labeled data points
:param fold_length: number of data points in a fold
:return: list of data points in the fold.
"""
# build a single fold
fold = []
for key in distribution:
# get number of data points for this class
number_of_items_per_class = int(distribution[key] * fold_length)
if number_of_items_per_class == 0:
number_of_items_per_class = 1
# select the data points for this class in this fold.
single_key_datapoints = []
for x in range(number_of_items_per_class):
datapoint_possibilities = labeled_datapoints[key]
if len(datapoint_possibilities) == 0:
continue
selected_datapoint_index = random.randint(0, len(datapoint_possibilities) - 1)
single_key_datapoints.append(datapoint_possibilities[selected_datapoint_index])
# remove datapoint from being selected again.
del datapoint_possibilities[selected_datapoint_index]
fold = fold + single_key_datapoints
return fold
def get_validation_set(self, dataset: List[List], percentage_of_data_for_validation: int) -> Tuple[List[list], List[list]]:
"""
Creates a validation set from the data and deletes the points used for
validation from the original data set.
:param dataset: list of list
:param percentage_of_data_for_validation: int divisor used to size the
validation set (e.g. 10 reserves roughly 1/10 of the data)
:return: tuple(validation set, modified data set)
"""
fold_length = int(math.floor(len(dataset) / percentage_of_data_for_validation))
stratified_data = self.get_stratified_data(dataset, fold_length, 1)
stratified_data = stratified_data[0]
for stratified_data_point in stratified_data:
if stratified_data_point in dataset:
index = dataset.index(stratified_data_point)
del dataset[index]
return stratified_data, dataset
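# A minimal usage sketch for the validation split (illustrative only;
# 'evaluator' is assumed to be an instance of the enclosing class):
#
#     validation, remaining = evaluator.get_validation_set(data, 10)
#     # 'validation' holds roughly 1/10 of the points, stratified by class,
#     # and those points are removed from the original data set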
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from oslo_serialization import jsonutils
import six
from heat.api.aws import utils as aws_utils
from heat.common import exception
from heat.common.i18n import _
from heat.engine import function
from heat.engine.hot import functions as hot_funcs
class FindInMap(function.Function):
"""A function for resolving keys in the template mappings.
Takes the form::
{ "Fn::FindInMap" : [ "mapping",
"key",
"value" ] }
"""
def __init__(self, stack, fn_name, args):
super(FindInMap, self).__init__(stack, fn_name, args)
try:
self._mapname, self._mapkey, self._mapvalue = self.args
except ValueError as ex:
raise KeyError(six.text_type(ex))
def result(self):
mapping = self.stack.t.maps[function.resolve(self._mapname)]
key = function.resolve(self._mapkey)
value = function.resolve(self._mapvalue)
return mapping[key][value]
class GetAZs(function.Function):
"""A function for retrieving the availability zones.
Takes the form::
{ "Fn::GetAZs" : "<region>" }
"""
def result(self):
# TODO(therve): Implement region scoping
if self.stack is None:
return ['nova']
else:
return self.stack.get_availability_zones()
class ParamRef(function.Function):
"""A function for resolving parameter references.
Takes the form::
{ "Ref" : "<param_name>" }
"""
def __init__(self, stack, fn_name, args):
super(ParamRef, self).__init__(stack, fn_name, args)
self.parameters = self.stack.parameters
def result(self):
param_name = function.resolve(self.args)
try:
return self.parameters[param_name]
except KeyError:
raise exception.InvalidTemplateReference(resource=param_name,
key='unknown')
def Ref(stack, fn_name, args):
"""A function for resolving parameters or resource references.
Takes the form::
{ "Ref" : "<param_name>" }
or::
{ "Ref" : "<resource_name>" }
"""
if stack is None or args in stack:
RefClass = hot_funcs.GetResource
else:
RefClass = ParamRef
return RefClass(stack, fn_name, args)
class GetAtt(hot_funcs.GetAttThenSelect):
"""A function for resolving resource attributes.
Takes the form::
{ "Fn::GetAtt" : [ "<resource_name>",
"<attribute_name>" ] }
"""
def _parse_args(self):
try:
resource_name, attribute = self.args
except ValueError:
raise ValueError(_('Arguments to "%s" must be of the form '
'[resource_name, attribute]') % self.fn_name)
return resource_name, attribute, []
class Select(function.Function):
"""A function for selecting an item from a list or map.
Takes the form (for a list lookup)::
{ "Fn::Select" : [ "<index>", [ "<value_1>", "<value_2>", ... ] ] }
or (for a map lookup)::
{ "Fn::Select" : [ "<index>", { "<key_1>": "<value_1>", ... } ] }
If the selected index is not found, this function resolves to an empty
string.
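For example::
{ "Fn::Select" : [ "1", [ "apples", "grapes" ] ] }
resolves to "grapes", while an out-of-range index resolves to "".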
"""
def __init__(self, stack, fn_name, args):
super(Select, self).__init__(stack, fn_name, args)
try:
self._lookup, self._strings = self.args
except ValueError:
raise ValueError(_('Arguments to "%s" must be of the form '
'[index, collection]') % self.fn_name)
def result(self):
index = function.resolve(self._lookup)
strings = function.resolve(self._strings)
if strings == '':
# an empty string is a common response from other
# functions when result is not currently available.
# Handle by returning an empty string
return ''
if isinstance(strings, six.string_types):
# might be serialized json.
try:
strings = jsonutils.loads(strings)
except ValueError as json_ex:
fmt_data = {'fn_name': self.fn_name,
'err': json_ex}
raise ValueError(_('"%(fn_name)s": %(err)s') % fmt_data)
if isinstance(strings, collections.Mapping):
if not isinstance(index, six.string_types):
raise TypeError(_('Index to "%s" must be a string') %
self.fn_name)
return strings.get(index, '')
try:
index = int(index)
except (ValueError, TypeError):
pass
if (isinstance(strings, collections.Sequence) and
not isinstance(strings, six.string_types)):
if not isinstance(index, six.integer_types):
raise TypeError(_('Index to "%s" must be an integer') %
self.fn_name)
try:
return strings[index]
except IndexError:
return ''
if strings is None:
return ''
raise TypeError(_('Arguments to %s not fully resolved') %
self.fn_name)
class Join(hot_funcs.Join):
"""A function for joining strings.
Takes the form::
{ "Fn::Join" : [ "<delim>", [ "<string_1>", "<string_2>", ... ] ] }
And resolves to::
"<string_1><delim><string_2><delim>..."
"""
class Split(function.Function):
"""A function for splitting strings.
Takes the form::
{ "Fn::Split" : [ "<delim>", "<string_1><delim><string_2>..." ] }
And resolves to::
[ "<string_1>", "<string_2>", ... ]
"""
def __init__(self, stack, fn_name, args):
super(Split, self).__init__(stack, fn_name, args)
example = '"%s" : [ ",", "str1,str2"]]' % self.fn_name
fmt_data = {'fn_name': self.fn_name,
'example': example}
if isinstance(self.args, (six.string_types, collections.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
self._delim, self._strings = self.args
except ValueError:
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
def result(self):
strings = function.resolve(self._strings)
if not isinstance(self._delim, six.string_types):
raise TypeError(_("Delimiter for %s must be string") %
self.fn_name)
if not isinstance(strings, six.string_types):
raise TypeError(_("String to split must be string; got %s") %
type(strings))
return strings.split(self._delim)
class Replace(hot_funcs.Replace):
"""A function for performing string substitutions.
Takes the form::
{ "Fn::Replace" : [
{ "<key_1>": "<value_1>", "<key_2>": "<value_2>", ... },
"<key_1> <key_2>"
] }
And resolves to::
"<value_1> <value_2>"
When keys overlap in the template, longer matches are preferred. For keys
of equal length, lexicographically smaller keys are preferred.
"""
def _parse_args(self):
example = ('{"%s": '
'[ {"$var1": "foo", "%%var2%%": "bar"}, '
'"$var1 is %%var2%%"]}' % self.fn_name)
fmt_data = {'fn_name': self.fn_name,
'example': example}
if isinstance(self.args, (six.string_types, collections.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
mapping, string = self.args
except ValueError:
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
else:
return mapping, string
class Base64(function.Function):
"""A placeholder function for converting to base64.
Takes the form::
{ "Fn::Base64" : "<string>" }
This function actually performs no conversion. It is included for the
benefit of templates that convert UserData to Base64. Heat accepts UserData
in plain text.
"""
def result(self):
resolved = function.resolve(self.args)
if not isinstance(resolved, six.string_types):
raise TypeError(_('"%s" argument must be a string') % self.fn_name)
return resolved
class MemberListToMap(function.Function):
"""A function to convert lists with enumerated keys and values to mapping.
Takes the form::
{ 'Fn::MemberListToMap' : [ 'Name',
'Value',
[ '.member.0.Name=<key_0>',
'.member.0.Value=<value_0>',
... ] ] }
And resolves to::
{ "<key_0>" : "<value_0>", ... }
The first two arguments are the names of the key and value.
"""
def __init__(self, stack, fn_name, args):
super(MemberListToMap, self).__init__(stack, fn_name, args)
try:
self._keyname, self._valuename, self._list = self.args
except ValueError:
correct = '''
{'Fn::MemberListToMap': ['Name', 'Value',
['.member.0.Name=key',
'.member.0.Value=door']]}
'''
raise TypeError(_('Wrong Arguments try: "%s"') % correct)
if not isinstance(self._keyname, six.string_types):
raise TypeError(_('%s Key Name must be a string') % self.fn_name)
if not isinstance(self._valuename, six.string_types):
raise TypeError(_('%s Value Name must be a string') % self.fn_name)
def result(self):
member_list = function.resolve(self._list)
if not isinstance(member_list, collections.Iterable):
raise TypeError(_('Member list must be a list'))
def item(s):
if not isinstance(s, six.string_types):
raise TypeError(_("Member list items must be strings"))
return s.split('=', 1)
partials = dict(item(s) for s in member_list)
return aws_utils.extract_param_pairs(partials,
prefix='',
keyname=self._keyname,
valuename=self._valuename)
class ResourceFacade(hot_funcs.ResourceFacade):
"""A function for retrieving data in a parent provider template.
A function for obtaining data from the facade resource from within the
corresponding provider template.
Takes the form::
{ "Fn::ResourceFacade": "<attribute_type>" }
where the valid attribute types are "Metadata", "DeletionPolicy" and
"UpdatePolicy".
"""
_RESOURCE_ATTRIBUTES = (
METADATA, DELETION_POLICY, UPDATE_POLICY,
) = (
'Metadata', 'DeletionPolicy', 'UpdatePolicy'
)
class If(hot_funcs.If):
"""A function to return corresponding value based on condition evaluation.
Takes the form::
{ "Fn::If" : [ "<condition_name>",
"<value_if_true>",
"<value_if_false>" ] }
Returns value_if_true if the specified condition evaluates to true,
or value_if_false if it evaluates to false.
"""
class Equals(hot_funcs.Equals):
"""A function for comparing whether two values are equal.
Takes the form::
{ "Fn::Equals" : [ "<value_1>", "<value_2>" ] }
The value can be any type that you want to compare. Returns true
if the two values are equal or false if they aren't.
"""
class Not(hot_funcs.Not):
"""A function that acts as a NOT operator on a condition.
Takes the form::
{ "Fn::Not" : [ "<condition>" ] }
Returns true for a condition that evaluates to false or
returns false for a condition that evaluates to true.
"""
def _check_args(self):
msg = _('Arguments to "%s" must be of the form: '
'[condition]') % self.fn_name
if (not self.args or
not isinstance(self.args, collections.Sequence) or
isinstance(self.args, six.string_types)):
raise ValueError(msg)
if len(self.args) != 1:
raise ValueError(msg)
self.condition = self.args[0]
class And(hot_funcs.And):
"""A function that acts as an AND operator on conditions.
Takes the form::
{ "Fn::And" : [ "<condition_1>", "<condition_2>", ... ] }
Returns true if all the specified conditions evaluate to true, or returns
false if any one of the conditions evaluates to false. The minimum number
of conditions that you can include is 2.
"""
class Or(hot_funcs.Or):
"""A function that acts as an OR operator on conditions.
Takes the form::
{ "Fn::Or" : [ "<condition_1>", "<condition_2>", ... ] }
Returns true if any one of the specified conditions evaluates to true,
or returns false if all of the conditions evaluate to false. The minimum
number of conditions that you can include is 2.
"""
|
|
"""
Generate a C++ DICOM dictionary from a text file.
This program will read a text file generated from the DICOM data
element registry table (DICOM Part 6, Chapter 6) and will generate
a hash table that can be used for dictionary lookups.
Usage: python makedict.py nemadict.txt > vtkDICOMDictHash.cxx
Usage: python makedict.py --header nemadict.txt > vtkDICOMDictHash.h
The option "--private" can be added to create a private dictionary.
"""
import sys
import math
header = \
"""/*=========================================================================
This is an automatically generated file. Include errata for any changes.
=========================================================================*/"""
printheader = False
privatedict = False
filename = None
for arg in sys.argv[1:]:
if arg == "--header":
printheader = True
elif arg[0:10] == "--private=":
privatedict = arg[10:]
elif arg[0] != '-' and filename is None:
filename = arg
else:
sys.stderr.write(
"""usage: python makedict.py nemadict.txt > vtkDICOMDictHash.cxx
python makedict.py --header nemadict.txt > vtkDICOMDictHash.h\n""")
sys.exit(1)
# the default hash table size
htsize = 1024
# collect private dictionaries
privatelines = {}
# read the file in one go
f = open(filename, 'r')
lines = f.readlines()
f.close()
# look for repeating groups and repeat them
i = 0
rg = []
while i < len(lines):
tag = lines[i].strip()
try:
g, e = tag[1:10].split(',')
except ValueError:
sys.stderr.write("exception: %s\n" % (tag))
if g == "60xx" or g == "50xx":
rg.extend(lines[i:i+6])
i = i + 6
elif rg:
nb = []
for j in range(1,16):
k = 0
m = len(rg)
while k < m:
g, e = rg[k][1:10].split(',')
nb.append("(%s%02X,%s)\n" % (g[0:2], 2*j, e))
nb.append("%s %d\n" % (rg[k+1].strip(), j+1))
nb.append("%s%d\n" % (rg[k+2].strip(), j+1))
nb.append(rg[k+3])
nb.append(rg[k+4])
nb.append(rg[k+5])
k = k + 6
lines = lines[0:i] + nb + lines[i:]
i += 16*len(rg)
rg = []
else:
# check for and filter out the private tags
private = False
try:
# private groups are odd-numbered, so test the low bit of the
# final group digit; parse it as hex so digits a-f work too
private = ((int(g[3], 16) & 0x1) != 0)
except (ValueError, IndexError):
pass
if private:
creator = lines[i + 5].strip()
try:
privatelines[creator] += lines[i:i+6]
except KeyError:
privatelines[creator] = lines[i:i+6]
i = i + 6
def hashstring(s):
"""Compute a string hash based on the function "djb2".
Use at most 64 characters.
"""
h = 5381
s = s[0:64]
for c in s:
h = ((h << 5) + h + ord(c)) & 0xffffffff
return h
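# e.g. hashstring("") returns the djb2 seed 5381; each subsequent character
# folds in as h = h*33 + ord(c), truncated to 32 bits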
def makedict(lines, creator="DICOM"):
# the tables that will be created
enum_list = []
element_list = []
entry_list = []
# a set to keep track of all VM strings encountered
vms = {}
htsize = 1024
if privatedict:
htsize = int(len(lines)/24)
if htsize == 0:
htsize = 1
ht = [None]*htsize
ht2 = [None]*htsize
# iterate through all elements in the table
j = 0
i = 0
n = len(lines)
while i < n:
try:
tag = lines[i].encode('ascii').strip()
i = i + 1
name = lines[i].encode('ascii').strip()
i = i + 1
key = lines[i].encode('ascii').strip()
i = i + 1
vr = lines[i].encode('ascii').strip()
i = i + 1
vm = lines[i].encode('ascii').strip()
i = i + 1
ret = lines[i].encode('ascii').strip()
i = i + 1
except:
sys.stderr.write("non-ascii character encountered on line %d\n" % (i,))
raise TypeError
# replace "Unknown" and "?" with ""
if name in ("Unknown", "Internal", "?"):
name = ""
if key in ("Unknown", "Internal", "?"):
key = ""
# replace "US or SS" with "XS"
if vr in ("US or SS", "SS or US", "xs"):
vr = "XS"
# replace "OB or OW" with "OX"
if vr in ("OB or OW", "OW or OB", "ox"):
vr = "OX"
# replace "see note" with "XX"
if vr in ("", "see note", "See Note"):
vr = "XX"
# replace mixed short with "OW"
if len(vr) > 2:
if vr.find("OW") >= 0:
vr = "OW"
vm = "1"
if vr.find("OB") >= 0:
vr = "OB"
vm = "1"
# replace 'RET' with 1 or 0
if ret and not privatedict:
if not printheader:
ret = {"RET":"1","DICONDE":"2","DICOS":"3"}[ret]
elif ret == "RET":
ret = "1"
else:
ret = "0"
# prefix vm with 'M', change '-' to 'T', change 'n' to 'N'
vm = 'M' + vm.split(' ')[0].replace('-', 'T').replace('n', 'N')
# add to the set of VMs
vms[vm] = True
# this is debug info: make sure no keys are over 63 chars,
# which is the maximum id length in the C++ standard
if len(key) > 63:
print "XXXXXX", key
sys.exit(1)
# get the group, element
g, e = tag[1:10].split(',')
# make sure g, e are hexadecimal integers
try:
gi = int(g, 16)
ei = int(e, 16)
except:
# replace 'x' (which means any digit) with zero
#print "XXXXXX %s %s" % (tag, key)
g = g.replace('xx','00')
e = e.replace('xxxx','0000')
e = e.replace('xxx','000')
e = e.replace('xx','00')
e = e.replace('x','1')
gi = int(g, 16)
ei = int(e, 16)
if key or privatedict:
enum_list.append(
("%-39s = 0x%s%s, // %s %-5s %s" % (key, g, e, vr, vm, ret)).strip())
element_list.append(
"{ 0x%s, 0x%s, %s, VR::%s, VM::%s, \"%s\" }," % (g, e, ret, vr, vm, key))
# create a 16-bit hash from group, element
h = ((gi ^ (gi >> 6)) ^ (ei ^ (ei >> 6)))
# create a string hash
hkey = hashstring(key)
# build the hash tables
h = (h % htsize)
if ht[h] is None:
ht[h] = []
h2 = (hkey % htsize)
if ht2[h2] is None:
ht2[h2] = []
# build the index table
ht[h].append(j)
ht[h].append(ei)
ht2[h2].append(j)
ht2[h2].append((hkey//htsize) & 0xffff)
j = j + 1
# debug: print all VM's that were found
#print vms.keys()
# debug: print statistics about the hash table
#print maxl, minl, k0, k4
return enum_list, element_list, ht, ht2
# write the output file
def printhead(enum_dict, classname):
print header
print
print "#ifndef %s_h" % (classname,)
print "#define %s_h" % (classname,)
print
if not privatedict:
print "//! Tag values defined in the DICOM standard"
print "namespace DC"
print "{"
print "enum EnumType {"
for enum_list in enum_dict.values():
# eliminate the "," for the last enum item
m = len(enum_list)
if m:
enum_list[m-1] = enum_list[m-1].replace(", //", " //")
for l in enum_list:
print l
print "};"
print "} // end namespace DC"
else:
print "// This must be included before the initializer is declared."
print "#include \"vtkDICOMDictionary.h\""
print
print "// Initializer to add dict when header included."
print "struct VTK_DICOM_EXPORT %sInitializer" % (classname,)
print "{"
print " %sInitializer();" % (classname,)
print " ~%sInitializer();" % (classname,)
print "};"
print
print "static %sInitializer %sInitializerInstance;" % (classname,classname);
print
print "#endif /* %s_h */" % (classname,)
def printbody(entry_dict, classname):
print header
print
print "#include \"vtkDICOMDictionary.h\""
print "#include \"%s.h\"" % (classname,)
print
print "namespace {"
print
print "typedef vtkDICOMVR VR;"
print "typedef vtkDICOMVM VM;"
print "typedef vtkDICOMDictEntry::Entry DictEntry;"
ns = ""
if not privatedict:
ns = "vtkDICOMDictionary::"
dn = 0
for name, (entry_list, tag_table, key_table) in entry_dict.items():
dn = dn + 1
ds = ""
print
if len(entry_dict) > 1:
ds = "%03d" % (dn,)
print "// ----- %s -----" % (name,)
print
print "DictEntry Dict%sContents[] = {" % (ds,)
for l in entry_list:
print l
print "};"
for table,tagorkey in [(tag_table,"Tag"),(key_table,"Key")]:
print
print "unsigned short Dict%s%sHashTable[] = {" % (ds,tagorkey)
i = 0
j = len(table) + 1
for l in table:
if l is None:
print "%5d," % (len(table),),
i = i + 1
if i % 10 == 0:
print
else:
print "%5d," % (j,),
i = i + 1
if i % 10 == 0:
print
j = j + len(l) + 1
print "%5d," % (0,),
i = i + 1
if i % 10 == 0:
print
for l in table:
if l is not None:
print "%5d," % (len(l)/2,),
i = i + 1
if i % 10 == 0:
print
for e in l:
print "%5d," % (e,),
i = i + 1
if i % 10 == 0:
print
if i % 10 != 0:
print
print "};"
if not privatedict:
print
print "} // end anonymous namespace"
print
if len(entry_dict) > 1:
ds = "%03d" % (dn,)
print "vtkDICOMDictionary::Dict %sDict%sData = {" % (ns,ds)
print "\"%s\"," % (name,)
print "%d," % (len(tag_table),)
print "%d," % (len(entry_list),)
print "Dict%sTagHashTable," % (ds,)
print "Dict%sKeyHashTable," % (ds,)
print "Dict%sContents" % (ds,)
print "};"
if privatedict:
print
print "vtkDICOMDictionary::Dict *PrivateDictData[] = {"
dn = 0
for item in entry_dict.items():
dn = dn + 1
print "&Dict%03dData," % (dn,),
if dn % 5 == 0:
print
print "NULL"
print "};"
print
print "} // end anonymous namespace"
print
print "static int %sInitializerCounter;" % (classname,)
print
print "%sInitializer::%sInitializer()" % (classname,classname)
print "{"
print " if (%sInitializerCounter++ == 0)" % (classname,)
print " {"
print " for (vtkDICOMDictionary::Dict **dp = PrivateDictData; *dp != NULL; dp++)"
print " {"
print " vtkDICOMDictionary::AddPrivateDictionary(*dp);"
print " }"
print " }"
print "}"
print
print "%sInitializer::~%sInitializer()" % (classname,classname)
print "{"
print " if (--%sInitializerCounter == 0)" % (classname,)
print " {"
print " for (vtkDICOMDictionary::Dict **dp = PrivateDictData; *dp != NULL; dp++)"
print " {"
print " vtkDICOMDictionary::RemovePrivateDictionary((*dp)->Name);"
print " }"
print " }"
print "}"
if privatedict:
enum_dict = {}
entry_dict = {}
for name, lines in privatelines.items():
enum_list, entry_list, tag_table, key_table = makedict(lines, name)
enum_dict[name] = enum_list
entry_dict[name] = (entry_list, tag_table, key_table)
if printheader:
printhead(enum_dict, privatedict)
else:
printbody(entry_dict, privatedict)
else:
enum_list, entry_list, tag_table, key_table = makedict(lines)
classname = "vtkDICOMDictHash"
if printheader:
printhead({"DICOM" : enum_list}, classname)
else:
printbody({"DICOM" : (entry_list, tag_table, key_table)}, classname)
# informative: these names represent a range of tag values
""" keys with ranges
(0020,3100 to 31FF) SourceImageIDs
(0028,04x0) RowsForNthOrderCoefficients
(0028,04x1) ColumnsForNthOrderCoefficients
(0028,04x2) CoefficientCoding
(0028,04x3) CoefficientCodingPointers
(0028,08x0) CodeLabel
(0028,08x2) NumberOfTables
(0028,08x3) CodeTableLocation
(0028,08x4) BitsForCodeWord
(0028,08x8) ImageDataLocation
(1000,xxx0) EscapeTriplet
(1000,xxx1) RunLengthTriplet
(1000,xxx2) HuffmanTableSize
(1000,xxx3) HuffmanTableTriplet
(1000,xxx4) ShiftTableSize
(1000,xxx5) ShiftTableTriplet
(1010,xxxx) ZonalMap
"""
|
|
# flake8: noqa
import os
import sys
from collections import OrderedDict, defaultdict
from fnmatch import fnmatch
from importlib import import_module
import django
from django.core.management.base import BaseCommand, CommandError
import django.template
from django.template import Context
from django.utils import six
from django.template.loader import get_template # noqa Leave this in to preload template locations
from django.template import engines
from compressor.cache import get_offline_hexdigest, write_offline_manifest
from compressor.conf import settings
from compressor.exceptions import (OfflineGenerationError, TemplateSyntaxError,
TemplateDoesNotExist)
from compressor.utils import get_mod_func
if six.PY3:
# there is an 'io' module in python 2.6+, but io.StringIO does not
# accept regular strings, just unicode objects
from io import StringIO
else:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class Command(BaseCommand):
help = "Compress content outside of the request/response cycle"
def add_arguments(self, parser):
parser.add_argument('--extension', '-e', action='append', dest='extensions',
help='The file extension(s) to examine (default: ".html", '
'separate multiple extensions with commas, or use -e '
'multiple times)')
parser.add_argument('-f', '--force', default=False, action='store_true',
help="Force the generation of compressed content even if the "
"COMPRESS_ENABLED setting is not True.", dest='force')
parser.add_argument('--follow-links', default=False, action='store_true',
help="Follow symlinks when traversing the COMPRESS_ROOT "
"(which defaults to STATIC_ROOT). Be aware that using this "
"can lead to infinite recursion if a link points to a parent "
"directory of itself.", dest='follow_links')
parser.add_argument('--engine', default="django", action="store",
help="Specifies the templating engine. jinja2 or django",
dest="engine")
def get_loaders(self):
template_source_loaders = []
for e in engines.all():
if hasattr(e, 'engine'):
template_source_loaders.extend(
e.engine.get_template_loaders(e.engine.loaders))
loaders = []
# If template loader is CachedTemplateLoader, return the loaders
# that it wraps around. So if we have
# TEMPLATE_LOADERS = (
# ('django.template.loaders.cached.Loader', (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# )),
# )
# The loaders will return django.template.loaders.filesystem.Loader
# and django.template.loaders.app_directories.Loader
# The cached Loader and similar ones include a 'loaders' attribute
# so we look for that.
for loader in template_source_loaders:
if hasattr(loader, 'loaders'):
loaders.extend(loader.loaders)
else:
loaders.append(loader)
return loaders
def __get_parser(self, engine):
if engine == "jinja2":
from compressor.offline.jinja2 import Jinja2Parser
env = settings.COMPRESS_JINJA2_GET_ENVIRONMENT()
parser = Jinja2Parser(charset=settings.FILE_CHARSET, env=env)
elif engine == "django":
from compressor.offline.django import DjangoParser
parser = DjangoParser(charset=settings.FILE_CHARSET)
else:
raise OfflineGenerationError("Invalid templating engine specified.")
return parser
def compress(self, log=None, **options):
"""
Searches templates containing 'compress' nodes and compresses them
"offline" -- outside of the request/response cycle.
The result is cached with a cache-key derived from the content of the
compress nodes (not the content of the possibly linked files!).
"""
engine = options.get("engine", "django")
extensions = options.get('extensions')
extensions = self.handle_extensions(extensions or ['html'])
verbosity = int(options.get("verbosity", 0))
if not log:
log = StringIO()
if not self.get_loaders():
raise OfflineGenerationError("No template loaders defined. You "
"must set TEMPLATE_LOADERS in your "
"settings or set 'loaders' in your "
"TEMPLATES dictionary.")
templates = set()
if engine == 'django':
paths = set()
for loader in self.get_loaders():
try:
module = import_module(loader.__module__)
get_template_sources = getattr(module,
'get_template_sources', None)
if get_template_sources is None:
get_template_sources = loader.get_template_sources
paths.update(str(origin) for origin in get_template_sources(''))
except (ImportError, AttributeError, TypeError):
# Yeah, this didn't work out so well, let's move on
pass
if not paths:
raise OfflineGenerationError("No template paths found. None of "
"the configured template loaders "
"provided template paths. See "
"https://docs.djangoproject.com/en/1.8/topics/templates/ "
"for more information on template "
"loaders.")
if verbosity > 1:
log.write("Considering paths:\n\t" + "\n\t".join(paths) + "\n")
for path in paths:
for root, dirs, files in os.walk(path,
followlinks=options.get('follow_links', False)):
templates.update(os.path.join(root, name)
for name in files if not name.startswith('.') and
any(fnmatch(name, "*%s" % glob) for glob in extensions))
elif engine == 'jinja2' and django.VERSION >= (1, 8):
env = settings.COMPRESS_JINJA2_GET_ENVIRONMENT()
if env and hasattr(env, 'list_templates'):
templates |= set([env.loader.get_source(env, template)[1] for template in
env.list_templates(filter_func=lambda _path:
os.path.splitext(_path)[-1] in extensions)])
if not templates:
raise OfflineGenerationError("No templates found. Make sure your "
"TEMPLATE_LOADERS and TEMPLATE_DIRS "
"settings are correct.")
if verbosity > 1:
log.write("Found templates:\n\t" + "\n\t".join(templates) + "\n")
contexts = settings.COMPRESS_OFFLINE_CONTEXT
if isinstance(contexts, six.string_types):
try:
module, function = get_mod_func(contexts)
contexts = getattr(import_module(module), function)()
except (AttributeError, ImportError, TypeError) as e:
raise ImportError("Couldn't import offline context function %s: %s" %
(settings.COMPRESS_OFFLINE_CONTEXT, e))
elif not isinstance(contexts, (list, tuple)):
contexts = [contexts]
contexts = list(contexts) # evaluate generator
parser = self.__get_parser(engine)
compressor_nodes = OrderedDict()
for template_name in templates:
try:
template = parser.parse(template_name)
except IOError: # unreadable file -> ignore
if verbosity > 0:
log.write("Unreadable template at: %s\n" % template_name)
continue
except TemplateSyntaxError as e: # broken template -> ignore
if verbosity > 0:
log.write("Invalid template %s: %s\n" % (template_name, e))
continue
except TemplateDoesNotExist: # non existent template -> ignore
if verbosity > 0:
log.write("Non-existent template at: %s\n" % template_name)
continue
except UnicodeDecodeError:
if verbosity > 0:
log.write("UnicodeDecodeError while trying to read "
"template %s\n" % template_name)
continue
for context_dict in contexts:
context = parser.get_init_context(context_dict)
context = Context(context)
try:
nodes = list(parser.walk_nodes(template, context=context))
except (TemplateDoesNotExist, TemplateSyntaxError) as e:
# Could be an error in some base template
if verbosity > 0:
log.write("Error parsing template %s: %s\n" % (template_name, e))
continue
if nodes:
template.template_name = template_name
template_nodes = compressor_nodes.setdefault(template, OrderedDict())
for node in nodes:
template_nodes.setdefault(node, []).append(context)
if not compressor_nodes:
raise OfflineGenerationError(
"No 'compress' template tags found in templates. "
"Try running the compress command with --follow-links and/or "
"--extension=EXTENSIONS")
if verbosity > 0:
log.write("Found 'compress' tags in:\n\t" +
"\n\t".join((t.template_name
for t in compressor_nodes.keys())) + "\n")
log.write("Compressing... ")
block_count = 0
compressed_contexts = []
results = []
offline_manifest = OrderedDict()
for template, nodes in compressor_nodes.items():
template._log = log
template._log_verbosity = verbosity
for node, contexts in nodes.items():
for context in contexts:
if context not in compressed_contexts:
compressed_contexts.append(context)
context.push()
if not parser.process_template(template, context):
continue
parser.process_node(template, context, node)
rendered = parser.render_nodelist(template, context, node)
key = get_offline_hexdigest(rendered)
if key in offline_manifest:
continue
try:
result = parser.render_node(template, context, node)
except Exception as e:
raise CommandError("An error occurred during rendering %s: "
"%s" % (template.template_name, e))
offline_manifest[key] = result
context.pop()
results.append(result)
block_count += 1
write_offline_manifest(offline_manifest)
context_count = len(compressed_contexts)
log.write("done\nCompressed %d block(s) from %d template(s) for %d context(s).\n" %
(block_count, len(compressor_nodes), context_count))
return block_count, results
def handle_extensions(self, extensions=('html',)):
"""
organizes multiple extensions that are separated with commas or
passed by using --extension/-e multiple times.
for example: running 'django-admin compress -e js,txt -e xhtml -a'
would result in an extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
['.html', '.js']
>>> handle_extensions(['.html, txt,.tpl'])
['.html', '.tpl', '.txt']
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ', '').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
return set(ext_list)
def handle(self, **options):
if not settings.COMPRESS_ENABLED and not options.get("force"):
raise CommandError(
"Compressor is disabled. Set the COMPRESS_ENABLED "
"setting or use --force to override.")
if not settings.COMPRESS_OFFLINE:
if not options.get("force"):
raise CommandError(
"Offline compression is disabled. Set "
"COMPRESS_OFFLINE or use the --force to override.")
self.compress(sys.stdout, **options)
Command.requires_system_checks = False
|
|
# -*- coding: utf-8 -*-
import sys
import unittest2
from unipath import Path
sys.path.append(Path(__file__).ancestor(2))
from scrapy.selector import Selector
from utils import get_response
from localfinance.parsing.zone import DepartmentZoneParser
class DepartmentFinance2008ParsingTestCase(unittest2.TestCase):
def setUp(self):
self.response = get_response('test/data/department_2008_account.html',
encoding='windows-1252')
self.data = {
'population': 535489,
'operating_revenues': 455303000,
'operating_real_revenues': 453230000,
'local_tax': 183583000,
'refund_tax': 0,
'other_tax': 99211000,
'advertisement_tax': 34398000,
'tipp': 31453000,
'allocation_and_stake': 158788000,
'allocation': 109209000,
'realignment': 17887000,
'operating_costs': 426105000,
'operating_real_costs': 392785000,
'staff_costs': 77310000,
'purchases_and_external_costs': 57394000,
'subsidies_and_contingents': 247743000,
'mandatory_contributions_and_stakes': 52527000,
'subsidies': 17595000,
'individual_aids': 174671000,
'pch': 5510000,
'apa': 40781000,
'rsa': 0,
'accomodation_costs': 0,
'financial_costs': 9761000,
'net_profit': 29198000,
'self_financing_capacity': 60444000,
'investment_ressources': 155286000,
'fctva': 7913000,
'received_subsidies': 20379000,
'sold_fixed_assets': 704000,
'loans': 45000000,
'investments_usage': 129789000,
'investments_direct_costs': 68893000,
'paid_subsidies': 44227000,
'debt_repayments': 14897000,
'residual_financing_capacity': -25497000,
'thirdparty_balance': 323000,
'financing_capacity': -25173000,
'global_profit': 54372000,
'debt_at_end_year': 260957000,
'debt_annual_costs': 24298000,
'home_tax_value': 50719000,
'home_tax_basis': 441803000,
'home_tax_rate': 0.1148,
'home_tax_cuts_on_deliberation': 1146000,
'property_tax_value': 59808000,
'property_tax_basis': 385356000,
'property_tax_rate': 0.1552,
'property_tax_cuts_on_deliberation': 953000,
'land_property_tax_value': 587000,
'land_property_tax_basis': 1745000,
'land_property_tax_rate': 0.3363,
'land_property_tax_cuts_on_deliberation': 4000,
'business_tax_value': 73789000,
'business_tax_basis': 822619000,
'business_tax_rate': 0.0897,
'business_tax_cuts_on_deliberation': 1637000,
}
def test_parsing(self):
parser = DepartmentZoneParser('', 2008, '')
data = parser.parse(Selector(self.response))
for key, val in self.data.items():
self.assertAlmostEqual(data[key], val)
class DepartmentFinance2009ParsingTestCase(unittest2.TestCase):
def setUp(self):
self.response = get_response('test/data/department_2009_account.html', encoding='windows-1252')
self.data = {
'population': 537061,
'operating_revenues': 465068000,
'operating_real_revenues': 459748000,
'local_tax': 193093000,
'refund_tax': 0,
'other_tax': 99257000,
'tipp': 39185000,
'allocation_and_stake': 158439000,
'allocation': 110390000,
'realignment': 15679000,
'operating_costs': 463765000,
'operating_real_costs': 428409000,
'staff_costs': 86827000,
'purchases_and_external_costs': 57954000,
'subsidies_and_contingents': 272400000,
'mandatory_contributions_and_stakes': 54939000,
'subsidies': 16009000,
'individual_aids': 113380000,
'pch': 7565000,
'apa': 45375000,
'rsa': 28671000,
'accomodation_costs': 79145000,
'financial_costs': 10238000,
'net_profit': 1303000,
'self_financing_capacity': 31339000,
'debt_at_end_year': 294726000,
'debt_annual_costs': 26249000,
'home_tax_value': 52485000,
'home_tax_basis': 457175000,
'home_tax_rate': 0.1148,
'home_tax_cuts_on_deliberation': 0,
'property_tax_value': 62591000,
'property_tax_basis': 403301000,
'property_tax_rate': 0.1552,
'property_tax_cuts_on_deliberation': 33000,
'land_property_tax_value': 596000,
'land_property_tax_basis': 1775000,
'land_property_tax_rate': 0.3363,
'land_property_tax_cuts_on_deliberation': 0,
'business_tax_value': 75344000,
'business_tax_basis': 839954000,
'business_tax_rate': 0.0897,
'business_tax_cuts_on_deliberation': 3937000,
}
def test_parsing(self):
parser = DepartmentZoneParser('', 2009, '')
data = parser.parse(Selector(self.response))
for key, val in self.data.items():
self.assertAlmostEqual(data[key], val)
class DepartmentFinance2010ParsingTestCase(unittest2.TestCase):
def setUp(self):
self.response = get_response('test/data/department_2010_account.html',
encoding='windows-1252')
self.data = {
'population': 537820,
'operating_revenues': 504060000,
'operating_real_revenues': 498856000,
'local_tax': 213518000,
'refund_tax': 0,
'other_tax': 113116000,
'advertisement_tax': 30331000,
'tipp': 45951000,
'allocation_and_stake': 160322000,
'compensation_2010_value': 79465000
}
def test_parsing(self):
parser = DepartmentZoneParser('', 2010, '')
data = parser.parse(Selector(self.response))
for key, val in self.data.items():
self.assertAlmostEqual(data[key], val)
class DepartmentFinance2011ParsingTestCase(unittest2.TestCase):
def setUp(self):
self.response = get_response('test/data/department_2011_account.html',
encoding='windows-1252')
self.data = {
'property_tax_basis': 430294000,
'property_tax_value': 136490000,
'property_tax_rate': 0.3172,
'business_profit_contribution_basis': 0,
'business_profit_contribution_value': 38474000,
'business_network_tax_value': 913000,
}
def test_parsing(self):
parser = DepartmentZoneParser('', 2011, '')
data = parser.parse(Selector(self.response))
for key, val in self.data.items():
self.assertAlmostEqual(data[key], val)
class DepartmentFinance2012ParsingTestCase(unittest2.TestCase):
def setUp(self):
self.response = get_response('test/data/department_2012_account.html',
encoding='windows-1252')
self.data = {
'property_tax_basis': 445315000,
'property_tax_value': 141253000,
'property_tax_rate': 0.3172,
'business_profit_contribution_basis': 0,
'business_profit_contribution_value': 40288000,
'business_network_tax_value': 974000,
}
def test_parsing(self):
parser = DepartmentZoneParser('', 2012, '')
data = parser.parse(Selector(self.response))
for key, val in self.data.items():
self.assertAlmostEqual(data[key], val)
class DepartmentFinance2013ParsingTestCase(unittest2.TestCase):
def setUp(self):
self.response = get_response('test/data/department_2013_account.html',
encoding='windows-1252')
self.data = {
'operating_revenues': 531628000,
'local_tax': 188257000,
'other_tax': 140564000,
'advertisement_tax': 31324000,
'allocation': 111353000,
'working_capital': 25320000,
'property_tax_basis': 458250000,
'property_tax_value': 145357000,
'property_tax_rate': 0.3172,
'business_profit_contribution_basis': 0,
'business_profit_contribution_value': 40973000,
'business_network_tax_value': 1004000,
}
def test_parsing(self):
parser = DepartmentZoneParser('', 2013, '')
data = parser.parse(Selector(self.response))
for key, val in self.data.items():
self.assertAlmostEqual(data[key], val)
class DepartmentFinance2014ParsingTestCase(unittest2.TestCase):
def setUp(self):
self.response = get_response('test/data/department_2014_account.html',
encoding='windows-1252')
self.data = {
'operating_revenues': 543861000,
'local_tax': 190228000,
'other_tax': 150642000,
'advertisement_tax': 42176000,
'allocation': 108938000,
'working_capital': 26133000,
'property_tax_basis': 464872000,
'property_tax_value': 147459000,
'property_tax_rate': 0.3172,
'property_tax_cuts_on_deliberation': 204000,
'business_profit_contribution_basis': 0,
'business_profit_contribution_value': 40139000,
'business_profit_contribution_cuts_on_deliberation': 34000,
'business_network_tax_value': 1074000,
}
def test_parsing(self):
parser = DepartmentZoneParser('', 2014, '')
data = parser.parse(Selector(self.response))
for key, val in self.data.items():
self.assertAlmostEqual(data[key], val)
if __name__ == '__main__':
unittest2.main()
|
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import argparse
import xml.etree.ElementTree as ET
import requests
import urlparse
def create_headers(args):
return { 'X-Api-Key': args.api_key }
def finish_command(command, response):
print(command, response.status_code, response.reason)
print(response.text)
if response.status_code < 400:
sys.exit(0)
else:
sys.exit(2)
def create_build(args):
build = {}
build['buildType'] = args.build_type
build['number'] = args.build_number
build['source'] = args.build_source
build['status'] = 'running'
if build['buildType'] == 'Pull_Request':
build['buildType'] = 'Pull Request'
r = requests.post(urlparse.urljoin(args.url, "api/builds"), headers=create_headers(args), json=build)
if r.status_code < 400:
if args.property_file_format:
print("MBED_BUILD_ID=" + r.text)
else:
print(r.text)
sys.exit(0)
else:
sys.exit(2)
def finish_build(args):
data = {}
data['status'] = 'completed'
r = requests.put(urlparse.urljoin(args.url, "api/builds/" + args.build_id), headers=create_headers(args), json=data)
finish_command('finish-build', r)
def abort_build(args):
data = {}
data['status'] = 'aborted'
r = requests.put(urlparse.urljoin(args.url, "api/builds/" + args.build_id), headers=create_headers(args), json=data)
finish_command('abort-build', r)
def add_project_runs(args):
'''
--------------------------------------
Notes on 'project_run_data' structure:
--------------------------------------
'projectRuns' - Tree structure used to keep track of what projects have
been logged in different report files. The tree is organized as follows:
'projectRuns': { - Root element of tree
'hostOs': { - Host OS on which project was built/tested
- ex. windows, linux, or mac
'platform': { - Platform for which project was built/tested
(Corresponds to platform names in targets.py)
- ex. K64F, LPC1768, NRF51822, etc.
'toolchain': { - Toolchain with which project was built/tested
(Corresponds to TOOLCHAIN_CLASSES names in toolchains/__init__.py)
- ex. ARM, uARM, GCC_ARM, etc.
'project': { - Project that was built/tested
(Corresponds to test id in tests.py or library id in libraries.py)
- For tests, ex. MBED_A1, MBED_11, DTCT_1 etc.
- For libraries, ex. MBED, RTX, RTOS, etc.
},
...
},
...
},
...
}
}
'platforms_set' - Set of all the platform names mentioned in the given report files
'toolchains_set' - Set of all the toolchain names mentioned in the given report files
'names_set' - Set of all the project names mentioned in the given report files
'hostOses_set' - Set of all the host names given (only given by the command line arguments)
'''
project_run_data = {}
project_run_data['projectRuns'] = {}
project_run_data['platforms_set'] = set()
project_run_data['vendors_set'] = set()
project_run_data['toolchains_set'] = set()
project_run_data['names_set'] = set()
project_run_data['hostOses_set'] = set()
project_run_data['hostOses_set'].add(args.host_os)
add_report(project_run_data, args.build_report, True, args.build_id, args.host_os)
if (args.test_report):
add_report(project_run_data, args.test_report, False, args.build_id, args.host_os)
ts_data = format_project_run_data(project_run_data)
r = requests.post(urlparse.urljoin(args.url, "api/projectRuns"), headers=create_headers(args), json=ts_data)
finish_command('add-project-runs', r)
def format_project_run_data(project_run_data):
ts_data = {}
ts_data['projectRuns'] = []
for hostOs in project_run_data['projectRuns'].values():
for platform in hostOs.values():
for toolchain in platform.values():
for project in toolchain.values():
ts_data['projectRuns'].append(project)
ts_data['platforms'] = list(project_run_data['platforms_set'])
ts_data['vendors'] = list(project_run_data['vendors_set'])
ts_data['toolchains'] = list(project_run_data['toolchains_set'])
ts_data['names'] = list(project_run_data['names_set'])
ts_data['hostOses'] = list(project_run_data['hostOses_set'])
return ts_data
def find_project_run(projectRuns, project):
keys = ['hostOs', 'platform', 'toolchain', 'project']
elem = projectRuns
for key in keys:
if not project[key] in elem:
return None
elem = elem[project[key]]
return elem
def add_project_run(projectRuns, project):
keys = ['hostOs', 'platform', 'toolchain']
elem = projectRuns
for key in keys:
if not project[key] in elem:
elem[project[key]] = {}
elem = elem[project[key]]
elem[project['project']] = project
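# A minimal sketch of how the two helpers above cooperate (illustrative
# data only):
#
#     runs = {}
#     run = {'hostOs': 'linux', 'platform': 'K64F',
#            'toolchain': 'GCC_ARM', 'project': 'MBED_A1'}
#     add_project_run(runs, run)
#     assert find_project_run(runs, run) is run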
def update_project_run_results(project_to_update, project, is_build):
if is_build:
project_to_update['buildPass'] = project['buildPass']
project_to_update['buildResult'] = project['buildResult']
project_to_update['buildOutput'] = project['buildOutput']
else:
project_to_update['testPass'] = project['testPass']
project_to_update['testResult'] = project['testResult']
project_to_update['testOutput'] = project['testOutput']
def update_project_run(projectRuns, project, is_build):
found_project = find_project_run(projectRuns, project)
if found_project:
update_project_run_results(found_project, project, is_build)
else:
add_project_run(projectRuns, project)
def add_report(project_run_data, report_file, is_build, build_id, host_os):
tree = None
try:
tree = ET.parse(report_file)
except Exception:
print(sys.exc_info()[0])
print('Invalid path to report: %s' % report_file)
sys.exit(1)
test_suites = tree.getroot()
for test_suite in test_suites:
platform = ""
toolchain = ""
vendor = ""
for properties in test_suite.findall('properties'):
for property in properties.findall('property'):
if property.attrib['name'] == 'target':
platform = property.attrib['value']
project_run_data['platforms_set'].add(platform)
elif property.attrib['name'] == 'toolchain':
toolchain = property.attrib['value']
project_run_data['toolchains_set'].add(toolchain)
elif property.attrib['name'] == 'vendor':
vendor = property.attrib['value']
project_run_data['vendors_set'].add(vendor)
for test_case in test_suite.findall('testcase'):
projectRun = {}
projectRun['build'] = build_id
projectRun['hostOs'] = host_os
projectRun['platform'] = platform
projectRun['toolchain'] = toolchain
projectRun['project'] = test_case.attrib['classname'].split('.')[-1]
projectRun['vendor'] = vendor
project_run_data['names_set'].add(projectRun['project'])
skipped = test_case.findall('skipped')
if not skipped:
system_outs = test_case.findall('system-out')
output = ""
if system_outs:
output = system_outs[0].text
if is_build:
projectRun['buildOutput'] = output
else:
projectRun['testOutput'] = output
errors = test_case.findall('error')
failures = test_case.findall('failure')
projectRunPass = None
result = None
if errors:
projectRunPass = False
result = errors[0].attrib['message']
elif failures:
projectRunPass = False
result = failures[0].attrib['message']
else:
projectRunPass = True
result = 'OK'
if is_build:
projectRun['buildPass'] = projectRunPass
projectRun['buildResult'] = result
else:
projectRun['testPass'] = projectRunPass
projectRun['testResult'] = result
update_project_run(project_run_data['projectRuns'], projectRun, is_build)
def main(arguments):
# Register and parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', required=True, help='url to ci site')
parser.add_argument('-k', '--api-key', required=True, help='api-key for posting data')
subparsers = parser.add_subparsers(help='subcommand help')
create_build_parser = subparsers.add_parser('create-build', help='create a new build')
create_build_parser.add_argument('-b', '--build-number', required=True, help='build number')
create_build_parser.add_argument('-T', '--build-type', choices=['Nightly', 'Limited', 'Pull_Request', 'Release'], required=True, help='type of build')
create_build_parser.add_argument('-s', '--build-source', required=True, help='url to source of build')
create_build_parser.add_argument('-p', '--property-file-format', action='store_true', help='print result in the property file format')
create_build_parser.set_defaults(func=create_build)
finish_build_parser = subparsers.add_parser('finish-build', help='finish a running build')
finish_build_parser.add_argument('-b', '--build-id', required=True, help='build id')
finish_build_parser.set_defaults(func=finish_build)
abort_build_parser = subparsers.add_parser('abort-build', help='abort a running build')
abort_build_parser.add_argument('-b', '--build-id', required=True, help='build id')
abort_build_parser.set_defaults(func=abort_build)
add_project_runs_parser = subparsers.add_parser('add-project-runs', help='add project runs to a build')
add_project_runs_parser.add_argument('-b', '--build-id', required=True, help='build id')
add_project_runs_parser.add_argument('-r', '--build-report', required=True, help='path to junit xml build report')
add_project_runs_parser.add_argument('-t', '--test-report', required=False, help='path to junit xml test report')
add_project_runs_parser.add_argument('-o', '--host-os', required=True, help='host os on which test was run')
add_project_runs_parser.set_defaults(func=add_project_runs)
args = parser.parse_args(arguments)
args.func(args)
if __name__ == '__main__':
main(sys.argv[1:])
|
|
# encoding: utf-8
"""
exabgp.py
Created by Thomas Mangin on 2009-08-30.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
import os
import sys
import platform
import syslog
import string
from exabgp.logger import Logger
from exabgp.version import version
# import before the fork to improve copy on write memory savings
from exabgp.reactor.loop import Reactor
from exabgp.dep import docopt
from exabgp.dep import lsprofcalltree
from exabgp.configuration.usage import usage
from exabgp.debug import setup_report
setup_report()
def is_bgp (s):
return all(c in string.hexdigits or c == ':' for c in s)
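# e.g. is_bgp('001E:02:0000:0007:900F:0003:0001:01') is True; any character
# outside the hexadecimal digits and ':' makes it False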
def __exit (memory, code):
if memory:
from exabgp.dep import objgraph
print "memory utilisation"
print
print objgraph.show_most_common_types(limit=20)
print
print
print "generating memory utilisation graph"
print
obj = objgraph.by_type('Reactor')
objgraph.show_backrefs([obj], max_depth=10)
sys.exit(code)
def main ():
options = docopt.docopt(usage, help=False)
major = sys.version_info[0]
minor = sys.version_info[1]
if major != 2 or minor < 5:
sys.exit('This program can not work (is not tested) with your python version (< 2.5 or >= 3.0)')
if options["--version"]:
print 'ExaBGP : %s' % version
print 'Python : %s' % sys.version.replace('\n',' ')
print 'Uname : %s' % platform.version()
sys.exit(0)
if options["--folder"]:
folder = os.path.realpath(os.path.normpath(options["--folder"]))
elif os.environ.get('ETC',None):
folder = os.path.join(os.path.realpath(os.path.normpath(os.environ.get('ETC','etc'))),'exabgp')
elif sys.argv[0].endswith('/bin/exabgp'):
folder = sys.argv[0][:-len('/bin/exabgp')] + '/etc/exabgp'
elif sys.argv[0].endswith('/sbin/exabgp'):
folder = sys.argv[0][:-len('/sbin/exabgp')] + '/etc/exabgp'
else:
folder = '/etc/exabgp'
if not os.environ.get('ETC',''):
os.environ['ETC'] = folder
envfile = 'exabgp.env' if not options["--env"] else options["--env"]
if not envfile.startswith('/'):
envfile = '%s/%s' % (folder, envfile)
from exabgp.configuration.setup import environment
try:
env = environment.setup(envfile)
except environment.Error,exc:
print usage
print '\nconfiguration issue,', str(exc)
sys.exit(1)
logger = Logger()
named_pipe = os.environ.get('NAMED_PIPE','')
if named_pipe:
from exabgp.application.control import main as control
control(named_pipe)
sys.exit(0)
if options["--decode"]:
decode = ''.join(options["--decode"]).replace(':','').replace(' ','')
if not is_bgp(decode):
print usage
print 'Environment values are:\n' + '\n'.join(' - %s' % _ for _ in environment.default())
print ""
print "The BGP message must be an hexadecimal string."
print ""
print "All colons or spaces are ignored, for example:"
print ""
print " --decode 001E0200000007900F0003000101"
print " --decode 001E:02:0000:0007:900F:0003:0001:01"
print " --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF001E0200000007900F0003000101"
print " --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:001E:02:0000:0007:900F:0003:0001:01"
print " --decode 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF 001E02 00000007900F0003000101'"
sys.exit(1)
else:
decode = ''
# Make sure our child has a named pipe name
if env.api.file:
os.environ['NAMED_PIPE'] = env.api.file
duration = options["--signal"]
if duration and duration.isdigit():
pid = os.fork()
if pid:
import time
import signal
try:
time.sleep(int(duration))
os.kill(pid,signal.SIGUSR1)
except KeyboardInterrupt:
pass
try:
pid,code = os.wait()
sys.exit(code)
except KeyboardInterrupt:
try:
pid,code = os.wait()
sys.exit(code)
except Exception:
sys.exit(0)
if options["--help"]:
print(usage)
print 'Environment values are:\n' + '\n'.join(' - %s' % _ for _ in environment.default())
sys.exit(0)
if options["--decode"]:
env.log.parser = True
env.debug.route = decode
env.tcp.bind = ''
if options["--profile"]:
env.profile.enable = True
if options["--profile"].lower() in ['1','true']:
env.profile.file = True
elif options["--profile"].lower() in ['0','false']:
env.profile.file = False
else:
env.profile.file = options["--profile"]
if envfile and not os.path.isfile(envfile):
comment = 'environment file missing\ngenerate it using "exabgp --fi > %s"' % envfile
else:
comment = ''
if options["--full-ini"] or options["--fi"]:
for line in environment.iter_ini():
print line
sys.exit(0)
if options["--full-env"] or options["--fe"]:
print
for line in environment.iter_env():
print line
sys.exit(0)
if options["--diff-ini"] or options["--di"]:
for line in environment.iter_ini(True):
print line
sys.exit(0)
if options["--diff-env"] or options["--de"]:
for line in environment.iter_env(True):
print line
sys.exit(0)
if options["--once"]:
env.tcp.once = True
if options["--debug"]:
env.log.all = True
env.log.level = syslog.LOG_DEBUG
if options["--pdb"]:
# The following may fail on old versions of python (but is required for debug.py)
os.environ['PDB'] = 'true'
env.debug.pdb = True
if options["--test"]:
env.debug.selfcheck = True
env.log.parser = True
if options["--memory"]:
env.debug.memory = True
configurations = []
# check the files only once we have parsed all the command line options and allowed them to run
if options["<configuration>"]:
for f in options["<configuration>"]:
normalised = os.path.realpath(os.path.normpath(f))
if os.path.isfile(normalised):
configurations.append(normalised)
continue
if f.startswith('etc/exabgp'):
normalised = os.path.join(folder,f[11:])
if os.path.isfile(normalised):
configurations.append(normalised)
continue
logger.configuration('one of the arguments passed as configuration is not a file (%s)' % f,'error')
sys.exit(1)
else:
print(usage)
print 'Environment values are:\n' + '\n'.join(' - %s' % _ for _ in environment.default())
print '\nno configuration file provided'
sys.exit(1)
from exabgp.bgp.message.update.attribute import Attribute
Attribute.caching = env.cache.attributes
if env.debug.rotate or len(configurations) == 1:
run(env,comment,configurations)
if not (env.log.destination in ('syslog','stdout','stderr') or env.log.destination.startswith('host:')):
logger.configuration('can not log to files when running multiple configuration (as we fork)','error')
sys.exit(1)
try:
# run each configuration in its own process
pids = []
for configuration in configurations:
pid = os.fork()
if pid == 0:
run(env,comment,[configuration],os.getpid())
else:
pids.append(pid)
# If we get a ^C / SIGTERM, ignore it and just keep waiting for our child processes
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
# wait for the forked processes
for pid in pids:
os.waitpid(pid,0)
except OSError,exc:
logger.reactor('Can not fork, errno %d : %s' % (exc.errno,exc.strerror),'critical')
sys.exit(1)
def run (env, comment, configurations, pid=0):
logger = Logger()
if comment:
logger.configuration(comment)
if not env.profile.enable:
ok = Reactor(configurations).run()
__exit(env.debug.memory,0 if ok else 1)
try:
import cProfile as profile
except ImportError:
import profile
if not env.profile.file or env.profile.file == 'stdout':
ok = profile.run('Reactor(configurations).run()')
__exit(env.debug.memory,0 if ok else 1)
if pid:
profile_name = "%s-pid-%d" % (env.profile.file,pid)
else:
profile_name = env.profile.file
notice = ''
if os.path.isdir(profile_name):
notice = 'profile can not use this filename as output, it is a directory (%s)' % profile_name
if os.path.exists(profile_name):
notice = 'profile can not use this filename as output, it already exists (%s)' % profile_name
if not notice:
logger.reactor('profiling ....')
profiler = profile.Profile()
profiler.enable()
try:
ok = Reactor(configurations).run()
except Exception:
raise
finally:
profiler.disable()
kprofile = lsprofcalltree.KCacheGrind(profiler)
with open(profile_name, 'w+') as write:
kprofile.output(write)
__exit(env.debug.memory,0 if ok else 1)
else:
logger.reactor("-"*len(notice))
logger.reactor(notice)
logger.reactor("-"*len(notice))
Reactor(configurations).run()
__exit(env.debug.memory,1)
if __name__ == '__main__':
main()
|
|
"""Useful utilities for higher level polynomial classes. """
from sympy import S, sympify, Integer, Rational, Symbol, Add, Mul, Pow, ask
from sympy.polys.polyerrors import PolynomialError, GeneratorsNeeded
_gens_order = {
'a': 301, 'b': 302, 'c': 303, 'd': 304,
'e': 305, 'f': 306, 'g': 307, 'h': 308,
'i': 309, 'j': 310, 'k': 311, 'l': 312,
'm': 313, 'n': 314, 'o': 315, 'p': 216,
'q': 217, 'r': 218, 's': 219, 't': 220,
'u': 221, 'v': 222, 'w': 223, 'x': 124,
'y': 125, 'z': 126,
}
_max_order = 1000
def _sort_gens(gens, **args):
"""Sort generators in a reasonably intelligent way. """
sort = args.get('sort')
wrt = args.get('wrt')
gens_order = {}
if sort is not None:
for i, elt in enumerate(sort.split('<')):
gens_order[elt.strip()] = i+1
if wrt is not None:
wrt = str(wrt)
def order_key(x):
x = str(x)
if x == wrt:
return (0, x)
try:
return ( gens_order[x], x)
except KeyError:
pass
try:
return (_gens_order[x], x)
except KeyError:
pass
return (_max_order, x)
try:
gens = sorted(gens, key=order_key)
except TypeError: # pragma: no cover
pass
return tuple(gens)
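# Illustrative behaviour (doctest-style sketch, assuming SymPy symbols):
#
#     >>> from sympy import symbols
#     >>> a, x, y = symbols('a x y')
#     >>> _sort_gens([a, y, x])
#     (x, y, a)                     # x/y/z rank before a..o in _gens_order
#     >>> _sort_gens([a, y, x], wrt=a)
#     (a, x, y)                     # `wrt` forces the named generator first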
def _unify_gens(f_gens, g_gens):
"""Unify generators in a reasonably intelligent way. """
f_gens = list(f_gens)
g_gens = list(g_gens)
if f_gens == g_gens:
return tuple(f_gens)
gens, common, k = [], [], 0
for gen in f_gens:
if gen in g_gens:
common.append(gen)
for i, gen in enumerate(g_gens):
if gen in common:
g_gens[i], k = common[k], k+1
for gen in common:
i = f_gens.index(gen)
gens.extend(f_gens[:i])
f_gens = f_gens[i+1:]
i = g_gens.index(gen)
gens.extend(g_gens[:i])
g_gens = g_gens[i+1:]
gens.append(gen)
gens.extend(f_gens)
gens.extend(g_gens)
return tuple(gens)
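# Illustrative behaviour: common generators keep their relative position while
# the remaining ones are interleaved around them, e.g.
#
#     >>> _unify_gens([x, y], [y, z])
#     (x, y, z)
#     >>> _unify_gens([x, y], [x, y])
#     (x, y)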
def _analyze_gens(gens):
"""Support for passing generators as `*gens` and `[gens]`. """
if len(gens) == 1 and hasattr(gens[0], '__iter__'):
return tuple(gens[0])
else:
return tuple(gens)
def _sort_factors(factors, **args):
"""Sort low-level factors in increasing 'complexity' order. """
def order_if_multiple_key((f, n)):
return (len(f), n, f)
def order_no_multiple_key(f):
return (len(f), f)
if args.get('multiple', True):
return sorted(factors, key=order_if_multiple_key)
else:
return sorted(factors, key=order_no_multiple_key)
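# Illustrative behaviour: factors are compared by length first, then by
# multiplicity (when present), then lexicographically, e.g.
#
#     >>> _sort_factors([([1, 2, 3], 2), ([1, 2], 1)])
#     [([1, 2], 1), ([1, 2, 3], 2)]
#     >>> _sort_factors([[1, 2, 3], [1, 2]], multiple=False)
#     [[1, 2], [1, 2, 3]]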
def _analyze_power(base, exp):
"""Extract non-integer part of `exp` to the `base`. """
if exp.is_Number:
if exp.is_Rational:
if not exp.is_Integer:
base = Pow(base, Rational(1, exp.q))
exp = exp.p
else:
base, exp = Pow(base, exp), 1
else:
exp, tail = exp.as_coeff_mul()
if exp.is_Number:
if exp.is_Rational:
if not exp.is_Integer:
tail += (Rational(1, exp.q),)
exp = exp.p
else:
tail, exp = (exp,) + tail, 1
else: # pragma: no cover
raise PolynomialError("got invalid polynomial term")
base = Pow(base, Mul(*tail))
if exp < 0:
exp, base = -exp, Pow(base, -S.One)
return base, exp
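# Illustrative behaviour (assuming SymPy objects): a rational exponent is split
# into an integer part and a radical pushed into the base, and a negative
# exponent inverts the base, e.g.
#
#     >>> _analyze_power(x, Rational(3, 2))
#     (sqrt(x), 3)
#     >>> _analyze_power(x, Integer(-2))
#     (1/x, 2)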
def _dict_from_basic_if_gens(ex, gens, **args):
"""Convert `ex` to a multinomial given a generators list. """
k, indices = len(gens), {}
for i, g in enumerate(gens):
indices[g] = i
result = {}
for term in Add.make_args(ex):
coeff, monom = [], [0]*k
for factor in Mul.make_args(term):
if factor.is_Number:
coeff.append(factor)
else:
try:
base, exp = _analyze_power(*factor.as_base_exp())
monom[indices[base]] = exp
except KeyError:
if not factor.has(*gens):
coeff.append(factor)
else:
raise PolynomialError("%s contains an element of the generators set" % factor)
monom = tuple(monom)
if result.has_key(monom):
result[monom] += Mul(*coeff)
else:
result[monom] = Mul(*coeff)
return result
def _dict_from_basic_no_gens(ex, **args):
"""Figure out generators and convert `ex` to a multinomial. """
domain = args.get('domain')
if domain is not None:
def _is_coeff(factor):
return factor in domain
else:
extension = args.get('extension')
if extension is True:
def _is_coeff(factor):
return ask(factor, 'algebraic')
else:
greedy = args.get('greedy', True)
if greedy is True:
def _is_coeff(factor):
return False
else:
def _is_coeff(factor):
return factor.is_number
gens, terms = set([]), []
for term in Add.make_args(ex):
coeff, elements = [], {}
for factor in Mul.make_args(term):
if factor.is_Number or _is_coeff(factor):
coeff.append(factor)
else:
base, exp = _analyze_power(*factor.as_base_exp())
elements[base] = exp
gens.add(base)
terms.append((coeff, elements))
if not gens:
raise GeneratorsNeeded("specify generators to give %s a meaning" % ex)
gens = _sort_gens(gens, **args)
k, indices = len(gens), {}
for i, g in enumerate(gens):
indices[g] = i
result = {}
for coeff, term in terms:
monom = [0]*k
for base, exp in term.iteritems():
monom[indices[base]] = exp
monom = tuple(monom)
if result.has_key(monom):
result[monom] += Mul(*coeff)
else:
result[monom] = Mul(*coeff)
return result, tuple(gens)
def dict_from_basic(ex, gens=None, **args):
"""Converts a SymPy expression to a multinomial. """
if args.get('expand', True):
ex = ex.expand()
if gens is not None:
return _dict_from_basic_if_gens(ex, gens, **args)
else:
return _dict_from_basic_no_gens(ex, **args)
def basic_from_dict(rep, *gens):
"""Converts a multinomial to a SymPy expression. """
result = []
for monom, coeff in rep.iteritems():
term = [coeff]
for g, m in zip(gens, monom):
term.append(Pow(g, m))
result.append(Mul(*term))
return Add(*result)
def _dict_reorder(rep, gens, new_gens):
"""Reorder levels using dict representation. """
gens = list(gens)
monoms = rep.keys()
coeffs = rep.values()
new_monoms = [ [] for _ in xrange(len(rep)) ]
for gen in new_gens:
try:
j = gens.index(gen)
for M, new_M in zip(monoms, new_monoms):
new_M.append(M[j])
except ValueError:
for new_M in new_monoms:
new_M.append(0)
return map(tuple, new_monoms), coeffs
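# Illustrative round trip (sketch, assuming SymPy symbols x, y): dict_from_basic
# maps an expanded expression to {monomial exponents: coefficient}, and
# basic_from_dict inverts it, e.g.
#
#     >>> dict_from_basic(x**2 + 3*x*y, gens=(x, y))
#     {(2, 0): 1, (1, 1): 3}
#     >>> basic_from_dict({(2, 0): Integer(1), (1, 1): Integer(3)}, x, y)
#     x**2 + 3*x*y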
|
|
# pylint: disable=wrong-or-nonexistent-copyright-notice
"""Code to interact with GitHub API to label and auto-merge pull requests."""
import datetime
import json
import os
import sys
import time
import traceback
from typing import Callable, Optional, List, Any, Dict, Set, Union
from google.cloud import secretmanager_v1beta1
from dev_tools.github_repository import GithubRepository
GITHUB_REPO_NAME = 'cirq'
GITHUB_REPO_ORGANIZATION = 'quantumlib'
ACCESS_TOKEN_ENV_VARIABLE = 'CIRQ_BOT_GITHUB_ACCESS_TOKEN'
POLLING_PERIOD = datetime.timedelta(seconds=10)
USER_AUTO_MERGE_LABEL = 'automerge'
HEAD_AUTO_MERGE_LABEL = 'front_of_queue_automerge'
AUTO_MERGE_LABELS = [USER_AUTO_MERGE_LABEL, HEAD_AUTO_MERGE_LABEL]
RECENTLY_MODIFIED_THRESHOLD = datetime.timedelta(seconds=30)
PR_SIZE_LABELS = ['size: U', 'size: XS', 'size: S', 'size: M', 'size: L', 'size: XL']
PR_SIZES = [0, 10, 50, 250, 1000, 1 << 30]
def get_pr_size_label(tot_changes: int) -> str:
i = 0
ret = ''
while i < len(PR_SIZES):
if tot_changes < PR_SIZES[i]:
ret = PR_SIZE_LABELS[i]
break
i += 1
return ret
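# Illustrative mapping (not from the original source): with the thresholds
# above, a tot_changes of 0-9 yields 'size: XS', 10-49 'size: S', 50-249
# 'size: M', 250-999 'size: L', and anything below 1 << 30 'size: XL'.
# Note that 'size: U' is unreachable for non-negative change counts, since
# the first threshold is 0.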
def is_recent_date(date: datetime.datetime) -> bool:
d = datetime.datetime.utcnow() - date
return d < RECENTLY_MODIFIED_THRESHOLD
class CannotAutomergeError(RuntimeError):
def __init__(self, *args, may_be_temporary: bool = False):
super().__init__(*args)
self.may_be_temporary = may_be_temporary
class PullRequestDetails:
def __init__(self, payload: Any, repo: GithubRepository) -> None:
self.payload = payload
self.repo = repo
@staticmethod
def from_github(repo: GithubRepository, pull_id: int) -> 'PullRequestDetails':
"""Retrieves a single pull request.
References:
https://developer.github.com/v3/pulls/#get-a-single-pull-request
Args:
repo: The github repo to get the pull request from.
pull_id: The id of the pull request.
Raises:
RuntimeError: If the request does not return status 200 (success).
"""
url = "https://api.github.com/repos/{}/{}/pulls/{}".format(
repo.organization, repo.name, pull_id
)
response = repo.get(url)
if response.status_code != 200:
raise RuntimeError(
'Pull check failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
payload = json.JSONDecoder().decode(response.content.decode())
return PullRequestDetails(payload, repo)
@property
def remote_repo(self) -> GithubRepository:
"""Return the GithubRepository corresponding to this pull request."""
return GithubRepository(
organization=self.payload['head']['repo']['owner']['login'],
name=self.payload['head']['repo']['name'],
access_token=self.repo.access_token,
)
def is_on_fork(self) -> bool:
local = (self.repo.organization.lower(), self.repo.name.lower())
remote = (self.remote_repo.organization.lower(), self.remote_repo.name.lower())
return local != remote
def has_label(self, desired_label: str) -> bool:
return any(label['name'] == desired_label for label in self.payload['labels'])
@property
def last_updated(self) -> datetime.datetime:
return datetime.datetime.strptime(self.payload['updated_at'], '%Y-%m-%dT%H:%M:%SZ')
@property
def modified_recently(self) -> bool:
return is_recent_date(self.last_updated)
@property
def marked_automergeable(self) -> bool:
return any(self.has_label(label) for label in AUTO_MERGE_LABELS)
@property
def marked_size(self) -> bool:
return any(self.has_label(label) for label in PR_SIZE_LABELS)
@property
def pull_id(self) -> int:
return self.payload['number']
@property
def branch_name(self) -> str:
return self.payload['head']['ref']
@property
def base_branch_name(self) -> str:
return self.payload['base']['ref']
@property
def branch_sha(self) -> str:
return self.payload['head']['sha']
@property
def title(self) -> str:
return self.payload['title']
@property
def body(self) -> str:
return self.payload['body']
@property
def additions(self) -> int:
return int(self.payload['additions'])
@property
def deletions(self) -> int:
return int(self.payload['deletions'])
@property
def tot_changes(self) -> int:
return self.deletions + self.additions
def check_collaborator_has_write(
repo: GithubRepository, username: str
) -> Optional[CannotAutomergeError]:
"""Checks whether the given user is a collaborator (admin and write access).
References:
https://developer.github.com/v3/issues/events/#list-events-for-an-issue
Args:
repo: The github repo to check.
username: The github username to check whether the user is a collaborator.
Returns:
CannotAutomergeError if the user does not have admin or write permissions and so
cannot use automerge, None otherwise.
Raises:
RuntimeError: If the request does not return status 200 (success).
"""
url = "https://api.github.com/repos/{}/{}/collaborators/{}/permission" "".format(
repo.organization, repo.name, username
)
response = repo.get(url)
if response.status_code != 200:
raise RuntimeError(
'Collaborator check failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
payload = json.JSONDecoder().decode(response.content.decode())
if payload['permission'] not in ['admin', 'write']:
return CannotAutomergeError('Only collaborators with write permission can use automerge.')
return None
def get_all(repo: GithubRepository, url_func: Callable[[int], str]) -> List[Any]:
"""Get all results, accounting for pagination.
Args:
repo: The github repo to call GET on.
url_func: A function from an integer page number to the url to get the result for that page.
Returns:
A list of the results by page.
Raises:
RuntimeError: If the request does not return status 200 (success).
"""
results: List[Any] = []
page = 0
has_next = True
while has_next:
url = url_func(page)
response = repo.get(url)
if response.status_code != 200:
raise RuntimeError(
f'Request failed to {url}. Code: {response.status_code}.'
f' Content: {response.content!r}.'
)
payload = json.JSONDecoder().decode(response.content.decode())
results += payload
has_next = 'link' in response.headers and 'rel="next"' in response.headers['link']
page += 1
return results
def check_auto_merge_labeler(
repo: GithubRepository, pull_id: int
) -> Optional[CannotAutomergeError]:
"""Checks whether the given pull request had an automerge id and user who added it was admin.
References:
https://developer.github.com/v3/issues/events/#list-events-for-an-issue
Args:
repo: The github repo to check.
pull_id: The github pull id to check.
Returns:
CannotAutomergeError if the automerge label is missing or the user who added it lacks write access, None otherwise.
"""
events = get_all(
repo,
lambda page: (
"https://api.github.com/repos/{}/{}/issues/{}/events"
"?per_page=100&page={}".format(repo.organization, repo.name, pull_id, page)
),
)
relevant = [
event
for event in events
if event['event'] == 'labeled' and event['label']['name'] in AUTO_MERGE_LABELS
]
if not relevant:
return CannotAutomergeError('"automerge" label was never added.')
return check_collaborator_has_write(repo, relevant[-1]['actor']['login'])
def add_comment(repo: GithubRepository, pull_id: int, text: str) -> None:
"""Add a comment to a pull request.
References:
https://developer.github.com/v3/issues/comments/#create-a-comment
Args:
repo: The github repo containing the pull request to comment on.
pull_id: The id of the pull request to comment on.
text: The text of the comment.
Raises:
RuntimeError: If the request does not return status 201 (created).
"""
url = "https://api.github.com/repos/{}/{}/issues/{}/comments".format(
repo.organization, repo.name, pull_id
)
data = {'body': text}
response = repo.post(url, json=data)
if response.status_code != 201:
raise RuntimeError(
'Add comment failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
def edit_comment(repo: GithubRepository, text: str, comment_id: int) -> None:
"""Edits an existing github comment.
References:
https://developer.github.com/v3/issues/comments/#edit-a-comment
Args:
repo: The github repo that contains the comment.
text: The new comment text.
comment_id: The id of the comment to edit.
Raises:
RuntimeError: If the request does not return status 200 (success).
"""
url = "https://api.github.com/repos/{}/{}/issues/comments/{}".format(
repo.organization, repo.name, comment_id
)
data = {'body': text}
response = repo.patch(url, json=data)
if response.status_code != 200:
raise RuntimeError(
'Edit comment failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
def get_branch_details(repo: GithubRepository, branch: str) -> Any:
"""Get details about a github branch.
References:
https://developer.github.com/v3/repos/branches/#get-branch
Args:
repo: The github repo that has the branch.
branch: The name of the branch.
Returns:
The raw response to the query to get details.
Raises:
RuntimeError: If the request does not return status 200 (success).
"""
url = "https://api.github.com/repos/{}/{}/branches/{}".format(
repo.organization, repo.name, branch
)
response = repo.get(url)
if response.status_code != 200:
raise RuntimeError(
'Failed to get branch details. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
return json.JSONDecoder().decode(response.content.decode())
def get_pr_statuses(pr: PullRequestDetails) -> List[Dict[str, Any]]:
"""List the commit statuses of a specific pull request.
References:
https://developer.github.com/v3/repos/statuses/#list-statuses-for-a-specific-ref
Args:
pr: The pull request details.
Returns:
The raw response to the request.
Raises:
RuntimeError: If the request does not return status 200 (success).
"""
url = "https://api.github.com/repos/{}/{}/commits/{}/statuses".format(
pr.repo.organization, pr.repo.name, pr.branch_sha
)
response = pr.repo.get(url)
if response.status_code != 200:
raise RuntimeError(
'Get statuses failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
return json.JSONDecoder().decode(response.content.decode())
def get_pr_check_status(pr: PullRequestDetails) -> Any:
"""Get the combined status for a pull request.
References:
https://developer.github.com/v3/repos/statuses/#get-the-combined-status-for-a-specific-ref
Args:
pr: The pull request details.
Returns:
The raw response to the request.
Raises:
RuntimeError: If the request does not return status 200 (success).
"""
url = "https://api.github.com/repos/{}/{}/commits/{}/status".format(
pr.repo.organization, pr.repo.name, pr.branch_sha
)
response = pr.repo.get(url)
if response.status_code != 200:
raise RuntimeError(
'Get status failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
return json.JSONDecoder().decode(response.content.decode())
def classify_pr_status_check_state(pr: PullRequestDetails) -> Optional[bool]:
"""Classify the pull request status.
Args:
pr: The pull request whose status should be checked.
Returns:
True if the status is successful, False if the status has failed, and None if the
status is pending.
Raises:
RuntimeError: If the status state is of an unknown type.
"""
has_failed = False
has_pending = False
check_status = get_pr_check_status(pr)
state = check_status['state']
if state == 'failure':
has_failed = True
elif state == 'pending':
has_pending = True
elif state != 'success':
raise RuntimeError(f'Unrecognized status state: {state!r}')
check_data = get_pr_checks(pr)
for check in check_data['check_runs']:
if check['status'] != 'completed':
has_pending = True
elif check['conclusion'] != 'success':
has_failed = True
if has_failed:
return False
if has_pending:
return None
return True
def classify_pr_synced_state(pr: PullRequestDetails) -> Optional[bool]:
"""Get the mergeable state of the pull request.
References:
https://developer.github.com/v3/pulls/#get-a-single-pull-request
https://developer.github.com/v4/enum/mergestatestatus/
Args:
pr: The pull request to query for mergeable state.
Returns:
True if the classification is clean, False if it is behind, and None otherwise.
"""
state = pr.payload['mergeable_state'].lower()
classification = {
'behind': False,
'clean': True,
}
return classification.get(state, None)
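# Illustrative mapping: a payload whose mergeable_state is 'clean' classifies as
# True (merge now), 'behind' as False (needs a sync with master first), and any
# other value reported by the API (e.g. 'blocked' or 'unknown') as None.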
def get_pr_review_status(pr: PullRequestDetails, per_page: int = 100) -> Any:
"""Gets the review status of the pull request.
References:
https://developer.github.com/v3/pulls/reviews/#list-reviews-on-a-pull-request
Args:
pr: The pull request whose review status will be checked.
per_page: The number of results to return per page.
Returns:
The full response from the review query.
Raises:
RuntimeError: If the request does not return status 200 (success).
"""
url = (
f"https://api.github.com/repos/{pr.repo.organization}/{pr.repo.name}"
f"/pulls/{pr.pull_id}/reviews"
f"?per_page={per_page}"
)
response = pr.repo.get(url)
if response.status_code != 200:
raise RuntimeError(
'Get review failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
return json.JSONDecoder().decode(response.content.decode())
def get_pr_checks(pr: PullRequestDetails) -> Dict[str, Any]:
"""List checks for a pull request.
References:
https://developer.github.com/v3/checks/runs/#list-check-runs-for-a-specific-ref
Args:
pr: The pull request to get checks for.
Returns:
The raw response of the request.
Raises:
RuntimeError: If the request does not return status 200 (success).
"""
url = (
f"https://api.github.com/repos/{pr.repo.organization}/{pr.repo.name}"
f"/commits/{pr.branch_sha}/check-runs?per_page=100"
)
response = pr.repo.get(url, headers={'Accept': 'application/vnd.github.antiope-preview+json'})
if response.status_code != 200:
raise RuntimeError(
'Get check-runs failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
return json.JSONDecoder().decode(response.content.decode())
_last_print_was_tick = False
_tick_count = 0
def log(*args):
global _last_print_was_tick
if _last_print_was_tick:
print()
_last_print_was_tick = False
print(*args)
def wait_for_polling_period():
global _last_print_was_tick
global _tick_count
_last_print_was_tick = True
print('.', end='', flush=True)
_tick_count += 1
if _tick_count == 100:
print()
_tick_count = 0
time.sleep(POLLING_PERIOD.total_seconds())
def absent_status_checks(pr: PullRequestDetails, master_data: Optional[Any] = None) -> Set[str]:
if pr.base_branch_name == 'master' and master_data is not None:
branch_data = master_data
else:
branch_data = get_branch_details(pr.repo, pr.base_branch_name)
status_data = get_pr_statuses(pr)
check_data = get_pr_checks(pr)
statuses_present = {status['context'] for status in status_data}
checks_present = {check['name'] for check in check_data['check_runs']}
reqs = branch_data['protection']['required_status_checks']['contexts']
return set(reqs) - statuses_present - checks_present
def get_repo_ref(repo: GithubRepository, ref: str) -> Dict[str, Any]:
"""Get a given github reference.
References:
https://developer.github.com/v3/git/refs/#get-a-reference
Args:
repo: The github repo to get the reference from.
ref: The id of the reference.
Returns:
The raw response of the request for the reference.
Raises:
RuntimeError: If the request does not return status 200 (success).
"""
url = f"https://api.github.com/repos/{repo.organization}/{repo.name}/git/refs/{ref}"
response = repo.get(url)
if response.status_code != 200:
raise RuntimeError(
'Refs get failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
payload = json.JSONDecoder().decode(response.content.decode())
return payload
def get_master_sha(repo: GithubRepository) -> str:
"""Get the sha hash for the given repo."""
ref = get_repo_ref(repo, 'heads/master')
return ref['object']['sha']
def list_pr_comments(repo: GithubRepository, pull_id: int) -> List[Dict[str, Any]]:
"""List comments for a given pull request.
References:
https://developer.github.com/v3/issues/comments/#list-comments-on-an-issue
Args:
repo: The github repo for the pull request.
pull_id: The id of the pull request.
Returns:
A list of the raw responses for the pull requests.
Raises:
RuntimeError: If the request does not return status 200 (success).
"""
url = "https://api.github.com/repos/{}/{}/issues/{}/comments".format(
repo.organization, repo.name, pull_id
)
response = repo.get(url)
if response.status_code != 200:
raise RuntimeError(
'Comments get failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
payload = json.JSONDecoder().decode(response.content.decode())
return payload
def delete_comment(repo: GithubRepository, comment_id: int) -> None:
"""Delete a comment.
References:
https://developer.github.com/v3/issues/comments/#delete-a-comment
Args:
repo: The github repo where the comment lives.
comment_id: The id of the comment to delete.
Raises:
RuntimeError: If the request does not return status 204 (no content).
"""
url = "https://api.github.com/repos/{}/{}/issues/comments/{}".format(
repo.organization, repo.name, comment_id
)
response = repo.delete(url)
if response.status_code != 204:
raise RuntimeError(
'Comment delete failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
def update_branch(pr: PullRequestDetails) -> Union[bool, CannotAutomergeError]:
"""Equivalent to hitting the 'update branch' button on a PR.
As of Feb 2020 this API feature is still in beta. Note that currently, if
you attempt to update branch when already synced to master, a vacuous merge
commit will be created.
References:
https://developer.github.com/v3/pulls/#update-a-pull-request-branch
Args:
pr: The pull request to update.
Returns:
True if the update was successful, or CannotAutomergeError if it was not possible to
perform the update.
"""
url = (
f"https://api.github.com/repos/{pr.repo.organization}/{pr.repo.name}"
f"/pulls/{pr.pull_id}/update-branch"
)
data = {
'expected_head_sha': pr.branch_sha,
}
response = pr.repo.put(
url,
json=data,
# Opt into BETA feature.
headers={'Accept': 'application/vnd.github.lydian-preview+json'},
)
if response.status_code == 422:
return CannotAutomergeError(
"Failed to update branch (incorrect expected_head_sha).",
may_be_temporary=True,
)
if response.status_code != 202:
return CannotAutomergeError(
f"Unrecognized update-branch status code ({response.status_code}).",
)
return True
def attempt_sync_with_master(pr: PullRequestDetails) -> Union[bool, CannotAutomergeError]:
"""Sync a pull request with the master branch.
References:
https://developer.github.com/v3/repos/merging/#perform-a-merge
Args:
pr: The pull request to sync.
Returns:
True if the sync was successful, or CannotAutomergeError if it was not possible to sync.
Raises:
RuntimeError: If the merge request returned a failed response.
"""
master_sha = get_master_sha(pr.repo)
remote = pr.remote_repo
url = f"https://api.github.com/repos/{remote.organization}/{remote.name}/merges"
data = {
'base': pr.branch_name,
'head': master_sha,
'commit_message': 'Update branch (automerge)',
}
response = pr.remote_repo.post(url, json=data)
if response.status_code == 201:
# Merge succeeded.
log(f'Synced #{pr.pull_id} ({pr.title!r}) with master.')
return True
if response.status_code == 204:
# Already merged.
return False
if response.status_code == 409:
# Merge conflict.
return CannotAutomergeError("There's a merge conflict.")
if response.status_code == 403:
# Permission denied.
return CannotAutomergeError(
"Spurious failure. Github API requires me to be an admin on the "
"fork repository to merge master into the PR branch. Hit "
"'Update Branch' for me before trying again."
)
raise RuntimeError(
'Sync with master failed for unknown reason. '
'Code: {}. Content: {!r}.'.format(response.status_code, response.content)
)
def attempt_squash_merge(pr: PullRequestDetails) -> Union[bool, CannotAutomergeError]:
"""Perform a squash merge on a pull request.
References:
https://developer.github.com/v3/pulls/#merge-a-pull-request-merge-button
Args:
pr: The pull request to squash merge.
Returns:
True if the squash merge was successful, or CannotAutomergeError if the squash merge
was not possible.
Raises:
RuntimeError: If the request to merge returned a failed merge response.
"""
url = "https://api.github.com/repos/{}/{}/pulls/{}/merge".format(
pr.repo.organization, pr.repo.name, pr.pull_id
)
data = {
'commit_title': f'{pr.title} (#{pr.pull_id})',
'commit_message': pr.body,
'sha': pr.branch_sha,
'merge_method': 'squash',
}
response = pr.repo.put(url, json=data)
if response.status_code == 200:
# Merge succeeded.
log(f'Merged PR#{pr.pull_id} ({pr.title!r}):\n{indent(pr.body)}\n')
return True
if response.status_code == 405:
return CannotAutomergeError("Pull Request is not mergeable.")
if response.status_code == 409:
# Need to sync.
return False
raise RuntimeError(
f'Merge failed. Code: {response.status_code}. Content: {response.content!r}.'
)
def auto_delete_pr_branch(pr: PullRequestDetails) -> bool:
"""Delete a branch.
References:
https://developer.github.com/v3/git/refs/#delete-a-reference
Args:
pr: The pull request to delete.
Returns:
True if the delete was successful, False otherwise. A failed delete is
logged rather than raised.
"""
open_pulls = list_open_pull_requests(pr.repo, base_branch=pr.branch_name)
if any(open_pulls):
log(f'Not deleting branch {pr.branch_name!r}. It is used elsewhere.')
return False
remote = pr.remote_repo
if pr.is_on_fork():
log(
'Not deleting branch {!r}. It belongs to a fork ({}/{}).'.format(
pr.branch_name, pr.remote_repo.organization, pr.remote_repo.name
)
)
return False
url = "https://api.github.com/repos/{}/{}/git/refs/heads/{}".format(
remote.organization, remote.name, pr.branch_name
)
response = pr.repo.delete(url)
if response.status_code == 204:
# Delete succeeded.
log(f'Deleted branch {pr.branch_name!r}.')
return True
log(f'Delete failed. Code: {response.status_code}. Content: {response.content!r}.')
return False
def branch_data_modified_recently(payload: Any) -> bool:
"""Whether the branch was modified recently."""
modified_date = datetime.datetime.strptime(
payload['commit']['commit']['committer']['date'], '%Y-%m-%dT%H:%M:%SZ'
)
return is_recent_date(modified_date)
def add_labels_to_pr(repo: GithubRepository, pull_id: int, *labels: str) -> None:
"""Add lables to a pull request.
References:
https://developer.github.com/v3/issues/labels/#add-labels-to-an-issue
Args:
repo: The github repo where the pull request lives.
pull_id: The id of the pull request.
*labels: The labels to add to the pull request.
Raises:
RuntimeError: If the request to add labels returned anything other than success.
"""
url = "https://api.github.com/repos/{}/{}/issues/{}/labels".format(
repo.organization, repo.name, pull_id
)
response = repo.post(url, json=list(labels))
if response.status_code != 200:
raise RuntimeError(
'Add labels failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
def remove_label_from_pr(repo: GithubRepository, pull_id: int, label: str) -> bool:
"""Removes a label from a pull request.
References:
https://developer.github.com/v3/issues/labels/#remove-a-label-from-an-issue
Args:
repo: The github repo for the pull request.
pull_id: The id for the pull request.
label: The label to remove.
Raises:
RuntimeError: If the request does not return status 200 (success).
Returns:
True if the label existed and was deleted. False if the label did not exist.
"""
url = "https://api.github.com/repos/{}/{}/issues/{}/labels/{}".format(
repo.organization, repo.name, pull_id, label
)
response = repo.delete(url)
if response.status_code == 404:
payload = json.JSONDecoder().decode(response.content.decode())
if payload['message'] == 'Label does not exist':
return False
if response.status_code == 200:
# Removed the label.
return True
raise RuntimeError(
'Label remove failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
def list_open_pull_requests(
repo: GithubRepository, base_branch: Optional[str] = None, per_page: int = 100
) -> List[PullRequestDetails]:
"""List open pull requests.
Args:
repo: The github repo for the pull requests.
base_branch: The branch for which to request pull requests.
per_page: The number of results to obtain per page.
Returns:
A list of the pull requests.
Raises:
RuntimeError: If the request does not return status 200 (success).
"""
url = (
f"https://api.github.com/repos/{repo.organization}/{repo.name}/pulls"
f"?per_page={per_page}"
)
data = {
'state': 'open',
}
if base_branch is not None:
data['base'] = base_branch
response = repo.get(url, json=data)
if response.status_code != 200:
raise RuntimeError(
'List pulls failed. Code: {}. Content: {!r}.'.format(
response.status_code, response.content
)
)
pulls = json.JSONDecoder().decode(response.content.decode())
results = [PullRequestDetails(pull, repo) for pull in pulls]
# Filtering via the API doesn't seem to work, so we do it ourselves.
if base_branch is not None:
results = [result for result in results if result.base_branch_name == base_branch]
return results
def find_auto_mergeable_prs(repo: GithubRepository) -> List[int]:
open_prs = list_open_pull_requests(repo)
auto_mergeable_prs = [pr for pr in open_prs if pr.marked_automergeable]
return [pr.payload['number'] for pr in auto_mergeable_prs]
def find_problem_with_automergeability_of_pr(
pr: PullRequestDetails, master_branch_data: Any
) -> Optional[CannotAutomergeError]:
# Sanity.
if pr.payload['state'] != 'open':
return CannotAutomergeError('Not an open pull request.')
if pr.base_branch_name != 'master':
return CannotAutomergeError('Can only automerge into master.')
if pr.payload['mergeable_state'] == 'dirty':
return CannotAutomergeError('There are merge conflicts.')
# If a user removes the automerge label, remove the head label for them.
if pr.has_label(HEAD_AUTO_MERGE_LABEL) and not pr.has_label(USER_AUTO_MERGE_LABEL):
return CannotAutomergeError(
f'The {USER_AUTO_MERGE_LABEL} label was removed.', may_be_temporary=True
)
# Only collaborators with write access can use the automerge labels.
label_problem = check_auto_merge_labeler(pr.repo, pr.pull_id)
if label_problem is not None:
return label_problem
# Check review status.
review_status = get_pr_review_status(pr)
if not any(review['state'] == 'APPROVED' for review in review_status):
return CannotAutomergeError('No approved review.')
if any(review['state'] == 'REQUEST_CHANGES' for review in review_status):
return CannotAutomergeError('A review is requesting changes.')
# Any failing status checks?
status_check_state = classify_pr_status_check_state(pr)
if status_check_state is False:
return CannotAutomergeError('A status check is failing.')
# Some issues can only be detected after waiting a bit.
if not pr.modified_recently:
# Nothing is setting a required status check.
missing_statuses = absent_status_checks(pr, master_branch_data)
if missing_statuses:
return CannotAutomergeError(
'A required status check is not present.\n\n'
'Missing statuses: {!r}'.format(sorted(missing_statuses))
)
# Can't figure out how to make it merge.
if pr.payload['mergeable_state'] == 'blocked':
if status_check_state is True:
return CannotAutomergeError(
"Merging is blocked (I don't understand why).", may_be_temporary=True
)
if pr.payload['mergeable'] is False:
return CannotAutomergeError(
"PR isn't classified as mergeable (I don't understand why).", may_be_temporary=True
)
return None
def cannot_merge_pr(pr: PullRequestDetails, reason: CannotAutomergeError):
log(f'Cancelled automerge of PR#{pr.pull_id} ({pr.title!r}): {reason.args[0]}')
add_comment(pr.repo, pr.pull_id, f'Automerge cancelled: {reason}')
for label in AUTO_MERGE_LABELS:
if pr.has_label(label):
remove_label_from_pr(pr.repo, pr.pull_id, label)
def drop_temporary(
pr: PullRequestDetails,
problem: Optional[CannotAutomergeError],
prev_seen_times: Dict[int, datetime.datetime],
next_seen_times: Dict[int, datetime.datetime],
) -> Optional[CannotAutomergeError]:
"""Filters out problems that may be temporary."""
if problem is not None and problem.may_be_temporary:
since = prev_seen_times.get(pr.pull_id, datetime.datetime.utcnow())
if is_recent_date(since):
next_seen_times[pr.pull_id] = since
return None
return problem
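# Illustrative timeline (not from the original source): the first time a PR
# reports a temporary problem, its timestamp is carried into next_seen_times and
# the problem is suppressed; if the same problem is still present once that
# timestamp falls outside RECENTLY_MODIFIED_THRESHOLD, it is returned and the
# caller cancels the automerge.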
def gather_auto_mergeable_prs(
repo: GithubRepository, problem_seen_times: Dict[int, datetime.datetime]
) -> List[PullRequestDetails]:
result = []
raw_prs = list_open_pull_requests(repo)
master_branch_data = get_branch_details(repo, 'master')
if branch_data_modified_recently(master_branch_data):
return []
prev_seen_times = dict(problem_seen_times)
problem_seen_times.clear()
for raw_pr in raw_prs:
if not raw_pr.marked_automergeable:
continue
# Looking up a single PR gives more data, e.g. the 'mergeable' entry.
pr = PullRequestDetails.from_github(repo, raw_pr.pull_id)
problem = find_problem_with_automergeability_of_pr(pr, master_branch_data)
if problem is None:
result.append(pr)
persistent_problem = drop_temporary(
pr, problem, prev_seen_times=prev_seen_times, next_seen_times=problem_seen_times
)
if persistent_problem is not None:
cannot_merge_pr(pr, persistent_problem)
return result
def merge_desirability(pr: PullRequestDetails) -> Any:
synced = classify_pr_synced_state(pr) is True
tested = synced and (classify_pr_status_check_state(pr) is True)
forked = pr.is_on_fork()
# 1. Prefer to merge already-synced PRs. This minimizes the number of builds
# performed by travis.
# 2. Prefer to merge synced PRs from forks. This minimizes manual labor;
# currently the bot can't resync these PRs. Secondarily, avoid unsynced
# PRs from forks until necessary because they will fail when hit.
# 3. Prefer to merge PRs where the status checks have already completed.
# This is just faster, because the next build can be started sooner.
# 4. Use seniority as a tie breaker.
# Desired order is:
# TF
# SF
# T_
# S_
# __
# _F
# (S = synced, T = tested, F = forked.)
if forked:
if tested:
rank = 5
elif synced:
rank = 4
else:
rank = 0
else:
if tested:
rank = 3
elif synced:
rank = 2
else:
rank = 1
return rank, -pr.pull_id
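# Illustrative ordering (hypothetical PR numbers): two synced-and-tested PRs,
# #10 on a fork and #12 in the main repo, rank as (5, -10) and (3, -12)
# respectively, so pick_head_pr's max() promotes #10; between PRs of equal rank
# the smaller (older) pull id wins because of the -pr.pull_id tie breaker.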
def pick_head_pr(active_prs: List[PullRequestDetails]) -> Optional[PullRequestDetails]:
if not active_prs:
return None
for pr in sorted(active_prs, key=merge_desirability, reverse=True):
if pr.has_label(HEAD_AUTO_MERGE_LABEL):
return pr
promoted = max(active_prs, key=merge_desirability)
log(f'Front of queue: PR#{promoted.pull_id} ({promoted.title!r})')
add_labels_to_pr(promoted.repo, promoted.pull_id, HEAD_AUTO_MERGE_LABEL)
return promoted
def merge_duty_cycle(
repo: GithubRepository, persistent_temporary_problems: Dict[int, datetime.datetime]
):
"""Checks and applies auto merge labeling operations."""
active_prs = gather_auto_mergeable_prs(repo, persistent_temporary_problems)
head_pr = pick_head_pr(active_prs)
if head_pr is None:
return
state = classify_pr_synced_state(head_pr)
if state is False:
result = update_branch(head_pr)
elif state is True:
result = attempt_squash_merge(head_pr)
if result is True:
auto_delete_pr_branch(head_pr)
for label in AUTO_MERGE_LABELS:
remove_label_from_pr(repo, head_pr.pull_id, label)
else:
# `gather_auto_mergeable_prs` is responsible for this case.
result = False
if isinstance(result, CannotAutomergeError):
cannot_merge_pr(head_pr, result)
def label_duty_cycle(repo: GithubRepository):
"""Checks and applies size labeling operations."""
open_prs = list_open_pull_requests(repo)
size_unlabeled_prs = [pr for pr in open_prs if not pr.marked_size]
for pr in size_unlabeled_prs:
full_pr_data = PullRequestDetails.from_github(repo, pr.pull_id)
new_label = get_pr_size_label(full_pr_data.tot_changes)
log(f'Adding size label {new_label} to #{full_pr_data.pull_id} ({full_pr_data.title!r}).')
add_labels_to_pr(repo, pr.pull_id, new_label)
def indent(text: str) -> str:
return ' ' + text.replace('\n', '\n ')
def main():
access_token = os.getenv(ACCESS_TOKEN_ENV_VARIABLE)
if not access_token:
project_id = 'cirq-infra'
print(f'{ACCESS_TOKEN_ENV_VARIABLE} not set. Trying secret manager.', file=sys.stderr)
client = secretmanager_v1beta1.SecretManagerServiceClient()
secret_name = f'projects/{project_id}/secrets/cirq-bot-api-key/versions/1'
response = client.access_secret_version(name=secret_name)
access_token = response.payload.data.decode('UTF-8')
repo = GithubRepository(
organization=GITHUB_REPO_ORGANIZATION, name=GITHUB_REPO_NAME, access_token=access_token
)
log('Watching for automergeable PRs.')
problem_seen_times: Dict[int, datetime.datetime] = {}
while True:
try:
merge_duty_cycle(repo, problem_seen_times)
label_duty_cycle(repo)
except Exception: # Anything but a keyboard interrupt / system exit.
traceback.print_exc()
wait_for_polling_period()
if __name__ == '__main__':
main()
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class sslpolicy(base_resource) :
""" Configuration for SSL policy resource. """
def __init__(self) :
self._name = ""
self._rule = ""
self._reqaction = ""
self._action = ""
self._undefaction = ""
self._comment = ""
self._hits = 0
self._undefhits = 0
self._description = ""
self._policytype = ""
self.___count = 0
@property
def name(self) :
ur"""Name for the new SSL policy. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the policy is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my policy" or 'my policy').<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name for the new SSL policy. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the policy is created.
The following requirement applies only to the NetScaler CLI:
If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my policy" or 'my policy').<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def rule(self) :
ur"""Expression, against which traffic is evaluated. Written in the classic or default syntax.
Note:
Maximum length of a string literal in the expression is 255 characters. A longer string can be split into smaller strings of up to 255 characters each, and the smaller strings concatenated with the + operator. For example, you can create a 500-character string as follows: '"<string of 255 characters>" + "<string of 245 characters>"'
(Classic expressions are not supported in the cluster build.)
The following requirements apply only to the NetScaler CLI:
* If the expression includes one or more spaces, enclose the entire expression in double quotation marks.
* If the expression itself includes double quotation marks, escape the quotations by using the \ character.
* Alternatively, you can use single quotation marks to enclose the rule, in which case you do not have to escape the double quotation marks.
"""
try :
return self._rule
except Exception as e:
raise e
@rule.setter
def rule(self, rule) :
ur"""Expression, against which traffic is evaluated. Written in the classic or default syntax.
Note:
Maximum length of a string literal in the expression is 255 characters. A longer string can be split into smaller strings of up to 255 characters each, and the smaller strings concatenated with the + operator. For example, you can create a 500-character string as follows: '"<string of 255 characters>" + "<string of 245 characters>"'
(Classic expressions are not supported in the cluster build.)
The following requirements apply only to the NetScaler CLI:
* If the expression includes one or more spaces, enclose the entire expression in double quotation marks.
* If the expression itself includes double quotation marks, escape the quotations by using the \ character.
* Alternatively, you can use single quotation marks to enclose the rule, in which case you do not have to escape the double quotation marks.
"""
try :
self._rule = rule
except Exception as e:
raise e
@property
def reqaction(self) :
ur"""The name of the action to be performed on the request. Refer to 'add ssl action' command to add a new action. Builtin actions like NOOP, RESET, DROP, CLIENTAUTH and NOCLIENTAUTH are also allowed.<br/>Minimum length = 1.
"""
try :
return self._reqaction
except Exception as e:
raise e
@reqaction.setter
def reqaction(self, reqaction) :
ur"""The name of the action to be performed on the request. Refer to 'add ssl action' command to add a new action. Builtin actions like NOOP, RESET, DROP, CLIENTAUTH and NOCLIENTAUTH are also allowed.<br/>Minimum length = 1
"""
try :
self._reqaction = reqaction
except Exception as e:
raise e
@property
def action(self) :
ur"""Name of the built-in or user-defined action to perform on the request. Available built-in actions are NOOP, RESET, DROP, CLIENTAUTH, and NOCLIENTAUTH.
"""
try :
return self._action
except Exception as e:
raise e
@action.setter
def action(self, action) :
ur"""Name of the built-in or user-defined action to perform on the request. Available built-in actions are NOOP, RESET, DROP, CLIENTAUTH, and NOCLIENTAUTH.
"""
try :
self._action = action
except Exception as e:
raise e
@property
def undefaction(self) :
ur"""Name of the action to be performed when the result of rule evaluation is undefined. Possible values for control policies: CLIENTAUTH, NOCLIENTAUTH, NOOP, RESET, DROP. Possible values for data policies: NOOP, RESET or DROP.
"""
try :
return self._undefaction
except Exception as e:
raise e
@undefaction.setter
def undefaction(self, undefaction) :
ur"""Name of the action to be performed when the result of rule evaluation is undefined. Possible values for control policies: CLIENTAUTH, NOCLIENTAUTH, NOOP, RESET, DROP. Possible values for data policies: NOOP, RESET or DROP.
"""
try :
self._undefaction = undefaction
except Exception as e:
raise e
@property
def comment(self) :
ur"""Any comments associated with this policy.
"""
try :
return self._comment
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
ur"""Any comments associated with this policy.
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def hits(self) :
ur"""Number of hits for this policy.
"""
try :
return self._hits
except Exception as e:
raise e
@property
def undefhits(self) :
ur"""Number of Undef hits.
"""
try :
return self._undefhits
except Exception as e:
raise e
@property
def description(self) :
ur"""Description of the policy.
"""
try :
return self._description
except Exception as e:
raise e
@property
def policytype(self) :
ur""".<br/>Possible values = Classic Policy, Advanced Policy.
"""
try :
return self._policytype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(sslpolicy_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.sslpolicy
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add sslpolicy.
"""
try :
if type(resource) is not list :
addresource = sslpolicy()
addresource.name = resource.name
addresource.rule = resource.rule
addresource.reqaction = resource.reqaction
addresource.action = resource.action
addresource.undefaction = resource.undefaction
addresource.comment = resource.comment
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ sslpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].rule = resource[i].rule
addresources[i].reqaction = resource[i].reqaction
addresources[i].action = resource[i].action
addresources[i].undefaction = resource[i].undefaction
addresources[i].comment = resource[i].comment
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete sslpolicy.
"""
try :
if type(resource) is not list :
deleteresource = sslpolicy()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ sslpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ sslpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update sslpolicy.
"""
try :
if type(resource) is not list :
updateresource = sslpolicy()
updateresource.name = resource.name
updateresource.rule = resource.rule
updateresource.action = resource.action
updateresource.undefaction = resource.undefaction
updateresource.comment = resource.comment
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ sslpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].rule = resource[i].rule
updateresources[i].action = resource[i].action
updateresources[i].undefaction = resource[i].undefaction
updateresources[i].comment = resource[i].comment
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of sslpolicy resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = sslpolicy()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ sslpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ sslpolicy() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the sslpolicy resources that are configured on netscaler.
"""
try :
if not name :
obj = sslpolicy()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = sslpolicy()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [sslpolicy() for _ in range(len(name))]
obj = [sslpolicy() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = sslpolicy()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of sslpolicy resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = sslpolicy()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the sslpolicy resources configured on NetScaler.
"""
try :
obj = sslpolicy()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of sslpolicy resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = sslpolicy()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Policytype:
Classic_Policy = "Classic Policy"
Advanced_Policy = "Advanced Policy"
class sslpolicy_response(base_response) :
def __init__(self, length=1) :
self.sslpolicy = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.sslpolicy = [sslpolicy() for _ in range(length)]
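# Illustrative usage sketch (not part of the generated SDK; the host,
# credentials and rule expression below are hypothetical):
#
#     from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#
#     client = nitro_service("10.0.0.1", "http")   # hypothetical NetScaler address
#     client.login("nsroot", "nsroot")
#     policy = sslpolicy()
#     policy.name = "demo_pol"
#     policy.rule = "CLIENT.SSL.CLIENT_CERT.EXISTS"  # hypothetical expression
#     policy.action = "NOOP"
#     sslpolicy.add(client, policy)
#     fetched = sslpolicy.get(client, "demo_pol")
#     client.logout()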
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cql3handling import simple_cql_types
class CQLHelpTopics(object):
def get_help_topics(self):
return [t[5:] for t in dir(self) if t.startswith('help_')]
def print_help_topic(self, topic):
getattr(self, 'help_' + topic.lower())()
def help_types(self):
print "\n CQL types recognized by this version of cqlsh:\n"
for t in simple_cql_types:
print ' ' + t
print """
For information on the various recognizable input formats for these
types, or on controlling the formatting of cqlsh query output, see
one of the following topics:
HELP TIMESTAMP_INPUT
HELP DATE_INPUT
HELP TIME_INPUT
HELP BLOB_INPUT
HELP UUID_INPUT
HELP BOOLEAN_INPUT
HELP INT_INPUT
HELP TEXT_OUTPUT
HELP TIMESTAMP_OUTPUT
"""
def help_timestamp_input(self):
print """
Timestamp input
CQL supports any of the following ISO 8601 formats for timestamp
specification:
yyyy-mm-dd HH:mm
yyyy-mm-dd HH:mm:ss
yyyy-mm-dd HH:mmZ
yyyy-mm-dd HH:mm:ssZ
yyyy-mm-dd'T'HH:mm
yyyy-mm-dd'T'HH:mmZ
yyyy-mm-dd'T'HH:mm:ss
yyyy-mm-dd'T'HH:mm:ssZ
yyyy-mm-dd
yyyy-mm-ddZ
The Z in these formats refers to an RFC-822 4-digit time zone,
expressing the time zone's difference from UTC. For example, a
timestamp in Pacific Standard Time might be given thus:
2012-01-20 16:14:12-0800
If no time zone is supplied, the current time zone for the Cassandra
server node will be used.
"""
def help_date_input(self):
print """
Date input
CQL supports the following format for date specification:
yyyy-mm-dd
"""
def help_time_input(self):
print """
Time input
CQL supports the following format for time specification:
HH:MM:SS
HH:MM:SS.mmm
HH:MM:SS.mmmuuu
HH:MM:SS.mmmuuunnn
"""
def help_blob_input(self):
print """
Blob input
CQL blob data must be specified in a string literal as hexadecimal
data. Example: to store the ASCII values for the characters in the
string "CQL", use '43514c'.
"""
def help_uuid_input(self):
print """
UUID input
UUIDs may be specified in CQL using 32 hexadecimal characters,
split up using dashes in the standard UUID format:
XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
"""
def help_boolean_input(self):
print """
Boolean input
CQL accepts the strings 'true' and 'false' (case insensitive)
as input for boolean types.
"""
def help_int_input(self):
print """
Integer input
CQL accepts the following integer types:
tinyint - 1-byte signed integer
smallint - 2-byte signed integer
int - 4-byte signed integer
bigint - 8-byte signed integer
"""
def help_timestamp_output(self):
print """
Timestamp output
Cqlsh will display timestamps in the following format by default:
yyyy-mm-dd HH:mm:ssZ
which is a format acceptable as CQL timestamp input as well.
The output format can be changed by setting 'time_format' property
in the [ui] section of .cqlshrc file.
"""
def help_text_output(self):
print """
Textual output
When control characters, or other characters which can't be encoded
in your current locale, are found in values of 'text' or 'ascii'
types, it will be shown as a backslash escape. If color is enabled,
any such backslash escapes will be shown in a different color from
the surrounding text.
Unicode code points in your data will be output intact, if the
encoding for your locale is capable of decoding them. If you prefer
that non-ascii characters be shown with Python-style "\\uABCD"
escape sequences, invoke cqlsh with an ASCII locale (for example,
by setting the $LANG environment variable to "C").
"""
help_ascii_output = help_text_output
def help_create_index(self):
print """
CREATE INDEX [<indexname>] ON <cfname> ( <colname> );
A CREATE INDEX statement is used to create a new, automatic secondary
index on the given CQL table, for the named column. A name for the
index itself can be specified before the ON keyword, if desired. A
single column name must be specified inside the parentheses. It is not
necessary for the column to exist on any current rows (Cassandra is
schema-optional), but the column must already have a type (specified
during the CREATE TABLE, or added afterwards with ALTER TABLE).
"""
def help_drop(self):
print """
There are different variants of DROP. For more information, see
one of the following:
HELP DROP_KEYSPACE;
HELP DROP_TABLE;
HELP DROP_INDEX;
HELP DROP_FUNCTION;
HELP DROP_AGGREGATE;
"""
def help_drop_keyspace(self):
print """
DROP KEYSPACE <keyspacename>;
A DROP KEYSPACE statement results in the immediate, irreversible
removal of a keyspace, including all column families in it, and all
data contained in those column families.
"""
def help_drop_table(self):
print """
DROP TABLE <tablename>;
A DROP TABLE statement results in the immediate, irreversible
removal of a CQL table and the underlying column family, including all
data contained in it.
"""
help_drop_columnfamily = help_drop_table
def help_drop_index(self):
print """
DROP INDEX <indexname>;
A DROP INDEX statement is used to drop an existing secondary index.
"""
def help_drop_function(self):
print """
DROP FUNCTION ( IF EXISTS )?
( <keyspace> '.' )? <function-name>
( '(' <arg-type> ( ',' <arg-type> )* ')' )?
DROP FUNCTION statement removes a function created using CREATE FUNCTION.
You must specify the argument types (signature) of the function to drop if there
are multiple functions with the same name but a different signature
(overloaded functions).
DROP FUNCTION with the optional IF EXISTS keywords drops a function if it exists.
"""
def help_drop_aggregate(self):
print """
DROP AGGREGATE ( IF EXISTS )?
( <keyspace> '.' )? <aggregate-name>
( '(' <arg-type> ( ',' <arg-type> )* ')' )?
The DROP AGGREGATE statement removes an aggregate created using CREATE AGGREGATE.
You must specify the argument types of the aggregate to drop if there are multiple
aggregates with the same name but a different signature (overloaded aggregates).
DROP AGGREGATE with the optional IF EXISTS keywords drops an aggregate if it exists,
and does nothing if a function with the signature does not exist.
Signatures for user-defined aggregates follow the same rules as for
user-defined functions.
"""
def help_truncate(self):
print """
TRUNCATE <tablename>;
TRUNCATE accepts a single argument for the table name, and permanently
removes all data from it.
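For example, to permanently remove all rows from a (hypothetical) table:
TRUNCATE user_events;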
"""
def help_create(self):
print """
There are different variants of CREATE. For more information, see
one of the following:
HELP CREATE_KEYSPACE;
HELP CREATE_TABLE;
HELP CREATE_INDEX;
HELP CREATE_FUNCTION;
HELP CREATE_AGGREGATE;
"""
def help_use(self):
print """
USE <keyspacename>;
Tells cqlsh and the connected Cassandra instance that you will be
working in the given keyspace. All subsequent operations on tables
or indexes will be in the context of this keyspace, unless otherwise
specified, until another USE command is issued or the connection
terminates.
As always, when a keyspace name does not work as a normal identifier or
number, it can be quoted using double quotes.
"""
def help_create_aggregate(self):
print """
CREATE ( OR REPLACE )? AGGREGATE ( IF NOT EXISTS )?
( <keyspace> '.' )? <aggregate-name>
'(' <arg-type> ( ',' <arg-type> )* ')'
SFUNC ( <keyspace> '.' )? <state-functionname>
STYPE <state-type>
( FINALFUNC ( <keyspace> '.' )? <final-functionname> )?
( INITCOND <init-cond> )?
CREATE AGGREGATE creates or replaces a user-defined aggregate.
CREATE AGGREGATE with the optional OR REPLACE keywords either creates an aggregate
or replaces an existing one with the same signature. A CREATE AGGREGATE without
OR REPLACE fails if an aggregate with the same signature already exists.
CREATE AGGREGATE with the optional IF NOT EXISTS keywords creates an aggregate
only if it does not already exist.
OR REPLACE and IF NOT EXISTS cannot be used together.
Aggregates belong to a keyspace. If no keyspace is specified in <aggregate-name>, the
current keyspace is used (i.e. the keyspace specified using the USE statement). It is
not possible to create a user-defined aggregate in one of the system keyspaces.
Signatures for user-defined aggregates follow the same rules as for
user-defined functions.
STYPE defines the type of the state value and must be specified.
The optional INITCOND defines the initial state value for the aggregate. It defaults
to null. A non-null INITCOND must be specified for state functions that are declared
with RETURNS NULL ON NULL INPUT.
SFUNC references an existing function to be used as the state modifying function. The
type of first argument of the state function must match STYPE. The remaining argument
types of the state function must match the argument types of the aggregate function.
State is not updated for state functions declared with RETURNS NULL ON NULL INPUT and
called with null.
The optional FINALFUNC is called just before the aggregate result is returned. It must
take only one argument with type STYPE. The return type of the FINALFUNC may be a
different type. A final function declared with RETURNS NULL ON NULL INPUT means that
the aggregate's return value will be null, if the last state is null.
If no FINALFUNC is defined, the overall return type of the aggregate function is STYPE.
If a FINALFUNC is defined, it is the return type of that function.
"""
def help_create_function(self):
print """
CREATE ( OR REPLACE )? FUNCTION ( IF NOT EXISTS )?
( <keyspace> '.' )? <function-name>
'(' <arg-name> <arg-type> ( ',' <arg-name> <arg-type> )* ')'
( CALLED | RETURNS NULL ) ON NULL INPUT
RETURNS <type>
LANGUAGE <language>
AS <body>
CREATE FUNCTION creates or replaces a user-defined function.
Signatures are used to distinguish individual functions. The signature consists of:
The fully qualified function name - i.e. keyspace plus function-name
The concatenated list of all argument types
Note that keyspace names, function names and argument types are subject to the default
naming conventions and case-sensitivity rules.
CREATE FUNCTION with the optional OR REPLACE keywords either creates a function or
replaces an existing one with the same signature. A CREATE FUNCTION without OR REPLACE
fails if a function with the same signature already exists.
Behavior on invocation with null values must be defined for each function. There are
two options:
RETURNS NULL ON NULL INPUT declares that the function will always return null if any
of the input arguments is null. CALLED ON NULL INPUT declares that the function will
always be executed.
If the optional IF NOT EXISTS keywords are used, the function will only be created if
another function with the same signature does not exist.
OR REPLACE and IF NOT EXISTS cannot be used together.
Functions belong to a keyspace. If no keyspace is specified in <function-name>, the
current keyspace is used (i.e. the keyspace specified using the USE statement).
It is not possible to create a user-defined function in one of the system keyspaces.
"""
def help_create_table(self):
print """
CREATE TABLE <cfname> ( <colname> <type> PRIMARY KEY [,
<colname> <type> [, ...]] )
[WITH <optionname> = <val> [AND <optionname> = <val> [...]]];
CREATE TABLE statements create a new CQL table under the current
keyspace. Valid table names are strings of alphanumeric characters and
underscores, which begin with a letter.
Each table requires a primary key, which will correspond to the
underlying columnfamily key and key validator. It's important to
note that the key type you use must be compatible with the partitioner
in use. For example, OrderPreservingPartitioner and
CollatingOrderPreservingPartitioner both require UTF-8 keys.
In cql3 mode, a table can have multiple columns composing the primary
key (see HELP COMPOUND_PRIMARY_KEYS).
For more information, see one of the following:
HELP CREATE_TABLE_TYPES;
HELP CREATE_TABLE_OPTIONS;
"""
help_create_columnfamily = help_create_table
def help_compound_primary_keys(self):
print """
CREATE TABLE <cfname> ( <partition_key> <type>, <clustering_key1> <type>, <clustering_key2> <type>
[, ...], PRIMARY KEY (<partition_key>, <clustering_key1>, <clustering_key2>) );
CREATE TABLE allows a primary key composed of multiple columns. When this is the case, specify
the columns that take part in the compound key after all columns have been specified.
, PRIMARY KEY( <key1>, <key2>, ... )
The partitioning key itself can be a compound key, in which case the first element of the PRIMARY KEY
phrase should be parenthesized, as
PRIMARY KEY ((<partition_key_part1>, <partition_key_part2>), <clustering_key>)
"""
def help_create_table_types(self):
print """
CREATE TABLE: Specifying column types
CREATE ... (KEY <type> PRIMARY KEY,
othercol <type>) ...
It is possible to assign columns a type during table creation. Columns
configured with a type are validated accordingly when a write occurs,
and intelligent CQL drivers and interfaces will be able to decode the
column values correctly when receiving them. Column types are specified
as a parenthesized, comma-separated list of column term and type pairs.
See HELP TYPES; for the list of recognized types.
"""
help_create_columnfamily_types = help_create_table_types
def help_create_table_options(self):
print """
CREATE TABLE: Specifying columnfamily options
CREATE TABLE blah (...)
WITH optionname = val AND otheroption = val2;
A number of optional keyword arguments can be supplied to control the
configuration of a new CQL table, such as the size of the associated
row and key caches for the underlying Cassandra columnfamily. Consult
your CQL reference for the complete list of options and possible
values.
"""
help_create_columnfamily_options = help_create_table_options
def help_alter_alter(self):
print """
ALTER TABLE: altering existing typed columns
ALTER TABLE addamsFamily ALTER lastKnownLocation TYPE uuid;
ALTER TABLE ... ALTER changes the expected storage type for a column.
The column must already have a type in the column family metadata. The
column may or may not already exist in current rows-- but be aware that
no validation of existing data is done. The bytes stored in values for
that column will remain unchanged, and if existing data is not
deserializable according to the new type, this may cause your CQL
driver or interface to report errors.
"""
def help_alter_add(self):
print """
ALTER TABLE: adding a typed column
ALTER TABLE addamsFamily ADD gravesite varchar;
The ALTER TABLE ... ADD variant adds a typed column to a column
family. The column must not already have a type in the column family
metadata. See the warnings on HELP ALTER_ALTER regarding the lack of
validation of existing data; they apply here as well.
"""
def help_alter_drop(self):
print """
ALTER TABLE: dropping a typed column
ALTER TABLE addamsFamily DROP gender;
An ALTER TABLE ... DROP statement removes the type of a column
from the column family metadata. Note that this does _not_ remove the
column from current rows; it just removes the metadata saying that the
bytes stored under that column are expected to be deserializable
according to a certain type.
"""
def help_alter_with(self):
print """
ALTER TABLE: changing column family properties
ALTER TABLE addamsFamily WITH comment = 'Glad to be here!'
AND read_repair_chance = 0.2;
An ALTER TABLE ... WITH statement makes adjustments to the
table properties, as defined when the table was created (see
HELP CREATE_TABLE_OPTIONS and your Cassandra documentation for
information about the supported parameter names and values).
"""
def help_delete_columns(self):
print """
DELETE: specifying columns
DELETE col1, col2, col3 FROM ...
Following the DELETE keyword is an optional comma-delimited list of
column name terms. When no column names are given, the remove applies
to the entire row(s) matched by the WHERE clause.
When column names do not parse as valid CQL identifiers, they can be
quoted in single quotes (CQL 2) or double quotes (CQL 3).
"""
def help_delete_where(self):
print """
DELETE: specifying rows
DELETE ... WHERE keycol = 'some_key_value';
DELETE ... WHERE keycol1 = 'val1' AND keycol2 = 'val2';
DELETE ... WHERE keycol IN (key1, key2);
The WHERE clause is used to determine to which row(s) a DELETE
applies. The first form allows the specification of a precise row
by specifying a particular primary key value (if the primary key has
multiple columns, values for each must be given). The second form
allows a list of key values to be specified using the IN operator
and a parenthesized list of comma-delimited key values.
"""
def help_update_set(self):
print """
UPDATE: Specifying Columns and Row
UPDATE ... SET name1 = value1, name2 = value2
WHERE <key> = keyname;
UPDATE ... SET name1 = value1, name2 = value2
WHERE <key> IN ('<key1>', '<key2>', ...)
Rows are created or updated by supplying column names and values in
term assignment format. Multiple columns can be set by separating the
name/value pairs using commas.
"""
def help_update_counters(self):
print """
UPDATE: Updating Counter Columns
UPDATE ... SET name1 = name1 + <value> ...
UPDATE ... SET name1 = name1 - <value> ...
Counter columns can be incremented or decremented by an arbitrary
numeric value though the assignment of an expression that adds or
subtracts the value.
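For example, to increment a (hypothetical) counter column named 'hits':
UPDATE page_views SET hits = hits + 1 WHERE page = '/index';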
"""
def help_update_where(self):
print """
UPDATE: Selecting rows to update
UPDATE ... WHERE <keyname> = <keyval>;
UPDATE ... WHERE <keyname> IN (<keyval1>, <keyval2>, ...);
UPDATE ... WHERE <keycol1> = <keyval1> AND <keycol2> = <keyval2>;
Each update statement requires a precise set of keys to be specified
using a WHERE clause.
If the table's primary key consists of multiple columns, an explicit
value must be given for each for the UPDATE statement to make sense.
"""
def help_select_table(self):
print """
SELECT: Specifying Table
SELECT ... FROM [<keyspace>.]<tablename> ...
The FROM clause is used to specify the CQL table applicable to a SELECT
query. The keyspace in which the table exists can optionally be
specified along with the table name, separated by a dot (.). This will
not change the current keyspace of the session (see HELP USE).
"""
help_select_columnfamily = help_select_table
def help_select_where(self):
print """
SELECT: Filtering rows
SELECT ... WHERE <key> = keyname AND name1 = value1
SELECT ... WHERE <key> >= startkey AND <key> <= endkey AND name1 = value1
SELECT ... WHERE <key> IN ('<key>', '<key>', '<key>', ...)
The WHERE clause provides for filtering the rows that appear in
results. The clause can filter on a key name, or range of keys, and in
the case of indexed columns, on column values. Key filters are
specified using the KEY keyword or key alias name, a relational
operator (one of =, >, >=, <, and <=), and a term value. When terms
appear on both sides of a relational operator it is assumed the filter
applies to an indexed column. With column index filters, the term on
the left of the operator is the name, the term on the right is the
value to filter _on_.
Note: The greater-than and less-than operators (> and <) result in key
ranges that are inclusive of the terms. There is no supported notion of
"strictly" greater-than or less-than; these operators are merely
supported as aliases to >= and <=.
"""
def help_select_limit(self):
print """
SELECT: Limiting results
SELECT ... WHERE <clause> [LIMIT n] ...
Limiting the number of rows returned can be achieved by adding the
LIMIT option to a SELECT expression. LIMIT defaults to 10,000 when left
unset.
"""
class CQL3HelpTopics(CQLHelpTopics):
def help_create_keyspace(self):
print """
CREATE KEYSPACE <ksname>
WITH replication = {'class':'<strategy>' [,'<option>':<val>]};
The CREATE KEYSPACE statement creates a new top-level namespace (aka
"keyspace"). Valid names are any string constructed of alphanumeric
characters and underscores. Names which do not work as valid
identifiers or integers should be quoted as string literals. Properties
such as replication strategy and count are specified during creation
as key-value pairs in the 'replication' map:
class [required]: The name of the replication strategy class
which should be used for the new keyspace. Some often-used classes
are SimpleStrategy and NetworkTopologyStrategy.
other options [optional]: Most strategies require additional arguments
which can be supplied as key-value pairs in the 'replication' map.
Examples:
To create a keyspace with NetworkTopologyStrategy and strategy option of "DC1"
with a value of "1" and "DC2" with a value of "2" you would use
the following statement:
CREATE KEYSPACE <ksname>
WITH replication = {'class':'NetworkTopologyStrategy', 'DC1':1, 'DC2':2};
To create a keyspace with SimpleStrategy and "replication_factor" option
with a value of "3" you would use this statement:
CREATE KEYSPACE <ksname>
WITH replication = {'class':'SimpleStrategy', 'replication_factor':3};
"""
def help_begin(self):
print """
BEGIN [UNLOGGED|COUNTER] BATCH [USING TIMESTAMP <timestamp>]
<insert or update or delete statement> ;
[ <another insert or update or delete statement ;
[...]]
APPLY BATCH;
BATCH supports setting a client-supplied optional global timestamp
which will be used for each of the operations included in the batch.
Only data modification statements (specifically, UPDATE, INSERT,
and DELETE) are allowed in a BATCH statement. BATCH is _not_ an
analogue for SQL transactions.
_NOTE: Counter mutations are allowed only within COUNTER batches._
_NOTE: While there are no isolation guarantees, UPDATE queries are
atomic within a given record._
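For example, a batch of two inserts sharing one timestamp (the table and
column names are illustrative):
BEGIN BATCH USING TIMESTAMP 1351620509603
INSERT INTO users (user_id, name) VALUES (1, 'a');
INSERT INTO users (user_id, name) VALUES (2, 'b');
APPLY BATCH;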
"""
help_apply = help_begin
def help_select(self):
print """
SELECT <selectExpr>
FROM [<keyspace>.]<table>
[WHERE <clause>]
[ORDER BY <colname> [DESC]]
[LIMIT m];
SELECT is used to read one or more records from a CQL table. It returns
a set of rows matching the selection criteria specified.
For more information, see one of the following:
HELP SELECT_EXPR
HELP SELECT_TABLE
HELP SELECT_WHERE
HELP SELECT_LIMIT
"""
def help_delete(self):
print """
DELETE [<col1> [, <col2>, ...] FROM [<keyspace>.]<tablename>
[USING TIMESTAMP <timestamp>]
WHERE <keyname> = <keyvalue>;
A DELETE is used to perform the removal of one or more columns from one
or more rows. Each DELETE statement requires a precise set of row keys
to be specified using a WHERE clause and the KEY keyword or key alias.
For more information, see one of the following:
HELP DELETE_USING
HELP DELETE_COLUMNS
HELP DELETE_WHERE
"""
def help_delete_using(self):
print """
DELETE: the USING clause
DELETE ... USING TIMESTAMP <timestamp>;
<timestamp> defines the optional timestamp for the new tombstone
record. It must be an integer. Cassandra timestamps are generally
specified using milliseconds since the Unix epoch (1970-01-01 00:00:00
UTC).
"""
def help_update(self):
print """
UPDATE [<keyspace>.]<columnFamily>
[USING [TIMESTAMP <timestamp>]
[AND TTL <timeToLive>]]
SET name1 = value1, name2 = value2 WHERE <keycol> = keyval
[IF EXISTS];
An UPDATE is used to write one or more columns to a record in a table.
No results are returned. The record's primary key must be completely
and uniquely specified; that is, if the primary key includes multiple
columns, all must be explicitly given in the WHERE clause.
Statements begin with the UPDATE keyword followed by the name of the
table to be updated.
For more information, see one of the following:
HELP UPDATE_USING
HELP UPDATE_SET
HELP UPDATE_COUNTERS
HELP UPDATE_WHERE
"""
def help_update_using(self):
print """
UPDATE: the USING clause
UPDATE ... USING TIMESTAMP <timestamp>;
UPDATE ... USING TTL <timeToLive>;
The USING clause allows setting of certain query and data parameters.
If multiple parameters need to be set, these may be joined using AND.
Example:
UPDATE ... USING TTL 43200 AND TIMESTAMP 1351620509603
<timestamp> defines the optional timestamp for the new column value(s).
It must be an integer. Cassandra timestamps are generally specified
using milliseconds since the Unix epoch (1970-01-01 00:00:00 UTC).
<timeToLive> defines the optional time to live (TTL) in seconds for the
new column value(s). It must be an integer.
"""
def help_insert(self):
print """
INSERT INTO [<keyspace>.]<tablename>
( <colname1>, <colname2> [, <colname3> [, ...]] )
VALUES ( <colval1>, <colval2> [, <colval3> [, ...]] )
[USING TIMESTAMP <timestamp>]
[AND TTL <timeToLive>];
An INSERT is used to write one or more columns to a record in a
CQL table. No results are returned.
Values for all component columns in the table's primary key must
be given. Also, there must be at least one non-primary-key column
specified (Cassandra rows are not considered to exist with only
a key and no associated columns).
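For example, given a (hypothetical) 'users' table keyed by 'user_id':
INSERT INTO users (user_id, name) VALUES (42, 'jdoe');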
Unlike in SQL, the semantics of INSERT and UPDATE are identical.
In either case a record is created if none existed before, and
updated when it does. For more information, see one of the
following:
HELP UPDATE
HELP UPDATE_USING
"""
def help_select_expr(self):
print """
SELECT: Specifying Columns
SELECT name1, name2, name3 FROM ...
SELECT COUNT(*) FROM ...
The SELECT expression determines which columns will appear in the
results and takes the form of a comma separated list of names.
It is worth noting that unlike the projection in a SQL SELECT, there is
no guarantee that the results will contain all of the columns
specified. This is because Cassandra is schema-less and there are no
guarantees that a given column exists.
When the COUNT aggregate function is specified as a column to fetch, a
single row will be returned, with a single column named "count" whose
value is the number of rows from the pre-aggregation resultset.
Currently, COUNT is the only function supported by CQL.
"""
def help_alter_drop(self):
print """
ALTER TABLE: dropping a typed column
ALTER TABLE addamsFamily DROP gender;
An ALTER TABLE ... DROP statement removes the type of a column
from the column family metadata. Dropped columns will immediately
become unavailable in the queries and will not be included in
compacted sstables in the future. If a column is re-added, queries
won't return values written before the column was last dropped.
It is assumed that timestamps represent actual time, so if this
is not the case, you should NOT re-add previously dropped columns.
Columns can't be dropped from tables defined with COMPACT STORAGE.
"""
def help_create(self):
super(CQL3HelpTopics, self).help_create()
print """ HELP CREATE_USER;
HELP CREATE_ROLE;
"""
def help_alter(self):
print """
ALTER TABLE <tablename> ALTER <columnname> TYPE <type>;
ALTER TABLE <tablename> ADD <columnname> <type>;
ALTER TABLE <tablename> RENAME <columnname> TO <columnname>
[AND <columnname> TO <columnname>]
ALTER TABLE <tablename> WITH <optionname> = <val> [AND <optionname> = <val> [...]];
An ALTER statement is used to manipulate table metadata. It allows you
to add new typed columns, drop existing columns, change the data
storage type of existing columns, or change table properties.
No results are returned.
See one of the following for more information:
HELP ALTER_ALTER;
HELP ALTER_ADD;
HELP ALTER_DROP;
HELP ALTER_RENAME;
HELP ALTER_WITH;
"""
def help_alter_rename(self):
print """
ALTER TABLE: renaming a column
ALTER TABLE <tablename> RENAME <columnname> TO <columnname>
[AND <columnname> TO <columnname>]
The ALTER TABLE ... RENAME variant renames a typed column in a column
family.
"""
def help_drop(self):
super(CQL3HelpTopics, self).help_drop()
print """ HELP DROP_USER;
HELP DROP_ROLE;
"""
def help_list(self):
print """
There are different variants of LIST. For more information, see
one of the following:
HELP LIST_USERS;
HELP LIST_PERMISSIONS;
"""
def help_create_user(self):
print """
CREATE USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER];
CREATE USER creates a new Cassandra user account.
Only superusers can issue CREATE USER requests.
To create a superuser account use SUPERUSER option (NOSUPERUSER is the default).
WITH PASSWORD clause should only be used with password-based authenticators,
e.g. PasswordAuthenticator, SimpleAuthenticator.
"""
def help_alter_user(self):
print """
ALTER USER <username> [WITH PASSWORD 'password'] [NOSUPERUSER | SUPERUSER];
Use ALTER USER to change a user's superuser status and/or password (only
with password-based authenticators).
Superusers can change a user's password or superuser status (except their own).
Users cannot change their own superuser status. Ordinary users can only change their
password (if the configured authenticator is password-based).
"""
def help_drop_user(self):
print """
DROP USER <username>;
DROP USER removes an existing user. You have to be logged in as a superuser
to issue a DROP USER statement. A user cannot drop themselves.
"""
def help_list_users(self):
print """
LIST USERS;
List existing users and their superuser status.
"""
def help_grant(self):
print """
GRANT (<permission> [PERMISSION] | ALL [PERMISSIONS])
ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>
TO [ROLE <rolename> | USER <username>]
Grant the specified permission (or all permissions) on a resource
to a role or user.
To be able to grant a permission on some resource you have to
have that permission yourself and also AUTHORIZE permission on it,
or on one of its parent resources.
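For example, to let a (hypothetical) user read one table:
GRANT SELECT ON ks1.users TO USER alice;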
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_revoke(self):
print """
REVOKE (<permission> [PERMISSION] | ALL [PERMISSIONS])
ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>
FROM [ROLE <rolename> | USER <username>]
Revokes the specified permission (or all permissions) on a resource
from a role or user.
To be able to revoke a permission on some resource you have to
have that permission yourself and also AUTHORIZE permission on it,
or on one of its parent resources.
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_list_permissions(self):
print """
LIST (<permission> [PERMISSION] | ALL [PERMISSIONS])
[ON ALL KEYSPACES
| KEYSPACE <keyspace>
| [TABLE] [<keyspace>.]<table>]
[OF [ROLE <rolename> | USER <username>]]
[NORECURSIVE]
Omitting the ON <resource> part will list permissions on ALL KEYSPACES,
every keyspace and table.
Omitting the OF [ROLE <rolename> | USER <username>] part will list the
permissions of all roles and users.
Omitting the NORECURSIVE specifier will list permissions of the resource
and all its parents (the table, the table's keyspace and ALL KEYSPACES).
See HELP PERMISSIONS for more info on the available permissions.
"""
def help_permissions(self):
print """
PERMISSIONS
Cassandra has 6 permissions:
ALTER: required for ALTER KEYSPACE, ALTER TABLE, CREATE INDEX, DROP INDEX
AUTHORIZE: required for GRANT, REVOKE
CREATE: required for CREATE KEYSPACE, CREATE TABLE
DROP: required for DROP KEYSPACE, DROP TABLE
MODIFY: required for INSERT, DELETE, UPDATE, TRUNCATE
SELECT: required for SELECT
"""
def help_create_role(self):
print """
CREATE ROLE <rolename>;
CREATE ROLE creates a new Cassandra role.
Only superusers can issue CREATE ROLE requests.
To create a superuser account use SUPERUSER option (NOSUPERUSER is the default).
"""
def help_drop_role(self):
print """
DROP ROLE <rolename>;
DROP ROLE removes an existing role. You have to be logged in as a superuser
to issue a DROP ROLE statement.
"""
def help_list_roles(self):
print """
LIST ROLES [OF [ROLE <rolename> | USER <username>] [NORECURSIVE]];
Only superusers can use the OF clause to list the roles granted to a role or user.
If a superuser omits the OF clause then all the created roles will be listed.
If a non-superuser calls LIST ROLES then the roles granted to that user are listed.
If NORECURSIVE is provided then only directly granted roles are listed.
"""
def help_grant_role(self):
print """
GRANT ROLE <rolename> TO [ROLE <rolename> | USER <username>]
Grant the specified role to another role or user. You have to be logged
in as superuser to issue a GRANT ROLE statement.
"""
def help_revoke_role(self):
print """
REVOKE ROLE <rolename> FROM [ROLE <rolename> | USER <username>]
Revoke the specified role from another role or user. You have to be logged
in as superuser to issue a REVOKE ROLE statement.
"""
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class MetricBaselineOperations(object):
"""MetricBaselineOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2017_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_uri, # type: str
metric_name, # type: str
timespan=None, # type: Optional[str]
interval=None, # type: Optional[datetime.timedelta]
aggregation=None, # type: Optional[str]
sensitivities=None, # type: Optional[str]
result_type=None, # type: Optional[Union[str, "_models.ResultType"]]
**kwargs # type: Any
):
# type: (...) -> "_models.BaselineResponse"
"""**Gets the baseline values for a specific metric**.
:param resource_uri: The identifier of the resource. It has the following structure:
subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/{providerName}/{resourceName}.
For example:
subscriptions/b368ca2f-e298-46b7-b0ab-012281956afa/resourceGroups/vms/providers/Microsoft.Compute/virtualMachines/vm1.
:type resource_uri: str
:param metric_name: The name of the metric to retrieve the baseline for.
:type metric_name: str
:param timespan: The timespan of the query. It is a string with the following format
'startDateTime_ISO/endDateTime_ISO'.
:type timespan: str
:param interval: The interval (i.e. timegrain) of the query.
:type interval: ~datetime.timedelta
:param aggregation: The aggregation type of the metric to retrieve the baseline for.
:type aggregation: str
:param sensitivities: The list of sensitivities (comma separated) to retrieve.
:type sensitivities: str
:param result_type: Allows retrieving only metadata of the baseline. On data request all
information is retrieved.
:type result_type: str or ~$(python-base-namespace).v2017_11_01_preview.models.ResultType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BaselineResponse, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2017_11_01_preview.models.BaselineResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BaselineResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
'metricName': self._serialize.url("metric_name", metric_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if timespan is not None:
query_parameters['timespan'] = self._serialize.query("timespan", timespan, 'str')
if interval is not None:
query_parameters['interval'] = self._serialize.query("interval", interval, 'duration')
if aggregation is not None:
query_parameters['aggregation'] = self._serialize.query("aggregation", aggregation, 'str')
if sensitivities is not None:
query_parameters['sensitivities'] = self._serialize.query("sensitivities", sensitivities, 'str')
if result_type is not None:
query_parameters['resultType'] = self._serialize.query("result_type", result_type, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('BaselineResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/baseline/{metricName}'} # type: ignore
def calculate_baseline(
self,
resource_uri, # type: str
time_series_information, # type: "_models.TimeSeriesInformation"
**kwargs # type: Any
):
# type: (...) -> "_models.CalculateBaselineResponse"
"""**Lists the baseline values for a resource**.
:param resource_uri: The identifier of the resource. It has the following structure:
subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/{providerName}/{resourceName}.
For example:
subscriptions/b368ca2f-e298-46b7-b0ab-012281956afa/resourceGroups/vms/providers/Microsoft.Compute/virtualMachines/vm1.
:type resource_uri: str
:param time_series_information: Information that needs to be specified to calculate a baseline
on a time series.
:type time_series_information: ~$(python-base-namespace).v2017_11_01_preview.models.TimeSeriesInformation
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CalculateBaselineResponse, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2017_11_01_preview.models.CalculateBaselineResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CalculateBaselineResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.calculate_baseline.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(time_series_information, 'TimeSeriesInformation')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CalculateBaselineResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
calculate_baseline.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/calculatebaseline'} # type: ignore
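# A minimal usage sketch for this operation group (assumptions: it is exposed
# as `metric_baseline` on a MonitorManagementClient and valid Azure
# credentials are available; all names below are illustrative, not confirmed
# by this file):
def fetch_cpu_baseline(subscription_id):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.monitor import MonitorManagementClient
    client = MonitorManagementClient(DefaultAzureCredential(), subscription_id)
    # Resource URI copied from the docstring example above.
    resource_uri = ("subscriptions/b368ca2f-e298-46b7-b0ab-012281956afa/"
                    "resourceGroups/vms/providers/Microsoft.Compute/virtualMachines/vm1")
    return client.metric_baseline.get(
        resource_uri=resource_uri,
        metric_name="Percentage CPU",
        timespan="2017-11-01T00:00:00Z/2017-11-02T00:00:00Z",
        aggregation="Average")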
|
|
import os
import unittest
import sys
if sys.version < '3':
from StringIO import StringIO
else:
from io import StringIO # NOQA
try:
import __builtin__
except ImportError:
import builtins
__builtin__ = builtins # NOQA
# Make sure we'll find the required files...
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, "xmltreenode"))
sys.path.append(os.path.dirname(__file__))
import xmlparser
def __outputDiff(string_a, string_b, index_a, pos):
""" Outputs differences in two strings, used by assertEqualString.
Raises ValueError
@param string_a First string
@param string_b Second string
@param index_a Index in first string
@param pos Position of difference
"""
msg = "String difference at %s:\n" % (index_a)
ba = (index_a - 20)
if ba < 0:
ba = 0
aa = (index_a + 10)
if aa > len(string_a):
aa = len(string_a)
ab = (index_a + 10)
if ab > len(string_b):
ab = len(string_b)
msg_a = string_a[ba:aa]
msg_b = string_b[ba:ab]
pos += msg_a.count('\n')
msg += "\n" + msg_a.replace('\n', '\\n') + "\n"
msg += msg_b.replace('\n', '\\n') + "\n"
if index_a < pos:
pos = index_a
msg += '-' * pos + '^'
raise ValueError(msg)
def assertEqualString(string_a, string_b, ignoreSpace=False, ignoreNewLine=False):
""" Asserts that given two strings are equal.
Returns silently when the strings are equal, otherwise raises an exception with verbose output.
@param string_a First string
@param string_b Second string
@param ignoreSpace When comparing strings, ignore all space characters
@param ignoreNewLine When comparing strings, ignore all new line characters
"""
if string_a is None and string_b is None:
return
if string_a is None:
raise ValueError('First string is None')
if string_b is None:
raise ValueError('Second string is None')
index_a = 0
if ignoreSpace:
string_a = string_a.replace(' ', '')
string_b = string_b.replace(' ', '')
if ignoreNewLine:
string_a = string_a.replace('\n', '')
string_b = string_b.replace('\n', '')
while index_a < len(string_a) and index_a < len(string_b):
if string_a[index_a] != string_b[index_a]:
__outputDiff(string_a, string_b, index_a, 20)
index_a += 1
if len(string_a) != len(string_b):
pos = min(len(string_a), len(string_b))
__outputDiff(string_a, string_b, index_a, pos)
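# Illustrative direct use of the helper above (raises ValueError on mismatch;
# the inputs here are made up for the example):
#
#   assertEqualString('<a>1</a>', '<a> 1 </a>', ignoreSpace=True)   # passes
#   assertEqualString('<a>1</a>', '<a>2</a>')                       # raises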
class TestXmlparser(unittest.TestCase):
def setUp(self):
self.dummyXML = """
<root>
<a>
<b></b>
</a>
<a myattr="c">
<c />
<c />
<c2 />
</a>
<d>
<e>1</e>
<e>2</e>
<e>3</e>
</d>
</root>
"""
def test_xmlparser_CustomXMLParser_getRoot(self):
iparse = xmlparser.CustomXMLParser()
res = iparse.load(self.dummyXML, sourceIsFile=False)
self.assertNotEqual(res, None)
root = iparse.getRoot()
self.assertEqual(root.getData(), "root")
self.assertEqual(root.numChildren(), 3)
def test_xmlparser_CustomXMLParser_getitem(self):
iparse = xmlparser.CustomXMLParser()
self.assertEqual(iparse["root"], None)
res = iparse.load(self.dummyXML, sourceIsFile=False)
self.assertNotEqual(res, None)
root = iparse["root"]
self.assertEqual(root["a"], "b")
self.assertEqual(root["d"].numChildren(), 3)
def test_xmlparser_CustomXMLParser_contains(self):
iparse = xmlparser.CustomXMLParser()
self.assertFalse("root" in iparse)
res = iparse.load(self.dummyXML, sourceIsFile=False)
self.assertNotEqual(res, None)
self.assertTrue("root" in iparse)
def test_xmlparser_start_and_end_basic(self):
data = """<tag1>
<tag2 a="1" b="2" c="3" d="4" />
<tag3 attr1="1234567890" attr2="abcd1234" />
</tag1>
"""
tag2_attr = {'b': '2', 'c': '3', 'd': '4', 'a': '1'}
tag3_attr = {'attr1': '1234567890', 'attr2': 'abcd1234'}
iparse = xmlparser.CustomXMLParser()
iparse.start('tag1', None)
iparse.end('tag1')
iparse.start('tag2', tag2_attr)
iparse.end('tag2')
iparse.start('tag3', tag3_attr)
iparse.end('tag3')
assertEqualString(iparse.getRoot().toString(), data, ignoreSpace=True)
def test_xmlparser_start_end_and_data_basic(self):
data = """<tag1>
<tag2 a="1" b="2" c="3" d="4">Hello!</tag2>
<tag3 attr1="1234567890" attr2="abcd1234">Something important here.</tag3>
</tag1>
"""
tag2_attr = {'b': '2', 'c': '3', 'd': '4', 'a': '1'}
tag3_attr = {'attr1': '1234567890', 'attr2': 'abcd1234'}
iparse = xmlparser.CustomXMLParser()
iparse.start('tag1', None)
iparse.end('tag1')
iparse.start('tag2', tag2_attr)
iparse.data("Hello!")
iparse.end('tag2')
iparse.start('tag3', tag3_attr)
iparse.data("Something important here.")
iparse.end('tag3')
assertEqualString(iparse.getRoot().toString(), data, ignoreSpace=True)
def test_xmlparser_load_singletag_xml(self):
singletag = """<first>
<tag name="%s" />
</first>
"""
iparse = xmlparser.CustomXMLParser()
res = iparse.load(singletag % 'tmp', sourceIsFile=False)
self.assertNotEqual(type(res), list)
self.assertEqual(len(res.getRoot()), 1)
self.assertEqual(len(res), len(res.getRoot()))
assertEqualString(res.getRoot().toString(), singletag % 'tmp', ignoreSpace=True)
def test_xmlparser_load_singletag_xml_with_comment_lines(self):
singletag = """<!-- THIS IS COMMENT -->
<first>
<tag name="tmp" />
<!-- ANOTHER COMMENT
WITH TWO LINES -->
</first>
"""
expected_output = """<first>
<!-- THIS IS COMMENT -->
<tag name="tmp" />
<!-- ANOTHER COMMENT
WITH TWO LINES -->
</first>
"""
iparse = xmlparser.CustomXMLParser()
res = iparse.load(singletag, sourceIsFile=False)
assertEqualString(res.getRoot().toString(), expected_output, ignoreSpace=True)
def test_xmlparser_load_multitag_xml_special_with_addDummy(self):
tag1 = """<first>
<tag args="tmp1" />
</first>
"""
tag2 = """<second>
<tag />
<tag name="tmp2" />
</second>
"""
multitag_special = tag1 + tag2
multitag_special_with_dummy = """<dummy>
<first>
<tag args="tmp1" />
</first>
<second>
<tag />
<tag name="tmp2" />
</second>
</dummy>
"""
parse = xmlparser.CustomXMLParser()
res = parse.load(multitag_special, sourceIsFile=False, addDummy=True)
self.assertNotEqual(type(res), list)
assertEqualString(res.getRoot().toString(), multitag_special_with_dummy, ignoreSpace=True)
res = res.getRoot().getChildren()
self.assertEqual(type(res), list)
self.assertEqual(len(res), 2)
self.assertEqual(res[0].getData(), "first")
self.assertEqual(res[1].getData(), "second")
assertEqualString(res[0].toString(), tag1, ignoreSpace=True)
assertEqualString(res[1].toString(), tag2, ignoreSpace=True)
def test_xmlparser_load_invalid_xml_string_raise_IOError(self):
singletag = """<first>
<tag name="
</first>
"""
try:
sys.stdout = StringIO()
iparse = xmlparser.CustomXMLParser()
self.assertRaisesRegexp(ValueError, 'Input is not valid XML: not well-formed', iparse.load, singletag, sourceIsFile=False)
except:
raise
finally:
sys.stdout = sys.__stdout__
def _raise_exception(self, ex):
raise ex
def _dummy_return(self, value):
return value
def test_xmlparser_load_invalid_xml_file_raise_IOError(self):
singletag = """<first>
<tag name=
</first>
"""
iparse = xmlparser.CustomXMLParser()
io = StringIO(singletag)
try:
orig_open = __builtin__.open
__builtin__.open = lambda filen, mode: self._dummy_return(io)
self.assertRaisesRegexp(ValueError, 'Input is not valid XML: dummy_file, not well-formed', iparse.load, 'dummy_file', sourceIsFile=True)
except:
raise
finally:
__builtin__.open = orig_open
def test_xmlparser_load_invalid_file_raise_IOError(self):
iparse = xmlparser.CustomXMLParser()
try:
orig_open = __builtin__.open
__builtin__.open = lambda filen, mode: self._raise_exception(IOError("Error"))
self.assertRaisesRegexp(ValueError, 'File dummy_file not found!', iparse.load, 'dummy_file', sourceIsFile=True)
except:
raise
finally:
__builtin__.open = orig_open
def test_xmlparser_load_invalid_file_no_IOError_only_print(self):
iparse = xmlparser.CustomXMLParser()
try:
sys.stdout = StringIO()
orig_open = __builtin__.open
__builtin__.open = lambda filen, mode: self._raise_exception(IOError("Error"))
iparse.ignoreErrors(True)
ret = iparse.load('dummy_file', sourceIsFile=True)
self.assertEqual(ret, None)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, "ERROR: File dummy_file not found!")
except:
raise
finally:
__builtin__.open = orig_open
sys.stdout = sys.__stdout__
def test_xmlparser_len_empty_zero(self):
iparse = xmlparser.CustomXMLParser()
self.assertEqual(len(iparse), 0)
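# Conventional entry point so the tests can be run directly (an assumption;
# no runner is shown in this file):
if __name__ == '__main__':
    unittest.main()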
|
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2017,2018
"""
Testing support for streaming applications.
********
Overview
********
Allows testing of a streaming application by creating conditions
on streams that are expected to become valid during processing.
`Tester` is designed to be used with Python's `unittest` module.
A complete application may be tested or fragments of it, for example a sub-graph can be tested
in isolation that takes input data and scores it using a model.
Supports execution of the application on
:py:const:`~streamsx.topology.context.ContextTypes.STREAMING_ANALYTICS_SERVICE`,
:py:const:`~streamsx.topology.context.ContextTypes.DISTRIBUTED`
or :py:const:`~streamsx.topology.context.ContextTypes.STANDALONE`.
A :py:class:`Tester` instance is created and associated with the :py:class:`Topology` to be tested.
Conditions are then created against streams, such as a stream must receive 10 tuples using
:py:meth:`~Tester.tuple_count`.
Here is a simple example that tests that a filter correctly passes only tuples with values greater than 5::
import unittest
from streamsx.topology.topology import Topology
from streamsx.topology.tester import Tester
class TestSimpleFilter(unittest.TestCase):
def setUp(self):
# Sets self.test_ctxtype and self.test_config
Tester.setup_streaming_analytics(self)
def test_filter(self):
# Declare the application to be tested
topology = Topology()
s = topology.source([5, 7, 2, 4, 9, 3, 8])
s = s.filter(lambda x : x > 5)
# Create tester and assign conditions
tester = Tester(topology)
tester.contents(s, [7, 9, 8])
# Submit the application for test
# If it fails an AssertionError will be raised.
tester.test(self.test_ctxtype, self.test_config)
A stream may have any number of conditions and any number of streams may be tested.
A :py:meth:`~Tester.local_check` is supported where a method of the
unittest class is executed once the job becomes healthy. This performs
checks from the context of the Python unittest class, such as
checking external effects of the application or using the REST api to
monitor the application.
A test fails-fast if any of the following occur:
* Any condition fails. E.g. a tuple failing a :py:meth:`~Tester.tuple_check`.
* The :py:meth:`~Tester.local_check` (if set) raises an error.
* The job for the test:
* Fails to become healthy.
* Becomes unhealthy during the test run.
* Any processing element (PE) within the job restarts.
A test times out if it does not fail but its conditions do not become valid.
The timeout is not fixed as an absolute test run time, but as a time since "progress"
was made. This can allow tests to pass when healthy runs are run in a constrained
environment that slows execution. For example with a tuple count condition of ten,
progress is indicated by tuples arriving on a stream, so that as long as gaps
between tuples are within the timeout period the test remains running until ten tuples appear.
.. note:: The test timeout value is not configurable.
.. note:: The submitted job (application under test) has additional elements (streams & operators) inserted to implement the conditions. These are visible through various APIs including the Streams console raw graph view. Such elements are put into the `Tester` category.
.. warning::
Streaming Analytics service or IBM Streams 4.2 or later is required when using `Tester`.
.. versionchanged:: 1.9 - Python 2.7 supported (except with Streaming Analytics service).
"""
from __future__ import unicode_literals
from future.builtins import *
import streamsx.ec as ec
import streamsx.topology.context as stc
import os
import unittest
import logging
import collections
import threading
from streamsx.rest import StreamsConnection
from streamsx.rest import StreamingAnalyticsConnection
from streamsx.topology.context import ConfigParams
import time
import json
import sys
import streamsx.topology.tester_runtime as sttrt
_logger = logging.getLogger('streamsx.topology.test')
class Tester(object):
"""Testing support for a Topology.
Allows testing of a Topology by creating conditions against the contents
of its streams.
Conditions may be added to a topology at any time before submission.
If a topology is submitted directly to a context then the graph
is not modified. This allows testing code to be inserted while
the topology is being built, but not acted upon unless the topology
is submitted in test mode.
If a topology is submitted through the test method then the topology
may be modified to include operations to ensure the conditions are met.
.. warning::
For future compatibility applications under test should not include intended failures that cause
a processing element to stop or restart. Thus, currently testing is against expected application behavior.
Args:
topology: Topology to be tested.
"""
def __init__(self, topology):
self.topology = topology
topology.tester = self
self._conditions = {}
self.local_check = None
self._run_for = 0
@staticmethod
def setup_standalone(test):
"""
Set up a unittest.TestCase to run tests using IBM Streams standalone mode.
Requires a local IBM Streams install defined by the ``STREAMS_INSTALL``
environment variable. If ``STREAMS_INSTALL`` is not set, then the
test is skipped.
A standalone application under test will run until a condition
fails or all the streams are finalized or when the
:py:meth:`run_for` time (if set) elapses.
Applications that include infinite streams must set a
run time using :py:meth:`run_for` to ensure the test completes.
Two attributes are set in the test case:
* test_ctxtype - Context type the test will be run in.
* test_config - Test configuration.
Args:
test(unittest.TestCase): Test case to be set up to run tests using Tester
Returns: None
"""
if not 'STREAMS_INSTALL' in os.environ:
raise unittest.SkipTest("Skipped due to no local IBM Streams install")
test.test_ctxtype = stc.ContextTypes.STANDALONE
test.test_config = {}
@staticmethod
def setup_distributed(test):
"""
Set up a unittest.TestCase to run tests using IBM Streams distributed mode.
Requires a local IBM Streams install defined by the ``STREAMS_INSTALL``
environment variable. If ``STREAMS_INSTALL`` is not set then the
test is skipped.
The Streams instance to use is defined by the environment variables:
* ``STREAMS_ZKCONNECT`` - Zookeeper connection string (optional)
* ``STREAMS_DOMAIN_ID`` - Domain identifier
* ``STREAMS_INSTANCE_ID`` - Instance identifier
The user used to submit and monitor the job is set by the
optional environment variables:
* ``STREAMS_USERNAME`` - User name defaulting to `streamsadmin`.
* ``STREAMS_PASSWORD`` - User password defaulting to `passw0rd`.
The defaults match the setup for testing on an IBM Streams Quick
Start Edition (QSE) virtual machine.
.. warning::
``streamtool`` is used to submit the job and requires that ``streamtool`` does not prompt for authentication. This is achieved by using ``streamtool genkey``.
.. seealso::
`Generating authentication keys for IBM Streams <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.2.1/com.ibm.streams.cfg.doc/doc/ibminfospherestreams-user-security-authentication-rsa.html>`_
Two attributes are set in the test case:
* test_ctxtype - Context type the test will be run in.
* test_config - Test configuration.
Args:
test(unittest.TestCase): Test case to be set up to run tests using Tester
Returns: None
"""
if not 'STREAMS_INSTALL' in os.environ:
raise unittest.SkipTest("Skipped due to no local IBM Streams install")
if not 'STREAMS_INSTANCE_ID' in os.environ:
raise unittest.SkipTest("Skipped due to STREAMS_INSTANCE_ID environment variable not set")
if not 'STREAMS_DOMAIN_ID' in os.environ:
raise unittest.SkipTest("Skipped due to STREAMS_DOMAIN_ID environment variable not set")
test.test_ctxtype = stc.ContextTypes.DISTRIBUTED
test.test_config = {}
@staticmethod
def setup_streaming_analytics(test, service_name=None, force_remote_build=False):
"""
Set up a unittest.TestCase to run tests using Streaming Analytics service on IBM Bluemix cloud platform.
The service to use is defined by:
* VCAP_SERVICES environment variable containing `streaming_analytics` entries.
* service_name which defaults to the value of STREAMING_ANALYTICS_SERVICE_NAME environment variable.
If VCAP_SERVICES is not set or a service name is not defined, then the test is skipped.
Two attributes are set in the test case:
* test_ctxtype - Context type the test will be run in.
* test_config - Test configuration.
Args:
test(unittest.TestCase): Test case to be set up to run tests using Tester
service_name(str): Name of Streaming Analytics service to use. Must exist as an
entry in the VCAP services. Defaults to value of STREAMING_ANALYTICS_SERVICE_NAME environment variable.
If run with Python 2 the test is skipped; only Python 3.5
is supported with the Streaming Analytics service.
Returns: None
"""
if sys.version_info.major == 2:
raise unittest.SkipTest('Skipped due to running with Python 2')
if not 'VCAP_SERVICES' in os.environ:
raise unittest.SkipTest("Skipped due to VCAP_SERVICES environment variable not set")
test.test_ctxtype = stc.ContextTypes.STREAMING_ANALYTICS_SERVICE
if service_name is None:
service_name = os.environ.get('STREAMING_ANALYTICS_SERVICE_NAME', None)
if service_name is None:
raise unittest.SkipTest("Skipped due to no service name supplied")
test.test_config = {'topology.service.name': service_name}
if force_remote_build:
test.test_config['topology.forceRemoteBuild'] = True
def add_condition(self, stream, condition):
"""Add a condition to a stream.
Conditions are normally added through :py:meth:`tuple_count`, :py:meth:`contents` or :py:meth:`tuple_check`.
This allows adding additional conditions that are implementations of :py:class:`Condition`.
Args:
stream(Stream): Stream to be tested.
condition(Condition): Arbitrary condition.
Returns:
Stream: stream
"""
self._conditions[condition.name] = (stream, condition)
return stream
def tuple_count(self, stream, count, exact=True):
"""Test that a stream contains a number of tuples.
If `exact` is `True`, then the condition becomes valid when `count`
tuples are seen on `stream` during the test. Subsequently if additional
tuples are seen on `stream` then the condition fails and can never
become valid.
If `exact` is `False`, then the condition becomes valid once `count`
tuples are seen on `stream` and remains valid regardless of
any additional tuples.
Args:
stream(Stream): Stream to be tested.
count(int): Number of tuples expected.
exact(bool): `True` if the stream must contain exactly `count`
tuples, `False` if the stream must contain at least `count` tuples.
Returns:
Stream: stream
"""
_logger.debug("Adding tuple count (%d) condition to stream %s.", count, stream)
if exact:
name = "ExactCount" + str(len(self._conditions))
cond = sttrt._TupleExactCount(count, name)
cond._desc = "{0} stream expects tuple count equal to {1}.".format(stream.name, count)
else:
name = "AtLeastCount" + str(len(self._conditions))
cond = sttrt._TupleAtLeastCount(count, name)
cond._desc = "'{0}' stream expects tuple count of at least {1}.".format(stream.name, count)
return self.add_condition(stream, cond)
def contents(self, stream, expected, ordered=True):
"""Test that a stream contains the expected tuples.
Args:
stream(Stream): Stream to be tested.
expected(list): Sequence of expected tuples.
ordered(bool): True if the ordering of received tuples must match expected.
Returns:
Stream: stream
"""
name = "StreamContents" + str(len(self._conditions))
if ordered:
cond = sttrt._StreamContents(expected, name)
cond._desc = "'{0}' stream expects tuple ordered contents: {1}.".format(stream.name, expected)
else:
cond = sttrt._UnorderedStreamContents(expected, name)
cond._desc = "'{0}' stream expects tuple unordered contents: {1}.".format(stream.name, expected)
return self.add_condition(stream, cond)
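# A minimal usage sketch for contents() (the stream values are illustrative):
#
#   topology = Topology()
#   s = topology.source(['a', 'b', 'c'])
#   tester = Tester(topology)
#   tester.contents(s, ['a', 'b', 'c'])                 # must match in order
#   tester.contents(s, ['b', 'c', 'a'], ordered=False)  # any order accepted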
def tuple_check(self, stream, checker):
"""Check each tuple on a stream.
For each tuple ``t`` on `stream` ``checker(t)`` is called.
If the return evaluates to `False` then the condition fails.
Once the condition fails it can never become valid.
Otherwise the condition becomes or remains valid. The first
tuple on the stream makes the condition valid if the checker
callable evaluates to `True`.
The condition can be combined with :py:meth:`tuple_count` with
``exact=False`` to test a stream map or filter with random input data.
An example of combining `tuple_count` and `tuple_check` to test that a filter followed
by a map is working correctly across a random set of values::
def rands():
r = random.Random()
while True:
yield r.random()
class TestFilterMap(unittest.TestCase):
# Set up omitted
def test_filter(self):
# Declare the application to be tested
topology = Topology()
r = topology.source(rands())
r = r.filter(lambda x : x > 0.7)
r = r.map(lambda x : x + 0.2)
# Create tester and assign conditions
tester = Tester(topology)
# Ensure at least 1000 tuples pass through the filter.
tester.tuple_count(r, 1000, exact=False)
tester.tuple_check(r, lambda x : x > 0.9)
# Submit the application for test
# If it fails an AssertionError will be raised.
tester.test(self.test_ctxtype, self.test_config)
Args:
stream(Stream): Stream to be tested.
checker(callable): Callable that must evaluate to True for each tuple.
"""
name = "TupleCheck" + str(len(self._conditions))
cond = sttrt._TupleCheck(checker, name)
return self.add_condition(stream, cond)
def local_check(self, callable):
"""Perform local check while the application is being tested.
A call to `callable` is made after the application under test is submitted and becomes healthy.
The check is made in the context of the Python runtime executing the unittest case;
typically the callable is a method of the test case.
The application remains running until all the conditions are met
and `callable` returns. If `callable` raises an error, typically
through an assertion method from `unittest`, then the test will fail.
Used for testing side effects of the application, typically with `STREAMING_ANALYTICS_SERVICE`
or `DISTRIBUTED`. The callable may also use the REST API, for context types that support
it, to dynamically monitor the running application.
The callable can use the `submission_result` and `streams_connection` attributes of the :py:class:`Tester` instance
to interact with the job or the running Streams instance.
Simple example of checking the job is healthy::
import unittest
from streamsx.topology.topology import Topology
from streamsx.topology.tester import Tester
class TestLocalCheckExample(unittest.TestCase):
def setUp(self):
Tester.setup_distributed(self)
def test_job_is_healthy(self):
topology = Topology()
s = topology.source(['Hello', 'World'])
self.tester = Tester(topology)
self.tester.tuple_count(s, 2)
# Add the local check
self.tester.local_check = self.local_checks
# Run the test
self.tester.test(self.test_ctxtype, self.test_config)
def local_checks(self):
job = self.tester.submission_result.job
self.assertEqual('healthy', job.health)
.. warning::
A local check must not cancel the job (application under test).
.. warning::
A local check is not supported in standalone mode.
Args:
callable: Callable object.
"""
self.local_check = callable
def run_for(self, duration):
"""Run the test for a minimum number of seconds.
Creates a test wide condition that becomes `valid` when the
application under test has been running for `duration` seconds.
May be called multiple times; the test will run for the maximum duration provided.
Can be used to test applications without any externally visible
streams, or streams that do not have testable conditions. For
example, a complete application may be tested by running it
for ten minutes and using :py:meth:`local_check` to test
any external impacts, such as messages published to a
message queue system.
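An illustrative sketch of a ten minute run combined with a local
check (``check_external_effects`` is a hypothetical test case
method)::
tester = Tester(topology)
tester.run_for(600)
tester.local_check = self.check_external_effects
tester.test(self.test_ctxtype, self.test_config)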
Args:
duration(float): Minimum number of seconds the test will run for.
.. versionadded:: 1.9
"""
self._run_for = max(self._run_for, float(duration))
def test(self, ctxtype, config=None, assert_on_fail=True, username=None, password=None, always_collect_logs=False):
"""Test the topology.
Submits the topology for testing and verifies the test conditions are met and the job remained healthy through its execution.
The submitted application (job) is monitored for the test conditions and
will be canceled when all the conditions are valid or at least one has failed.
In addition, if a local check was specified using :py:meth:`local_check` then
that callable must complete before the job is canceled.
The test passes if all conditions became valid and the local check callable (if present) completed without
raising an error.
The test fails if the job is unhealthy, any condition fails or the local check callable (if present) raised an exception.
In the event that the test fails when submitting to the `STREAMING_ANALYTICS_SERVICE` context, the application logs are retrieved as
a tar file and are saved to the current working directory. The filesystem path to the application logs is saved in the
tester's result object under the `application_logs` key, i.e. `tester.result['application_logs']`.
Args:
ctxtype(str): Context type for submission.
config: Configuration for submission.
assert_on_fail(bool): True to raise an assertion if the test fails, False to return the passed status.
username(str): **Deprecated**
password(str): **Deprecated**
always_collect_logs(bool): True to always collect the console log and PE trace files of the test.
Attributes:
result: The result of the test. This can contain exit codes, application log paths, or other relevant test information.
submission_result: Result of the application submission from :py:func:`~streamsx.topology.context.submit`.
streams_connection(StreamsConnection): Connection object that can be used to interact with the REST API of
the Streaming Analytics service or instance.
Returns:
bool: `True` if the test passed, `False` if it failed. When
`assert_on_fail` is `True`, a failure raises ``AssertionError``
instead of returning `False`.
.. deprecated:: 1.8.3
``username`` and ``password`` parameters. When required for
a distributed test use the environment variables
``STREAMS_USERNAME`` and ``STREAMS_PASSWORD`` to define
the Streams user.
"""
# Add the conditions into the graph as sink operators
_logger.debug("Adding conditions to topology %s.", self.topology.name)
for ct in self._conditions.values():
condition = ct[1]
stream = ct[0]
cond_sink = stream.for_each(condition, name=condition.name)
cond_sink.colocate(stream)
cond_sink.category = 'Tester'
cond_sink._op()._layout(hidden=True)
# Standalone uses --kill-after parameter.
if self._run_for and stc.ContextTypes.STANDALONE != ctxtype:
run_cond = sttrt._RunFor(self._run_for)
self.add_condition(None, run_cond)
cond_run_time = self.topology.source(run_cond, name="TestRunTime")
cond_run_time.category = 'Tester'
cond_run_time._op()._layout(hidden=True)
if config is None:
config = {}
config['topology.alwaysCollectLogs'] = always_collect_logs
_logger.debug("Starting test topology %s context %s.", self.topology.name, ctxtype)
if stc.ContextTypes.STANDALONE == ctxtype:
passed = self._standalone_test(config)
elif stc.ContextTypes.DISTRIBUTED == ctxtype:
passed = self._distributed_test(config, username, password)
elif stc.ContextTypes.STREAMING_ANALYTICS_SERVICE == ctxtype or stc.ContextTypes.ANALYTICS_SERVICE == ctxtype:
passed = self._streaming_analytics_test(ctxtype, config)
else:
raise NotImplementedError("Tester context type not implemented:", ctxtype)
if self.result.get('conditions'):
for cn,cnr in self.result['conditions'].items():
c = self._conditions[cn][1]
cdesc = cn
if hasattr(c, '_desc'):
cdesc = c._desc
if 'Fail' == cnr:
_logger.error("Condition: %s : %s", cnr, cdesc)
elif 'NotValid' == cnr:
_logger.warning("Condition: %s : %s", cnr, cdesc)
elif 'Valid' == cnr:
_logger.info("Condition: %s : %s", cnr, cdesc)
if assert_on_fail:
assert passed, "Test failed for topology: " + self.topology.name
if passed:
_logger.info("Test topology %s passed for context:%s", self.topology.name, ctxtype)
else:
_logger.error("Test topology %s failed for context:%s", self.topology.name, ctxtype)
return passed
def _standalone_test(self, config):
""" Test using STANDALONE.
Success is solely indicated by the process completing and returning zero.
"""
if self._run_for:
config = config.copy()
config['topology.standaloneRunTime'] = self._run_for + 5.0
sr = stc.submit(stc.ContextTypes.STANDALONE, self.topology, config)
self.submission_result = sr
self.result = {'passed': sr['return_code'] == 0, 'submission_result': sr}
return sr['return_code'] == 0
def _distributed_test(self, config, username, password):
self.streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
if self.streams_connection is None:
# Supply a default StreamsConnection object with SSL verification disabled, because the default
# streams server is not shipped with a valid SSL certificate
self.streams_connection = StreamsConnection(username, password)
self.streams_connection.session.verify = False
config[ConfigParams.STREAMS_CONNECTION] = self.streams_connection
sjr = stc.submit(stc.ContextTypes.DISTRIBUTED, self.topology, config)
self.submission_result = sjr
if sjr['return_code'] != 0:
_logger.error("Failed to submit job to distributed instance.")
return False
return self._distributed_wait_for_result(stc.ContextTypes.DISTRIBUTED, config)
def _streaming_analytics_test(self, ctxtype, config):
sjr = stc.submit(ctxtype, self.topology, config)
self.submission_result = sjr
self.streams_connection = config.get(ConfigParams.STREAMS_CONNECTION)
if self.streams_connection is None:
vcap_services = config.get(ConfigParams.VCAP_SERVICES)
service_name = config.get(ConfigParams.SERVICE_NAME)
self.streams_connection = StreamingAnalyticsConnection(vcap_services, service_name)
if sjr['return_code'] != 0:
_logger.error("Failed to submit job to Streaming Analytics instance")
return False
return self._distributed_wait_for_result(ctxtype, config)
def _distributed_wait_for_result(self, ctxtype, config):
cc = _ConditionChecker(self, self.streams_connection, self.submission_result)
# Wait for the job to be healthy before calling the local check.
if cc._wait_for_healthy():
self._start_local_check()
self.result = cc._complete()
if self.local_check is not None:
self._local_thread.join()
else:
self.result = cc._end(False, _ConditionChecker._UNHEALTHY)
self.result['submission_result'] = self.submission_result
if not self.result['passed'] or config['topology.alwaysCollectLogs']:
path = self._fetch_application_logs(ctxtype)
self.result['application_logs'] = path
cc._canceljob(self.result)
if hasattr(self, 'local_check_exception') and self.local_check_exception is not None:
raise self.local_check_exception
return self.result['passed']
def _fetch_application_logs(self, ctxtype):
# Fetch the logs if submitting to a Streaming Analytics Service
if stc.ContextTypes.STREAMING_ANALYTICS_SERVICE == ctxtype or stc.ContextTypes.ANALYTICS_SERVICE == ctxtype:
application_logs = self.submission_result.job.retrieve_log_trace()
_logger.info("Application logs have been fetched to " + application_logs)
return application_logs
def _start_local_check(self):
self.local_check_exception = None
if self.local_check is None:
return
self._local_thread = threading.Thread(target=self._call_local_check)
self._local_thread.start()
def _call_local_check(self):
try:
self.local_check_value = self.local_check()
except Exception as e:
self.local_check_value = None
self.local_check_exception = e
# Stop nose from seeing that Tester.test is a test (#1266)
Tester.__test__ = False
#######################################
# Internal functions
#######################################
def _result_to_dict(passed, t):
result = {}
result['passed'] = passed
result['valid'] = t[0]
result['fail'] = t[1]
result['progress'] = t[2]
result['conditions'] = t[3]
return result
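# For illustration: a check tuple has the form
# (valid, fail, progress, conditions), so for example
# (True, False, True, {'ExactCount0': 'Valid'}) maps to
# {'passed': True, 'valid': True, 'fail': False, 'progress': True,
#  'conditions': {'ExactCount0': 'Valid'}}.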
class _ConditionChecker(object):
_UNHEALTHY = (False, False, False, None)
def __init__(self, tester, sc, sjr):
self.tester = tester
self._sc = sc
self._sjr = sjr
self._instance_id = sjr['instanceId']
self._job_id = sjr['jobId']
self._sequences = {}
for cn in tester._conditions:
self._sequences[cn] = -1
self.delay = 0.5
self.timeout = 10.0
self.waits = 0
self.additional_checks = 2
self.job = self._find_job()
# Wait for job to be healthy. Returns True
# if the job became healthy, False if not.
def _wait_for_healthy(self):
while (self.waits * self.delay) < self.timeout:
if self._check_job_health():
self.waits = 0
return True
time.sleep(self.delay)
self.waits += 1
self._check_job_health(verbose=True)
return False
def _complete(self):
while (self.waits * self.delay) < self.timeout:
check = self.__check_once()
if check[1]:
return self._end(False, check)
if check[0]:
if self.additional_checks == 0:
return self._end(True, check)
self.additional_checks -= 1
continue
if check[2]:
self.waits = 0
else:
self.waits += 1
time.sleep(self.delay)
return self._end(False, check)
def _end(self, passed, check):
result = _result_to_dict(passed, check)
return result
def _canceljob(self, result):
if self.job is not None:
self.job.cancel(force=not result['passed'])
def __check_once(self):
if not self._check_job_health(verbose=True):
return _ConditionChecker._UNHEALTHY
cms = self._get_job_metrics()
valid = True
progress = False
fail = False
condition_states = {}
for cn in self._sequences:
condition_states[cn] = 'NotValid'
seq_mn = sttrt.Condition._mn('seq', cn)
# If the metrics are missing then the operator
# is probably still starting up, cannot be valid.
if not seq_mn in cms:
valid = False
continue
seq_m = cms[seq_mn]
if seq_m.value != self._sequences[cn]:
# At least one condition making progress
progress = True
self._sequences[cn] = seq_m.value
fail_mn = sttrt.Condition._mn('fail', cn)
if not fail_mn in cms:
valid = False
continue
fail_m = cms[fail_mn]
if fail_m.value != 0:
fail = True
condition_states[cn] = 'Fail'
continue
valid_mn = sttrt.Condition._mn('valid', cn)
if not valid_mn in cms:
valid = False
continue
valid_m = cms[valid_mn]
if valid_m.value == 0:
valid = False
else:
condition_states[cn] = 'Valid'
return (valid, fail, progress, condition_states)
def _check_job_health(self, verbose=False):
self.job.refresh()
if self.job.health != 'healthy':
if verbose:
_logger.error("Job %s health:%s", self.job.name, self.job.health)
return False
for pe in self.job.get_pes():
if pe.launchCount != 1:
if verbose:
_logger.error("PE %s launch count > 1: %s", pe.id, pe.launchCount)
return False
if pe.health != 'healthy':
if verbose:
_logger.error("PE %s health: %s", pe.id, pe.health)
return False
return True
def _find_job(self):
instance = self._sc.get_instance(id=self._instance_id)
return instance.get_job(id=self._job_id)
def _get_job_metrics(self):
"""Fetch all the condition metrics for a job.
We refetch the metrics each time to ensure that we don't miss
any being added, e.g. if an operator is slow to start.
"""
cms = {}
for op in self.job.get_operators():
metrics = op.get_metrics(name=sttrt.Condition._METRIC_PREFIX + '*')
for m in metrics:
cms[m.name] = m
return cms
|
|
from sympy.categories.diagram_drawing import _GrowableGrid, ArrowStringDescription
from sympy.categories import (DiagramGrid, Object, NamedMorphism,
Diagram, XypicDiagramDrawer, xypic_draw_diagram)
from sympy import FiniteSet
def test_GrowableGrid():
grid = _GrowableGrid(1, 2)
# Check dimensions.
assert grid.width == 1
assert grid.height == 2
# Check initialisation of elements.
assert grid[0, 0] is None
assert grid[1, 0] is None
# Check assignment to elements.
grid[0, 0] = 1
grid[1, 0] = "two"
assert grid[0, 0] == 1
assert grid[1, 0] == "two"
# Check appending a row.
grid.append_row()
assert grid.width == 1
assert grid.height == 3
assert grid[0, 0] == 1
assert grid[1, 0] == "two"
assert grid[2, 0] is None
# Check appending a column.
grid.append_column()
assert grid.width == 2
assert grid.height == 3
assert grid[0, 0] == 1
assert grid[1, 0] == "two"
assert grid[2, 0] is None
assert grid[0, 1] is None
assert grid[1, 1] is None
assert grid[2, 1] is None
grid = _GrowableGrid(1, 2)
grid[0, 0] = 1
grid[1, 0] = "two"
# Check prepending a row.
grid.prepend_row()
assert grid.width == 1
assert grid.height == 3
assert grid[0, 0] is None
assert grid[1, 0] == 1
assert grid[2, 0] == "two"
# Check prepending a column.
grid.prepend_column()
assert grid.width == 2
assert grid.height == 3
assert grid[0, 0] is None
assert grid[1, 0] is None
assert grid[2, 0] is None
assert grid[0, 1] is None
assert grid[1, 1] == 1
assert grid[2, 1] == "two"
def test_DiagramGrid():
# Set up some objects and morphisms.
A = Object("A")
B = Object("B")
C = Object("C")
D = Object("D")
E = Object("E")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
h = NamedMorphism(D, A, "h")
k = NamedMorphism(D, B, "k")
# A one-morphism diagram.
d = Diagram([f])
grid = DiagramGrid(d)
assert grid.width == 2
assert grid.height == 1
assert grid[0, 0] == A
assert grid[0, 1] == B
assert grid.morphisms == {f: FiniteSet()}
# A triangle.
d = Diagram([f, g], {g * f: "unique"})
grid = DiagramGrid(d)
assert grid.width == 2
assert grid.height == 2
assert grid[0, 0] == A
assert grid[0, 1] == B
assert grid[1, 0] == C
assert grid[1, 1] is None
assert grid.morphisms == {f: FiniteSet(), g: FiniteSet(),
g * f: FiniteSet("unique")}
# A triangle with a "loop" morphism.
l_A = NamedMorphism(A, A, "l_A")
d = Diagram([f, g, l_A])
grid = DiagramGrid(d)
assert grid.width == 2
assert grid.height == 2
assert grid[0, 0] == A
assert grid[0, 1] == B
assert grid[1, 0] is None
assert grid[1, 1] == C
assert grid.morphisms == {f: FiniteSet(), g: FiniteSet(), l_A: FiniteSet()}
# A simple diagram.
d = Diagram([f, g, h, k])
grid = DiagramGrid(d)
assert grid.width == 3
assert grid.height == 2
assert grid[0, 0] == A
assert grid[0, 1] == B
assert grid[0, 2] == D
assert grid[1, 0] is None
assert grid[1, 1] == C
assert grid[1, 2] is None
assert grid.morphisms == {f: FiniteSet(), g: FiniteSet(), h: FiniteSet(),
k: FiniteSet()}
assert str(grid) == '[[Object("A"), Object("B"), Object("D")], ' \
'[None, Object("C"), None]]'
# A chain of morphisms.
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
h = NamedMorphism(C, D, "h")
k = NamedMorphism(D, E, "k")
d = Diagram([f, g, h, k])
grid = DiagramGrid(d)
assert grid.width == 3
assert grid.height == 3
assert grid[0, 0] == A
assert grid[0, 1] == B
assert grid[0, 2] is None
assert grid[1, 0] is None
assert grid[1, 1] == C
assert grid[1, 2] == D
assert grid[2, 0] is None
assert grid[2, 1] is None
assert grid[2, 2] == E
assert grid.morphisms == {f: FiniteSet(), g: FiniteSet(), h: FiniteSet(),
k: FiniteSet()}
# A square.
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, D, "g")
h = NamedMorphism(A, C, "h")
k = NamedMorphism(C, D, "k")
d = Diagram([f, g, h, k])
grid = DiagramGrid(d)
assert grid.width == 2
assert grid.height == 2
assert grid[0, 0] == A
assert grid[0, 1] == B
assert grid[1, 0] == C
assert grid[1, 1] == D
assert grid.morphisms == {f: FiniteSet(), g: FiniteSet(), h: FiniteSet(),
k: FiniteSet()}
# A strange diagram which resulted from a typo when creating a
# test for the five lemma, but which helped to catch one extra
# problem in the algorithm.
A = Object("A")
B = Object("B")
C = Object("C")
D = Object("D")
E = Object("E")
A_ = Object("A'")
B_ = Object("B'")
C_ = Object("C'")
D_ = Object("D'")
E_ = Object("E'")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
h = NamedMorphism(C, D, "h")
i = NamedMorphism(D, E, "i")
# These 4 morphisms should be between primed objects.
j = NamedMorphism(A, B, "j")
k = NamedMorphism(B, C, "k")
l = NamedMorphism(C, D, "l")
m = NamedMorphism(D, E, "m")
o = NamedMorphism(A, A_, "o")
p = NamedMorphism(B, B_, "p")
q = NamedMorphism(C, C_, "q")
r = NamedMorphism(D, D_, "r")
s = NamedMorphism(E, E_, "s")
d = Diagram([f, g, h, i, j, k, l, m, o, p, q, r, s])
grid = DiagramGrid(d)
assert grid.width == 3
assert grid.height == 4
assert grid[0, 0] is None
assert grid[0, 1] == A
assert grid[0, 2] == A_
assert grid[1, 0] == C
assert grid[1, 1] == B
assert grid[1, 2] == B_
assert grid[2, 0] == C_
assert grid[2, 1] == D
assert grid[2, 2] == D_
assert grid[3, 0] is None
assert grid[3, 1] == E
assert grid[3, 2] == E_
morphisms = {}
for m in [f, g, h, i, j, k, l, m, o, p, q, r, s]:
morphisms[m] = FiniteSet()
assert grid.morphisms == morphisms
# A cube.
A1 = Object("A1")
A2 = Object("A2")
A3 = Object("A3")
A4 = Object("A4")
A5 = Object("A5")
A6 = Object("A6")
A7 = Object("A7")
A8 = Object("A8")
# The top face of the cube.
f1 = NamedMorphism(A1, A2, "f1")
f2 = NamedMorphism(A1, A3, "f2")
f3 = NamedMorphism(A2, A4, "f3")
f4 = NamedMorphism(A3, A4, "f3")
# The bottom face of the cube.
f5 = NamedMorphism(A5, A6, "f5")
f6 = NamedMorphism(A5, A7, "f6")
f7 = NamedMorphism(A6, A8, "f7")
f8 = NamedMorphism(A7, A8, "f8")
# The remaining morphisms.
f9 = NamedMorphism(A1, A5, "f9")
f10 = NamedMorphism(A2, A6, "f10")
f11 = NamedMorphism(A3, A7, "f11")
f12 = NamedMorphism(A4, A8, "f11")
d = Diagram([f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12])
grid = DiagramGrid(d)
assert grid.width == 4
assert grid.height == 3
assert grid[0, 0] is None
assert grid[0, 1] == A5
assert grid[0, 2] == A6
assert grid[0, 3] is None
assert grid[1, 0] is None
assert grid[1, 1] == A1
assert grid[1, 2] == A2
assert grid[1, 3] is None
assert grid[2, 0] == A7
assert grid[2, 1] == A3
assert grid[2, 2] == A4
assert grid[2, 3] == A8
morphisms = {}
for m in [f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12]:
morphisms[m] = FiniteSet()
assert grid.morphisms == morphisms
# A line diagram.
A = Object("A")
B = Object("B")
C = Object("C")
D = Object("D")
E = Object("E")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
h = NamedMorphism(C, D, "h")
i = NamedMorphism(D, E, "i")
d = Diagram([f, g, h, i])
grid = DiagramGrid(d, layout="sequential")
assert grid.width == 5
assert grid.height == 1
assert grid[0, 0] == A
assert grid[0, 1] == B
assert grid[0, 2] == C
assert grid[0, 3] == D
assert grid[0, 4] == E
assert grid.morphisms == {f: FiniteSet(), g: FiniteSet(), h: FiniteSet(),
i: FiniteSet()}
# Test the transposed version.
grid = DiagramGrid(d, layout="sequential", transpose=True)
assert grid.width == 1
assert grid.height == 5
assert grid[0, 0] == A
assert grid[1, 0] == B
assert grid[2, 0] == C
assert grid[3, 0] == D
assert grid[4, 0] == E
assert grid.morphisms == {f: FiniteSet(), g: FiniteSet(), h: FiniteSet(),
i: FiniteSet()}
# A pullback.
m1 = NamedMorphism(A, B, "m1")
m2 = NamedMorphism(A, C, "m2")
s1 = NamedMorphism(B, D, "s1")
s2 = NamedMorphism(C, D, "s2")
f1 = NamedMorphism(E, B, "f1")
f2 = NamedMorphism(E, C, "f2")
g = NamedMorphism(E, A, "g")
d = Diagram([m1, m2, s1, s2, f1, f2], {g: "unique"})
grid = DiagramGrid(d)
assert grid.width == 3
assert grid.height == 2
assert grid[0, 0] == A
assert grid[0, 1] == B
assert grid[0, 2] == E
assert grid[1, 0] == C
assert grid[1, 1] == D
assert grid[1, 2] is None
morphisms = {g: FiniteSet("unique")}
for m in [m1, m2, s1, s2, f1, f2]:
morphisms[m] = FiniteSet()
assert grid.morphisms == morphisms
# Test the pullback with sequential layout, just for stress
# testing.
grid = DiagramGrid(d, layout="sequential")
assert grid.width == 5
assert grid.height == 1
assert grid[0, 0] == D
assert grid[0, 1] == B
assert grid[0, 2] == A
assert grid[0, 3] == C
assert grid[0, 4] == E
assert grid.morphisms == morphisms
# Test a pullback with object grouping.
grid = DiagramGrid(d, groups=FiniteSet(E, FiniteSet(A, B, C, D)))
assert grid.width == 3
assert grid.height == 2
assert grid[0, 0] == E
assert grid[0, 1] == A
assert grid[0, 2] == B
assert grid[1, 0] is None
assert grid[1, 1] == C
assert grid[1, 2] == D
assert grid.morphisms == morphisms
# Five lemma, actually.
A = Object("A")
B = Object("B")
C = Object("C")
D = Object("D")
E = Object("E")
A_ = Object("A'")
B_ = Object("B'")
C_ = Object("C'")
D_ = Object("D'")
E_ = Object("E'")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
h = NamedMorphism(C, D, "h")
i = NamedMorphism(D, E, "i")
j = NamedMorphism(A_, B_, "j")
k = NamedMorphism(B_, C_, "k")
l = NamedMorphism(C_, D_, "l")
m = NamedMorphism(D_, E_, "m")
o = NamedMorphism(A, A_, "o")
p = NamedMorphism(B, B_, "p")
q = NamedMorphism(C, C_, "q")
r = NamedMorphism(D, D_, "r")
s = NamedMorphism(E, E_, "s")
d = Diagram([f, g, h, i, j, k, l, m, o, p, q, r, s])
grid = DiagramGrid(d)
assert grid.width == 5
assert grid.height == 3
assert grid[0, 0] is None
assert grid[0, 1] == A
assert grid[0, 2] == A_
assert grid[0, 3] is None
assert grid[0, 4] is None
assert grid[1, 0] == C
assert grid[1, 1] == B
assert grid[1, 2] == B_
assert grid[1, 3] == C_
assert grid[1, 4] is None
assert grid[2, 0] == D
assert grid[2, 1] == E
assert grid[2, 2] is None
assert grid[2, 3] == D_
assert grid[2, 4] == E_
morphisms = {}
for m in [f, g, h, i, j, k, l, m, o, p, q, r, s]:
morphisms[m] = FiniteSet()
assert grid.morphisms == morphisms
# Test the five lemma with object grouping.
grid = DiagramGrid(d, FiniteSet(
FiniteSet(A, B, C, D, E), FiniteSet(A_, B_, C_, D_, E_)))
assert grid.width == 6
assert grid.height == 3
assert grid[0, 0] == A
assert grid[0, 1] == B
assert grid[0, 2] is None
assert grid[0, 3] == A_
assert grid[0, 4] == B_
assert grid[0, 5] is None
assert grid[1, 0] is None
assert grid[1, 1] == C
assert grid[1, 2] == D
assert grid[1, 3] is None
assert grid[1, 4] == C_
assert grid[1, 5] == D_
assert grid[2, 0] is None
assert grid[2, 1] is None
assert grid[2, 2] == E
assert grid[2, 3] is None
assert grid[2, 4] is None
assert grid[2, 5] == E_
assert grid.morphisms == morphisms
# Test the five lemma with object grouping, but mixing containers
# to represent groups.
grid = DiagramGrid(d, [(A, B, C, D, E), {A_, B_, C_, D_, E_}])
assert grid.width == 6
assert grid.height == 3
assert grid[0, 0] == A
assert grid[0, 1] == B
assert grid[0, 2] is None
assert grid[0, 3] == A_
assert grid[0, 4] == B_
assert grid[0, 5] is None
assert grid[1, 0] is None
assert grid[1, 1] == C
assert grid[1, 2] == D
assert grid[1, 3] is None
assert grid[1, 4] == C_
assert grid[1, 5] == D_
assert grid[2, 0] is None
assert grid[2, 1] is None
assert grid[2, 2] == E
assert grid[2, 3] is None
assert grid[2, 4] is None
assert grid[2, 5] == E_
assert grid.morphisms == morphisms
# Test the five lemma with object grouping and hints.
grid = DiagramGrid(d, {
FiniteSet(A, B, C, D, E): {"layout": "sequential",
"transpose": True},
FiniteSet(A_, B_, C_, D_, E_): {"layout": "sequential",
"transpose": True}},
transpose=True)
assert grid.width == 5
assert grid.height == 2
assert grid[0, 0] == A
assert grid[0, 1] == B
assert grid[0, 2] == C
assert grid[0, 3] == D
assert grid[0, 4] == E
assert grid[1, 0] == A_
assert grid[1, 1] == B_
assert grid[1, 2] == C_
assert grid[1, 3] == D_
assert grid[1, 4] == E_
assert grid.morphisms == morphisms
# A two-triangle disconnected diagram.
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
f_ = NamedMorphism(A_, B_, "f")
g_ = NamedMorphism(B_, C_, "g")
d = Diagram([f, g, f_, g_], {g * f: "unique", g_ * f_: "unique"})
grid = DiagramGrid(d)
assert grid.width == 4
assert grid.height == 2
assert grid[0, 0] == A
assert grid[0, 1] == B
assert grid[0, 2] == A_
assert grid[0, 3] == B_
assert grid[1, 0] == C
assert grid[1, 1] is None
assert grid[1, 2] == C_
assert grid[1, 3] is None
assert grid.morphisms == {f: FiniteSet(), g: FiniteSet(), f_: FiniteSet(),
g_: FiniteSet(), g * f: FiniteSet("unique"),
g_ * f_: FiniteSet("unique")}
# A two-morphism disconnected diagram.
f = NamedMorphism(A, B, "f")
g = NamedMorphism(C, D, "g")
d = Diagram([f, g])
grid = DiagramGrid(d)
assert grid.width == 4
assert grid.height == 1
assert grid[0, 0] == A
assert grid[0, 1] == B
assert grid[0, 2] == C
assert grid[0, 3] == D
assert grid.morphisms == {f: FiniteSet(), g: FiniteSet()}
# Test a one-object diagram.
f = NamedMorphism(A, A, "f")
d = Diagram([f])
grid = DiagramGrid(d)
assert grid.width == 1
assert grid.height == 1
assert grid[0, 0] == A
# Test a two-object disconnected diagram.
g = NamedMorphism(B, B, "g")
d = Diagram([f, g])
grid = DiagramGrid(d)
assert grid.width == 2
assert grid.height == 1
assert grid[0, 0] == A
assert grid[0, 1] == B
# Test a diagram in which even growing a pseudopod does not
# eventually help.
F = Object("F")
f1 = NamedMorphism(A, B, "f1")
f2 = NamedMorphism(A, C, "f2")
f3 = NamedMorphism(A, D, "f3")
f4 = NamedMorphism(A, E, "f4")
f5 = NamedMorphism(A, A_, "f5")
f6 = NamedMorphism(A, B_, "f6")
f7 = NamedMorphism(A, C_, "f7")
f8 = NamedMorphism(A, D_, "f8")
f9 = NamedMorphism(A, E_, "f9")
f10 = NamedMorphism(A, F, "f10")
d = Diagram([f1, f2, f3, f4, f5, f6, f7, f8, f9, f10])
grid = DiagramGrid(d)
assert grid.width == 5
assert grid.height == 3
assert grid[0, 0] == E
assert grid[0, 1] == C
assert grid[0, 2] == C_
assert grid[0, 3] == E_
assert grid[0, 4] == F
assert grid[1, 0] == D
assert grid[1, 1] == A
assert grid[1, 2] == A_
assert grid[1, 3] is None
assert grid[1, 4] is None
assert grid[2, 0] == D_
assert grid[2, 1] == B
assert grid[2, 2] == B_
assert grid[2, 3] is None
assert grid[2, 4] is None
morphisms = {}
for f in [f1, f2, f3, f4, f5, f6, f7, f8, f9, f10]:
morphisms[f] = FiniteSet()
assert grid.morphisms == morphisms
def test_ArrowStringDescription():
astr = ArrowStringDescription("cm", "", None, "", "", "d", "r", "_", "f")
assert str(astr) == "\\ar[dr]_{f}"
astr = ArrowStringDescription("cm", "", 12, "", "", "d", "r", "_", "f")
assert str(astr) == "\\ar[dr]_{f}"
astr = ArrowStringDescription("cm", "^", 12, "", "", "d", "r", "_", "f")
assert str(astr) == "\\ar@/^12cm/[dr]_{f}"
astr = ArrowStringDescription("cm", "", 12, "r", "", "d", "r", "_", "f")
assert str(astr) == "\\ar[dr]_{f}"
astr = ArrowStringDescription("cm", "", 12, "r", "u", "d", "r", "_", "f")
assert str(astr) == "\\ar@(r,u)[dr]_{f}"
astr = ArrowStringDescription("cm", "", 12, "r", "u", "d", "r", "_", "f")
astr.arrow_style = "{-->}"
assert str(astr) == "\\ar@(r,u)@{-->}[dr]_{f}"
astr = ArrowStringDescription("cm", "_", 12, "", "", "d", "r", "_", "f")
astr.arrow_style = "{-->}"
assert str(astr) == "\\ar@/_12cm/@{-->}[dr]_{f}"
def test_XypicDiagramDrawer_line():
# A linear diagram.
A = Object("A")
B = Object("B")
C = Object("C")
D = Object("D")
E = Object("E")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
h = NamedMorphism(C, D, "h")
i = NamedMorphism(D, E, "i")
d = Diagram([f, g, h, i])
grid = DiagramGrid(d, layout="sequential")
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar[r]^{f} & B \\ar[r]^{g} & C \\ar[r]^{h} & D \\ar[r]^{i} & E \n" \
"}\n"
# The same diagram, transposed.
grid = DiagramGrid(d, layout="sequential", transpose=True)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar[d]^{f} \\\\\n" \
"B \\ar[d]^{g} \\\\\n" \
"C \\ar[d]^{h} \\\\\n" \
"D \\ar[d]^{i} \\\\\n" \
"E \n" \
"}\n"
def test_XypicDiagramDrawer_triangle():
# A triangle diagram.
A = Object("A")
B = Object("B")
C = Object("C")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
d = Diagram([f, g], {g * f: "unique"})
grid = DiagramGrid(d)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar[d]_{g\\circ f} \\ar[r]^{f} & B \\ar[ld]^{g} \\\\\n" \
"C & \n" \
"}\n"
# The same diagram, transposed.
grid = DiagramGrid(d, transpose=True)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar[r]^{g\\circ f} \\ar[d]_{f} & C \\\\\n" \
"B \\ar[ru]_{g} & \n" \
"}\n"
# The same diagram, with a masked morphism.
assert drawer.draw(d, grid, masked=[g]) == "\\xymatrix{\n" \
"A \\ar[r]^{g\\circ f} \\ar[d]_{f} & C \\\\\n" \
"B & \n" \
"}\n"
# The same diagram with a formatter for "unique".
def formatter(astr):
astr.label = "\\exists !" + astr.label
astr.arrow_style = "{-->}"
drawer.arrow_formatters["unique"] = formatter
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar@{-->}[r]^{\\exists !g\\circ f} \\ar[d]_{f} & C \\\\\n" \
"B \\ar[ru]_{g} & \n" \
"}\n"
# The same diagram with a default formatter.
def default_formatter(astr):
astr.label_displacement = "(0.45)"
drawer.default_arrow_formatter = default_formatter
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar@{-->}[r]^(0.45){\\exists !g\\circ f} \\ar[d]_(0.45){f} & C \\\\\n" \
"B \\ar[ru]_(0.45){g} & \n" \
"}\n"
# A triangle diagram with a lot of morphisms between the same
# objects.
f1 = NamedMorphism(B, A, "f1")
f2 = NamedMorphism(A, B, "f2")
g1 = NamedMorphism(C, B, "g1")
g2 = NamedMorphism(B, C, "g2")
d = Diagram([f, f1, f2, g, g1, g2], {f1 * g1: "unique", g2 * f2: "unique"})
grid = DiagramGrid(d, transpose=True)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid, masked=[f1*g1*g2*f2, g2*f2*f1*g1]) == \
"\\xymatrix{\n" \
"A \\ar[r]^{g_{2}\\circ f_{2}} \\ar[d]_{f} \\ar@/^3mm/[d]^{f_{2}} " \
"& C \\ar@/^3mm/[l]^{f_{1}\\circ g_{1}} \\ar@/^3mm/[ld]^{g_{1}} \\\\\n" \
"B \\ar@/^3mm/[u]^{f_{1}} \\ar[ru]_{g} \\ar@/^3mm/[ru]^{g_{2}} & \n" \
"}\n"
def test_XypicDiagramDrawer_cube():
# A cube diagram.
A1 = Object("A1")
A2 = Object("A2")
A3 = Object("A3")
A4 = Object("A4")
A5 = Object("A5")
A6 = Object("A6")
A7 = Object("A7")
A8 = Object("A8")
# The top face of the cube.
f1 = NamedMorphism(A1, A2, "f1")
f2 = NamedMorphism(A1, A3, "f2")
f3 = NamedMorphism(A2, A4, "f3")
f4 = NamedMorphism(A3, A4, "f3")
# The bottom face of the cube.
f5 = NamedMorphism(A5, A6, "f5")
f6 = NamedMorphism(A5, A7, "f6")
f7 = NamedMorphism(A6, A8, "f7")
f8 = NamedMorphism(A7, A8, "f8")
# The remaining morphisms.
f9 = NamedMorphism(A1, A5, "f9")
f10 = NamedMorphism(A2, A6, "f10")
f11 = NamedMorphism(A3, A7, "f11")
f12 = NamedMorphism(A4, A8, "f11")
d = Diagram([f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12])
grid = DiagramGrid(d)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"& A_{5} \\ar[r]^{f_{5}} \\ar[ldd]_{f_{6}} & A_{6} \\ar[rdd]^{f_{7}} " \
"& \\\\\n" \
"& A_{1} \\ar[r]^{f_{1}} \\ar[d]^{f_{2}} \\ar[u]^{f_{9}} & A_{2} " \
"\\ar[d]^{f_{3}} \\ar[u]_{f_{10}} & \\\\\n" \
"A_{7} \\ar@/_3mm/[rrr]_{f_{8}} & A_{3} \\ar[r]^{f_{3}} \\ar[l]_{f_{11}} " \
"& A_{4} \\ar[r]^{f_{11}} & A_{8} \n" \
"}\n"
# The same diagram, transposed.
grid = DiagramGrid(d, transpose=True)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"& & A_{7} \\ar@/^3mm/[ddd]^{f_{8}} \\\\\n" \
"A_{5} \\ar[d]_{f_{5}} \\ar[rru]^{f_{6}} & A_{1} \\ar[d]^{f_{1}} " \
"\\ar[r]^{f_{2}} \\ar[l]^{f_{9}} & A_{3} \\ar[d]_{f_{3}} " \
"\\ar[u]^{f_{11}} \\\\\n" \
"A_{6} \\ar[rrd]_{f_{7}} & A_{2} \\ar[r]^{f_{3}} \\ar[l]^{f_{10}} " \
"& A_{4} \\ar[d]_{f_{11}} \\\\\n" \
"& & A_{8} \n" \
"}\n"
def test_XypicDiagramDrawer_curved_and_loops():
# A simple diagram, with a curved arrow.
A = Object("A")
B = Object("B")
C = Object("C")
D = Object("D")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
h = NamedMorphism(D, A, "h")
k = NamedMorphism(D, B, "k")
d = Diagram([f, g, h, k])
grid = DiagramGrid(d)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar[r]_{f} & B \\ar[d]^{g} & D \\ar[l]^{k} \\ar@/_3mm/[ll]_{h} \\\\\n" \
"& C & \n" \
"}\n"
# The same diagram, transposed.
grid = DiagramGrid(d, transpose=True)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar[d]^{f} & \\\\\n" \
"B \\ar[r]^{g} & C \\\\\n" \
"D \\ar[u]_{k} \\ar@/^3mm/[uu]^{h} & \n" \
"}\n"
# The same diagram, larger and rotated.
assert drawer.draw(d, grid, diagram_format="@+1cm@dr") == \
"\\xymatrix@+1cm@dr{\n" \
"A \\ar[d]^{f} & \\\\\n" \
"B \\ar[r]^{g} & C \\\\\n" \
"D \\ar[u]_{k} \\ar@/^3mm/[uu]^{h} & \n" \
"}\n"
# A simple diagram with three curved arrows.
h1 = NamedMorphism(D, A, "h1")
h2 = NamedMorphism(A, D, "h2")
k = NamedMorphism(D, B, "k")
d = Diagram([f, g, h, k, h1, h2])
grid = DiagramGrid(d)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar[r]_{f} \\ar@/^3mm/[rr]^{h_{2}} & B \\ar[d]^{g} & D \\ar[l]^{k} " \
"\\ar@/_7mm/[ll]_{h} \\ar@/_11mm/[ll]_{h_{1}} \\\\\n" \
"& C & \n" \
"}\n"
# The same diagram, transposed.
grid = DiagramGrid(d, transpose=True)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar[d]^{f} \\ar@/_3mm/[dd]_{h_{2}} & \\\\\n" \
"B \\ar[r]^{g} & C \\\\\n" \
"D \\ar[u]_{k} \\ar@/^7mm/[uu]^{h} \\ar@/^11mm/[uu]^{h_{1}} & \n" \
"}\n"
# The same diagram, with "loop" morphisms.
l_A = NamedMorphism(A, A, "l_A")
l_D = NamedMorphism(D, D, "l_D")
l_C = NamedMorphism(C, C, "l_C")
d = Diagram([f, g, h, k, h1, h2, l_A, l_D, l_C])
grid = DiagramGrid(d)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar[r]_{f} \\ar@/^3mm/[rr]^{h_{2}} \\ar@(u,l)[]^{l_{A}} " \
"& B \\ar[d]^{g} & D \\ar[l]^{k} \\ar@/_7mm/[ll]_{h} " \
"\\ar@/_11mm/[ll]_{h_{1}} \\ar@(r,u)[]^{l_{D}} \\\\\n" \
"& C \\ar@(l,d)[]^{l_{C}} & \n" \
"}\n"
# The same diagram with "loop" morphisms, transposed.
grid = DiagramGrid(d, transpose=True)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar[d]^{f} \\ar@/_3mm/[dd]_{h_{2}} \\ar@(r,u)[]^{l_{A}} & \\\\\n" \
"B \\ar[r]^{g} & C \\ar@(r,u)[]^{l_{C}} \\\\\n" \
"D \\ar[u]_{k} \\ar@/^7mm/[uu]^{h} \\ar@/^11mm/[uu]^{h_{1}} " \
"\\ar@(l,d)[]^{l_{D}} & \n" \
"}\n"
# The same diagram with two "loop" morphisms per object.
l_A_ = NamedMorphism(A, A, "n_A")
l_D_ = NamedMorphism(D, D, "n_D")
l_C_ = NamedMorphism(C, C, "n_C")
d = Diagram([f, g, h, k, h1, h2, l_A, l_D, l_C, l_A_, l_D_, l_C_])
grid = DiagramGrid(d)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar[r]_{f} \\ar@/^3mm/[rr]^{h_{2}} \\ar@(u,l)[]^{l_{A}} " \
"\\ar@/^3mm/@(l,d)[]^{n_{A}} & B \\ar[d]^{g} & D \\ar[l]^{k} " \
"\\ar@/_7mm/[ll]_{h} \\ar@/_11mm/[ll]_{h_{1}} \\ar@(r,u)[]^{l_{D}} " \
"\\ar@/^3mm/@(d,r)[]^{n_{D}} \\\\\n" \
"& C \\ar@(l,d)[]^{l_{C}} \\ar@/^3mm/@(d,r)[]^{n_{C}} & \n" \
"}\n"
# The same diagram with two "loop" morphisms per object, transposed.
grid = DiagramGrid(d, transpose=True)
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == "\\xymatrix{\n" \
"A \\ar[d]^{f} \\ar@/_3mm/[dd]_{h_{2}} \\ar@(r,u)[]^{l_{A}} " \
"\\ar@/^3mm/@(u,l)[]^{n_{A}} & \\\\\n" \
"B \\ar[r]^{g} & C \\ar@(r,u)[]^{l_{C}} \\ar@/^3mm/@(d,r)[]^{n_{C}} \\\\\n" \
"D \\ar[u]_{k} \\ar@/^7mm/[uu]^{h} \\ar@/^11mm/[uu]^{h_{1}} " \
"\\ar@(l,d)[]^{l_{D}} \\ar@/^3mm/@(d,r)[]^{n_{D}} & \n" \
"}\n"
def test_xypic_draw_diagram():
# A linear diagram.
A = Object("A")
B = Object("B")
C = Object("C")
D = Object("D")
E = Object("E")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
h = NamedMorphism(C, D, "h")
i = NamedMorphism(D, E, "i")
d = Diagram([f, g, h, i])
grid = DiagramGrid(d, layout="sequential")
drawer = XypicDiagramDrawer()
assert drawer.draw(d, grid) == xypic_draw_diagram(d, layout="sequential")
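# Illustrative sketch (not one of the original assertions): DiagramGrid
# hints such as ``transpose`` are expected to pass through
# xypic_draw_diagram in the same way ``layout`` does above, e.g.
#     xypic_draw_diagram(d, layout="sequential", transpose=True)
# which would lay the chain out as a column rather than a row.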
|
|
# python 2to3
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
# system
import zlib
import hashlib
import logging
import os
import urllib3
import petl
from datetime import datetime, timedelta
# sqlalchemy
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import ForeignKey
# superset
from superset import cache
# local
from bit.models import Connector
from bit.utils.conversions import sqla_python_types
from ..settings import CONNECTOR_INFO
class AppsFlyerConnector(Connector):
__tablename__ = 'bit_{}_connector'.format(CONNECTOR_INFO.get('key'))
# ForeignKey to Connector (Parent)
id = Column(Integer, ForeignKey('bit_connectors.id'), primary_key=True)
__mapper_args__ = {
'polymorphic_identity': CONNECTOR_INFO.get('key')
}
app_id = Column(String(255))
api_token = Column(String(255))
url_pat = Column(String(255))
# no db fields/methods
data_sources = CONNECTOR_INFO.get('reports', {})
fields_types = CONNECTOR_INFO.get('fields_types', {})
replace_values = CONNECTOR_INFO.get('replace_values', {})
replace_in_values = CONNECTOR_INFO.get('replace_in_values', {})
def connector_name(self):
""" String: connector name. """
return CONNECTOR_INFO.get('name', '')
def connector_description(self):
""" String: connector description. """
return CONNECTOR_INFO.get('description', '')
def connector_logo(self):
""" String: connector name. """
logo = '{}/{}/logo.png'.format(
CONNECTOR_INFO.get('static_folder', ''),
CONNECTOR_INFO.get('key', '')
)
return CONNECTOR_INFO.get('logo_pat', '').format(logo)
def connector_info(self):
""" String: connector info. """
# logging.info('{} info'.format(self.connector_name))
# change url
html = '<h4><a href="/appsflyerconnectorview/list/">{name}</a></h4>' \
'{logo}' \
'<p>{description}</p>'.format(
name=self.connector_name(),
logo=self.connector_logo(),
description=self.connector_description(),
)
return html
def get_list_data_sources(self):
return self.data_sources
def admin_data_sources(self):
""" List: data_sources(Reports) """
reports = self.data_sources
ds = [reports.get(report).get('name') for report in reports]
html = '<p style="width:250px;">{}</p>'.format(
'<br/>'.join(sorted(ds))
)
return html
# sync
report_folder = 'reports/{}'.format(CONNECTOR_INFO.get('key'))
report_filename_pat = report_folder + '/{hash}.csv'
data = {}
report = ''
from_date = ''
to_date = ''
def web_test(self):
logging.info('web_test[{}]'.format(CONNECTOR_INFO.get('key')))
return True
def get_report_urls(self, report='', from_date='', to_date=''):
# List: report URLs for the given report name and dates, one per app id.
if not (report and from_date and to_date):
return False
urls = []
if self.app_id:
app_ids = self.app_id.split(',')
for app_id in app_ids:
if app_id:
urls.append(
self.url_pat.format(
app_id=app_id,
api_token=self.api_token,
report=report,
from_date=from_date,
to_date=to_date
)
)
return urls
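# Illustrative sketch, assuming a hypothetical url_pat of
# 'https://example.com/{app_id}/{report}?token={api_token}&from={from_date}&to={to_date}':
# with app_id set to 'id1,id2', get_report_urls('installs', '2018-01-01',
# '2018-01-01') returns two URLs, one per application id.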
def get_report_filename(self, hash=''):
# String: report filename derived from the URL hash.
if not hash:
return False
return self.report_filename_pat.format(hash=hash)
def get_columns(self, report='', from_date='', to_date=''):
self.report = report
if from_date:
self.from_date = from_date
else:
self.from_date = (datetime.utcnow() - timedelta(
days=1
)).date().isoformat()
if to_date:
self.to_date = to_date
else:
self.to_date = (datetime.utcnow()).date().isoformat()
# limit the report to a single day
self.to_date = self.from_date
if not report:
return False
columns = []
self.get_data(self.report, self.from_date, self.to_date)
if self.data:
for col in self.data[0]:
columns.append(
{
'name': col,
'type': self.fields_types.get(col, 'String'),
}
)
return columns
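# Illustrative sketch: for a downloaded report whose CSV header row is
# ('Date', 'Installs'), with fields_types = {'Installs': 'Integer'} and a
# hypothetical report name, get_columns('installs') would return
# [{'name': 'Date', 'type': 'String'},
#  {'name': 'Installs', 'type': 'Integer'}].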
def download(self, urls=()):
# timeout setting for requests
# timeout = urllib3.Timeout(connect=2.0, read=7.0)
# http = urllib3.PoolManager(timeout=timeout)
http = urllib3.PoolManager()
report_data = []
for url in urls:
# print(url)
report_filename = self.get_report_filename(
hashlib.md5(url.encode('utf-8')).hexdigest())
if cache:
# print('use cache')
cache_key = url
cache_timeout = CONNECTOR_INFO.get(
'report_cache_timeout', 60 * 60
)
z_report = cache.get(cache_key)
if z_report is not None:
new_report_data = petl.io.fromcsv(petl.MemorySource(
zlib.decompress(z_report)
))
# print(len(new_report_data))
if not report_data:
# print('NEw cat')
report_data = new_report_data
else:
report_data = petl.cat(
report_data,
new_report_data
)
continue
logging.info('Download Report from {}'.format(url))
r = http.request(
'GET',
url,
retries=urllib3.Retry(
redirect=2,
backoff_factor=2,
)
)
if r.status == 200:
report = r.data
r.release_conn()
z_report = zlib.compress(report)
cache.set(cache_key, z_report, timeout=cache_timeout)
# return petl.io.fromcsv(petl.MemorySource(report))
new_report_data = petl.io.fromcsv(
petl.MemorySource(report)
)
# print(len(new_report_data))
if not report_data:
report_data = new_report_data
else:
report_data = petl.cat(
report_data,
new_report_data
)
elif r.status == 403:
raise Exception(r.data)
else:
logging.info(r.data)
logging.info(r.status)
logging.info(r.headers)
else:
# move to init
# print('Not cache')
if not os.path.exists(self.report_folder):
os.makedirs(self.report_folder)
if not os.path.exists(report_filename):
logging.info('Download Report from {}'.format(url))
r = http.request(
'GET',
url,
retries=urllib3.Retry(
redirect=2,
backoff_factor=2,
)
)
if r.status == 200:
with open(report_filename, 'wb') as f:
f.write(r.data)
r.release_conn()
logging.info('Read from {}'.format(report_filename))
new_report_data = petl.io.fromcsv(report_filename)
if not report_data:
report_data = new_report_data
else:
report_data = petl.cat(
report_data,
new_report_data
)
return report_data
def get_data(self, report='', from_date='', to_date=''):
if not (report and from_date and to_date):
return False
self.report = report
self.from_date = from_date
self.to_date = to_date
report_urls = self.get_report_urls(report, from_date, to_date)
if not report_urls:
return False
raw_data = self.download(report_urls)
if not raw_data:
self.data = []
return False
self.data = raw_data
# print(len(self.data))
if len(self.replace_values):
for field in self.replace_values:
if len(self.replace_values[field]):
try:
self.data = petl.convert(
self.data, field, self.replace_values[field]
)
except Exception as e:
# no field exist
logging.exception('Field {} does not exist'.format(
field
))
pass
if len(self.replace_in_values):
for field in self.replace_in_values:
if len(self.replace_in_values[field]):
try:
self.data = petl.convert(
self.data,
field,
'replace',
self.replace_in_values[field][0],
self.replace_in_values[field][1]
)
except Exception as e:
# no field exist
logging.exception('Field {} does not exist'.format(
field
))
pass
if len(self.fields_types):
converts = {}
for col in self.data[0]:
converts.update({
col: sqla_python_types.get(
self.fields_types.get(col, 'String'),
str
),
})
self.data = petl.convert(self.data, converts)
|
|
"""
===========
Basic Units
===========
"""
import six
import math
import numpy as np
import matplotlib.units as units
import matplotlib.ticker as ticker
from matplotlib.axes import Axes
from matplotlib.cbook import iterable
class ProxyDelegate(object):
def __init__(self, fn_name, proxy_type):
self.proxy_type = proxy_type
self.fn_name = fn_name
def __get__(self, obj, objtype=None):
return self.proxy_type(self.fn_name, obj)
class TaggedValueMeta(type):
def __init__(cls, name, bases, dict):
for fn_name in cls._proxies:
try:
dummy = getattr(cls, fn_name)
except AttributeError:
setattr(cls, fn_name,
ProxyDelegate(fn_name, cls._proxies[fn_name]))
class PassThroughProxy(object):
def __init__(self, fn_name, obj):
self.fn_name = fn_name
self.target = obj.proxy_target
def __call__(self, *args):
fn = getattr(self.target, self.fn_name)
ret = fn(*args)
return ret
class ConvertArgsProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
converted_args = []
for a in args:
try:
converted_args.append(a.convert_to(self.unit))
except AttributeError:
converted_args.append(TaggedValue(a, self.unit))
converted_args = tuple([c.get_value() for c in converted_args])
return PassThroughProxy.__call__(self, *converted_args)
class ConvertReturnProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
ret = PassThroughProxy.__call__(self, *args)
return (NotImplemented if ret is NotImplemented
else TaggedValue(ret, self.unit))
class ConvertAllProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
converted_args = []
arg_units = [self.unit]
for a in args:
if hasattr(a, 'get_unit') and not hasattr(a, 'convert_to'):
# if this arg has a unit type but no conversion ability,
# this operation is prohibited
return NotImplemented
if hasattr(a, 'convert_to'):
try:
a = a.convert_to(self.unit)
except Exception:
pass
arg_units.append(a.get_unit())
converted_args.append(a.get_value())
else:
converted_args.append(a)
if hasattr(a, 'get_unit'):
arg_units.append(a.get_unit())
else:
arg_units.append(None)
converted_args = tuple(converted_args)
ret = PassThroughProxy.__call__(self, *converted_args)
if ret is NotImplemented:
return NotImplemented
ret_unit = unit_resolver(self.fn_name, arg_units)
if ret_unit is NotImplemented:
return NotImplemented
return TaggedValue(ret, ret_unit)
class TaggedValue(six.with_metaclass(TaggedValueMeta)):
_proxies = {'__add__': ConvertAllProxy,
'__sub__': ConvertAllProxy,
'__mul__': ConvertAllProxy,
'__rmul__': ConvertAllProxy,
'__cmp__': ConvertAllProxy,
'__lt__': ConvertAllProxy,
'__gt__': ConvertAllProxy,
'__len__': PassThroughProxy}
def __new__(cls, value, unit):
# generate a new subclass for value
value_class = type(value)
try:
subcls = type('TaggedValue_of_%s' % (value_class.__name__),
tuple([cls, value_class]),
{})
if subcls not in units.registry:
units.registry[subcls] = basicConverter
return object.__new__(subcls)
except TypeError:
if cls not in units.registry:
units.registry[cls] = basicConverter
return object.__new__(cls)
def __init__(self, value, unit):
self.value = value
self.unit = unit
self.proxy_target = self.value
def __getattribute__(self, name):
if name.startswith('__'):
return object.__getattribute__(self, name)
variable = object.__getattribute__(self, 'value')
if hasattr(variable, name) and name not in self.__class__.__dict__:
return getattr(variable, name)
return object.__getattribute__(self, name)
def __array__(self, dtype=object):
return np.asarray(self.value).astype(dtype)
def __array_wrap__(self, array, context):
return TaggedValue(array, self.unit)
def __repr__(self):
return 'TaggedValue(' + repr(self.value) + ', ' + repr(self.unit) + ')'
def __str__(self):
return str(self.value) + ' in ' + str(self.unit)
def __len__(self):
return len(self.value)
def __iter__(self):
# Return a generator expression rather than use `yield`, so that
# TypeError is raised by iter(self) if appropriate when checking for
# iterability.
return (TaggedValue(inner, self.unit) for inner in self.value)
def get_compressed_copy(self, mask):
new_value = np.ma.masked_array(self.value, mask=mask).compressed()
return TaggedValue(new_value, self.unit)
def convert_to(self, unit):
if unit == self.unit or not unit:
return self
new_value = self.unit.convert_value_to(self.value, unit)
return TaggedValue(new_value, unit)
def get_value(self):
return self.value
def get_unit(self):
return self.unit
class BasicUnit(object):
def __init__(self, name, fullname=None):
self.name = name
if fullname is None:
fullname = name
self.fullname = fullname
self.conversions = dict()
def __repr__(self):
return 'BasicUnit(%s)' % self.name
def __str__(self):
return self.fullname
def __call__(self, value):
return TaggedValue(value, self)
def __mul__(self, rhs):
value = rhs
unit = self
if hasattr(rhs, 'get_unit'):
value = rhs.get_value()
unit = rhs.get_unit()
unit = unit_resolver('__mul__', (self, unit))
if unit is NotImplemented:
return NotImplemented
return TaggedValue(value, unit)
def __rmul__(self, lhs):
return self*lhs
def __array_wrap__(self, array, context):
return TaggedValue(array, self)
def __array__(self, t=None, context=None):
ret = np.array([1])
if t is not None:
return ret.astype(t)
else:
return ret
def add_conversion_factor(self, unit, factor):
def convert(x):
return x*factor
self.conversions[unit] = convert
def add_conversion_fn(self, unit, fn):
self.conversions[unit] = fn
def get_conversion_fn(self, unit):
return self.conversions[unit]
def convert_value_to(self, value, unit):
conversion_fn = self.conversions[unit]
ret = conversion_fn(value)
return ret
def get_unit(self):
return self
class UnitResolver(object):
def addition_rule(self, units):
for unit_1, unit_2 in zip(units[:-1], units[1:]):
if (unit_1 != unit_2):
return NotImplemented
return units[0]
def multiplication_rule(self, units):
non_null = [u for u in units if u]
if (len(non_null) > 1):
return NotImplemented
return non_null[0]
op_dict = {
'__mul__': multiplication_rule,
'__rmul__': multiplication_rule,
'__add__': addition_rule,
'__radd__': addition_rule,
'__sub__': addition_rule,
'__rsub__': addition_rule}
def __call__(self, operation, units):
if (operation not in self.op_dict):
return NotImplemented
return self.op_dict[operation](self, units)
unit_resolver = UnitResolver()
cm = BasicUnit('cm', 'centimeters')
inch = BasicUnit('inch', 'inches')
inch.add_conversion_factor(cm, 2.54)
cm.add_conversion_factor(inch, 1/2.54)
radians = BasicUnit('rad', 'radians')
degrees = BasicUnit('deg', 'degrees')
radians.add_conversion_factor(degrees, 180.0/np.pi)
degrees.add_conversion_factor(radians, np.pi/180.0)
secs = BasicUnit('s', 'seconds')
hertz = BasicUnit('Hz', 'Hertz')
minutes = BasicUnit('min', 'minutes')
secs.add_conversion_fn(hertz, lambda x: 1./x)
secs.add_conversion_factor(minutes, 1/60.0)
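# Illustrative sketch of the machinery defined above (values shown for
# orientation, not asserted):
#   cm(3) wraps a value as TaggedValue(3, cm);
#   cm(3).convert_to(inch).get_value() evaluates to 3 / 2.54;
#   secs(5).convert_to(minutes).get_value() evaluates to 5 / 60.0;
#   unit_resolver('__add__', [cm, cm]) resolves to cm, while
#   unit_resolver('__add__', [cm, inch]) is NotImplemented, since the
#   addition rule requires identical units.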
# radians formatting
def rad_fn(x, pos=None):
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return '0'
elif n == 1:
return r'$\pi/2$'
elif n == 2:
return r'$\pi$'
elif n % 2 == 0:
return r'$%s\pi$' % (n//2,)
else:
return r'$%s\pi/2$' % (n,)
class BasicUnitConverter(units.ConversionInterface):
@staticmethod
def axisinfo(unit, axis):
'return AxisInfo instance for x and unit'
if unit == radians:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.fullname,
)
elif unit == degrees:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter(r'$%i^\circ$'),
label=unit.fullname,
)
elif unit is not None:
if hasattr(unit, 'fullname'):
return units.AxisInfo(label=unit.fullname)
elif hasattr(unit, 'unit'):
return units.AxisInfo(label=unit.unit.fullname)
return None
@staticmethod
def convert(val, unit, axis):
if units.ConversionInterface.is_numlike(val):
return val
if iterable(val):
return [thisval.convert_to(unit).get_value() for thisval in val]
else:
return val.convert_to(unit).get_value()
@staticmethod
def default_units(x, axis):
'return the default unit for x or None'
if iterable(x):
for thisx in x:
return thisx.unit
return x.unit
def cos(x):
if iterable(x):
return [math.cos(val.convert_to(radians).get_value()) for val in x]
else:
return math.cos(x.convert_to(radians).get_value())
basicConverter = BasicUnitConverter()
units.registry[BasicUnit] = basicConverter
units.registry[TaggedValue] = basicConverter
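# Illustrative usage sketch, assuming this module is importable as
# ``basic_units`` and a matplotlib backend is available:
#     import matplotlib.pyplot as plt
#     from basic_units import cm, inch
#     fig, ax = plt.subplots()
#     ax.plot([cm(1), cm(2), cm(3)], [cm(3), cm(2), cm(1)], xunits=inch)
#     plt.show()  # the x axis is rendered in inches via the converter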
|
|
import argparse
import os
import sys
from io import StringIO
from tabulate import tabulate
from . import __version__, __homepage__
from .csdgen import ZakSpace, Csd
from .parse import parse_pch2
from .patch import Patch
from .resources import get_template_module_path, ProjectData
from .udo import UdoTemplate, UdoTemplateValidation
from .util import get_test_resource
def _all_modules_implemented(patch: Patch):
not_implemented = [x.type_name for x in patch.modules
if not os.path.isfile(get_template_module_path(x.type))]
if len(not_implemented) > 0:
print('The patch file contains some modules that have not been implemented yet:')
print(', '.join(not_implemented))
print('Please, consider contributing these modules, following our tutorial:')
print('https://github.com/gleb812/pch2csd/wiki/How-to-add-new-modules')
return False
return True
def validate_udo(type_id: int, io=sys.stdout, print_action=True):
if print_action:
print("checking module type '{id}' ({id}.txt)".format(id=type_id),
file=io)
pch2_files = [get_test_resource(s) for s in ['test_all_modules_1.pch2',
'test_all_modules_2.pch2']]
data, mod, patch = ProjectData(), None, None
for p in map(lambda x: parse_pch2(data, x), pch2_files):
for m in p.modules:
if m.type == type_id:
mod, patch = m, p
break
if mod is not None:
if print_action:
print('module name: {}'.format(mod.type_name), file=io)
udo = UdoTemplate(mod)
v = UdoTemplateValidation(data, udo)
v.print_errors(io)
return v.is_valid(with_todos=True)
else:
print("error: unknown module type '{}'".format(type_id), file=io)
return False
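# Illustrative sketch: validating a single module template from code
# rather than the CLI (the numeric module type id below is hypothetical):
#     ok = validate_udo(12)  # prints progress and any errors to stdout
#     if not ok:
#         print('template needs work')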
def print_pch2(fn: str):
if not fn.lower().endswith('.pch2'):
print("error: patch file should have extension '.pch2'")
exit(-1)
data = ProjectData()
path = os.path.abspath(os.path.expanduser(fn))
patch = parse_pch2(data, path)
mod_table = [['Name', 'ID', 'Type', 'Parameters', 'Modes', 'Area']]
for m in patch.modules:
p = patch.find_mod_params(m.location, m.id)
mod_table.append([m.type_name,
m.id,
m.type,
str(p.values),
str(m.modes),
m.location.short_str()])
cab_table = [['From', '', 'To', 'Color', 'Type', 'Area']]
for c in patch.cables:
mf_name = patch.find_module(c.module_from, c.loc).type_name
mt_name = patch.find_module(c.module_to, c.loc).type_name
pin1, pin2 = c.type.short_str().split('-')
cab_table.append([
'{}(id={}, {}={})'.format(mf_name, c.module_from, pin1, c.jack_from),
'->',
'{}(id={}, {}={})'.format(mt_name, c.module_to, pin2, c.jack_to),
c.color.short_str(),
c.type.short_str(),
c.loc.short_str()])
print('Patch file: {}\n'.format(os.path.basename(path)))
print('Modules')
print(tabulate(mod_table, headers='firstrow', tablefmt='simple'))
print('\nCables')
print(tabulate(cab_table, headers='firstrow', tablefmt='simple'))
def convert_pch2(fn: str):
if not fn.lower().endswith('.pch2'):
print("error: the patch file should have extension '.pch2'")
exit(-1)
data = ProjectData()
path = os.path.abspath(os.path.expanduser(fn))
p = parse_pch2(data, path)
zak = ZakSpace()
try:
udos = zak.connect_patch(p)
except ValueError as e:
print('error: {}'.format(e))
exit(-1)
csd = Csd(p, zak, udos)
dirname = os.path.dirname(path)
csd_save_path = os.path.join(dirname, os.path.basename(path) + '.csd')
with open(csd_save_path, 'w') as f:
f.write(csd.get_code())
return csd_save_path
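# Illustrative sketch, assuming 'my_patch.pch2' exists in the working
# directory:
#     csd_path = convert_pch2('my_patch.pch2')
#     # writes 'my_patch.pch2.csd' next to the input and returns its path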
def gen_udo_status_doc():
tpl_url = 'https://github.com/gleb812/pch2csd/blob/master/pch2csd/resources/templates/modules/{}.txt'
data = ProjectData()
with open('Module-implementation-status.md', 'w') as md:
md.write('> **NB!** This file is automatically generated.\n\n')
md.write('| Template | Module name | Status |\n')
md.write('|----------|-------------|--------|\n')
for p in [parse_pch2(data, get_test_resource(pch2file))
for pch2file in ['test_all_modules_1.pch2',
'test_all_modules_2.pch2']]:
for m in p.modules:
status = StringIO()
validate_udo(m.type, status, print_action=False)
md.write('| [`{}`]({}) | `{}` | ```{}``` |\n'.format(
'{}.txt'.format(m.type),
tpl_url.format(m.type),
m.type_name,
'```<br>```'.join(status.getvalue().splitlines())))
def main():
arg_parser = argparse.ArgumentParser(
prog='pch2csd',
description='convert Clavia Nord Modular G2 patches to Csound code',
epilog='Version {}, homepage: {}'.format(__version__, __homepage__))
arg_parser.add_argument('arg', metavar='arg', nargs='?', default='patch.pch2',
help='a pch2 file path or a UDO numerical ID')
arg_parser.add_argument('-d', '--debug', action='store_const', const=True,
help='print a stack trace in case of error')
group = arg_parser.add_mutually_exclusive_group()
group.add_argument('-p', '--print', action='store_const', const=True,
help='parse the patch file and print its content')
group.add_argument('-c', '--check-udo', action='store_const', const=True,
help="validate the UDO template file (overrides '-p')")
group.add_argument('-v', '--version', action='version',
version='%(prog)s ' + __version__)
group.add_argument('-e', action='store_const', const=True,
help='show the elephant and exit')
args = arg_parser.parse_args()
if args.check_udo:
try:
type_id = int(args.arg)
validate_udo(type_id)
except ValueError:
print("you should pass the integer as the 'arg' parameter when using '--check-udo'")
elif args.print:
print_pch2(args.arg)
elif args.e:
show_elephant()
else:
if args.arg == 'gen_udo_status_doc':
gen_udo_status_doc()
else:
try:
saved_csd = convert_pch2(args.arg)
print('conversion done, created file: {}'.format(saved_csd))
except Exception as e:
print(e)
if args.debug:
import traceback
_, _, tb = sys.exc_info()
print()
print('-----------')
traceback.print_tb(tb, file=sys.stdout)
def show_elephant():
print('///////////////////osyyo///////////////////////////////////////////////////////')
print('//////////////+oshmNMMMmNmhyso+//////////////////+++++////////////////////+o///')
print('///////////+oshydNMMMMMMMMMMMNh++++++++++ossssyysssyshhys+//////////////+hNmhys')
print('/////////+oydmmNNNNNNNNNNNMMNNdhyyyyyyyhhddy+++::/ooossyyhs+///////////omMMMNNN')
print('///////+oyyhhhdhhhhhhdmdmmddhyshhyysys++ossys+--+syyyyyysoo++/////////+hmmmmmdy')
print('///+++++++++ooooooosossssoo+++syyyssyyss+-..`.ydmmddyo+/+++/++++++++++shhhhhyys')
print('+++ oooyhyyhyyyhhdso+/:sddyo+//++/////++++++++++ooosssss')
print('+++ Clavia Nord Modular G2 sshhhyyyyyys+-+hho/ys+///++/////:+++++++++++++++++++')
print('+++ Patch Converter ooossosyyy+:``.--`.//+/+/://+/o+++++++++++++++++++++')
print('+++ oo+oysysso/:-.``````.-:/+/-/+syso+++++++++++++++++++')
print('++oooooooooooooooooooooooooooosssysoosys+-``` ``-:////://oosooooooooo++++++++++')
print('ooooooooooooosssssooosssssssssshyyso+shdh.` `-/:-:-:--/++ooooooooooooooooyso')
print('ssssssssyyyyyyyyyyyyyyyyyssssooooso+++yhh- .:/--````-::-/oooooooooooosyhhdd')
print('ossosssssssssssssssssssssssss/++++/--/+hs` `.`-...````-..`oooooooosssyssssyyy')
print('ooooooosssssssssssssssyysssss/////-` sNm ` .` ``` /oooosoosyhdhysooyhd')
print('oooooosssssssssssssshdyysssym/:::-`/.:mmo ` :sssssyyyyyysoosyyyyy')
print('osssssssssssssssyyhdy+++shmNs-.```.Ny`` +ssssyyyhhyyyyyssssoo')
print('sssshddhysyssyyyhdds/oyhsdysh-. omm- -. :.+yssssyyyyysyhyyysyh')
print('yhhhdhhhhhyyyyyhhhh/.:ysydmNh. .hmmy `:-` `` `yo-ohyyyyyyyyyyyysssss')
print('syyyyyyyyyyhddhddmmy.`.:/++o/` `yhhdh. ..` ```` ohhyyyyyyyyyyyyyyyyyyss')
print('hysyyyyhhhyhhhhhyyhhy/` `-:. `/yyhhhyo `.` `` +yyyyyyyyyyyyysyssssssss')
print('hyyhhyyhhdhhhhyyyyyyyyyo+///+syyyyyyyhy- ..``:yo` :hhyyyyysyyyssyysssssssss')
print('yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyo .-.``syy- syyyyyyssssssssssssssssss')
print('ssssyyyyyyyyyyyyyyyysssssosssoooooooooos-`--.`-sss. `ssssooooooooooooooooooooo')
print('sssyyysssssssssssssooooooooossssssssssss+-:-.`oysy -yyyyyyyyyyyyyyyyyysyysyyy')
print('yyyyyyyyyyyyyhhhhyhhhhhdhhdddddddhdddddd/.:-``yddy :ddddhhdddddddddhhhhhhdhhh')
print('hhddddddddddddddddddddddmmmmmmmdddmmdddh/.:.../hd+ .ddddddddddddhhhyhhhhhhhhh')
print('hhhhhhhhhhhhdddhdddddddddhhhhhhhhdhhhhhh-.o+/--hy. -osyhhhddddhhhhyyyyyyyyys')
print('dddhhhhhhhhhhhhdddhdhhhhhhhhhhyyyssyo//:`-hyys:` ```.-::/+osyyysyyyyyyyyyhyyhys')
print('hhhyyhhhhhdhddhhhhyyysosoysosooo//:----.`+yysssssossooyyysyhhhhyhhyyyyyyhhyyyyy')
if __name__ == '__main__':
main()
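# Typical invocations (illustrative; the module type ID below is a placeholder):
#
#   pch2csd patch.pch2       # convert the patch to patch.pch2.csd
#   pch2csd -p patch.pch2    # parse the patch and print its modules and cables
#   pch2csd -c 12            # validate the UDO template for module type 12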
|
|
import numpy as np
import cv2
import os
import copy
import time
from skimage import morphology
from scipy import weave
import sys
from igraph import *
import itertools
import math  # used by math.isinf() in Part 3
##########
##PART 1##
##########
cap = cv2.VideoCapture('input.mp4')
imageWidth = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
imageHeight = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
FPS = int(cap.get(cv2.cv.CV_CAP_PROP_FPS))
TotalFrames = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
templateMouse = cv2.imread('cursor_1.png',0)
templateMouse_w, templateMouse_h = templateMouse.shape[::-1]
fourcc = cv2.cv.CV_FOURCC(*'DIVX')
out = cv2.VideoWriter('output.avi',fourcc, FPS, (imageWidth,imageHeight))
thresholdMouse = 0
BlackThreshold = 24
LastValueFrame = 0
IsUserWriting = False
MouseColorValue = 0 # colored pixels in the cursor
ThresholdDrawing = 10 # extra colored pixels needed in the next frame to qualify it as drawing
MaxConnectFrames = 3 # keep the drawing effect for the next 3 frames, even if no drawing is detected
current_connected_frame = MaxConnectFrames + 1
_debug_MissFrames = 50
ObjectsDrawn = 0
StartingFrame = 0 # - base frame, initialized when user starts drawing
frame_before_starting_1 = 0
frame_before_starting_2 = 0
frame_before_starting_3 = 0
frame_before_starting_4 = 0
LastCompleteImage = 0
font = cv2.FONT_HERSHEY_SIMPLEX
#placing rectangles
rect_start_x = []
rect_start_y = []
rect_width = []
rect_height = []
rect_time = []
count_objects_for_saving_images = 0
f_write_objects = open('object_list.txt','w')
f_write_cursor_pos_for_order_points = open('cursor positions.txt','w')
cursor_pos_before_starting = [] # stores up to the last 5 cursor positions
saved_copy_of_cursor_pos_before_starting = [] # a saved copy of cursor_pos_before_starting, used when writing the data out
cursor_pos_for_ordering = []
def getColor(image):
#ignore black color and get the median of red, green and blue
threshold_black = 10
reds = []
greens = []
blues = []
for x in range(0, image.shape[0]):
for y in range(0, image.shape[1]):
if image[x,y][0] > threshold_black:
blues.append(image[x,y][0])
if image[x,y][1] > threshold_black:
greens.append(image[x,y][1])
if image[x,y][2] > threshold_black:
reds.append(image[x,y][2])
#return [np.median(np.array(reds)), np.median(np.array(greens)), np.median(np.array(blues))]
return [np.percentile(np.array(reds), 90), np.percentile(np.array(greens), 90), np.percentile(np.array(blues), 90)]
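# A vectorized sketch of the same statistic (an addition for clarity; it is
# not called by the pipeline): mask out near-black pixels per channel, then
# take the 90th percentile, mirroring the loops above.
def getColorVectorized(image):
    threshold_black = 10
    result = []
    for channel in (2, 1, 0):  # red, green, blue in OpenCV's BGR layout
        values = image[:, :, channel]
        values = values[values > threshold_black]
        result.append(np.percentile(values, 90))
    return result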
def RemoveMouse(gray_input):
#Remove the cursor
# - Use gray image created above method = 'cv2.TM_CCOEFF'
method = 'cv2.TM_CCOEFF'
# Apply template Matching - cursor
res = cv2.matchTemplate(gray_input,templateMouse,cv2.TM_CCOEFF) # - one of the methods
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method == 'cv2.TM_SQDIFF' or method == 'cv2.TM_SQDIFF_NORMED':
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + templateMouse_w, top_left[1] + templateMouse_h)
if max_val > thresholdMouse:
cv2.rectangle(gray_input,top_left, bottom_right, (0,0,0), -1)
def CurrentMouseLocation(gray_input, frame_number):
#Find the cursor
# - Use gray image created above method = 'cv2.TM_CCOEFF'
method = 'cv2.TM_CCOEFF'
# Apply template Matching - cursor
res = cv2.matchTemplate(gray_input,templateMouse,cv2.TM_CCOEFF) # - one of the methods
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method == 'cv2.TM_SQDIFF' or method == 'cv2.TM_SQDIFF_NORMED':
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + templateMouse_w, top_left[1] + templateMouse_h)
if max_val > thresholdMouse:
#cv2.rectangle(gray_input,top_left, bottom_right, (0,0,0), -1)
return (frame_number, int((top_left[0] + bottom_right[0]) / 2) , int((top_left[1] + bottom_right[1]) / 2))
return (frame_number, -1,-1)
#Create Folders if not exists
if not os.path.exists('objects'):
os.makedirs('objects')
if not os.path.exists('atomic objects'):
os.makedirs('atomic objects')
current_frame = 0
first_frame = 0
index_of_starting_frame = 0 # - used to get the starting timestamp
gray = []
current_mouse_position = (-1, -1)
while(cap.isOpened()):
ret, frame = cap.read()
if ret==True:
if current_frame == 0:
first_frame = frame
cv2.imwrite('background.png', first_frame)
else:
frame = cv2.subtract(frame, first_frame)
''' Missing Frames
while (current_frame < _debug_MissFrames):
ret, frame = cap.read()
current_frame += 1
'''
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if current_frame > 0:
current_mouse_position = CurrentMouseLocation(gray, current_frame)
#colored_pixels = countNonBackGroundPixels(gray, BlackThreshold, LastValueFrame);
ret,binaryImage = cv2.threshold(gray,50,255,cv2.THRESH_BINARY)
colored_pixels = cv2.countNonZero(binaryImage)
#print(colored_pixels)
if (colored_pixels > MouseColorValue and colored_pixels - LastValueFrame > ThresholdDrawing):
current_connected_frame = 0
else:
current_connected_frame += 1
if (colored_pixels > MouseColorValue and colored_pixels - LastValueFrame > ThresholdDrawing) or (current_connected_frame < MaxConnectFrames):
#print("Drawing")
# - If user just started drawing: initialize stuff
if IsUserWriting == False:
StartingFrame = frame_before_starting_4 # 4 frames old
index_of_starting_frame = current_frame
#Reset Cursor Positions
cursor_pos_for_ordering = [] # - what about the last few cursor positions as well? if needed? - as in frame_before_starting_4,5
saved_copy_of_cursor_pos_before_starting = list(cursor_pos_before_starting)
# - start saving the new object
IsUserWriting = True
cv2.putText(frame,'Writing',(5,700), font, 1,(200,50,50),1,cv2.CV_AA)
#Add Cursor Positions to List
cursor_pos_for_ordering.append(current_mouse_position)
else:
# - If user just stopped: find out what user just drew
if IsUserWriting == True:
# - Find the difference between current and starting frame
# - Full Final Frame
#cv2.imwrite('object ' + str(ObjectsDrawn) + '.png', gray)
# - Difference Frame
#cv2.imwrite('object ' + str(ObjectsDrawn) + ' diff.png', cv2.subtract(gray, StartingFrame))
#Get the new image - update binary image
bounding_image = cv2.subtract(gray, StartingFrame)
RemoveMouse(bounding_image)
ret,binaryDifference = cv2.threshold(bounding_image,50,255,cv2.THRESH_BINARY)
#Erode to get rid of small white blocks
kernel = np.ones((2,2),np.uint8)
binaryDifference = cv2.erode(binaryDifference,kernel,iterations = 1)
ObjectsDrawn += 1
#LastCompleteImage = frame
contours, hierarchy = cv2.findContours(binaryDifference,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
x1 = 1000 # - big value
y1 = 1000 # - big value
x2 = 0 # - small value
y2 = 0 # - small value
for i in range(0, len(contours)):
x,y,w,h = cv2.boundingRect(contours[i])
#cv2.rectangle(img,(x,y),(x + w,y + h),(255,0,0),1)
if x < x1:
x1 = x
if y < y1:
y1 = y
if x + w > x2:
x2 = x + w
if y + h > y2:
y2 = y + h
#cv2.rectangle(img,(x1,y1),(x2,y2),(0,255,0),1)
if x1 - 4 > 0 and x2 + 4 < imageWidth and y1 - 4 > 0 and y2 + 4 < imageHeight and x2 - x1 > 0 and y2 - y1 > 0:
rect_start_x.append(x1 - 4)
rect_start_y.append(y1 - 4)
rect_width.append(x2 + 4)
rect_height.append(y2 + 4)
_time = round(1.0 * current_frame / FPS, 2) # timestamp in seconds (frames / FPS)
_starting_time = round(1.0 * index_of_starting_frame / FPS, 2) # - timestamp in seconds of the frame where the drawing started
rect_time.append(_time)
ROI = frame[y1 - 4:y2 + 4, x1 - 4:x2 + 4]
#Get Color of Object
[r, g, b] = getColor(ROI)
r = int(r)
g = int(g)
b = int(b)
cv2.imwrite('objects/' + str(count_objects_for_saving_images) + '.png', ROI)
count_objects_for_saving_images += 1
f_write_objects.write(str(x1 - 4) + ' ' + str(y1 - 4) + ' ' + str(_starting_time) + ' ' + str(_time) + ' ' + str(r) + ' ' + str(g) + ' ' + str(b) + '\n')
#Cursor positions before starting of the objects: in order to get the very starting points that are missing otherwise
for i in saved_copy_of_cursor_pos_before_starting:
f_write_cursor_pos_for_order_points.write(str(i[0]) + " " + str(i[1] - (x1 - 4)) + " " + str(i[2] - (y1 - 4)) + ' ')
#print (str(i[0] - (x1 - 4)) + " " + str(i[1] - (y1 - 4)) + ' ')
for i in cursor_pos_for_ordering:
f_write_cursor_pos_for_order_points.write(str(i[0]) + " " + str(i[1] - (x1 - 4)) + " " + str(i[2] - (y1 - 4)) + ' ')
f_write_cursor_pos_for_order_points.write('\n')
#Reset Cursor Positions
cursor_pos_for_ordering = []
# Reset to false
cv2.putText(frame,'Not Writing',(5,700), font, 1,(50,50,200),1,cv2.CV_AA)
IsUserWriting = False
if current_frame > 0:
LastValueFrame = colored_pixels
frame_before_starting_4 = frame_before_starting_3
frame_before_starting_3 = frame_before_starting_2
frame_before_starting_2 = frame_before_starting_1
frame_before_starting_1 = gray
#Storing the last few (5) cursor locations, since we seem to miss that information when we realize that new object has started
if (len(cursor_pos_before_starting) < 5):
cursor_pos_before_starting.append(current_mouse_position)
else:
cursor_pos_before_starting.pop(0)
cursor_pos_before_starting.append(current_mouse_position)
# - Drawing rectangles
for i in range(0, len(rect_start_x)):
_loc = 'x = ' + str(rect_start_x[i]) + ', y = ' + str(rect_start_y[i]) + ', '
_size = 'width = ' + str(rect_width[i] - rect_start_x[i]) + ', height = ' + str(rect_height[i] - rect_start_y[i]) + ', '
_time = 'time: ' + str(rect_time[i])
cv2.rectangle(frame,(rect_start_x[i],rect_start_y[i]),(rect_width[i],rect_height[i]),(255,0,0),2)
cv2.putText(frame,_loc + _size + _time,(800,20 + i *13), font, 0.45,(50,50,200),1,cv2.CV_AA)
final_frame = cv2.add(frame, first_frame)
out.write(final_frame)
#cv2.imshow('frame',gray)
current_frame += 1
#print current_frame
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
f_write_objects.close()
f_write_cursor_pos_for_order_points.close()
cap.release()
out.release()
cv2.destroyAllWindows()
print 'Part 1 Completed!'
##########
##PART 2##
##########
# Get total_objects from last part
total_objects = count_objects_for_saving_images
for object_number in range(0, total_objects):
image = cv2.imread('objects/' + str(object_number) + '.png')
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
binary_threshold = 50
ret,bw_image = cv2.threshold(gray_image,binary_threshold,255,0)
contour_image = copy.deepcopy(bw_image)
contours, hierarchy = cv2.findContours(contour_image,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#cv2.drawContours(image,contours,-1,(0,255,0),3)
for i in range(0, len(contours)):
x,y,w,h = cv2.boundingRect(contours[i])
#cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),1)
#cv2.imshow(str(i), image)
mask = np.zeros(image.shape,np.uint8)
mask[y:y+h,x:x+w] = image[y:y+h,x:x+w]
cv2.imwrite('atomic objects/' + str(object_number) + '_' + str(i) + '.png', mask)
#cv2.imshow(str(i), mask);
#print 'here'
#color_img = cv2.cvtColor(bw_image, cv2.COLOR_GRAY2BGR)
#enlarge = cv2.resize(image, (0,0), fx=4, fy=4)
#cv2.imshow("image", enlarge);
cv2.waitKey(0)
cv2.destroyAllWindows()
print 'Part 2 Completed!'
##########
##PART 3##
##########
def _thinningIteration(im, iter):
I, M = im, np.zeros(im.shape, np.uint8)
expr = """
for (int i = 1; i < NI[0]-1; i++) {
for (int j = 1; j < NI[1]-1; j++) {
int p2 = I2(i-1, j);
int p3 = I2(i-1, j+1);
int p4 = I2(i, j+1);
int p5 = I2(i+1, j+1);
int p6 = I2(i+1, j);
int p7 = I2(i+1, j-1);
int p8 = I2(i, j-1);
int p9 = I2(i-1, j-1);
int A = (p2 == 0 && p3 == 1) + (p3 == 0 && p4 == 1) +
(p4 == 0 && p5 == 1) + (p5 == 0 && p6 == 1) +
(p6 == 0 && p7 == 1) + (p7 == 0 && p8 == 1) +
(p8 == 0 && p9 == 1) + (p9 == 0 && p2 == 1);
int B = p2 + p3 + p4 + p5 + p6 + p7 + p8 + p9;
int m1 = iter == 0 ? (p2 * p4 * p6) : (p2 * p4 * p8);
int m2 = iter == 0 ? (p4 * p6 * p8) : (p2 * p6 * p8);
if (A == 1 && B >= 2 && B <= 6 && m1 == 0 && m2 == 0) {
M2(i,j) = 1;
}
}
}
"""
weave.inline(expr, ["I", "iter", "M"])
return (I & ~M)
def thinning(src):
dst = src.copy() / 255
prev = np.zeros(src.shape[:2], np.uint8)
diff = None
while True:
dst = _thinningIteration(dst, 0)
dst = _thinningIteration(dst, 1)
diff = np.absolute(dst - prev)
prev = dst.copy()
if np.sum(diff) == 0:
break
return dst * 255
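# A weave-free sketch of one thinning sub-iteration (an addition for
# reference: scipy.weave only exists on old Python 2 SciPy stacks, so on a
# newer stack something like this vectorized numpy version could stand in
# for _thinningIteration; it mirrors the neighbour logic of the C code above).
def _thinningIterationNumpy(im, iter):
    I = im.astype(np.uint8)
    p = np.zeros(I.shape + (8,), np.uint8)
    # p2..p9: the 8 neighbours, clockwise, starting from the pixel above
    p[1:-1, 1:-1, 0] = I[:-2, 1:-1]  # p2
    p[1:-1, 1:-1, 1] = I[:-2, 2:]    # p3
    p[1:-1, 1:-1, 2] = I[1:-1, 2:]   # p4
    p[1:-1, 1:-1, 3] = I[2:, 2:]     # p5
    p[1:-1, 1:-1, 4] = I[2:, 1:-1]   # p6
    p[1:-1, 1:-1, 5] = I[2:, :-2]    # p7
    p[1:-1, 1:-1, 6] = I[1:-1, :-2]  # p8
    p[1:-1, 1:-1, 7] = I[:-2, :-2]   # p9
    B = p.sum(axis=2)
    # A: number of 0->1 transitions in the circular sequence p2..p9,p2
    A = ((p == 0) & (np.roll(p, -1, axis=2) == 1)).sum(axis=2)
    if iter == 0:
        m1 = p[..., 0] * p[..., 2] * p[..., 4]  # p2*p4*p6
        m2 = p[..., 2] * p[..., 4] * p[..., 6]  # p4*p6*p8
    else:
        m1 = p[..., 0] * p[..., 2] * p[..., 6]  # p2*p4*p8
        m2 = p[..., 0] * p[..., 4] * p[..., 6]  # p2*p6*p8
    M = ((A == 1) & (B >= 2) & (B <= 6) & (m1 == 0) & (m2 == 0)).astype(np.uint8)
    return I & (1 - M)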
def IsNeighbourText(x, y, binary_threshold, img):
    #return True # - just to show all cursor locations
    #True when the pixel itself or any of its 8-connected neighbours exceeds
    #the binary threshold; dim is the (width, height) of the current object
    #image, set in the loop below before this function is called
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            nx = x + dx
            ny = y + dy
            if 0 <= nx < dim[0] and 0 <= ny < dim[1] and img[ny, nx] > binary_threshold:
                return True
    return False
def FindClosestPoint(node, nodes):
nodes = np.asarray(nodes)
dist_2 = np.sum((nodes - node)**2, axis=1)
return nodes[np.argmin(dist_2)]
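# Tiny illustration (added): FindClosestPoint([0, 0], [[5, 5], [1, 1], [3, 3]])
# returns array([1, 1]), the candidate with the smallest squared distance.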
def ComputeAllEdges(data_pts):
graph_connections = []
for current in range(0, len(data_pts)):
    cx, cy = data_pts[current]
    adjacent = [data_pts.index(_x) for _x in data_pts
                if abs(_x[0] - cx) < 2 and abs(_x[1] - cy) < 2
                and (_x[0] != cx or _x[1] != cy)]
    for _a in adjacent:
        graph_connections.append((current, _a))
        #print "connection: " + str(current) + " : " + str(_a)
#Remove duplicates
list_with_duplicates = sorted([sorted(_x) for _x in graph_connections])
result = list(list_with_duplicates for list_with_duplicates,_ in itertools.groupby(list_with_duplicates))
#print result
return result
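# Hypothetical mini-example (added; never called by the pipeline): three
# horizontally adjacent pixels yield the de-duplicated chain 0-1-2.
def _demo_compute_all_edges():
    assert ComputeAllEdges([(0, 0), (0, 1), (0, 2)]) == [[0, 1], [1, 2]]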
#Read Cursor positions
f = open('cursor positions.txt', 'r')
data = f.read().split('\n')
f.close()
#Read Object List File
f = open('object_list.txt', 'r')
objects = f.read().split('\n')
f.close()
#Write Output File
f_output = open('complete_strokes.txt', 'w')
#I declared it here for debugging purposes
g = Graph()
#object_number = 16
for object_number in range(0, total_objects): # one pass per object saved in Part 1
#if 1 == 1:
valid_cursors = []
valid_cursor_timestamps_global = []
path_points_ordered_global = [] #contains all the data points that are then written to file
#Get cursor positions for this group of images
cursor_pos = []
obj1 = data[object_number].split(' ')
for t,x,y in zip(obj1[0::3], obj1[1::3], obj1[2::3]):
cursor_pos.append((t, x, y))
prefixed = [filename for filename in os.listdir('atomic objects/') if filename.startswith(str(object_number) + '_')]
for connected_image in prefixed:
valid_cursor_timestamps = []
image = cv2.imread('atomic objects/' + connected_image)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
binary_threshold = 50
ret,bw_img = cv2.threshold(gray_image,binary_threshold,255,0)
thinned_img = thinning(bw_img)
color_img = cv2.cvtColor(thinned_img, cv2.COLOR_GRAY2BGR)
data_pts_thinned = np.nonzero(thinned_img)
data_pts_thinned = zip(data_pts_thinned[0], data_pts_thinned[1])
#print 'Total points: ' + str(len(cursor_pos))
#Find Valid Cursor Positions
count_valid_cursor_positions = 0
#Append valid cursors in this local list
valid_cursor_local = []
valid_cursor_to_global = [] #this list saves the unmapped, raw cursor positions so that the order of the objects can be figured out
for i in range(0, len(cursor_pos)):
t = int(cursor_pos[i][0])
x = int(cursor_pos[i][1])
y = int(cursor_pos[i][2])
last_t = -100
last_x = -100
last_y = -100
if i > 0:
last_t = int(cursor_pos[i - 1][0])
last_x = int(cursor_pos[i - 1][1])
last_y = int(cursor_pos[i - 1][2])
dim = bw_img.shape[1::-1]
#if a point is within the boundary box of the object
if x >= 0 and y >= 0 and x < dim[0] and y < dim[1]:
if IsNeighbourText(x, y, binary_threshold, bw_img) and len(data_pts_thinned) > 0 and (abs(x - last_x) + abs(y - last_y)) > 3:
#Map y, x to the closest point
[mapped_x, mapped_y] = FindClosestPoint([y, x], data_pts_thinned)
#cv2.circle(color_img,(x, y), 1, (0,0,255))
color_img[mapped_x, mapped_y] = [255,0,0]
count_valid_cursor_positions += 1
#valid_cursor_local.append(cursor_pos[i]) # we also need to store the 'key' thinned points
valid_cursor_local.append((mapped_x, mapped_y))
valid_cursor_to_global.append((x, y)) #send values to global calculation to figure out the order of objects
valid_cursor_timestamps.append(t)
#print valid_cursor_local
#print count_valid_cursor_positions
#Get the thinning of the image
#Map valid neighbour points as skeleton points: snap to closest
#Show the resulting image
#Add Connected Object only if it has at least one valid cursor position
if len(valid_cursor_to_global) > 0:
valid_cursors.append(valid_cursor_to_global)
if len(valid_cursor_timestamps) > 0:
valid_cursor_timestamps_global.append(valid_cursor_timestamps)
#print 'global: ' + str(valid_cursor_to_global)
#Here we have all the cursor positions for this image.
#Use these to find paths between the data points.
#Data points are in data_pts_thinned; we can use this data structure to map data points to an index.
g = Graph()
g.add_vertices(len(data_pts_thinned))
#Create an adjacency matrix of data points - based on 8-connectivity
g.add_edges(ComputeAllEdges(data_pts_thinned))
g.vs["point"] = data_pts_thinned
#Calculate Paths between cursor positions
path_points_ordered = [] #contains the temporal information, this is used for in-between points as well
path_points = []
for index_cursor_point in range(1, len(valid_cursor_local)):
start_vertex = g.vs.find(point = valid_cursor_local[index_cursor_point - 1])
end_vertex = g.vs.find(point = valid_cursor_local[index_cursor_point])
shortest_path = g.get_all_shortest_paths(start_vertex, end_vertex)
if len(shortest_path) == 0:
print "In image: " + connected_image + ", No path exists between: " + str(data_pts_thinned[index_cursor_point - 1]) + str(" and ") + str(data_pts_thinned[index_cursor_point])
path_points_ordered.append([]) #If no connection, still keep path_points_ordered same in size, need it at the end
else:
#Path exists
_path_between_strokes = []
for _path_points in shortest_path[0]: #shortest_path contains a list of shortest paths, so there can be more than one;
#taking the first one makes no difference
#color_img[g.vs[_path_points]['point']] = [0,0,0]
path_points.append(g.vs[_path_points]['point']) #Save path points so that we can delete them later on
_path_between_strokes.append(g.vs[_path_points]['point'])
#print 'start: ' + str(start_vertex['point'])
#print 'end: ' + str(end_vertex['point'])
#print 'path: ' + str(path_points)
path_points_ordered.append(_path_between_strokes)
#1st pass:
#Remove all points used in any path - except data points
path_points = list(set(path_points)) #Remove duplicate entries first
for i in path_points:
if i not in valid_cursor_local:
g.delete_vertices(g.vs.find(point = i))
#2nd pass:
#Remove isolated points
isolated_points = []
for i in range(0, len(g.vs)):
if (len(g.incident(g.vs[i])) == 0):
isolated_points.append(g.vs[i]['point'])
for i in isolated_points:
g.delete_vertices(g.vs.find(point = i))
#color_img[i] = [0,0,0]
#3rd pass:
#Originating from the cursor positions, find paths to all remaining reachable points (a full DFS would be overkill)
#Take care of the timing of these auxiliary points - push the other boundaries to make room for them
#Get List of cursor points that have something attached to them
for _cursor_no in range(0, len(valid_cursor_local)):
if valid_cursor_local[_cursor_no] in g.vs['point']:
#print 'index: ' + str(_cursor_no) + ' ' + str(valid_cursor_local[_cursor_no])
paths = g.shortest_paths_dijkstra(source = g.vs.find(point = valid_cursor_local[_cursor_no]))
if len(paths) > 0: #result of shortest paths is a list of list with one outermost element
paths = paths[0]
missed_points = []
for pt_in_path in range(0, len(paths)):
if paths[pt_in_path] > 0 and not math.isinf(paths[pt_in_path]):
#print g.vs[pt_in_path]['point']
missed_points.append([paths[pt_in_path], g.vs[pt_in_path]['point']])
#Add missed points to the path_points_ordered list
#If first cursor position, put before cursor position in first list
#If last cursor position, put after cursor position in last list
#If btw first and last, put before cursor position in that list
missed_points = sorted(missed_points)
#print 'missed points: ' + str(missed_points)
#print paths
if _cursor_no == len(valid_cursor_local) - 1 and len(valid_cursor_local) > 1: #Last and has more than one valid cursor
for pt in range(0, len(missed_points)):
path_points_ordered[_cursor_no - 1].append(missed_points[pt][1]) #1 is the actual data
else: #First or any other except Last
for pt in range(0, len(missed_points)):
if len(path_points_ordered) > 0:
path_points_ordered[_cursor_no].insert(0,missed_points[len(missed_points) - pt - 1][1]) #1 is the actual data, #reversed order of points
else:
path_points_ordered.append([]) # no points are there in path_points_ordered at all. So, add the first empty list
#Submit local ordered points to global variable
path_points_ordered_global.append(path_points_ordered)
#color_img.fill(0) #remove everything drawn before
#for i in range(0, len(g.vs)):
# color_img[g.vs[i]['point']] = [0,0,255]
for i in range(0, len(path_points_ordered)):
for j in range(0, len(path_points_ordered[i])):
color_img[path_points_ordered[i][j]] = [0,255,255]
#4th pass:
#Start from 'pseudo' corner points and iterate until nothing is left
#Append everything at the end
#enlarge = cv2.resize(color_img, (0,0), fx=5, fy=5)
#cv2.imshow(connected_image, enlarge);
#cv2.imwrite('diet/' + connected_image, color_img)
#Find the order of the connected blocks among each other
order = [0] * len(valid_cursors)
counter = 0
#Convert cursor_pos from string to int
for i in range(0, len(cursor_pos)):
cursor_pos[i] = map(int, cursor_pos[i])
for i in cursor_pos:
for j in range(0, len(valid_cursors)):
if list(valid_cursors[j][0]) == i[1:]: # compare (x, y) only; i also carries the timestamp
order[counter] = j
counter += 1
if counter == len(valid_cursors):
break
if counter == len(valid_cursors):
break
'''#Write to output File
f_output.write(objects[object_number] + '\n') # Object Information
#f_output.write(str(len(path_points_ordered_global)) + '\n') # Number of Objects
output = ""
for obj_number in range(0, len(path_points_ordered_global)):
#output += str(len(path_points_ordered_global[obj_number])) + '\n' # Number of Strokes in this object
sum = 0
for i in path_points_ordered_global:
sum += len(i)
output += str(sum) + '\n'
for j in range(0, len(path_points_ordered_global[obj_number])):
if len(valid_cursor_timestamps_global) > obj_number + 1:
if len(valid_cursor_timestamps_global[obj_number]) > 1:
output += str(valid_cursor_timestamps_global[obj_number][j + 1] - valid_cursor_timestamps_global[obj_number][j]) + ' ';
else:
output += "1 "; #default: 1 frame time
for k in range(0, len(path_points_ordered_global[obj_number][j])):
output += (str(path_points_ordered_global[obj_number][j][k][0]) + " " + str(path_points_ordered_global[obj_number][j][k][1]) + " ")
output += '\n'
f_output.write(output)'''
# --- QUICK FIX --- Change later when done experimenting with Fabric.js --- QUICK FIX --- #
#Write to output File
f_output.write(objects[object_number] + '\n') # Object Information
#f_output.write(str(len(path_points_ordered_global)) + '\n') # Number of Objects
sum = 0
for i in path_points_ordered_global:
sum += len(i)
f_output.write(str(sum) + '\n')
output = ""
for obj_number in range(0, len(path_points_ordered_global)):
#output += str(len(path_points_ordered_global[obj_number])) + '\n' # Number of Strokes in this object
for j in range(0, len(path_points_ordered_global[obj_number])):
'''if len(valid_cursor_timestamps_global) > obj_number + 1:
if len(valid_cursor_timestamps_global[obj_number]) > 1:
output += str(valid_cursor_timestamps_global[obj_number][j + 1] - valid_cursor_timestamps_global[obj_number][j]) + ' ';
else:
output += "1 "; #default: 1 frame time'''
for k in range(0, len(path_points_ordered_global[obj_number][j])):
output += (str(path_points_ordered_global[obj_number][j][k][1]) + " " + str(path_points_ordered_global[obj_number][j][k][0]) + " ")
output += '\n'
f_output.write(output)
f_output.close()
cv2.waitKey(0)
cv2.destroyAllWindows()
print 'Done!'
|
|
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
from __future__ import absolute_import, division, print_function, with_statement
# Author: Jacob Kristhammar, 2010
import base64
import collections
import hashlib
import os
import struct
import tornado.escape
import tornado.web
import zlib
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str, to_unicode
from tornado import httpclient, httputil
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.log import gen_log, app_log
from tornado import simple_httpclient
from tornado.tcpclient import TCPClient
from tornado.util import bytes_type, _websocket_mask
try:
    from urllib.parse import urlparse  # py3
except ImportError:
    from urlparse import urlparse  # py2
try:
xrange # py2
except NameError:
xrange = range # py3
class WebSocketError(Exception):
pass
class WebSocketClosedError(WebSocketError):
"""Raised by operations on a closed connection.
.. versionadded:: 3.2
"""
pass
class WebSocketHandler(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
Here is an example WebSocket handler that echoes all received messages
back to the client::
class EchoWebSocket(websocket.WebSocketHandler):
def open(self):
print "WebSocket opened"
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print "WebSocket closed"
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
`close()`. Likewise, your request handler class should implement the
`open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from javascript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
"""
def __init__(self, application, request, **kwargs):
tornado.web.RequestHandler.__init__(self, application, request,
**kwargs)
self.ws_connection = None
self.close_code = None
self.close_reason = None
self.stream = None
@tornado.web.asynchronous
def get(self, *args, **kwargs):
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != 'websocket':
self.set_status(400)
self.finish("Can \"Upgrade\" only to \"WebSocket\".")
return
# Connection header should be upgrade. Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(lambda s: s.strip().lower(), headers.get("Connection", "").split(","))
if 'upgrade' not in connection:
self.set_status(400)
self.finish("\"Connection\" must be \"Upgrade\".")
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.set_status(403)
self.finish("Cross origin websockets not allowed")
return
self.stream = self.request.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
if self.request.headers.get("Sec-WebSocket-Version") in ("7", "8", "13"):
self.ws_connection = WebSocketProtocol13(
self, compression_options=self.get_compression_options())
self.ws_connection.accept_connection()
else:
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 426 Upgrade Required\r\n"
"Sec-WebSocket-Version: 8\r\n\r\n"))
self.stream.close()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
"""
if self.ws_connection is None:
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
self.ws_connection.write_message(message, binary=binary)
def select_subprotocol(self, subprotocols):
"""Invoked when a new WebSocket requests specific subprotocols.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol. Failure to select a
subprotocol does not automatically abort the connection,
although clients may close the connection if none of their
proposed subprotocols was selected.
"""
return None
def get_compression_options(self):
"""Override to return compression options for the connection.
If this method returns None (the default), compression will
be disabled. If it returns a dict (even an empty one), it
will be enabled. The contents of the dict may be used to
control the memory and CPU usage of the compression,
but no such options are currently implemented.
.. versionadded:: 4.1
"""
return None
def open(self):
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
"""
pass
def on_message(self, message):
"""Handle incoming messages on the WebSocket
This method must be overridden.
"""
raise NotImplementedError
def ping(self, data):
"""Send ping frame to the remote end."""
if self.ws_connection is None:
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
def on_pong(self, data):
"""Invoked when the response to a ping frame is received."""
pass
def on_close(self):
"""Invoked when the WebSocket is closed.
If the connection was closed cleanly and a status code or reason
phrase was supplied, these values will be available as the attributes
``self.close_code`` and ``self.close_reason``.
.. versionchanged:: 4.0
Added ``close_code`` and ``close_reason`` attributes.
"""
pass
def close(self, code=None, reason=None):
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
``code`` may be a numeric status code, taken from the values
defined in `RFC 6455 section 7.4.1
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
``reason`` may be a textual message about why the connection is
closing. These values are made available to the client, but are
not otherwise interpreted by the websocket protocol.
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.ws_connection:
self.ws_connection.close(code, reason)
self.ws_connection = None
def check_origin(self, origin):
"""Override to enable support for allowing alternate origins.
The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request. This
method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that
implement WebSockets support this header, and non-browser
clients do not have the same cross-site security concerns).
Should return True to accept the request or False to reject it.
By default, rejects all requests with an origin on a host other
than this one.
This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.
To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return true::
def check_origin(self, origin):
return True
To allow connections from any subdomain of your site, you might
do something like::
def check_origin(self, origin):
parsed_origin = urllib.parse.urlparse(origin)
return parsed_origin.netloc.endswith(".mydomain.com")
.. versionadded:: 4.0
"""
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = self.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
def set_nodelay(self, value):
"""Set the no-delay flag for this stream.
By default, small messages may be delayed and/or combined to minimize
the number of packets sent. This can sometimes cause 200-500ms delays
due to the interaction between Nagle's algorithm and TCP delayed
ACKs. To reduce this delay (at the expense of possibly increasing
bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
connection is established.
See `.BaseIOStream.set_nodelay` for additional details.
.. versionadded:: 3.1
"""
self.stream.set_nodelay(value)
def on_connection_close(self):
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
self.on_close()
def send_error(self, *args, **kwargs):
if self.stream is None:
super(WebSocketHandler, self).send_error(*args, **kwargs)
else:
# If we get an uncaught exception during the handshake,
# we have no choice but to abruptly close the connection.
# TODO: for uncaught exceptions after the handshake,
# we can close the connection more gracefully.
self.stream.close()
def _wrap_method(method):
def _disallow_for_websocket(self, *args, **kwargs):
if self.stream is None:
method(self, *args, **kwargs)
else:
raise RuntimeError("Method not supported for Web Sockets")
return _disallow_for_websocket
for method in ["write", "redirect", "set_header", "set_cookie",
"set_status", "flush", "finish"]:
setattr(WebSocketHandler, method,
_wrap_method(getattr(WebSocketHandler, method)))
class WebSocketProtocol(object):
"""Base class for WebSocket protocol versions.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.stream = handler.stream
self.client_terminated = False
self.server_terminated = False
def _run_callback(self, callback, *args, **kwargs):
"""Runs the given callback with exception handling.
On error, aborts the websocket connection and returns False.
"""
try:
callback(*args, **kwargs)
except Exception:
app_log.error("Uncaught exception in %s",
self.request.path, exc_info=True)
self._abort()
def on_connection_close(self):
self._abort()
def _abort(self):
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
class _PerMessageDeflateCompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._compressor = self._create_compressor()
else:
self._compressor = None
def _create_compressor(self):
return zlib.compressobj(-1, zlib.DEFLATED, -self._max_wbits)
def compress(self, data):
compressor = self._compressor or self._create_compressor()
data = (compressor.compress(data) +
compressor.flush(zlib.Z_SYNC_FLUSH))
assert data.endswith(b'\x00\x00\xff\xff')
return data[:-4]
class _PerMessageDeflateDecompressor(object):
def __init__(self, persistent, max_wbits):
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError("Invalid max_wbits value %r; allowed range 8-%d",
max_wbits, zlib.MAX_WBITS)
self._max_wbits = max_wbits
if persistent:
self._decompressor = self._create_decompressor()
else:
self._decompressor = None
def _create_decompressor(self):
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data):
decompressor = self._decompressor or self._create_decompressor()
return decompressor.decompress(data + b'\x00\x00\xff\xff')
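# A minimal round-trip sketch of the two permessage-deflate helpers above
# (an illustrative addition; the parameter values are assumptions, not the
# result of a real extension negotiation):
def _demo_permessage_deflate_roundtrip():
    compressor = _PerMessageDeflateCompressor(persistent=True, max_wbits=None)
    decompressor = _PerMessageDeflateDecompressor(persistent=True,
                                                  max_wbits=None)
    payload = b"hello websocket" * 10
    # compress() strips the trailing 00 00 ff ff sync-flush marker and
    # decompress() re-appends it, mirroring the RFC 7692 framing rules.
    assert decompressor.decompress(compressor.compress(payload)) == payload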
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0f
def __init__(self, handler, mask_outgoing=False,
compression_options=None):
WebSocketProtocol.__init__(self, handler)
self.mask_outgoing = mask_outgoing
self._final_frame = False
self._frame_opcode = None
self._masked_frame = None
self._frame_mask = None
self._frame_length = None
self._fragmented_message_buffer = None
self._fragmented_message_opcode = None
self._waiting = None
self._compression_options = compression_options
self._decompressor = None
self._compressor = None
self._frame_compressed = None
# The total uncompressed size of all messages received or sent.
# Unicode messages are encoded to utf8.
# Only for testing; subject to change.
self._message_bytes_in = 0
self._message_bytes_out = 0
# The total size of all packets received or sent. Includes
# the effect of compression, frame overhead, and control frames.
self._wire_bytes_in = 0
self._wire_bytes_out = 0
def accept_connection(self):
try:
self._handle_websocket_headers()
self._accept_connection()
except ValueError:
gen_log.debug("Malformed WebSocket request received", exc_info=True)
self._abort()
return
def _handle_websocket_headers(self):
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: self.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key):
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
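    # Illustrative known-answer check (an addition, not part of the original
    # class): the sample handshake key/accept pair below is taken from
    # RFC 6455, section 1.3.
    @staticmethod
    def _demo_compute_accept_value():
        assert (WebSocketProtocol13.compute_accept_value(
            "dGhlIHNhbXBsZSBub25jZQ==") == "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=")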
def _challenge_response(self):
return WebSocketProtocol13.compute_accept_value(
self.request.headers.get("Sec-Websocket-Key"))
def _accept_connection(self):
subprotocol_header = ''
subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
subprotocols = [s.strip() for s in subprotocols.split(',')]
if subprotocols:
selected = self.handler.select_subprotocol(subprotocols)
if selected:
assert selected in subprotocols
subprotocol_header = "Sec-WebSocket-Protocol: %s\r\n" % selected
extension_header = ''
extensions = self._parse_extensions_header(self.request.headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
# TODO: negotiate parameters if compression_options
# specifies limits.
self._create_compressors('server', ext[1])
if ('client_max_window_bits' in ext[1] and
ext[1]['client_max_window_bits'] is None):
# Don't echo an offered client_max_window_bits
# parameter with no value.
del ext[1]['client_max_window_bits']
extension_header = ('Sec-WebSocket-Extensions: %s\r\n' %
httputil._encode_header(
'permessage-deflate', ext[1]))
break
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: websocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: %s\r\n"
"%s%s"
"\r\n" % (self._challenge_response(),
subprotocol_header, extension_header)))
self._run_callback(self.handler.open, *self.handler.open_args,
**self.handler.open_kwargs)
self._receive_frame()
def _parse_extensions_header(self, headers):
extensions = headers.get("Sec-WebSocket-Extensions", '')
if extensions:
return [httputil._parse_header(e.strip())
for e in extensions.split(',')]
return []
def _process_server_headers(self, key, headers):
"""Process the headers sent by the server to this client connection.
'key' is the websocket handshake challenge/response key.
"""
assert headers['Upgrade'].lower() == 'websocket'
assert headers['Connection'].lower() == 'upgrade'
accept = self.compute_accept_value(key)
assert headers['Sec-Websocket-Accept'] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if (ext[0] == 'permessage-deflate' and
self._compression_options is not None):
self._create_compressors('client', ext[1])
else:
raise ValueError("unsupported extension %r", ext)
def _get_compressor_options(self, side, agreed_parameters):
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + '_no_context_takeover') not in agreed_parameters)
wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
if wbits_header is None:
options['max_wbits'] = zlib.MAX_WBITS
else:
options['max_wbits'] = int(wbits_header)
return options
def _create_compressors(self, side, agreed_parameters):
# TODO: handle invalid parameters gracefully
allowed_keys = set(['server_no_context_takeover',
'client_no_context_takeover',
'server_max_window_bits',
'client_max_window_bits'])
for key in agreed_parameters:
if key not in allowed_keys:
raise ValueError("unsupported compression parameter %r" % key)
other_side = 'client' if (side == 'server') else 'server'
self._compressor = _PerMessageDeflateCompressor(
**self._get_compressor_options(side, agreed_parameters))
self._decompressor = _PerMessageDeflateDecompressor(
**self._get_compressor_options(other_side, agreed_parameters))
def _write_frame(self, fin, opcode, data, flags=0):
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
l = len(data)
if self.mask_outgoing:
mask_bit = 0x80
else:
mask_bit = 0
if l < 126:
frame += struct.pack("B", l | mask_bit)
elif l <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, l)
else:
frame += struct.pack("!BQ", 127 | mask_bit, l)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
self.stream.write(frame)
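    # Worked example (added for illustration): a 200-byte unmasked text frame
    # gets the 4-byte header
    #   struct.pack("B", 0x80 | 0x1) + struct.pack("!BH", 126 | 0, 200)
    # i.e. b'\x81\x7e\x00\xc8' -- FIN plus the text opcode, the 126 escape
    # byte announcing a 16-bit length, then the payload length in network
    # byte order.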
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado.escape.utf8(message)
assert isinstance(message, bytes_type)
self._message_bytes_out += len(message)
flags = 0
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
try:
self._write_frame(True, opcode, message, flags=flags)
except StreamClosedError:
self._abort()
def write_ping(self, data):
"""Send ping frame."""
assert isinstance(data, bytes_type)
self._write_frame(True, 0x9, data)
def _receive_frame(self):
try:
self.stream.read_bytes(2, self._on_frame_start)
except StreamClosedError:
self._abort()
def _on_frame_start(self, data):
self._wire_bytes_in += len(data)
header, payloadlen = struct.unpack("BB", data)
self._final_frame = header & self.FIN
reserved_bits = header & self.RSV_MASK
self._frame_opcode = header & self.OPCODE_MASK
self._frame_opcode_is_control = self._frame_opcode & 0x8
if self._decompressor is not None:
self._frame_compressed = bool(reserved_bits & self.RSV1)
reserved_bits &= ~self.RSV1
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
self._masked_frame = bool(payloadlen & 0x80)
payloadlen = payloadlen & 0x7f
if self._frame_opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
try:
if payloadlen < 126:
self._frame_length = payloadlen
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
elif payloadlen == 126:
self.stream.read_bytes(2, self._on_frame_length_16)
elif payloadlen == 127:
self.stream.read_bytes(8, self._on_frame_length_64)
except StreamClosedError:
self._abort()
def _on_frame_length_16(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!H", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_frame_length_64(self, data):
self._wire_bytes_in += len(data)
self._frame_length = struct.unpack("!Q", data)[0]
try:
if self._masked_frame:
self.stream.read_bytes(4, self._on_masking_key)
else:
self.stream.read_bytes(self._frame_length, self._on_frame_data)
except StreamClosedError:
self._abort()
def _on_masking_key(self, data):
self._wire_bytes_in += len(data)
self._frame_mask = data
try:
self.stream.read_bytes(self._frame_length, self._on_masked_frame_data)
except StreamClosedError:
self._abort()
def _on_masked_frame_data(self, data):
# Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
self._on_frame_data(_websocket_mask(self._frame_mask, data))
def _on_frame_data(self, data):
self._wire_bytes_in += len(data)
if self._frame_opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not self._final_frame:
# control frames must not be fragmented
self._abort()
return
opcode = self._frame_opcode
elif self._frame_opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += data
if self._final_frame:
opcode = self._fragmented_message_opcode
data = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if self._final_frame:
opcode = self._frame_opcode
else:
self._fragmented_message_opcode = self._frame_opcode
self._fragmented_message_buffer = data
if self._final_frame:
self._handle_message(opcode, data)
if not self.client_terminated:
self._receive_frame()
def _handle_message(self, opcode, data):
if self.client_terminated:
return
if self._frame_compressed:
data = self._decompressor.decompress(data)
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return
self._run_callback(self.handler.on_message, decoded)
elif opcode == 0x2:
# Binary data
self._message_bytes_in += len(data)
self._run_callback(self.handler.on_message, data)
elif opcode == 0x8:
# Close
self.client_terminated = True
if len(data) >= 2:
self.handler.close_code = struct.unpack('>H', data[:2])[0]
if len(data) > 2:
self.handler.close_reason = to_unicode(data[2:])
self.close()
elif opcode == 0x9:
# Ping
self._write_frame(True, 0xA, data)
elif opcode == 0xA:
# Pong
self._run_callback(self.handler.on_pong, data)
else:
self._abort()
def close(self, code=None, reason=None):
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
if code is None and reason is not None:
code = 1000 # "normal closure" status code
if code is None:
close_data = b''
else:
close_data = struct.pack('>H', code)
if reason is not None:
close_data += utf8(reason)
self._write_frame(True, 0x8, close_data)
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
self.stream.io_loop.time() + 5, self._abort)
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
"""WebSocket client connection.
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
def __init__(self, io_loop, request, compression_options=None):
self.compression_options = compression_options
self.connect_future = TracebackFuture()
self.read_future = None
self.read_queue = collections.deque()
self.key = base64.b64encode(os.urandom(16))
scheme, sep, rest = request.url.partition(':')
scheme = {'ws': 'http', 'wss': 'https'}[scheme]
request.url = scheme + sep + rest
request.headers.update({
'Upgrade': 'websocket',
'Connection': 'Upgrade',
'Sec-WebSocket-Key': self.key,
'Sec-WebSocket-Version': '13',
})
if self.compression_options is not None:
# Always offer to let the server set our max_wbits (and even though
# we don't offer it, we will accept a client_no_context_takeover
# from the server).
# TODO: set server parameters for deflate extension
# if requested in self.compression_options.
request.headers['Sec-WebSocket-Extensions'] = (
'permessage-deflate; client_max_window_bits')
self.tcp_client = TCPClient(io_loop=io_loop)
super(WebSocketClientConnection, self).__init__(
io_loop, None, request, lambda: None, self._on_http_response,
104857600, self.tcp_client, 65536)
def close(self, code=None, reason=None):
"""Closes the websocket connection.
``code`` and ``reason`` are documented under
`WebSocketHandler.close`.
.. versionadded:: 3.2
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.protocol is not None:
self.protocol.close(code, reason)
self.protocol = None
def on_connection_close(self):
if not self.connect_future.done():
self.connect_future.set_exception(StreamClosedError())
self.on_message(None)
self.tcp_client.close()
super(WebSocketClientConnection, self).on_connection_close()
def _on_http_response(self, response):
if not self.connect_future.done():
if response.error:
self.connect_future.set_exception(response.error)
else:
self.connect_future.set_exception(WebSocketError(
"Non-websocket response"))
def headers_received(self, start_line, headers):
if start_line.code != 101:
return super(WebSocketClientConnection, self).headers_received(
start_line, headers)
self.headers = headers
self.protocol = WebSocketProtocol13(
self, mask_outgoing=True,
compression_options=self.compression_options)
self.protocol._process_server_headers(self.key, self.headers)
self.protocol._receive_frame()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
self.stream = self.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
# Once we've taken over the connection, clear the final callback
# we set on the http request. This deactivates the error handling
# in simple_httpclient that would otherwise interfere with our
# ability to see exceptions.
self.final_callback = None
self.connect_future.set_result(self)
def write_message(self, message, binary=False):
"""Sends a message to the WebSocket server."""
self.protocol.write_message(message, binary)
def read_message(self, callback=None):
"""Reads a message from the WebSocket server.
Returns a future whose result is the message, or None
if the connection is closed. If a callback argument
is given it will be called with the future when it is
ready.
"""
assert self.read_future is None
future = TracebackFuture()
if self.read_queue:
future.set_result(self.read_queue.popleft())
else:
self.read_future = future
if callback is not None:
self.io_loop.add_future(future, callback)
return future
def on_message(self, message):
if self.read_future is not None:
self.read_future.set_result(message)
self.read_future = None
else:
self.read_queue.append(message)
def on_pong(self, data):
pass
def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None,
compression_options=None):
"""Client-side websocket support.
Takes a url and returns a Future whose result is a
`WebSocketClientConnection`.
``compression_options`` is interpreted in the same way as the
return value of `.WebSocketHandler.get_compression_options`.
.. versionchanged:: 3.2
Also accepts ``HTTPRequest`` objects in place of urls.
.. versionchanged:: 4.1
Added ``compression_options``.
"""
if io_loop is None:
io_loop = IOLoop.current()
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
# Copy and convert the headers dict/object (see comments in
# AsyncHTTPClient.fetch)
request.headers = httputil.HTTPHeaders(request.headers)
else:
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = httpclient._RequestProxy(
request, httpclient.HTTPRequest._DEFAULTS)
conn = WebSocketClientConnection(io_loop, request, compression_options)
if callback is not None:
io_loop.add_future(conn.connect_future, callback)
return conn.connect_future
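# A minimal usage sketch (added; not part of the original module). The
# ws://localhost:8888/echo endpoint is assumed to exist for illustration only.
if __name__ == '__main__':
    from tornado import gen
    from tornado.ioloop import IOLoop

    @gen.coroutine
    def _demo():
        conn = yield websocket_connect("ws://localhost:8888/echo")
        conn.write_message("hello")
        reply = yield conn.read_message()  # resolves to None if closed
        print(reply)
        conn.close()

    IOLoop.current().run_sync(_demo)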
|
|
"""
Basic unit commitment formulated as a mixed-integer linear/quadratic programming problem
@author: Zhao Tianyang
@e-mail: zhaoty@ntu.edu.sg
@date:20 Mar 2018
Note: The mathematical model is taken from the following references.
[1] Tight and Compact MILP Formulation of Start-Up and Shut-Down Ramping in Unit Commitment
Due to limitations of the ramp constraints in [1], the following paper has been selected as the main reference.
[2] Tight mixed integer linear programming formulations for the unit commitment problem
Further ramp constraints can be found in
[3] A State Transition MIP Formulation for the Unit Commitment Problem
Important note:
1) If you are familiar with Matlab, you are strongly recommended to learn the differences between Matlab and numpy, which you can find at the following link.
https://docs.scipy.org/doc/numpy-dev/user/numpy-for-matlab-users.html
"""
from numpy import zeros, shape, ones, diag, concatenate, r_, arange, divide
import matplotlib.pyplot as plt
from solvers.mixed_integer_quadratic_programming import mixed_integer_quadratic_programming as miqp
import scipy.linalg as linalg
from scipy.sparse import csr_matrix as sparse
from pypower import loadcase, ext2int
def problem_formulation(case):
"""
:param case: The test case for unit commitment problem
:return:
"""
from unit_commitment.data_format.data_format import IG, PG
from unit_commitment.test_cases.case118 import F_BUS, T_BUS, BR_X, RATE_A
from unit_commitment.test_cases.case118 import GEN_BUS, COST_C, COST_B, COST_A, PG_MAX, PG_MIN, I0, MIN_DOWN, \
MIN_UP, RU, RD, COLD_START
from unit_commitment.test_cases.case118 import BUS_ID, PD
baseMVA, bus, gen, branch, profile = case["baseMVA"], case["bus"], case["gen"], case["branch"], case["Load_profile"]
# Modify the bus, gen and branch matrix
bus[:, BUS_ID] = bus[:, BUS_ID] - 1
gen[:, GEN_BUS] = gen[:, GEN_BUS] - 1
branch[:, F_BUS] = branch[:, F_BUS] - 1
branch[:, T_BUS] = branch[:, T_BUS] - 1
    ng = shape(case['gen'])[0]  # number of schedulable generating units
    nl = shape(case['branch'])[0]  ## number of branches
    nb = shape(case['bus'])[0]  ## number of buses
u0 = [0] * ng # The initial generation status
for i in range(ng):
u0[i] = int(gen[i, I0] > 0)
# Formulate a mixed integer quadratic programming problem
# 1) Announce the variables
# [vt,wt,ut,Pt]:start-up,shut-down,status,generation level
# 1.1) boundary information
T = case["Load_profile"].shape[0]
lb = []
for i in range(ng):
lb += [0] * T
lb += [0] * T
lb += [0] * T
lb += [0] * T
ub = []
for i in range(ng):
ub += [1] * T
ub += [1] * T
ub += [1] * T
ub += [gen[i, PG_MAX]] * T
nx = len(lb)
NX = 4 * T # The number of decision variables for each unit
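    # Layout note (added for clarity): the variables of unit i occupy the
    # slice [i * NX, (i + 1) * NX), stacked as v(0..T-1), w(0..T-1),
    # u(0..T-1), P(0..T-1). The flat index of block b (0=v, 1=w, 2=u, 3=P) at
    # period t is i * NX + b * T + t; e.g. with T = 24, P of unit 2 at hour 5
    # sits at 2 * 96 + 3 * 24 + 5 = 269.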
# 1.2) variable information
vtypes = []
for i in range(ng):
vtypes += ["C"] * T
vtypes += ["C"] * T
vtypes += ["B"] * T
vtypes += ["C"] * T
# 1.3) objective information
c = []
q = []
for i in range(ng):
c += [gen[i, COLD_START]] * T
c += [0] * T
c += [gen[i, COST_C]] * T
c += [gen[i, COST_B]] * T
q += [0] * T
q += [0] * T
q += [0] * T
q += [gen[i, COST_A]] * T
Q = diag(q)
# 2) Constraint set
# 2.1) Power balance equation
Aeq = zeros((T, nx))
for i in range(T):
for j in range(ng):
Aeq[i, j * NX + 3 * T + i] = 1
beq = [0] * T
for i in range(T):
beq[i] = case["Load_profile"][i]
# 2.2) Status transformation of each unit
Aeq_temp = zeros((T * ng, nx))
beq_temp = [0] * T * ng
for i in range(ng):
for j in range(T):
Aeq_temp[i * T + j, i * NX + j] = 1
Aeq_temp[i * T + j, i * NX + j + T] = -1
Aeq_temp[i * T + j, i * NX + j + 2 * T] = -1
if j != 0:
Aeq_temp[i * T + j, i * NX + j - 1 + 2 * T] = 1
else:
beq_temp[i * T + j] = -u0[i]
Aeq = concatenate((Aeq, Aeq_temp), axis=0)
beq += beq_temp
# 2.3) Power range limitation
Aineq = zeros((T * ng, nx))
bineq = [0] * T * ng
for i in range(ng):
for j in range(T):
Aineq[i * T + j, i * NX + 2 * T + j] = gen[i, PG_MIN]
Aineq[i * T + j, i * NX + 3 * T + j] = -1
Aineq_temp = zeros((T * ng, nx))
bineq_temp = [0] * T * ng
for i in range(ng):
for j in range(T):
Aineq_temp[i * T + j, i * NX + 2 * T + j] = -gen[i, PG_MAX]
Aineq_temp[i * T + j, i * NX + 3 * T + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq += bineq_temp
# 2.4) Start up and shut down time limitation
UP_LIMIT = [0] * ng
DOWN_LIMIT = [0] * ng
for i in range(ng):
UP_LIMIT[i] = T - int(gen[i, MIN_UP])
DOWN_LIMIT[i] = T - int(gen[i, MIN_DOWN])
# 2.4.1) Up limit
Aineq_temp = zeros((sum(UP_LIMIT), nx))
bineq_temp = [0] * sum(UP_LIMIT)
for i in range(ng):
for j in range(int(gen[i, MIN_UP]), T):
Aineq_temp[sum(UP_LIMIT[0:i]) + j - int(gen[i, MIN_UP]), i * NX + j - int(gen[i, MIN_UP]):i * NX + j] = 1
Aineq_temp[sum(UP_LIMIT[0:i]) + j - int(gen[i, MIN_UP]), i * NX + 2 * T + j] = -1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq += bineq_temp
# 2.4.2) Down limit
Aineq_temp = zeros((sum(DOWN_LIMIT), nx))
bineq_temp = [1] * sum(DOWN_LIMIT)
for i in range(ng):
for j in range(int(gen[i, MIN_DOWN]), T):
Aineq_temp[sum(DOWN_LIMIT[0:i]) + j - int(gen[i, MIN_DOWN]),
i * NX + T + j - int(gen[i, MIN_DOWN]):i * NX + T + j] = 1
Aineq_temp[sum(DOWN_LIMIT[0:i]) + j - int(gen[i, MIN_DOWN]), i * NX + 2 * T + j] = 1
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq += bineq_temp
# 2.5) Ramp constraints:
# 2.5.1) Ramp up limitation
Aineq_temp = zeros((ng * (T - 1), nx))
bineq_temp = [0] * ng * (T - 1)
for i in range(ng):
for j in range(T - 1):
Aineq_temp[i * (T - 1) + j, i * NX + 3 * T + j + 1] = 1
Aineq_temp[i * (T - 1) + j, i * NX + 3 * T + j] = -1
Aineq_temp[i * (T - 1) + j, i * NX + j + 1] = gen[i, RU] - gen[i, PG_MIN]
bineq_temp[i * (T - 1) + j] = gen[i, RU]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq += bineq_temp
    # 2.5.2) Ramp down limitation
Aineq_temp = zeros((ng * (T - 1), nx))
bineq_temp = [0] * ng * (T - 1)
for i in range(ng):
for j in range(T - 1):
Aineq_temp[i * (T - 1) + j, i * NX + 3 * T + j + 1] = -1
Aineq_temp[i * (T - 1) + j, i * NX + 3 * T + j] = 1
Aineq_temp[i * (T - 1) + j, i * NX + T + j + 1] = gen[i, RD] - gen[i, PG_MIN]
bineq_temp[i * (T - 1) + j] = gen[i, RD]
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq += bineq_temp
# 2.6) Line flow limitation
    # Add the line flow limits period by period
b = 1 / branch[:, BR_X] ## series susceptance
## build connection matrix Cft = Cf - Ct for line and from - to buses
f = branch[:, F_BUS] ## list of "from" buses
t = branch[:, T_BUS] ## list of "to" buses
i = r_[range(nl), range(nl)] ## double set of row indices
## connection matrix
Cft = sparse((r_[ones(nl), -ones(nl)], (i, r_[f, t])), (nl, nb))
## build Bf such that Bf * Va is the vector of real branch powers injected
## at each branch's "from" bus
Bf = sparse((r_[b, -b], (i, r_[f, t])), shape=(nl, nb)) ## = spdiags(b, 0, nl, nl) * Cft
## build Bbus
Bbus = Cft.T * Bf
# The distribution factor
Distribution_factor = sparse(linalg.solve(Bbus.toarray().transpose(), Bf.toarray().transpose()).transpose())
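    # Note (added): the line above solves X * Bbus = Bf for X, i.e. it forms
    # the PTDF matrix Bf * inv(Bbus) mapping nodal injections to branch flows.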
    Cg = sparse((ones(ng), (gen[:, GEN_BUS], arange(ng))),
                (nb, ng))  # generator-to-bus incidence; note the sparse constructor indexing differs from Matlab's
    Cd = sparse((ones(nb), (bus[:, BUS_ID], arange(nb))), (nb, nb))  # load-to-bus incidence
Aineq_temp = zeros((nl * T, nx))
bineq_temp = [0] * nl * T
for i in range(T):
index = [0] * ng
for j in range(ng):
index[j] = j * 4 * T + 3 * T + i
Cx2g = sparse((ones(ng), (arange(ng), index)), (ng, nx))
Aineq_temp[i * nl:(i + 1) * nl, :] = (Distribution_factor * Cg * Cx2g).todense()
PD_bus = bus[:, PD] * case["Load_profile"][i]
bineq_temp[i * nl:(i + 1) * nl] = branch[:, RATE_A] + Distribution_factor * Cd * PD_bus
del index, Cx2g
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq += bineq_temp
Aineq_temp = zeros((nl * T, nx))
bineq_temp = [0] * nl * T
for i in range(T):
index = [0] * ng
for j in range(ng):
index[j] = j * 4 * T + 3 * T + i
Cx2g = sparse((-ones(ng), (arange(ng), index)), (ng, nx))
Aineq_temp[i * nl:(i + 1) * nl, :] = (Distribution_factor * Cg * Cx2g).todense()
PD_bus = bus[:, PD] * case["Load_profile"][i]
bineq_temp[i * nl:(i + 1) * nl] = branch[:, RATE_A] - Distribution_factor * Cd * PD_bus
del index, Cx2g
Aineq = concatenate((Aineq, Aineq_temp), axis=0)
bineq += bineq_temp
model = {}
model["c"] = c
model["Q"] = Q
model["Aeq"] = Aeq
model["beq"] = beq
model["lb"] = lb
model["ub"] = ub
model["Aineq"] = Aineq
model["bineq"] = bineq
model["vtypes"] = vtypes
model["Distribution_factor"] = Distribution_factor
model["Cg"] = Cg
model["Cd"] = Cd
model["T"] = T
model["ng"] = ng
model["nl"] = nl
return model
def solution_decomposition(xx, obj, success):
"""
Decomposition of objective functions
:param xx: Solution
:param obj: Objective value
:param success: Success or not
:return:
"""
T = 24
ng = 54
result = {}
result["success"] = success
result["obj"] = obj
if success:
v = zeros((ng, T))
w = zeros((ng, T))
Ig = zeros((ng, T))
Pg = zeros((ng, T))
for i in range(ng):
v[i, :] = xx[4 * i * T:4 * i * T + T]
w[i, :] = xx[4 * i * T + T:4 * i * T + 2 * T]
Ig[i, :] = xx[4 * i * T + 2 * T:4 * i * T + 3 * T]
Pg[i, :] = xx[4 * i * T + 3 * T:4 * i * T + 4 * T]
result["vt"] = v
result["wt"] = w
result["Ig"] = Ig
result["Pg"] = Pg
else:
result["vt"] = 0
result["wt"] = 0
result["Ig"] = 0
result["Pg"] = 0
return result
if __name__ == "__main__":
from unit_commitment.test_cases import case118
test_case = case118.case118()
model = problem_formulation(test_case)
(xx, obj, success) = miqp(c=model["c"], Q=model["Q"], Aeq=model["Aeq"], A=model["Aineq"], b=model["bineq"],
beq=model["beq"], xmin=model["lb"],
xmax=model["ub"], vtypes=model["vtypes"])
sol = solution_decomposition(xx, obj, success)
ng = model["ng"]
nl = model["nl"]
T = model["T"]
Distribution_factor = model["Distribution_factor"]
Cg = model["Cg"]
Cd = model["Cd"]
nx = 4 * T * ng
# check the branch power flow
branch_f2t = zeros((nl, T))
branch_t2f = zeros((nl, T))
for i in range(T):
PD_bus = test_case["bus"][:, 1] * test_case["Load_profile"][i]
branch_f2t[:, i] = Distribution_factor * (Cg * sol["Pg"][:, i] - Cd * PD_bus)
branch_t2f[:, i] = -Distribution_factor * (Cg * sol["Pg"][:, i] - Cd * PD_bus)
plt.plot(sol["Pg"])
plt.show()
|
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
One repository to update them all
On mbed.org the mbed SDK is split up into multiple repositories; this script
takes care of updating them all.
"""
import sys
from copy import copy
from os import walk, remove, makedirs
from os.path import join, abspath, dirname, relpath, exists, isfile
from shutil import copyfile
from optparse import OptionParser
import re
import string
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from workspace_tools.settings import MBED_ORG_PATH, MBED_ORG_USER, BUILD_DIR
from workspace_tools.paths import LIB_DIR
from workspace_tools.utils import run_cmd
MBED_URL = "mbed.org"
MBED_USER = "mbed_official"
# Extension of the repository link files rewritten by update_dependencies().
# NOTE: this constant is referenced but was never defined in the original
# script; '.lib' is an assumption based on mbed's library reference files.
MBED_REPO_EXT = '.lib'
changed = []
push_remote = True
quiet = False
commit_msg = ''
# Code that has a mirror in the mbed SDK
# Tuple data: (repo_name, list_of_code_dirs, [team])
# team is optional - if not specified, the code is published under mbed_official
OFFICIAL_CODE = (
("mbed-src" , "mbed"),
("mbed-rtos", "rtos"),
("mbed-dsp" , "dsp"),
("mbed-rpc" , "rpc"),
("lwip" , "net/lwip/lwip"),
("lwip-sys", "net/lwip/lwip-sys"),
("Socket" , "net/lwip/Socket"),
("lwip-eth" , "net/eth/lwip-eth"),
("EthernetInterface", "net/eth/EthernetInterface"),
("USBDevice", "USBDevice"),
("USBHost" , "USBHost"),
("CellularModem", "net/cellular/CellularModem"),
("CellularUSBModem", "net/cellular/CellularUSBModem"),
("UbloxUSBModem", "net/cellular/UbloxUSBModem"),
("UbloxModemHTTPClientTest", ["tests/net/cellular/http/common", "tests/net/cellular/http/ubloxusb"]),
("UbloxModemSMSTest", ["tests/net/cellular/sms/common", "tests/net/cellular/sms/ubloxusb"]),
)
# Code that has dependencies on libraries should point to
# the latest revision. By default, they point to a specific revision.
CODE_WITH_DEPENDENCIES = (
# Libraries
"EthernetInterface",
# RTOS Examples
"rtos_basic",
"rtos_isr",
"rtos_mail",
"rtos_mutex",
"rtos_queue",
"rtos_semaphore",
"rtos_signals",
"rtos_timer",
# Net Examples
"TCPEchoClient",
"TCPEchoServer",
"TCPSocket_HelloWorld",
"UDPSocket_HelloWorld",
"UDPEchoClient",
"UDPEchoServer",
"BroadcastReceive",
"BroadcastSend",
# mbed sources
"mbed-src-program",
)
# A list of regular expressions that will be checked against each directory
# name and skipped if they match.
IGNORE_DIRS = (
)
IGNORE_FILES = (
'COPYING',
'\.md',
"\.lib",
"\.bld"
)
def ignore_path(name, reg_exps):
for r in reg_exps:
if re.search(r, name):
return True
return False
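# Illustrative behaviour (added), given the IGNORE_FILES patterns above:
#   ignore_path("readme.md", IGNORE_FILES)  -> True   (matches '\.md')
#   ignore_path("main.cpp", IGNORE_FILES)   -> False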
class MbedRepository:
@staticmethod
def run_and_print(command, cwd):
stdout, _, _ = run_cmd(command, wd=cwd, redirect=True)
print(stdout)
def __init__(self, name, team = None):
self.name = name
self.path = join(MBED_ORG_PATH, name)
if team is None:
self.url = "http://" + MBED_URL + "/users/" + MBED_USER + "/code/%s/"
else:
self.url = "http://" + MBED_URL + "/teams/" + team + "/code/%s/"
if not exists(self.path):
# Checkout code
if not exists(MBED_ORG_PATH):
makedirs(MBED_ORG_PATH)
self.run_and_print(['hg', 'clone', self.url % name], cwd=MBED_ORG_PATH)
else:
# Update
self.run_and_print(['hg', 'pull'], cwd=self.path)
self.run_and_print(['hg', 'update'], cwd=self.path)
def publish(self):
# The maintainer has to evaluate the changes first and explicitly accept them
self.run_and_print(['hg', 'addremove'], cwd=self.path)
stdout, _, _ = run_cmd(['hg', 'status'], wd=self.path)
if stdout == '':
print "No changes"
return False
print stdout
if quiet:
commit = 'Y'
else:
commit = raw_input(push_remote and "Do you want to commit and push? Y/N: " or "Do you want to commit? Y/N: ")
if commit == 'Y':
args = ['hg', 'commit', '-u', MBED_ORG_USER]
if commit_msg:
args = args + ['-m', commit_msg]
self.run_and_print(args, cwd=self.path)
if push_remote:
self.run_and_print(['hg', 'push'], cwd=self.path)
return True
# Check if a file is a text file or a binary file
# Taken from http://code.activestate.com/recipes/173220/
text_characters = "".join(map(chr, range(32, 127)) + list("\n\r\t\b"))
_null_trans = string.maketrans("", "")
def is_text_file(filename):
block_size = 1024
def istext(s):
if "\0" in s:
return 0
if not s: # Empty files are considered text
return 1
# Get the non-text characters (maps a character to itself then
# use the 'remove' option to get rid of the text characters.)
t = s.translate(_null_trans, text_characters)
# If more than 30% non-text characters, then
# this is considered a binary file
if float(len(t))/len(s) > 0.30:
return 0
return 1
with open(filename) as f:
res = istext(f.read(block_size))
return res
# Return the line ending type for the given file ('cr' or 'crlf')
def get_line_endings(f):
examine_size = 1024
try:
tf = open(f, "rb")
lines, ncrlf = tf.readlines(examine_size), 0
tf.close()
for l in lines:
if l.endswith("\r\n"):
ncrlf = ncrlf + 1
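        # Note (added): len(lines) >> 1 halves the sampled line count, so
        # 'crlf' is returned only if a majority of the lines end in \r\n.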
return 'crlf' if ncrlf > len(lines) >> 1 else 'cr'
except:
return 'cr'
# Copy file to destination, but preserve destination line endings if possible
# This prevents very annoying issues with huge diffs that appear because of
# differences in line endings
def copy_with_line_endings(sdk_file, repo_file):
if not isfile(repo_file):
copyfile(sdk_file, repo_file)
return
is_text = is_text_file(repo_file)
if is_text:
sdk_le = get_line_endings(sdk_file)
repo_le = get_line_endings(repo_file)
if not is_text or sdk_le == repo_le:
copyfile(sdk_file, repo_file)
else:
print "Converting line endings in '%s' to '%s'" % (abspath(repo_file), repo_le)
f = open(sdk_file, "rb")
data = f.read()
f.close()
f = open(repo_file, "wb")
data = data.replace("\r\n", "\n") if repo_le == 'cr' else data.replace('\n','\r\n')
f.write(data)
f.close()
def visit_files(path, visit, ignore=IGNORE_FILES, ext=None):
    # NOTE: the optional `ignore` and `ext` parameters were missing from the
    # original definition even though update_dependencies() calls this
    # function with four arguments. Pass ignore=None to disable file
    # filtering; `ext` restricts the visit to files with that extension.
    for root, dirs, files in walk(path):
        # Ignore hidden directories
        for d in copy(dirs):
            full = join(root, d)
            if d.startswith('.'):
                dirs.remove(d)
            if ignore_path(full, IGNORE_DIRS):
                print "Skipping '%s'" % full
                dirs.remove(d)
        for file in files:
            if ignore and ignore_path(file, ignore):
                continue
            if ext is not None and not file.endswith(ext):
                continue
            visit(join(root, file))
def update_repo(repo_name, sdk_paths, team_name):
repo = MbedRepository(repo_name, team_name)
# copy files from mbed SDK to mbed_official repository
def visit_mbed_sdk(sdk_file):
repo_file = join(repo.path, relpath(sdk_file, sdk_path))
repo_dir = dirname(repo_file)
if not exists(repo_dir):
makedirs(repo_dir)
copy_with_line_endings(sdk_file, repo_file)
for sdk_path in sdk_paths:
visit_files(sdk_path, visit_mbed_sdk)
# remove repository files that do not exist in the mbed SDK
def visit_repo(repo_file):
for sdk_path in sdk_paths:
sdk_file = join(sdk_path, relpath(repo_file, repo.path))
if exists(sdk_file):
break
else:
remove(repo_file)
print "remove: %s" % repo_file
visit_files(repo.path, visit_repo)
if repo.publish():
changed.append(repo_name)
def update_code(repositories):
for r in repositories:
repo_name, sdk_dir = r[0], r[1]
team_name = r[2] if len(r) == 3 else None
print '\n=== Updating "%s" ===' % repo_name
        sdk_dirs = [sdk_dir] if not isinstance(sdk_dir, list) else sdk_dir
sdk_path = [join(LIB_DIR, d) for d in sdk_dirs]
update_repo(repo_name, sdk_path, team_name)
def update_single_repo(repo):
repos = [r for r in OFFICIAL_CODE if r[0] == repo]
if not repos:
print "Repository '%s' not found" % repo
else:
update_code(repos)
def update_dependencies(repositories):
for repo_name in repositories:
print '\n=== Updating "%s" ===' % repo_name
repo = MbedRepository(repo_name)
# point to the latest libraries
def visit_repo(repo_file):
with open(repo_file, "r") as f:
url = f.read()
with open(repo_file, "w") as f:
f.write(url[:(url.rindex('/')+1)])
visit_files(repo.path, visit_repo, None, MBED_REPO_EXT)
if repo.publish():
changed.append(repo_name)
def update_mbed():
update_repo("mbed", [join(BUILD_DIR, "mbed")], None)
def do_sync(options):
global push_remote, quiet, commit_msg, changed
push_remote = not options.nopush
quiet = options.quiet
commit_msg = options.msg
    changed = []
if options.code:
update_code(OFFICIAL_CODE)
if options.dependencies:
update_dependencies(CODE_WITH_DEPENDENCIES)
if options.mbed:
update_mbed()
if options.repo:
update_single_repo(options.repo)
if changed:
print "Repositories with changes:", changed
return changed
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", "--code",
action="store_true", default=False,
help="Update the mbed_official code")
parser.add_option("-d", "--dependencies",
action="store_true", default=False,
help="Update the mbed_official code dependencies")
parser.add_option("-m", "--mbed",
action="store_true", default=False,
help="Release a build of the mbed library")
parser.add_option("-n", "--nopush",
action="store_true", default=False,
help="Commit the changes locally only, don't push them")
parser.add_option("", "--commit_message",
action="store", type="string", default='', dest='msg',
help="Commit message to use for all the commits")
parser.add_option("-r", "--repository",
action="store", type="string", default='', dest='repo',
help="Synchronize only the given repository")
parser.add_option("-q", "--quiet",
action="store_true", default=False,
help="Don't ask for confirmation before commiting or pushing")
(options, args) = parser.parse_args()
do_sync(options)
|
|
"""Defines the unit tests for the :mod:`colour.models.rgb.ictcp` module."""
import numpy as np
import unittest
from itertools import permutations
from colour.models.rgb import (
RGB_to_ICtCp,
ICtCp_to_RGB,
XYZ_to_ICtCp,
ICtCp_to_XYZ,
)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"TestRGB_to_ICtCp",
"TestICtCp_to_RGB",
"TestXYZ_to_ICtCp",
"TestICtCp_to_XYZ",
]
class TestRGB_to_ICtCp(unittest.TestCase):
"""
    Define :func:`colour.models.rgb.ictcp.RGB_to_ICtCp` definition unit
tests methods.
"""
def test_RGB_to_ICtCp(self):
"""Test :func:`colour.models.rgb.ictcp.RGB_to_ICtCp` definition."""
np.testing.assert_almost_equal(
RGB_to_ICtCp(np.array([0.45620519, 0.03081071, 0.04091952])),
np.array([0.07351364, 0.00475253, 0.09351596]),
decimal=7,
)
np.testing.assert_almost_equal(
RGB_to_ICtCp(
np.array([0.45620519, 0.03081071, 0.04091952]), L_p=4000
),
np.array([0.10516931, 0.00514031, 0.12318730]),
decimal=7,
)
np.testing.assert_almost_equal(
RGB_to_ICtCp(
np.array([0.45620519, 0.03081071, 0.04091952]), L_p=1000
),
np.array([0.17079612, 0.00485580, 0.17431356]),
decimal=7,
)
np.testing.assert_almost_equal(
RGB_to_ICtCp(
np.array([0.45620519, 0.03081071, 0.04091952]),
method="ITU-R BT.2100-1 PQ",
),
np.array([0.07351364, 0.00475253, 0.09351596]),
decimal=7,
)
np.testing.assert_almost_equal(
RGB_to_ICtCp(
np.array([0.45620519, 0.03081071, 0.04091952]),
method="ITU-R BT.2100-2 PQ",
),
np.array([0.07351364, 0.00475253, 0.09351596]),
decimal=7,
)
np.testing.assert_almost_equal(
RGB_to_ICtCp(
np.array([0.45620519, 0.03081071, 0.04091952]),
method="ITU-R BT.2100-1 HLG",
),
np.array([0.62567899, -0.03622422, 0.67786522]),
decimal=7,
)
np.testing.assert_almost_equal(
RGB_to_ICtCp(
np.array([0.45620519, 0.03081071, 0.04091952]),
method="ITU-R BT.2100-2 HLG",
),
np.array([0.62567899, -0.01984490, 0.35911259]),
decimal=7,
)
def test_n_dimensional_RGB_to_ICtCp(self):
"""
Test :func:`colour.models.rgb.ictcp.RGB_to_ICtCp` definition
n-dimensional support.
"""
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
ICtCp = RGB_to_ICtCp(RGB)
RGB = np.tile(RGB, (6, 1))
ICtCp = np.tile(ICtCp, (6, 1))
np.testing.assert_almost_equal(RGB_to_ICtCp(RGB), ICtCp, decimal=7)
RGB = np.reshape(RGB, (2, 3, 3))
ICtCp = np.reshape(ICtCp, (2, 3, 3))
np.testing.assert_almost_equal(RGB_to_ICtCp(RGB), ICtCp, decimal=7)
def test_domain_range_scale_RGB_to_ICtCp(self):
"""
Test :func:`colour.models.rgb.ictcp.RGB_to_ICtCp` definition domain
and range scale support.
"""
RGB = np.array([0.45620519, 0.03081071, 0.04091952])
ICtCp = RGB_to_ICtCp(RGB)
d_r = (("reference", 1), ("1", 1), ("100", 1))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
RGB_to_ICtCp(RGB * factor), ICtCp * factor, decimal=7
)
@ignore_numpy_errors
def test_nan_RGB_to_ICtCp(self):
"""
Test :func:`colour.models.rgb.ictcp.RGB_to_ICtCp` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
RGB = np.array(case)
RGB_to_ICtCp(RGB)
class TestICtCp_to_RGB(unittest.TestCase):
"""
Define :func:`colour.models.rgb.ictcp.ICtCp_to_RGB` definition unit tests
methods.
"""
def test_ICtCp_to_RGB(self):
"""Test :func:`colour.models.rgb.ictcp.ICtCp_to_RGB` definition."""
np.testing.assert_almost_equal(
ICtCp_to_RGB(np.array([0.07351364, 0.00475253, 0.09351596])),
np.array([0.45620519, 0.03081071, 0.04091952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_RGB(
np.array([0.10516931, 0.00514031, 0.12318730]), L_p=4000
),
np.array([0.45620519, 0.03081071, 0.04091952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_RGB(
np.array([0.17079612, 0.00485580, 0.17431356]), L_p=1000
),
np.array([0.45620519, 0.03081071, 0.04091952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_RGB(
np.array([0.07351364, 0.00475253, 0.09351596]),
method="ITU-R BT.2100-1 PQ",
),
np.array([0.45620519, 0.03081071, 0.04091952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_RGB(
np.array([0.07351364, 0.00475253, 0.09351596]),
method="ITU-R BT.2100-2 PQ",
),
np.array([0.45620519, 0.03081071, 0.04091952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_RGB(
np.array([0.62567899, -0.03622422, 0.67786522]),
method="ITU-R BT.2100-1 HLG",
),
np.array([0.45620519, 0.03081071, 0.04091952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_RGB(
np.array([0.62567899, -0.01984490, 0.35911259]),
method="ITU-R BT.2100-2 HLG",
),
np.array([0.45620519, 0.03081071, 0.04091952]),
decimal=7,
)
def test_n_dimensional_ICtCp_to_RGB(self):
"""
Test :func:`colour.models.rgb.ictcp.ICtCp_to_RGB` definition
n-dimensional support.
"""
ICtCp = np.array([0.07351364, 0.00475253, 0.09351596])
RGB = ICtCp_to_RGB(ICtCp)
ICtCp = np.tile(ICtCp, (6, 1))
RGB = np.tile(RGB, (6, 1))
np.testing.assert_almost_equal(ICtCp_to_RGB(ICtCp), RGB, decimal=7)
ICtCp = np.reshape(ICtCp, (2, 3, 3))
RGB = np.reshape(RGB, (2, 3, 3))
np.testing.assert_almost_equal(ICtCp_to_RGB(ICtCp), RGB, decimal=7)
def test_domain_range_scale_ICtCp_to_RGB(self):
"""
Test :func:`colour.models.rgb.ictcp.ICtCp_to_RGB` definition domain
and range scale support.
"""
ICtCp = np.array([0.07351364, 0.00475253, 0.09351596])
RGB = ICtCp_to_RGB(ICtCp)
d_r = (("reference", 1), ("1", 1), ("100", 1))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
ICtCp_to_RGB(ICtCp * factor), RGB * factor, decimal=7
)
@ignore_numpy_errors
def test_nan_ICtCp_to_RGB(self):
"""
Test :func:`colour.models.rgb.ictcp.ICtCp_to_RGB` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
ICtCp = np.array(case)
ICtCp_to_RGB(ICtCp)
class TestXYZ_to_ICtCp(unittest.TestCase):
"""
    Define :func:`colour.models.rgb.ictcp.XYZ_to_ICtCp` definition unit
tests methods.
"""
def test_XYZ_to_ICtCp(self):
"""Test :func:`colour.models.rgb.ictcp.XYZ_to_ICtCp` definition."""
np.testing.assert_almost_equal(
XYZ_to_ICtCp(np.array([0.20654008, 0.12197225, 0.05136952])),
np.array([0.06858097, -0.00283842, 0.06020983]),
decimal=7,
)
np.testing.assert_almost_equal(
XYZ_to_ICtCp(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.34570, 0.35850]),
),
np.array([0.06792437, 0.00452089, 0.05514480]),
decimal=7,
)
np.testing.assert_almost_equal(
XYZ_to_ICtCp(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.34570, 0.35850]),
chromatic_adaptation_transform="Bradford",
),
np.array([0.06783951, 0.00476111, 0.05523093]),
decimal=7,
)
np.testing.assert_almost_equal(
XYZ_to_ICtCp(
np.array([0.20654008, 0.12197225, 0.05136952]), L_p=4000
),
np.array([0.09871102, -0.00447247, 0.07984812]),
decimal=7,
)
np.testing.assert_almost_equal(
XYZ_to_ICtCp(
np.array([0.20654008, 0.12197225, 0.05136952]), L_p=1000
),
np.array([0.16173872, -0.00792543, 0.11409458]),
decimal=7,
)
np.testing.assert_almost_equal(
XYZ_to_ICtCp(
np.array([0.20654008, 0.12197225, 0.05136952]),
method="ITU-R BT.2100-1 PQ",
),
np.array([0.06858097, -0.00283842, 0.06020983]),
decimal=7,
)
np.testing.assert_almost_equal(
XYZ_to_ICtCp(
np.array([0.20654008, 0.12197225, 0.05136952]),
method="ITU-R BT.2100-2 PQ",
),
np.array([0.06858097, -0.00283842, 0.06020983]),
decimal=7,
)
np.testing.assert_almost_equal(
XYZ_to_ICtCp(
np.array([0.20654008, 0.12197225, 0.05136952]),
method="ITU-R BT.2100-1 HLG",
),
np.array([0.59242792, -0.06824263, 0.47421473]),
decimal=7,
)
np.testing.assert_almost_equal(
XYZ_to_ICtCp(
np.array([0.20654008, 0.12197225, 0.05136952]),
method="ITU-R BT.2100-2 HLG",
),
np.array([0.59242792, -0.03740730, 0.25122675]),
decimal=7,
)
def test_n_dimensional_XYZ_to_ICtCp(self):
"""
Test :func:`colour.models.rgb.ictcp.XYZ_to_ICtCp` definition
n-dimensional support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
ICtCp = XYZ_to_ICtCp(XYZ)
XYZ = np.tile(XYZ, (6, 1))
ICtCp = np.tile(ICtCp, (6, 1))
np.testing.assert_almost_equal(XYZ_to_ICtCp(XYZ), ICtCp, decimal=7)
XYZ = np.reshape(XYZ, (2, 3, 3))
ICtCp = np.reshape(ICtCp, (2, 3, 3))
np.testing.assert_almost_equal(XYZ_to_ICtCp(XYZ), ICtCp, decimal=7)
def test_domain_range_scale_XYZ_to_ICtCp(self):
"""
Test :func:`colour.models.rgb.ictcp.XYZ_to_ICtCp` definition domain
and range scale support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
ICtCp = XYZ_to_ICtCp(XYZ)
d_r = (("reference", 1), ("1", 1), ("100", 1))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
XYZ_to_ICtCp(XYZ * factor), ICtCp * factor, decimal=7
)
@ignore_numpy_errors
def test_nan_XYZ_to_ICtCp(self):
"""
Test :func:`colour.models.rgb.ictcp.XYZ_to_ICtCp` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
XYZ = np.array(case)
XYZ_to_ICtCp(XYZ)
class TestICtCp_to_XYZ(unittest.TestCase):
"""
Define :func:`colour.models.rgb.ictcp.ICtCp_to_XYZ` definition unit tests
methods.
"""
def test_ICtCp_to_XYZ(self):
"""Test :func:`colour.models.rgb.ictcp.ICtCp_to_XYZ` definition."""
np.testing.assert_almost_equal(
ICtCp_to_XYZ(np.array([0.06858097, -0.00283842, 0.06020983])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_XYZ(
np.array([0.06792437, 0.00452089, 0.05514480]),
np.array([0.34570, 0.35850]),
),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_XYZ(
np.array([0.06783951, 0.00476111, 0.05523093]),
np.array([0.34570, 0.35850]),
chromatic_adaptation_transform="Bradford",
),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_XYZ(
np.array([0.09871102, -0.00447247, 0.07984812]), L_p=4000
),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_XYZ(
np.array([0.16173872, -0.00792543, 0.11409458]), L_p=1000
),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_XYZ(
np.array([0.06858097, -0.00283842, 0.06020983]),
method="ITU-R BT.2100-1 PQ",
),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_XYZ(
np.array([0.06858097, -0.00283842, 0.06020983]),
method="ITU-R BT.2100-2 PQ",
),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_XYZ(
np.array([0.59242792, -0.06824263, 0.47421473]),
method="ITU-R BT.2100-1 HLG",
),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7,
)
np.testing.assert_almost_equal(
ICtCp_to_XYZ(
np.array([0.59242792, -0.03740730, 0.25122675]),
method="ITU-R BT.2100-2 HLG",
),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7,
)
def test_n_dimensional_ICtCp_to_XYZ(self):
"""
Test :func:`colour.models.rgb.ictcp.ICtCp_to_XYZ` definition
n-dimensional support.
"""
ICtCp = np.array([0.06858097, -0.00283842, 0.06020983])
XYZ = ICtCp_to_XYZ(ICtCp)
ICtCp = np.tile(ICtCp, (6, 1))
XYZ = np.tile(XYZ, (6, 1))
np.testing.assert_almost_equal(ICtCp_to_XYZ(ICtCp), XYZ, decimal=7)
ICtCp = np.reshape(ICtCp, (2, 3, 3))
XYZ = np.reshape(XYZ, (2, 3, 3))
np.testing.assert_almost_equal(ICtCp_to_XYZ(ICtCp), XYZ, decimal=7)
def test_domain_range_scale_ICtCp_to_XYZ(self):
"""
Test :func:`colour.models.rgb.ictcp.ICtCp_to_XYZ` definition domain
and range scale support.
"""
ICtCp = np.array([0.06858097, -0.00283842, 0.06020983])
XYZ = ICtCp_to_XYZ(ICtCp)
d_r = (("reference", 1), ("1", 1), ("100", 1))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
ICtCp_to_XYZ(ICtCp * factor), XYZ * factor, decimal=7
)
@ignore_numpy_errors
def test_nan_ICtCp_to_XYZ(self):
"""
Test :func:`colour.models.rgb.ictcp.ICtCp_to_XYZ` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
ICtCp = np.array(case)
ICtCp_to_XYZ(ICtCp)
if __name__ == "__main__":
unittest.main()
|
|
import tensorflow as tf
import numpy as np
import math
import os
from zimpy.networks.base_neural_network import BaseNeuralNetwork, ConfigurationContext, HyperParametersContext
class SingleLayerHyperParametersContext(HyperParametersContext):
def __init__(
self,
hidden_layer_neuron_count=512,
**kwargs
):
"""
:param hidden_layer_neuron_count: number of neurons for the hidden layer
:param kwargs: Arguments to pass into to super constructor
"""
super(SingleLayerHyperParametersContext, self).__init__(**kwargs)
self.hidden_layer_neuron_count = hidden_layer_neuron_count
class SingleLayerLinear(BaseNeuralNetwork):
def fit(self):
data = self.config.data
hyper_parameters = self.config.hyper_parameters
features, labels, logits = self.__build_graph()
# Feed dicts for training, validation, test and prediction
train_feed_dict = {features: data.train_flat, labels: data.train_labels}
valid_feed_dict = {features: data.validate_flat, labels: data.validate_labels}
test_feed_dict = {features: data.test_flat, labels: data.test_labels}
predict_feed_dict = {features: data.predict_flat, labels: data.predict_labels}
# Passing global_step to minimize() will increment it at each step.
global_step = tf.Variable(0, trainable=False)
# Define loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, labels))
# Configure optimizer
if self.config.optimizer_type == ConfigurationContext.OPTIMIZER_TYPE_GRADIENT_DESCENT:
# decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
learning_rate = tf.train.exponential_decay(learning_rate=hyper_parameters.start_learning_rate,
global_step=global_step,
decay_steps=75000, decay_rate=0.96, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss,
global_step=global_step)
elif self.config.optimizer_type == ConfigurationContext.OPTIMIZER_TYPE_ADAGRAD:
learning_rate = tf.constant(hyper_parameters.start_learning_rate)
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate).minimize(loss)
training_epochs = hyper_parameters.epochs
batch_size = hyper_parameters.batch_size
num_training = data.num_training
batch_count = int(math.ceil(num_training / batch_size))
display_step = 1
# Launch the graph
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333, allow_growth=True)
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(gpu_options=gpu_options)
# config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.initialize_all_variables())
for epoch in range(training_epochs):
for i in range(batch_count):
x_batch, y_batch, batch_start, batch_end = data.next_batch(batch_size)
batch_feed_dict = {features: x_batch, labels: y_batch}
# ImagePlotter.plot_images(ImageJitterer.jitter_images(data.train_orig[batch_start:batch_end]), batch_y)
# ImagePlotter.plot_images(data.train_orig[batch_start:batch_end], np.argmax(batch_y, axis=1))
# Run optimization op (backprop) and loss op (to get loss value)
sess.run(optimizer, feed_dict=batch_feed_dict)
# _, current_loss = sess.run([optimizer, loss], feed_dict=batch_feed_dict)
# self.track_loss(current_loss)
# Display logs per epoch step and very last batch iteration
if epoch % display_step == 0 or (epoch == (training_epochs - 1) and i == (batch_count - 1)):
total_iterations = (epoch + 1)
print("Epoch:", '%04d' % total_iterations, 'of', '%04d' % training_epochs)
self.config.hyper_parameters.end_learning_rate = sess.run(learning_rate)
# Calculate accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# store accuracies
self.train_accuracy = accuracy.eval(train_feed_dict)
self.validate_accuracy = accuracy.eval(valid_feed_dict)
self.test_accuracy = accuracy.eval(test_feed_dict)
self.predict_accuracy = accuracy.eval(predict_feed_dict)
# store predictions
self.train_predictions = tf.cast(correct_prediction.eval(train_feed_dict), "float").eval()
self.test_predictions = tf.cast(correct_prediction.eval(test_feed_dict), "float").eval()
self.predict_predictions = tf.cast(correct_prediction.eval(predict_feed_dict), "float").eval()
self.validate_predictions = tf.cast(correct_prediction.eval(valid_feed_dict), "float").eval()
self.loss = sess.run(loss, feed_dict=valid_feed_dict)
self.track_loss(self.loss)
print(" loss: ", "{:.9f}".format(self.loss))
print(" batch accuracy: ", accuracy.eval(batch_feed_dict))
print(" train accuracy: ", accuracy.eval(train_feed_dict))
print(" validate accuracy: ", accuracy.eval(valid_feed_dict))
print(" test accuracy: ", accuracy.eval(test_feed_dict))
print(" predict accuracy: ", accuracy.eval(predict_feed_dict))
print(" batch size: ", batch_size)
print(" learning rate: ", sess.run(learning_rate))
print('')
y_pred = tf.nn.softmax(logits)
top_5_op = tf.nn.top_k(y_pred, 5)
self.top_5 = sess.run(top_5_op,
feed_dict={features: data.predict_flat, labels: data.predict_labels})
saved = self.evaluate_accuracy(sess, accuracy.eval(valid_feed_dict), total_iterations)
                        if saved:
# store the final results for later analysis and prediction runs
# NOTE: I wrote the serializer mechanic prior to discovering tf.train.Saver.
self.weights = {
'hidden_layer': self.weight_variables['hidden_layer'].eval(),
'out': self.weight_variables['out'].eval()
}
self.biases = {
'hidden_layer': self.bias_variables['hidden_layer'].eval(),
'out': self.bias_variables['out'].eval()
}
os.system('say "{:.002f}%"'.format(self.validate_accuracy * 100))
if total_iterations - self.last_improvement > hyper_parameters.required_accuracy_improvement:
msg = 'No improvement found in a while, stopping optimization after {} iterations. Final accuracy, {}% at iteration {}.'.format(
total_iterations, str(int(self.validate_accuracy * 100)), self.last_improvement)
print(msg)
os.system('say "{}"'.format(msg))
break
print("Optimization Finished!")
def top_k(self, x, y, model_name, k=5):
features, labels, logits = self.__build_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
self.saver = tf.train.Saver()
self.saver.restore(sess, self.save_dir + '/' + model_name)
y_pred = tf.nn.softmax(logits)
# Calculate predictions.
# in_top_k_op = tf.nn.in_top_k(logits, true_labels, k)
# top_1_op = tf.nn.top_k(logits, 1)
# top_1_op = tf.nn.top_k(y_pred, 1)
# top_1 = sess.run(top_1_op, feed_dict={features: images})
top_k_op = tf.nn.top_k(y_pred, k)
top_k = sess.run(top_k_op, feed_dict={features: x, labels: y})
print('top {}:'.format(k))
print('')
print(top_k)
print('')
print(top_k.values)
print('')
print(top_k.indices)
print('')
print(top_k.values.shape)
print('')
print(top_k.indices.shape)
return top_k
def predict(self, images, true_labels, model_name):
features, labels, logits = self.__build_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
            # This seems to take a very long time, so it is skipped for now.
# self.saver = tf.train.import_meta_graph(self.save_dir + '/' + model_name + '.meta')
# self.saver.restore(sess, self.save_dir + '/' + model_name)
self.saver = tf.train.Saver()
self.saver.restore(sess, self.save_dir + '/' + model_name)
# Number of images.
num_images = len(images)
# Allocate an array for the predicted classes which
# will be calculated in batches and filled into this array.
cls_pred = np.zeros(shape=num_images, dtype=np.int)
feed_dict = {features: images, labels: true_labels}
y_pred_cls = tf.argmax(logits, dimension=1)
# y_true_cls = tf.argmax(labels, 1)
# correct_prediction = tf.equal(y_pred_cls, y_true_cls)
cls_pred = sess.run(y_pred_cls, feed_dict=feed_dict)
# predicted_labels = tf.argmax(sess.run(labels, feed_dict=feed_dict), dimension=1).eval()
# sign_names = [self.config.data.sign_names_map[label] for label in predicted_labels]
# print(predicted_labels)
# print(sign_names)
# Calculate accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# predictions = tf.cast(correct_prediction.eval(feed_dict), "bool").eval()
# print(predictions)
correct = (np.argmax(true_labels, axis=1) == cls_pred)
print(" predict accuracy: {:004f}%".format(accuracy.eval(feed_dict) * 100))
return correct, cls_pred
def __build_graph(self):
"""
Builds a 2 layer network with a single hidden layer with n hidden layer neurons.
:return:
"""
data = self.config.data
hyper_parameters = self.config.hyper_parameters
image_size = data.train_flat.shape[1]
num_classes = data.num_classes
n_hidden_layer = hyper_parameters.hidden_layer_neuron_count
self.top_5 = {}
# Store layers weight & bias
self.weight_variables = {
'hidden_layer': tf.Variable(tf.random_normal([image_size, n_hidden_layer]), name='weights_hidden_layer'),
'out': tf.Variable(tf.random_normal([n_hidden_layer, num_classes]), name='weights_out')
}
self.bias_variables = {
'hidden_layer': tf.Variable(tf.zeros([n_hidden_layer]), name='biases_hidden_layer'),
'out': tf.Variable(tf.zeros([num_classes]), name='biases_out')
}
features = tf.placeholder("float", [None, image_size])
labels = tf.placeholder("float", [None, num_classes])
# Hidden layer with RELU activation
layer_1 = tf.add(
tf.matmul(
features,
self.weight_variables['hidden_layer']
),
self.bias_variables['hidden_layer']
)
layer_1 = tf.nn.relu(layer_1)
# Output layer with linear activation
logits = tf.matmul(layer_1, self.weight_variables['out']) + self.bias_variables['out']
return features, labels, logits
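# Shape sketch (added): for flattened inputs of size D, H hidden neurons and
# C classes, the graph above computes logits = relu(x @ W1 + b1) @ W2 + b2,
# with W1: [D, H], b1: [H], W2: [H, C], b2: [C].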
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond as smart_module
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if isinstance(pred, variables.Variable):
return control_flow_ops.cond(
pred, true_fn=true_fn, false_fn=false_fn, name=name)
return smart_module.smart_cond(
pred, true_fn=true_fn, false_fn=false_fn, name=name)
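# Minimal illustration (added, not part of the original module): with a plain
# Python bool the branch is chosen statically, so no `tf.cond` op is created:
#   smart_cond(True, lambda: 1.0, lambda: 2.0)  # -> 1.0; true_fn runs directly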
def constant_value(pred):
"""Return the bool value for `pred`, or None if `pred` had a dynamic value.
Arguments:
pred: A scalar, either a Python bool or a TensorFlow boolean variable
or tensor, or the Python integer 1 or 0.
Returns:
True or False if `pred` has a constant boolean value, None otherwise.
Raises:
TypeError: If `pred` is not a Variable, Tensor or bool, or Python
integer 1 or 0.
"""
# Allow integer booleans.
if isinstance(pred, int):
if pred == 1:
pred = True
elif pred == 0:
pred = False
if isinstance(pred, variables.Variable):
return None
return smart_module.smart_constant_value(pred)
def is_tensor_or_tensor_list(v):
v = nest.flatten(v)
if v and isinstance(v[0], ops.Tensor):
return True
else:
return False
def get_reachable_from_inputs(inputs, targets=None):
"""Returns the set of tensors/ops reachable from `inputs`.
Stops if all targets have been found (target is optional).
Only valid in Symbolic mode, not Eager mode.
Args:
inputs: List of tensors.
targets: List of tensors.
Returns:
A set of tensors reachable from the inputs (includes the inputs themselves).
"""
inputs = nest.flatten(inputs, expand_composites=True)
reachable = object_identity.ObjectIdentitySet(inputs)
if targets:
remaining_targets = object_identity.ObjectIdentitySet(nest.flatten(targets))
queue = inputs[:]
while queue:
x = queue.pop()
if isinstance(x, tuple(_user_convertible_tensor_types)):
# Can't find consumers of user-specific types.
continue
if isinstance(x, ops.Operation):
outputs = x.outputs[:] or []
outputs += x._control_outputs # pylint: disable=protected-access
elif isinstance(x, variables.Variable):
try:
outputs = [x.op]
except AttributeError:
# Variables can be created in an Eager context.
outputs = []
elif tensor_util.is_tensor(x):
outputs = x.consumers()
else:
raise TypeError('Expected Operation, Variable, or Tensor, got ' + str(x))
for y in outputs:
if y not in reachable:
reachable.add(y)
if targets:
remaining_targets.discard(y)
queue.insert(0, y)
if targets and not remaining_targets:
return reachable
return reachable
# This function needs access to private functions of `nest`.
# pylint: disable=protected-access
def map_structure_with_atomic(is_atomic_fn, map_fn, nested):
"""Maps the atomic elements of a nested structure.
Arguments:
is_atomic_fn: A function that determines if an element of `nested` is
atomic.
map_fn: The function to apply to atomic elements of `nested`.
nested: A nested structure.
Returns:
The nested structure, with atomic elements mapped according to `map_fn`.
Raises:
ValueError: If an element that is neither atomic nor a sequence is
encountered.
"""
if is_atomic_fn(nested):
return map_fn(nested)
# Recursively convert.
if not nest.is_sequence(nested):
raise ValueError(
'Received non-atomic and non-sequence element: {}'.format(nested))
if nest._is_mapping(nested):
values = [nested[k] for k in nest._sorted(nested)]
else:
values = nested
mapped_values = [
map_structure_with_atomic(is_atomic_fn, map_fn, ele) for ele in values
]
return nest._sequence_like(nested, mapped_values)
# pylint: enable=protected-access
def convert_shapes(input_shape, to_tuples=True):
"""Converts nested shape representations to desired format.
Performs:
TensorShapes -> tuples if `to_tuples=True`.
tuples of int or None -> TensorShapes if `to_tuples=False`.
Valid objects to be converted are:
- TensorShapes
- tuples with elements of type int or None.
- ints
- None
Arguments:
input_shape: A nested structure of objects to be converted to TensorShapes.
to_tuples: If `True`, converts all TensorShape to tuples. Otherwise converts
all tuples representing shapes to TensorShapes.
Returns:
Nested structure of shapes in desired format.
"""
def _is_shape_component(value):
return value is None or isinstance(value, (int, tensor_shape.Dimension))
def _is_atomic_shape(input_shape):
# Ex: TensorShape or (None, 10, 32) or 5 or `None`
if _is_shape_component(input_shape):
return True
if isinstance(input_shape, tensor_shape.TensorShape):
return True
if (isinstance(input_shape, (tuple, list)) and
all(_is_shape_component(ele) for ele in input_shape)):
return True
return False
def _convert_shape(input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if to_tuples:
input_shape = tuple(input_shape.as_list())
return input_shape
return map_structure_with_atomic(_is_atomic_shape, _convert_shape,
input_shape)
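# Illustrative round-trip (added):
#   convert_shapes(tensor_shape.TensorShape([None, 10]), to_tuples=True)
#   # -> (None, 10)
#   convert_shapes((None, 10), to_tuples=False)
#   # -> a TensorShape equivalent to (None, 10)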
class ListWrapper(object):
"""A wrapper for lists to be treated as elements for `nest`."""
def __init__(self, list_to_wrap):
self._list = list_to_wrap
def as_list(self):
return self._list
def convert_inner_node_data(nested, wrap=False):
"""Either wraps or unwraps innermost node data lists in `ListWrapper` objects.
Arguments:
nested: A nested data structure.
wrap: If `True`, wrap innermost lists in `ListWrapper` objects. If `False`,
unwraps `ListWrapper` objects into lists.
Returns:
Structure of same type as nested, with lists wrapped/unwrapped.
"""
def _is_serialized_node_data(nested):
# Node data can be of form `[layer_name, node_id, tensor_id]` or
# `[layer_name, node_id, tensor_id, kwargs]`.
if (isinstance(nested, list) and (len(nested) in [3, 4]) and
isinstance(nested[0], six.string_types)):
return True
return False
def _is_atomic_nested(nested):
"""Returns `True` if `nested` is a list representing node data."""
if isinstance(nested, ListWrapper):
return True
if _is_serialized_node_data(nested):
return True
return not nest.is_sequence(nested)
def _convert_object_or_list(nested):
"""Convert b/t `ListWrapper` object and list representations."""
if wrap:
if isinstance(nested, ListWrapper):
return nested
if _is_serialized_node_data(nested):
return ListWrapper(nested)
return nested
else:
if isinstance(nested, ListWrapper):
return nested.as_list()
return nested
return map_structure_with_atomic(_is_atomic_nested, _convert_object_or_list,
nested)
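# Example (added): node data such as ['dense_1', 0, 0] is treated as atomic,
# so with wrap=True it becomes ListWrapper(['dense_1', 0, 0]) instead of being
# recursed into as an ordinary list.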
def shape_type_conversion(fn):
"""Decorator that handles tuple/TensorShape conversion.
Used in `compute_output_shape` and `build`.
Arguments:
fn: function to wrap.
Returns:
Wrapped function.
"""
def wrapper(instance, input_shape):
# Pass shapes as tuples to `fn`
# This preserves compatibility with external Keras.
if input_shape is not None:
input_shape = convert_shapes(input_shape, to_tuples=True)
output_shape = fn(instance, input_shape)
# Return shapes from `fn` as TensorShapes.
if output_shape is not None:
output_shape = convert_shapes(output_shape, to_tuples=False)
return output_shape
return wrapper
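# Usage sketch (added): inside a custom layer the decorator lets
# `compute_output_shape` be written against plain tuples, e.g.
#
#   class MyLayer(Layer):  # `Layer` assumed to be imported elsewhere
#       @shape_type_conversion
#       def compute_output_shape(self, input_shape):
#           return (input_shape[0], 10)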
def are_all_symbolic_tensors(tensors):
return all(is_symbolic_tensor(tensor) for tensor in tensors)
_user_convertible_tensor_types = set()
def is_symbolic_tensor(tensor):
"""Returns whether a tensor is symbolic (from a TF graph) or an eager tensor.
A Variable can be seen as either: it is considered symbolic
when we are in a graph scope, and eager when we are in an eager scope.
Arguments:
tensor: A tensor instance to test.
Returns:
True for symbolic tensors, False for eager tensors.
"""
if isinstance(tensor, tuple(_user_convertible_tensor_types)):
tensor = ops.convert_to_tensor_or_composite(tensor)
if isinstance(tensor, variables.Variable):
# Variables that are output of a Keras Layer in Functional API mode
# should be considered symbolic.
# TODO(omalleyt): We need a better way to check this in order to
# enable `run_eagerly=True` for Models containing Layers that
# return Variables as outputs.
return (getattr(tensor, '_keras_history', False) or
not context.executing_eagerly())
if isinstance(tensor, composite_tensor.CompositeTensor):
component_tensors = nest.flatten(tensor, expand_composites=True)
return any(hasattr(t, 'graph') for t in component_tensors)
if isinstance(tensor, ops.Tensor):
return hasattr(tensor, 'graph')
return False
def register_symbolic_tensor_type(cls):
"""Allows users to specify types regarded as symbolic `Tensor`s.
Used in conjunction with `tf.register_tensor_conversion_function`, calling
`tf.keras.utils.register_symbolic_tensor_type(cls)` allows non-`Tensor`
objects to be plumbed through Keras layers.
Example:
```python
# One-time setup.
class Foo(object):
def __init__(self, input_):
self._input = input_
def value(self):
return tf.constant(42.)
tf.register_tensor_conversion_function(
Foo, lambda x, *args, **kwargs: x.value())
tf.keras.utils.register_symbolic_tensor_type(Foo)
# User-land.
layer = tf.keras.layers.Lambda(lambda input_: Foo(input_))
```
Arguments:
cls: A `class` type which shall be regarded as a symbolic `Tensor`.
"""
global _user_convertible_tensor_types
_user_convertible_tensor_types.add(cls)
def is_tensor_or_variable(x):
return tensor_util.is_tensor(x) or isinstance(x, variables.Variable)
def assert_no_legacy_layers(layers):
"""Prevent tf.layers.Layers from being used with Keras.
Certain legacy layers inherit from their keras analogs; however they are
not supported with keras and can lead to subtle and hard to diagnose bugs.
Args:
layers: A list of layers to check
Raises:
TypeError: If any elements of layers are tf.layers.Layers
"""
# isinstance check for tf.layers.Layer introduces a circular dependency.
legacy_layers = [l for l in layers if getattr(l, '_is_legacy_layer', None)]
if legacy_layers:
layer_str = '\n'.join([' ' + str(l) for l in legacy_layers])
raise TypeError(
'The following are legacy tf.layers.Layers:\n{}\nTo use keras as a '
'framework (for instance using the Network, Model, or Sequential '
'classes), please use the tf.keras.layers implementation instead. '
'(Or, if writing custom layers, subclass from tf.keras.layers rather '
'than tf.layers)'.format(layer_str))
@tf_contextlib.contextmanager
def maybe_init_scope(layer):
"""Open an `init_scope` if in V2 mode and using the keras graph.
Arguments:
layer: The Layer/Model that is currently active.
Yields:
None
"""
# Don't open an init_scope in V1 mode or when using legacy tf.layers.
if (ops.executing_eagerly_outside_functions() and
getattr(layer, '_keras_style', True)):
with ops.init_scope():
yield
else:
yield
@tf_contextlib.contextmanager
def graph_context_for_symbolic_tensors(*args, **kwargs):
"""Returns graph context manager if any of the inputs is a symbolic tensor."""
if any(is_symbolic_tensor(v) for v in list(args) + list(kwargs.values())):
with K.get_graph().as_default():
yield
else:
yield
|
|
#!/usr/bin/python
# File created on 27 Jan 2012
from __future__ import division
import threading
import time
from multiprocessing import Process
__author__ = "Kishori M Konwar, Niels W. Hanson"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
try:
import re
import sys
from optparse import OptionParser, OptionGroup
import traceback
except:
print """ Could not load some user defined module functions"""
print """ Make sure your typed \"source MetaPathwayrc\""""
print """ """
sys.exit(3)
script_name = "format_cog_names.py"
usage = script_name + """ --products product_file --subsystems subsystems_file -o output"""
parser = OptionParser(usage)
parser.add_option( "--products", dest="products",
help='the product file')
parser.add_option( "--subsystems", dest="subsystems",
help='the subsystem file')
parser.add_option( "-o", dest="output_file",
help='the output file')
parser.add_option( "-N", dest="numthreads", type = 'int',
help='the number of threads')
def fprintf(file, fmt, *args):
file.write(fmt % args)
def check_arguments(opts, args):
if opts.products == None:
print """Must have the \"products\" file"""
return False
if opts.subsystems == None:
print """Must have the \"subsystems\" file"""
return False
if opts.output_file == None:
print """Must have an output file"""
return False
return True
# NOTE: insert_attribute is never defined in this script; split_attributes
# appears to be unused legacy code carried over from a related parser.
def split_attributes(str, attributes):
rawattributes = re.split(';', str)
for attribStr in rawattributes:
insert_attribute(attributes, attribStr)
return attributes
def create_dict_from_list(list, dictionary):
for word in list:
dictionary[word] = True
stopwords_list= ["a", "able", "about", "across", "after", "all", "almost", "also", "am",\
"among", "an", "and", "any", "are", "as", "at", "be",\
"because", "been", "but", "by", "can", "cannot", "could", "dear", "did",\
"do", "does", "either", "else", "ever", "every", "for", "from", "get",\
"got", "had", "has", "have", "he", "her", "hers", "him", "his", "how", "however",\
"i", "if", "in", "into", "is", "it", "its", "just", "least", "let", "like",\
"likely", "may", "me", "might", "most", "must", "my", "neither", "no",\
"nor", "not", "of", "off", "often", "on", "only" , "or", "other", "our",\
"own", "rather", "said", "say", "says", "she", "should", "since", "so", "some",\
"than", "that", "the", "their", "them", "then", "there", "these", "they",\
"this", "tis", "to", "too", "twas", "us", "wants", "was", "we", "were", "what",\
"when", "where", "which", "while", "who", "whom", "why", "will", "with",\
"would", "yet", "you", "your", "enzyme", "hypothetical", "protein"]
def remove_repeats(filtered_words):
word_dict = {}
newlist = []
for word in filtered_words:
if not word in word_dict:
word_dict[word]=1
newlist.append(word)
return newlist
def remove_stop_words(list):
newlist = []
for item in list:
if not item in stopwords_list:
newlist.append(item)
return newlist
def format_product(product):
product0 = re.sub(r'\[.+?\]', '', product)
product1 = re.sub(r'\(', '', product0)
product2 = re.sub(r'\)', '', product1)
product3 = re.sub(r':', '', product2)
product4 = re.sub(r'\/', '', product3)
product_end = product4
####### subproducts = re.split('\/', product0)
list = create_words_list(product_end)
dict = create_dictionary_from_list(list)
return dict
def create_dictionary_from_list(list):
dict = {}
for item in list:
dict[item] = 1;
return dict
def create_words_list(string):
Splits = re.split(' ', string)
list = []
for Split in Splits:
smallsplits = Split.split(',')
list +=smallsplits
list = remove_repeats(list)
list = remove_stop_words(list)
list_final = []
for item in list:
if len(item):
list_final.append(item)
return list_final
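# Illustrative sketch (not part of the original script): what the word-list
# pipeline above produces for a sample product string.  'protein' is dropped
# as a stop word, the empty token left by the trailing comma is filtered
# out, and the duplicate 'kinase' is collapsed by remove_repeats:
#   create_words_list("protein kinase, kinase")  ->  ['kinase']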
def process_products_file(output_file, t, N):
outputfile = open( output_file,'w')
seq_beg_pattern = re.compile(">")
stopwords = {}
create_dict_from_list(stopwords_list, stopwords)
count = 0
success = 0
print 'Thread :' + str(t) + ' subsystems :' + str(len(subsystems)) + ' ' + str(len(lines))
#if t==0:
# print completed
for line in lines:
if t==10 and count % 1000 ==0:
print count
if count % N != t:
count += 1
continue
count += 1
if seq_beg_pattern.search(line):
words = line.rstrip().split()
seqname = re.sub('>', '', words.pop(0))
product = ' '.join(words)
productdict = format_product(product)
best_match, score = get_best_match(productdict, subsystems)
if best_match != None:
fprintf(outputfile, ">%s\n", seqname + '\t' + best_match)
success +=1
else:
fprintf(outputfile, ">%s\n", seqname + '\t' + product)
outputfile.close()
def get_best_match( productdict, subsystems = None, minMatchScore = 80):
perfectmatch = 85
maxScore = 0
maxMatch = None
    # NOTE: the original computed (1 - minMatchScore) + 1, which is negative
    # for percentage scores and made computescore reject any mismatch; the
    # intended allowed-failure fraction appears to be:
    maxFail = (100 - minMatchScore) / 100.0
for key, value in subsystems.iteritems():
score = computescore(productdict, value, maxFail)
if score > maxScore and score > minMatchScore:
maxScore = score
maxMatch = key
if maxScore > perfectmatch:
break
return maxMatch, maxScore
def computescore(productlist, value, maxFail) :
matchSize = 0
if len(productlist.keys()) > len(value.keys()):
length = len(value.keys())
maxFailNum = maxFail*length
fails = 0
for word in value:
if word in productlist:
matchSize += 1
else:
fails +=1
if fails > maxFailNum:
return 0
else:
length = len(productlist.keys())
maxFailNum = maxFail*length
fails = 0
for word in productlist:
if word in value:
matchSize += 1
else:
fails +=1
if fails > maxFailNum:
return 0
if length == 0 :
return 0
return 100*matchSize/length
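# Illustrative sketch (not part of the original script): computescore
# returns the percentage of words in the shorter dictionary that also occur
# in the longer one, bailing out early once too many words miss.
def _example_computescore():
    product = {'kinase': 1, 'atp': 1, 'binding': 1}
    subsystem = {'kinase': 1, 'atp': 1}
    # the shorter dict (subsystem, 2 words) is fully matched -> score 100
    assert computescore(product, subsystem, 0.2) == 100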
def process_subsystems_file(subsystems_file, subsystems) :
try:
subsystemsfile = open(subsystems_file,'r')
except IOError:
print "Cannot open " + str(subsystems_file)
sublines=subsystemsfile.readlines()
subsystemsfile.close()
for line in sublines:
words = [x.strip() for x in line.rstrip().split('\t') ]
maxField = len(words) - 1
if maxField < 2:
continue
subsystems[words[maxField]] = create_dictionary_from_list(create_words_list(words[maxField]))
class ThreadClass(threading.Thread):
N = None
output_file = None
subsystems = None
#lines = None
i = None
#completed = [ 0 for i in range(20) ]
def __init__(self, output_file, i, N, subsystems):
threading.Thread.__init__(self)
self.output_file = output_file
self.i = i
self.N = N
self.subsystems = subsystems
def run(self):
pass
# process_products_file(self.output_file, self.i, self.N, None)
# the main function
completed = [ 0 for i in range(0, 20)]
def main(argv):
(opts, args) = parser.parse_args()
if not check_arguments(opts, args):
print usage
        sys.exit(1)
N = opts.numthreads
global subsystems
global lines
subsystems = {}
lines = []
process_subsystems_file(opts.subsystems, subsystems)
Threads = []
try:
productsfile = open( opts.products,'r')
lines = productsfile.readlines()
productsfile.close()
    except IOError:
        print 'cannot open ' + opts.products
        sys.exit(1)
# print 'lines : ' + str(len(ThreadClass.lines))
try:
for i in range(0, N):
print i, opts.products, opts.output_file + str(i), N #, len(subsystems), len(ThreadClass.lines)
t = Process(target = process_products_file, args = (opts.output_file + str(i), i, N, ) )
Threads.append(t)
except:
print "Error: unable to start thread"
for t in Threads:
t.start()
for t in Threads:
t.join()
# letter_function_map = process_function_file(opts.func_file)
# functionTriplets_organism_map = process_orginasim_file(opts.org_file)
# whog_scheme = {}
# process_whog_file(opts.whog_file, whog_scheme)
# seqid_ginumber = {}
# if opts.myva_gb_file:
# seqid_ginumber= process_ginumber_file(opts.myva_gb_file)
# write_output(whog_scheme, letter_function_map, functionTriplets_organism_map, seqid_ginumber, opts.output_file)
# the main function of metapaths
if __name__ == "__main__":
main(sys.argv[1:])
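# Illustrative invocation (not part of the original script); file names are
# hypothetical:
#   python format_cog_names.py --products products.fasta \
#       --subsystems subsystems.txt -o out.txt -N 4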
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import sys
import copy
from datetime import datetime
from vistrails.core.data_structures.graph import Graph
from vistrails.core.system import time_strptime
from vistrails.db.versions.v0_5_0.domain import DBVistrail, DBAction, DBTag, DBModule, \
DBConnection, DBPortSpec, DBFunction, DBParameter, DBLocation, DBAdd, \
DBChange, DBDelete, DBAnnotation, DBPort
def convertDate(date):
if date is not None and date != '':
return datetime(*time_strptime(date, '%d %b %Y %H:%M:%S')[0:6])
return datetime(1900, 1, 1)
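# Illustrative sketch (not part of the original module): convertDate parses
# the legacy '%d %b %Y %H:%M:%S' timestamps and falls back to 1900-01-01
# for empty or missing dates:
#   convertDate('27 Nov 2011 10:30:00')  ->  datetime(2011, 11, 27, 10, 30)
#   convertDate('')                      ->  datetime(1900, 1, 1)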
def translateVistrail(_vistrail):
vistrail = DBVistrail()
for _action in _vistrail.db_get_actions():
# print 'translating action %s' % _action.db_time
functionName = 'translate%s%sAction' % \
(_action.db_what[0].upper(), _action.db_what[1:])
thisModule = sys.modules[__name__]
action = getattr(thisModule, functionName)(_action)
vistrail.db_add_action(action)
for _tag in _vistrail.db_get_tags():
tag = DBTag(time=_tag.db_time,
name=_tag.db_name)
vistrail.db_add_tag(tag)
convertIds(vistrail)
# for action in vistrail.getActions():
# print '%s %s' % (action.id, action.operations)
vistrail.db_version = '0.5.0'
return vistrail
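# Illustrative sketch (not part of the original module): translateVistrail
# dispatches on each action's db_what string by name.  For example, an
# action with db_what == 'addModule' is routed to translateAddModuleAction:
#   'translate%s%sAction' % ('addModule'[0].upper(), 'addModule'[1:])
#       ->  'translateAddModuleAction'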
def translateAddModuleAction(_action):
operations = []
for _module in _action.db_datas:
module = DBModule(id=_module.db_id,
name=_module.db_name,
cache=1,
location=DBLocation(id=_module.db_id,
x=_module.db_x,
y=_module.db_y))
module.db_location.relative = False
operation = DBAdd(id=_action.db_time,
what='module',
objectId=_module.db_id,
data=module)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateAddConnectionAction(_action):
operations = []
for _connection in _action.db_datas:
source = DBPort(id=_connection.db_id,
type='source',
moduleId=_connection.db_sourceId,
moduleName=_connection.db_sourceModule,
sig=_connection.db_sourcePort)
destination = DBPort(id=_connection.db_id,
type='destination',
moduleId=_connection.db_destinationId,
moduleName=_connection.db_destinationModule,
sig=_connection.db_destinationPort)
connection = DBConnection(id=_connection.db_id,
ports=[source, destination])
operation = DBAdd(id=_action.db_time,
what='connection',
objectId=_connection.db_id,
data=connection)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateChangeParameterAction(_action):
operations = []
for _set in _action.db_datas:
parameter = DBParameter(id=_set.db_parameterId,
pos=_set.db_parameterId,
name=_set.db_parameter,
alias=_set.db_alias,
val=_set.db_value,
type=_set.db_type)
function = DBFunction(id=_set.db_functionId,
pos=_set.db_functionId,
name=_set.db_function,
parameters=[parameter])
operation = DBChange(id=_action.db_time,
what='function',
oldObjId=_set.db_functionId,
parentObjId=_set.db_moduleId,
parentObjType='module',
data=function)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateAddModulePortAction(_action):
operations = []
for _portSpec in _action.db_datas:
# ids need to be checked
portSpec = DBPortSpec(id=_portSpec.db_moduleId,
name=_portSpec.db_portName,
type=_portSpec.db_portType,
spec=_portSpec.db_portSpec)
operation = DBAdd(id=_action.db_time,
what='portSpec',
objectId=(_portSpec.db_portName,
_portSpec.db_portType),
parentObjId=_portSpec.db_moduleId,
parentObjType='module',
data=portSpec)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateChangeAnnotationAction(_action):
operations = []
for _annotation in _action.db_datas:
if _annotation.db_key.strip() != '' or \
_annotation.db_value.strip() != '':
annotation = DBAnnotation(id=-1,
key=_annotation.db_key,
value=_annotation.db_value)
operation = DBChange(id=_action.db_time,
what='annotation',
oldObjId=_annotation.db_key,
parentObjId=_annotation.db_moduleId,
parentObjType='module',
data=annotation)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateDeleteModuleAction(_action):
operations = []
for _module in _action.db_datas:
operation = DBDelete(id=_action.db_time,
what='module',
objectId=_module.db_moduleId)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateDeleteConnectionAction(_action):
operations = []
for _connection in _action.db_datas:
operation = DBDelete(id=_action.db_time,
what='connection',
objectId=_connection.db_connectionId)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateDeleteFunctionAction(_action):
operations = []
for _function in _action.db_datas:
operation = DBDelete(id=_action.db_time,
what='function',
objectId=_function.db_functionId,
parentObjId=_function.db_moduleId,
parentObjType='module')
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateDeleteAnnotationAction(_action):
operations = []
for _annotation in _action.db_datas:
operation = DBDelete(id=_action.db_time,
what='annotation',
objectId=_annotation.db_key)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateDeleteModulePortAction(_action):
operations = []
for _portSpec in _action.db_datas:
operation = DBDelete(id=_action.db_time,
what='portSpec',
objectId=(_portSpec.db_portName,
_portSpec.db_portType),
parentObjId=_portSpec.db_moduleId,
parentObjType='module')
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
def translateMoveModuleAction(_action):
operations = []
for _location in _action.db_datas:
location = DBLocation(id=_location.db_id,
x=_location.db_dx,
y=_location.db_dy)
location.relative = True
operation = DBChange(id=_action.db_time,
what='location',
oldObjId=_location.db_id,
parentObjId=_location.db_id,
parentObjType='module',
data=location)
operations.append(operation)
action = DBAction(id=_action.db_time,
prevId=_action.db_parent,
date=convertDate(_action.db_date),
user=_action.db_user,
operations=operations)
return action
### UPDATE IDS ###
def convertIds(vistrail):
actions = vistrail.db_get_actions()
actions.sort(key=lambda x: x.db_id)
objectDict = {}
# refDict = {'objectDict': objectDict}
graph = Graph()
for action in actions:
graph.add_vertex(action.db_id)
graph.add_edge(action.db_prevId, action.db_id)
def convertAction(actionId):
if actionId == 0:
return
allOps = []
action = vistrail.db_get_action(actionId)
# objectDict = refDict['objectDict']
# if action.actionType == 'delete' or action.actionType == 'change':
# action.objectDict = copy.deepcopy(objectDict)
# else:
# action.objectDict = objectDict
for operation in action.db_get_operations():
allOps.extend(convertOperation(vistrail,
objectDict,
operation.vtType,
operation))
action.db_operations = allOps
def removeObjects(actionId):
if actionId == 0:
return
# print "removeObjects(%s)" % actionId
action = vistrail.db_get_action(actionId)
# need to reverse ops here
reverseOps = action.db_get_operations()
reverseOps.reverse()
for operation in reverseOps:
parentList = getTypeIdList(operation)
removeObject(operation.db_what,
operation.db_oldId,
objectDict,
parentList[:-1])
reverseOps.reverse()
graph.dfs(enter_vertex=convertAction,
leave_vertex=removeObjects)
def getTypeIdList(operation):
if operation.db_what in ('module', 'connection'):
return [(operation.db_what, operation.db_oldId)]
elif operation.db_what in \
('function', 'portSpec', 'location', 'annotation'):
return [('module', operation.db_oldParentId),
(operation.db_what, operation.db_oldId)]
    elif operation.db_what in ('port',):
        return [('connection', operation.db_oldParentId),
                (operation.db_what, operation.db_oldId)]
    elif operation.db_what in ('parameter',):
        return [('module', operation.db_moduleId),
                ('function', operation.db_oldParentId),
                ('parameter', operation.db_oldId)]
else:
print "unknown type: '%s'" % operation.db_what
return [(operation.db_what, operation.db_oldId)]
def getOldId(object):
if object.vtType == 'annotation':
return object.db_key
elif object.vtType == 'port':
return object.db_type
elif object.vtType == 'portSpec':
return (object.db_name, object.db_type)
else:
return object.getPrimaryKey()
def getChildren(object):
childList = []
if object.vtType == 'module':
childList = object.db_get_functions() + \
object.db_get_portSpecs() + \
object.db_get_annotations()
childList.append(object.db_location)
object.db_functions = []
object.db_portSpecs = []
object.db_annotations = {}
object.db_location = None
elif object.vtType == 'connection':
childList = object.db_get_ports()
object.db_ports = []
elif object.vtType == 'function':
childList = object.db_get_parameters()
object.db_parameters = []
return childList
def captureObject(object, objectDict, newId, parentList):
# print "capturing %s" % object
currentDict = objectDict
for key in parentList:
(objType, objId) = key
# (currentId, newDict, _) = currentDict[(objType, objId)]
# currentDict = newDict
(objList, curIdx) = currentDict[(objType, objId)]
currentDict = objList[curIdx][1]
oldId = getOldId(object)
# print "capture: %s %s" % (object.vtType, oldId)
# currentDict[(object.vtType, oldId)] = (newId, {}, object)
    if (object.vtType, oldId) not in currentDict:
currentDict[(object.vtType, oldId)] = ([], -1)
(curList, curIdx) = currentDict[(object.vtType, oldId)]
curList.append((newId, {}, object, curIdx))
currentDict[(object.vtType, oldId)] = (curList, len(curList) - 1)
def captureDelete(objType, objId, objectDict, parentList):
currentDict = objectDict
for (aType, aId) in parentList:
# (currentId, newDict, _) = currentDict[(objType, objId)]
# currentDict = newDict
(objList, curIdx) = currentDict[(aType, aId)]
currentDict = objList[curIdx][1]
# print "captureDelete: %s %s" % (objType, objId)
    if (objType, objId) not in currentDict:
raise Exception("invalid delete")
(curList, curIdx) = currentDict[(objType, objId)]
curList.append((-1, {}, None, curIdx))
currentDict[(objType, objId)] = (curList, len(curList) - 1)
def removeObject(oldObjType, oldId, objectDict, parentList):
# print '%s %s' % (oldObjType, oldId)
# print objectDict
# print parentList
try:
currentDict = objectDict
for key in parentList:
(objType, objId) = key
# (currentId, newDict, _) = currentDict[(objType, objId)]
# currentDict = newDict
(objList, objIdx) = currentDict[(objType, objId)]
currentDict = objList[objIdx][1]
# print "remove: %s %s" % (oldObjType, oldId)
(curList, curIdx) = currentDict[(oldObjType, oldId)]
# print "ok"
newIdx = curList[curIdx][3]
# del curList[curIdx]
currentDict[(oldObjType, oldId)] = (curList, newIdx)
except KeyError:
print "cannot remove (%s, %s)" % (oldObjType, oldId)
print parentList
print objList
print "index: %s" % objIdx
def findNewId(typeIdList, objectDict):
try:
currentDict = objectDict
for key in typeIdList:
# (currentId, currentDict, currentObj) = currentDict[key]
(objList, curIdx) = currentDict[key]
if curIdx == -1:
return (None, None)
(currentId, currentDict, currentObj, _) = objList[curIdx]
if currentId == -1:
return (None, None)
return (currentId, currentObj)
except KeyError:
pass
return (None, None)
def getChildList(typeIdList, objectDict):
try:
currentDict = objectDict
for (objType, objOldId) in typeIdList:
# (currentId, currentDict, _) = currentDict[(objType, objOldId)]
(objList, curIdx) = currentDict[(objType, objOldId)]
if curIdx == -1:
return {}
currentDict = objList[curIdx][1]
return currentDict
except KeyError:
pass
return {}
def createOperation(actionType, objId, objType, parentId, parentType,
object=None):
if actionType == 'add':
operation = DBAdd(what=objType,
objectId=objId,
parentObjId=parentId,
parentObjType=parentType,
data=object)
elif actionType == 'change':
operation = DBChange(what=objType,
oldObjId=objId,
parentObjId=parentId,
parentObjType=parentType,
data=object)
elif actionType == 'delete':
operation = DBDelete(what=objType,
objectId=objId,
parentObjId=parentId,
parentObjType=parentType)
else:
msg = "Cannot find actionType='%s'" % actionType
raise Exception(msg)
return operation
def convertChangeToAdd(operation):
return DBAdd(what=operation.db_what,
objectId=operation.db_newObjId,
parentObjId=operation.db_parentObjId,
parentObjType=operation.db_parentObjType,
data=operation.db_data)
def convertOperation(vistrail, objectDict, actionType, operation):
newOps = []
if actionType == 'add':
object = operation.db_data
if object.vtType == 'parameter' and object.db_pos == -1:
return newOps
operation.db_oldId = operation.db_objectId
if operation.db_what == 'annotation':
operation.db_oldId = object.db_key
elif operation.db_what == 'port':
operation.db_oldId = object.db_type
operation.db_oldParentId = operation.db_parentObjId
parentList = getTypeIdList(operation)
newId = vistrail.idScope.getNewId(object.vtType)
captureObject(object, objectDict, newId, parentList[:-1])
operation.db_objectId = newId
oldId = object.getPrimaryKey()
if object.vtType == 'annotation':
oldId = object.db_key
elif object.vtType == 'port':
oldId = object.db_type
if hasattr(object, 'db_id'):
object.db_id = newId
# set parent ids correctly...
operation.db_id = vistrail.idScope.getNewId('operation')
if operation.db_parentObjId is not None:
oldParentObjId = operation.db_parentObjId
operation.db_parentObjId = findNewId(parentList[:-1], objectDict)[0]
if object.vtType == 'port':
object.db_moduleId = \
findNewId([('module', object.db_moduleId)], objectDict)[0]
# if object.vtType == 'connection':
# for port in object.db_ports.itervalues():
# port.db_moduleId = \
# findNewId([('module', port.db_moduleId)], objectDict)[0]
newOps.append(operation)
# set child operations
children = getChildren(object)
for child in children:
# hack to get around fact that location ids are wrong
if child.vtType == 'location':
child.db_id = oldId
newOp = createOperation('add',
child.getPrimaryKey(),
child.vtType,
oldId,
object.vtType,
child)
# hack to get moduleId at parameter level
if child.vtType == 'parameter':
newOp.db_moduleId = oldParentObjId
newOps.extend(convertOperation(vistrail,
objectDict,
'add',
newOp))
newOp.db_parentObjId = newId
elif actionType == 'change':
object = operation.db_data
if object.vtType == 'parameter' and object.db_pos == -1:
return newOps
operation.db_oldId = operation.db_oldObjId
if operation.db_what == 'annotation':
operation.db_oldId = object.db_key
elif operation.db_what == 'port':
operation.db_oldId = object.db_type
operation.db_oldParentId = operation.db_parentObjId
parentList = getTypeIdList(operation)
# need to get changed id as new id if have one
(foundId, foundObj) = findNewId(parentList, objectDict)
if foundId is not None:
if foundObj.vtType == 'function' and \
foundObj.db_pos == object.db_pos and \
foundObj.db_name == object.db_name:
# don't create new function, convert parameter
for parameter in object.db_parameters:
newOp = createOperation('change',
parameter.getPrimaryKey(),
parameter.vtType,
object.getPrimaryKey(),
object.vtType,
parameter)
newOp.db_moduleId = operation.db_parentObjId
newOps.extend(convertOperation(vistrail,
objectDict,
'change',
newOp))
newOp.db_parentObjId = foundId
return newOps
else:
            if foundObj.vtType == 'location' and object.relative:
object.db_x += foundObj.db_x
object.db_y += foundObj.db_y
object.relative = False
# get new id for new object
newId = vistrail.idScope.getNewId(object.vtType)
operation.db_oldObjId = foundId
operation.db_newObjId = newId
else:
# get new id for new object
newId = vistrail.idScope.getNewId(object.vtType)
operation.db_oldObjId = -1
operation.db_newObjId = newId
anOldId = operation.db_oldId
anOldParentId = operation.db_parentObjId
if hasattr(operation,'db_moduleId'):
aModuleId = operation.db_moduleId
else:
aModuleId = None
operation = convertChangeToAdd(operation)
operation.db_oldId = anOldId
operation.db_oldParentId = operation.db_parentObjId
operation.db_moduleId = aModuleId
# need to do child deletes first
childDict = getChildList(parentList, objectDict)
for k,v in childDict.items():
(objType, objId) = k
# (newId, newDict) = v
# print 'creating delete for %s' % objType
newOp = createOperation('delete',
objId,
objType,
object.getPrimaryKey(),
object.vtType)
# hack to get moduleId at parameter level
if objType == 'parameter':
newOp.db_moduleId = operation.db_parentObjId
newOps.extend(convertOperation(vistrail,
objectDict,
'delete',
newOp))
newOp.db_parentObjId = newId
# don't reverse -- ordering is correct
# newOps.reverse()
# set new object id
captureObject(object, objectDict, newId, parentList[:-1])
# operation.db_objectId = newId
oldId = object.getPrimaryKey()
if object.vtType == 'annotation':
oldId = object.db_key
elif object.vtType == 'port':
oldId = object.db_type
if hasattr(object, 'db_id'):
object.db_id = newId
# set parent ids correctly...
operation.db_id = vistrail.idScope.getNewId('operation')
if operation.db_parentObjId is not None:
oldParentObjId = operation.db_parentObjId
operation.db_parentObjId = findNewId(parentList[:-1], objectDict)[0]
if object.vtType == 'port':
object.db_moduleId = \
findNewId([('module', object.db_moduleId)], objectDict)[0]
# if object.vtType == 'connection':
# for port in object.db_ports.itervalues():
# port.db_moduleId = \
# findNewId([('module', port.db_moduleId)], objectDict)[0]
newOps.append(operation)
# set child operations
children = getChildren(operation.db_data)
for child in children:
# print 'creating add for %s' % child.vtType
newOp = createOperation('add',
child.getPrimaryKey(),
child.vtType,
oldId,
object.vtType,
child)
# hack to get moduleId at parameter level
if child.vtType == 'parameter':
newOp.db_moduleId = oldParentObjId
newOps.extend(convertOperation(vistrail,
objectDict,
'add',
newOp))
newOp.db_parentObjId = newId
elif actionType == 'delete':
operation.db_oldId = operation.db_objectId
# if operation.db_what == 'annotation':
# operation.db_oldId = object.db_key
# elif operation.db_what == 'port':
# operation.db_oldId = object.db_type
operation.db_oldParentId = operation.db_parentObjId
parentList = getTypeIdList(operation)
# get new id for delete operation
(newId, _) = findNewId(parentList, objectDict)
# print 'found new id: %s' % newId
if newId is None:
msg = "Cannot find id: %s" % parentList
print msg
# raise Exception(msg)
return []
# need to do child deletes first
childDict = getChildList(parentList, objectDict)
for k,v in childDict.items():
(objType, objId) = k
# (newId, newDict) = v
newOp = createOperation('delete',
objId,
objType,
operation.db_objectId,
operation.db_what)
# hack to get moduleId at parameter level
if objType == 'parameter':
newOp.db_moduleId = operation.db_parentObjId
newOps.extend(convertOperation(vistrail,
objectDict,
'delete',
newOp))
newOp.db_parentObjId = newId
# newOps.reverse()
captureDelete(operation.db_what, operation.db_objectId, objectDict,
parentList[:-1])
operation.db_objectId = newId
# set parent ids correctly
operation.db_id = vistrail.idScope.getNewId('operation')
if operation.db_parentObjId is not None:
operation.db_parentObjId = findNewId(parentList[:-1], objectDict)[0]
newOps.append(operation)
return newOps
"""
Object classes for recording and plotting time-series data.
This module defines a set of DataRecorder object types for recording
time-series data, a set of Trace object types for
specifying ways of generating 1D-vs-time traces from recorded data,
and a TraceGroup object that will plot a set of traces on stacked,
aligned axes.
"""
import os
import bisect
from itertools import izip
from numpy import asarray
import ImageDraw
import param
from param import normalize_path
from topo.base.simulation import EventProcessor
from topo.plotting.bitmap import RGBBitmap, MontageBitmap, TITLE_FONT
from topo.misc.util import Struct
class DataRecorder(EventProcessor):
"""
Record time-series data from a simulation.
A DataRecorder instance stores a set of named time-series
variables, consisting of a sequence of recorded data items of any
type, along with the times at which they were recorded.
DataRecorder is an abstract class for which different
implementations may exist for different means of storing recorded
data. For example, the subclass InMemoryRecorder stores all the
data in memory.
A DataRecorder instance can operate either as an event processor, or in a
stand-alone mode. Both usage modes can be used on the same
instance in the same simulation.
STAND-ALONE USAGE:
A DataRecorder instance is used as follows:
- Method .add_variable adds a named time series variable.
- Method .record_data records a new data item and timestamp.
- Method .get_data gets a time-delimited sequence of data from a variable
EVENTPROCESSOR USAGE:
A DataRecorder can also be connected to a simulation as an event
processor, forming a kind of virtual recording equipment. An
output port from any event processor in a simulation can be
    connected to a DataRecorder; the recorder will automatically create
a variable with the same name as the connection, and record any
incoming data on that variable with the time it was received. For
example::
topo.sim['Recorder'] = InMemoryRecorder()
topo.sim.connect('V1','Recorder',name='V1 Activity')
This script snippet will create a new DataRecorder and
automatically record all activity sent from the sheet 'V1'.
"""
__abstract = True
def __init__(self,**params):
super(DataRecorder,self).__init__(**params)
self._trace_groups = {}
def _src_connect(self,conn):
raise NotImplementedError
def _dest_connect(self,conn):
super(DataRecorder,self)._dest_connect(conn)
self.add_variable(conn.name)
def input_event(self,conn,data):
self.record_data(conn.name,self.simulation.time(),data)
def add_variable(self,name):
"""
Create a new time-series variable with the given name.
"""
raise NotImplementedError
def record_data(self,varname,time,data):
"""
Record the given data item with the given timestamp in the
named timeseries.
"""
raise NotImplementedError
def get_data(self,varname,times=(None,None),fill_range=False):
"""
Get the named timeseries between the given times
(inclusive). If fill_range is true, the returned data will
have timepoints exactly at the start and end of
the given timerange. The data values at these timepoints will
be those of the next-earlier datapoint in the series.
        (NOTE: fill_range can fail to create a beginning timepoint if
        the start of the time range is earlier than the first recorded datapoint.)
"""
raise NotImplementedError
def get_times(self,var):
"""
Get all the timestamps for a given variable.
"""
raise NotImplementedError
def get_time_indices(self,varname,start_time,end_time):
"""
For the named variable, get the start and end indices suitable
for slicing the data to include all times t::
start_time <= t <= end_time.
A start_ or end_time of None is interpreted to mean the
earliest or latest available time, respectively.
"""
times = self.get_times(varname)
if start_time is None:
start = 0
else:
start = bisect.bisect_left(times,start_time)
if start >= len(times):
start = len(times)-1
elif times[start] > start_time:
start -= 1
if end_time is None:
end = None
else:
end = bisect.bisect_right(times,end_time)
return start,end
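    # Illustrative sketch (not part of the original class): for recorded
    # times [0, 1, 2, 3], get_time_indices(var, 1, 2) yields
    # start = bisect_left(times, 1) = 1 and end = bisect_right(times, 2) = 3,
    # so times[1:3] == [1, 2] covers exactly 1 <= t <= 2.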
class InMemoryRecorder(DataRecorder):
"""
A data recorder that stores all recorded data in memory.
"""
def __init__(self,**params):
super(InMemoryRecorder,self).__init__(**params)
self._vars = {}
def add_variable(self,name):
self._vars[name] = Struct(time=[],data=[])
def record_data(self,varname,time,data):
var = self._vars[varname]
# add the data, maintaining it sorted by time
if not var.time or var.time[-1] <= time:
var.time.append(time)
var.data.append(data)
elif time < var.time[0]:
var.time.insert(0,time)
var.data.insert(0,data)
else:
idx = bisect.bisect_right(var.time,time)
var.time.insert(idx,time)
var.data.insert(idx,data)
def get_datum(self,name,time):
idx,dummy = self.get_time_indices(name,time,time)
data = self._vars[name].data
if idx >= len(data):
idx -= 1
return data[idx]
def get_data(self,name,times=(None,None),fill_range=False):
tstart,tend = times
start,end = self.get_time_indices(name,tstart,tend)
var = self._vars[name]
if start >= len(var.data):
# if the start index is out of bounds
if fill_range:
time = times
data = [var.data[-1]]*2
else:
time,data = [],[]
else:
time,data = var.time[start:end],var.data[start:end]
if fill_range:
if time[0] > tstart and start > 0:
time.insert(0,tstart)
data.insert(0,var.data[start-1])
if time[-1] < tend:
time.append(tend)
data.append(data[-1])
return time,data
def get_times(self,varname):
return self._vars[varname].time
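# Illustrative stand-alone usage (not part of the original module), per the
# DataRecorder docstring:
#   r = InMemoryRecorder()
#   r.add_variable('v1')
#   r.record_data('v1', 0.0, 10)
#   r.record_data('v1', 1.0, 20)
#   r.get_data('v1')        # -> ([0.0, 1.0], [10, 20])
#   r.get_datum('v1', 0.5)  # -> 10, the next-earlier datapoint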
class Trace(param.Parameterized):
"""
A specification for generating 1D traces of data from recorded
timeseries.
A Trace object is a callable object that encapsulates
a method for generating a 1-dimensional trace from possibly
multidimensional timeseries data, along with a specification for
how to plot that data, including Y-axis boundaries and plotting arguments.
Trace is an abstract class. Subclasses implement
the __call__ method to define how to extract a 1D trace from a
sequence of data.
"""
__abstract = True
data_name = param.String(default=None,doc="""
Name of the timeseries from which the trace is generated.
E.g. the connection name into a DataRecorder object.""")
# JPALERT: This should really be something like a NumericTuple,
# except that NumericTuple won't allow the use of None to indicate
# 'no default'. (Nor will Number.)
# JB: We could have that as an option, or as another Parameter
# type, but in many cases knowing that the parameter cannot be set
# to a non-numeric value is crucial, as it means we don't have to
# do special checks every time the value is used. So we should
# leave the default behavior as it is, but yes, it would be good
# to handle None for numeric types (and also for Boolean, to make
# it tri-state). Note that Bounds is a specific type that we
# should probably support in any case, because it not only needs
# to support None, it needs to specify whether the bounds are
# inclusive or exclusive.
ybounds = param.Parameter(default=(None,None),doc="""
        The (min,max) boundaries for the y axis.  If either is None, the
        corresponding bound is taken from the min or max of the data,
        respectively.""")
ymargin = param.Number(default=0.1,doc="""
The fraction of the difference ymax-ymin to add to the
top of the plot as padding.""")
plotkw = param.Dict(default=dict(linestyle='steps'),doc="""
Contains the keyword arguments to pass to the plot command
when plotting the trace.""")
def __call__(self,data):
raise NotImplementedError
# JB: Needs docstring. Should this be a property instead?
def get_ybounds(self,ydata):
ymin,ymax = self.ybounds
if ymax is None:
ymax = max(ydata)
if ymin is None:
ymin = min(ydata)
ymax += (ymax-ymin)*self.ymargin
return ymin,ymax
class IdentityTrace(Trace):
"""
A Trace that returns the data, unmodified.
"""
def __call__(self,data):
return data
class IndexTrace(Trace):
"""
A Trace that assumes that each data item is a sequence that can be
indexed with a single integer, and traces the value of one indexed element.
"""
index = param.Integer(default=0,doc="""
The index into the data to be traced.""")
def __call__(self,data):
return [x[self.index] for x in data]
class SheetPositionTrace(Trace):
"""
A trace that assumes that the data are sheet activity matrices,
and traces the value of a given (x,y) position on the sheet.
"""
x = param.Number(default=0.0,doc="""
The x sheet-coordinate of the position to be traced.""")
y = param.Number(default=0.0,doc="""
The y sheet-coordinate of the position to be traced.""")
position = param.Composite(attribs=['x','y'],doc="""
The sheet coordinates of the position to be traced.""")
# JPALERT: Would be nice to some way to set up the coordinate system
# automatically. The DataRecorder object already knows what Sheet
# the data came from.
coordframe = param.Parameter(default=None,doc="""
The SheetCoordinateSystem to use to convert the position
into matrix coordinates.""")
def __call__(self,data):
r,c = self.coordframe.sheet2matrixidx(self.x,self.y)
return [d[r,c] for d in data]
class TraceGroup(param.Parameterized):
"""
A group of data traces to be plotted together.
A TraceGroup defines a set of associated data traces and allows
them to be plotted on stacked, aligned axes. The constructor
takes a DataRecorder object as a data source, and a list of
Trace objects that indicate the traces to plot. The
trace specifications are stored in the attribute self.traces,
which can be modified at any time.
"""
hspace = param.Number(default=0.6,doc="""
Height spacing adjustment between plots. Larger values
produce more space.""")
time_axis_relative = param.Boolean(default=False,doc="""
Whether to plot the time-axis tic values relative to the start
of the plotted time range, or in absolute values.""")
def __init__(self,recorder,traces=[],**params):
super(TraceGroup,self).__init__(**params)
self.traces = traces
self.recorder = recorder
def plot(self,times=(None,None)):
"""
Plot the traces.
Requires MatPlotLib (aka pylab).
Plots the traces specified in self.traces, over the timespan
specified by times. times = (start_time,end_time); if either
start_time or end_time is None, it is assumed to extend to the
beginning or end of the timeseries, respectively.
"""
import pylab
rows = len(self.traces)
tstart,tend = times
pylab.subplots_adjust(hspace=self.hspace)
for i,trace in enumerate(self.traces):
# JPALERT: The TraceGroup object should really create its
# own matplotlib.Figure object and always plot there
# (instead of in the frontmost plot), but I haven't
# figured out how to do that yet.
pylab.subplot(rows,1,i+1)
pylab.title(trace.name)
time,data = self.recorder.get_data(trace.data_name,times=times,fill_range=True)
y = trace(data)
if self.time_axis_relative:
time = asarray(time) - time[0]
pylab.plot(time,y,**trace.plotkw)
ymin,ymax = trace.get_ybounds(y)
pylab.axis(xmin=time[0],xmax=time[-1],ymin=ymin,ymax=ymax)
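    # Illustrative usage (not part of the original class); the variable name
    # is hypothetical:
    #   tg = TraceGroup(recorder,
    #                   traces=[IdentityTrace(data_name='V1 Activity')])
    #   tg.plot(times=(0, 10))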
def get_images(name,times,recorder,overlays=(0,0,0)):
"""
Get a time-sequence of matrix data from a DataRecorder variable
and convert it to a sequence of images stored in Bitmap objects.
Parameters: name is the name of the variable to be queried. times
is a sequence of timepoints at which to query the
variable. recorder is the data recorder. overlays is a tuple of
matrices or scalars to be added to the red, green, and blue
channels of the bitmaps respectively.
"""
result = []
for t in times:
d = recorder.get_datum(name,t)
im = RGBBitmap(d+overlays[0],d+overlays[1],d+overlays[2])
result.append(im)
return result
# JABALERT: Is there some reason it is called ActivityMovie in
# particular, if it can plot things other than Activity?
# Maybe DataRecorderMovie?
class ActivityMovie(param.Parameterized):
"""
An object encapsulating a series of movie frames displaying the
value of one or more matrix-valued time-series contained in a
DataRecorder object.
An ActivityMovie takes a DataRecorder object, a list of names of
variables in that recorder and a sequence of timepoints at which
to sample those variables. It uses that information to compose a
sequence of MontageBitmap objects displaying the stored values of
each variable at each timepoint. These bitmaps can then be saved
to sequentially-named files that can be composited into a movie by
external software.
Parameters are available to control the layout of the montage,
adding timecodes to the frames, and the names of the frame files.
"""
variables = param.List(class_=str, doc="""
A list of variable names in a DataRecorder object containing
matrix-valued time series data.""")
overlays = param.Dict(default={}, doc="""
        A dictionary indicating overlays for the variable bitmaps.  For each
        key in the dict matching the name of a variable, the associated value
        should be a triple of matrices to be overlaid on the red, green, and
        blue channels of the corresponding bitmap in each frame.""")
frame_times = param.List(default=[0,1], doc="""
A list of the times of the frames in the movie.""")
montage_params = param.Dict(default={},doc="""
A dictionary containing parameters to be used when
instantiating the MontageBitmap objects representing each frame.""",
instantiate=False)
recorder = param.ClassSelector(class_=DataRecorder, doc="""
The DataRecorder storing the timeseries.""")
filename_fmt = param.String(default='%n_%t.%T',doc="""
The format for the filenames used to store the frames. The following
substitutions are possible:
%n: The name of this ActivityMovie object.
%t: The frame time, as formatted by the filename_time_fmt parameter
%T: The filetype given by the filetype parameter. """)
filename_time_fmt = param.String(default='%05.0f', doc="""
The format of the frame time, using Python string substitution for
a floating-point number.""")
filetype = param.String(default='tif',doc="""
The filetype to use when writing frames. Can be any filetype understood
by the Python Imaging Library.""")
filename_prefix = param.String(default='', doc="""
A prefix to prepend to the filename of each frame when saving;
can include directories. If the filename contains a path, any
non-existent directories in the path will be created when the
movie is saved.""")
add_timecode = param.Boolean(default=False, doc="""
Whether to add a visible timecode indicator to each frame.""")
timecode_options = param.Dict(default={},instantiate=False,doc="""
A dictionary of keyword options to be passed to the PIL ImageDraw.text method
when drawing the timecode on the frame. Valid options include font,
an ImageFont object indicating the text font, and fill a PIL color
specification indicating the text color. If unspecified, color defaults to
the PIL default of black. Font defaults to topo.plotting.bitmap.TITLE_FONT.""")
timecode_fmt = param.String(default='%05.0f',doc="""
The format of the timecode displayed in the movie frames, using
Python string substitution for a floating-point number.""")
timecode_offset = param.Number(default=0,doc="""
A value to be added to each timecode before formatting for display.""")
def __init__(self,**params):
super(ActivityMovie,self).__init__(**params)
bitmaps = [get_images(var,self.frame_times,self.recorder,
overlays=self.overlays.get(var,(0,0,0)))
for var in self.variables]
self.frames = [MontageBitmap(bitmaps=list(bms),**self.montage_params)
for bms in izip(*bitmaps)]
if self.add_timecode:
for t,f in izip(self.frame_times,self.frames):
draw = ImageDraw.Draw(f.image)
timecode = self.timecode_fmt % (t+self.timecode_offset)
tw,th = draw.textsize(timecode,font=self.timecode_options.setdefault('font',TITLE_FONT))
w,h = f.image.size
draw.text((w-tw-f.margin-1,h-th-1),timecode,**self.timecode_options)
def save(self):
"""Save the movie frames."""
filename_pat = self.name.join(self.filename_fmt.split('%n'))
filename_pat = self.filename_time_fmt.join(filename_pat.split('%t'))
filename_pat = self.filetype.join(filename_pat.split('%T'))
filename_pat = normalize_path(filename_pat,prefix=self.filename_prefix)
dirname = os.path.dirname(filename_pat)
if not os.access(dirname,os.F_OK):
os.makedirs(dirname)
        self.verbose('Writing',len(self.frames),'frames to files like "%s"'%filename_pat)
for t,f in zip(self.frame_times,self.frames):
            filename = filename_pat % t
self.debug("Writing frame",repr(filename))
f.image.save(filename)
# Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import subprocess
from uai.utils.logger import uai_logger
from uaitrain.operation.base_op import BaseUAITrainOp
from uaitrain.api.get_env_pkg import GetUAITrainEnvPkgAPIOp
from uaitrain.api.check_and_get_base_image_op import CheckAndGetUAITrainBaseImageApiOp
DOCKER_PUBLIC_REGISTRY = "uhub.ucloud.cn"
DOCKER_INTERNAL_REGISTRY = "uhub.service.ucloud.cn"
DOCKER_TAG_SUFFIX = "uaitrain"
TMP_CPU_DOCKER_FILE = "uaitrain-cpu.Dockerfile"
TMP_DOCKER_FILE = "uaitrain.Dockerfile"
DOCKER_RUN_CMD_FILE = "uaitrain_cmd.txt"
class BaseUAITrainDockerImagePackOp(BaseUAITrainOp):
def __init__(self, parser):
super(BaseUAITrainDockerImagePackOp, self).__init__(parser)
self.dcoker_register = DOCKER_PUBLIC_REGISTRY
def _add_pack_args(self, pack_parser):
pack_parser.add_argument(
'--os',
type=str,
default='ubuntu-14.04.05',
help='The docker os version')
pack_parser.add_argument(
'--python_version',
type=str,
default='python-2.7.6',
help='The docker python version')
pack_parser.add_argument(
'--ai_arch_v',
type=str,
required=True,
help='The AI framework and its version, e.g., tensorflow-1.1.0')
pack_parser.add_argument(
'--acc_type',
type=str,
default='gpu',
        help='The accelerator type, e.g., gpu')
def _add_image_args(self, pack_parser):
uhub_parse = pack_parser.add_argument_group(
'Docker-Params', 'Docker Parameters, help to upload docker image automatically')
uhub_parse.add_argument(
'--uhub_username',
type=str,
required=True,
help='Username to login uhub, should be your account name')
uhub_parse.add_argument(
'--uhub_password',
type=str,
required=True,
help='Password used to login uhub, should be your account password')
uhub_parse.add_argument(
'--uhub_registry',
type=str,
required=True,
help='The name of registry owned by user on ucloud console')
uhub_parse.add_argument(
'--uhub_imagename',
type=str,
required=True,
help='The docker image name')
uhub_parse.add_argument(
'--uhub_imagetag',
type=str,
default='uaitrain',
help='The docker image tag')
pack_parser.add_argument(
'--internal_uhub',
type=str,
choices=['true', 'false'],
default='false',
        help='Whether to use the internal uhub registry. Use it when you are running on a UCloud UHost')
def _add_code_args(self, pack_parser):
code_parse = pack_parser.add_argument_group(
'Code-Params', 'Code Parameters, help to pack user code into docker image')
code_parse.add_argument(
'--code_path',
type=str,
required=True,
help='The path of the user program containing all code files')
code_parse.add_argument(
'--mainfile_path',
type=str,
required=True,
        help='The path of the main python file, relative to code_path')
code_parse.add_argument(
'--train_params',
type=str,
default="",
help='The params used in training')
cmd_gen_parse = pack_parser.add_argument_group(
'Cmd-Gen-Params', 'Cmd generate params')
cmd_gen_parse.add_argument(
'--test_data_path',
type=str,
required=True,
help='The data dir for local test')
cmd_gen_parse.add_argument(
'--test_output_path',
type=str,
required=True,
help='The output dir for local test')
def _add_args(self):
pack_parser = self.parser.add_parser('pack', help='Pack local docker image for uai train')
self.pack_parser = pack_parser
self._add_account_args(pack_parser)
self._add_pack_args(pack_parser)
self._add_image_args(pack_parser)
self._add_code_args(pack_parser)
def _parse_img_args(self, args):
self.uhub_username = args['uhub_username']
self.uhub_password = args['uhub_password']
self.uhub_registry = args['uhub_registry']
self.uhub_imagename = args['uhub_imagename']
self.uhub_imagetag = args['uhub_imagetag']
self.internal_uhub = args['internal_uhub']
        self.internal_uhub = (self.internal_uhub == 'true')
        if self.internal_uhub:
            self.dcoker_register = DOCKER_INTERNAL_REGISTRY
def _parse_code_args(self, args):
self.code_path = args['code_path']
self.mainfile_path = args['mainfile_path']
self.train_params = args['train_params']
self.test_data_path = args['test_data_path']
self.test_output_path = args['test_output_path']
def _parse_args(self, args):
super(BaseUAITrainDockerImagePackOp, self)._parse_args(args)
        if 'ai_arch_v' not in args:
            print("AI Framework and its version is required, e.g. --ai_arch_v=tensorflow-1.1.0")
            return False
        self.os_v_name = args['os']
        self.python_v_name = args['python_version']
        self.ai_arch_v_name = args['ai_arch_v']
        self.acc_id_name = args['acc_type']
        # reuse the helpers above instead of duplicating their bodies inline
        self._parse_img_args(args)
        self._parse_code_args(args)
return True
def _translate_pkg_to_id(self, pkgtype, pkg):
uai_logger.info("Start download {0} package info".format(pkgtype))
api_op = GetUAITrainEnvPkgAPIOp(self.pub_key,
self.pri_key,
pkgtype,
self.project_id,
self.region,
self.zone)
succ, result = api_op.call_api()
if succ is False:
raise RuntimeError("Error get {0} info from server".format(pkgtype))
for avpkg in result['DataSet']:
if pkgtype == 'OS' or pkgtype == 'Python' or pkgtype == 'AIFrame':
versionsplit = pkg.rfind('-')
if versionsplit > 0:
if avpkg["PkgName"] == pkg[:versionsplit] and (
avpkg["PkgVersion"] == "" or avpkg["PkgVersion"] == pkg[versionsplit + 1:]):
return avpkg["PkgId"]
elif versionsplit < 0:
if avpkg["PkgName"] == pkg:
return avpkg["PkgId"]
else:
if avpkg["PkgName"] == pkg:
return avpkg["PkgId"]
uai_logger.error("Some {0} package is not supported: {1}".format(pkgtype, pkg))
raise RuntimeError("Some {0} package is not supported: {1}".format(pkgtype, pkg))
return None
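    # Illustrative sketch (not part of the original class): how package
    # strings are matched above.  'tensorflow-1.1.0' is split at the last
    # '-' into name 'tensorflow' and version '1.1.0'; a server entry with an
    # empty PkgVersion matches any requested version of that package.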
def _build_cpu_userimage(self):
'''
Build cpu image for local training
'''
uai_logger.info("Pull base image from " + self.dcoker_register)
        # subprocess.check_call raises CalledProcessError on failure, so the
        # manual retcode checks in this class never fired; subprocess.call
        # returns the exit code and makes the friendly errors reachable.
        retcode = subprocess.call(["docker", "pull", self.cpu_image], stderr=subprocess.STDOUT)
        if retcode != 0:
            raise RuntimeError("Error pull image: {0}, Please check your network".format(self.cpu_image))
uai_logger.info("Create CPU Dockerfile")
dockerbuf = []
dockerbuf.append("From " + self.cpu_image + "\n")
dockerbuf.append("ADD " + "./" + self.code_path + " /data/\n")
with open(TMP_CPU_DOCKER_FILE, 'w') as f:
f.write(''.join(dockerbuf))
uai_logger.info("Build CPU user image")
userimage = self.uhub_imagename + "-cpu"
        if self.uhub_imagetag is not None and self.uhub_imagetag != "":
userimage = userimage + ":" + self.uhub_imagetag
else:
userimage = userimage + ":" + DOCKER_TAG_SUFFIX
        retcode = subprocess.call(["docker", "build", "-t", userimage, "-f", TMP_CPU_DOCKER_FILE, "."],
                                  stderr=subprocess.STDOUT)
if retcode != 0:
raise RuntimeError("Error build image: {0}, Please retry".format(userimage))
self.user_cpu_image = userimage
def _build_gpu_userimage(self):
'''
Build actual image for training
'''
uai_logger.info("Build training docker image")
uai_logger.info("Pull base image from " + self.dcoker_register)
        retcode = subprocess.call(["docker", "pull", self.acc_image],
                                  stderr=subprocess.STDOUT)
if retcode != 0:
raise RuntimeError("Error pull image: {0}, Please check your network".format(self.acc_image))
uai_logger.info("Create GPU Dockerfile")
dockerbuf = []
dockerbuf.append("From " + self.acc_image + "\n")
dockerbuf.append("ADD " + "./" + self.code_path + " /data/\n")
with open(TMP_DOCKER_FILE, 'w') as f:
f.write(''.join(dockerbuf))
uai_logger.info("Build user image")
print(self.dcoker_register)
userimage = self.dcoker_register + "/" + self.uhub_registry + "/" + self.uhub_imagename
        if self.uhub_imagetag is not None and self.uhub_imagetag != "":
userimage = userimage + ":" + self.uhub_imagetag
else:
userimage = userimage + ":" + DOCKER_TAG_SUFFIX
        retcode = subprocess.call(["docker", "build", "-t", userimage, "-f", TMP_DOCKER_FILE, "."],
                                  stderr=subprocess.STDOUT)
if retcode != 0:
raise RuntimeError("Error build image: {0}, Please retry".format(userimage))
print(userimage)
self.user_gpu_image = userimage
def _push_gpu_userimage(self):
uai_logger.info("Push user image")
        retcode = subprocess.call(["docker", "push", self.user_gpu_image],
                                  stderr=subprocess.STDOUT)
if retcode != 0:
raise RuntimeError("Error push image {0}, Please check your network".format(self.user_gpu_image))
def _gen_pycmd(self):
pycmd = "/data/" + self.mainfile_path + " " + self.train_params
return pycmd
def _gen_cpu_docker_cmd(self, pycmd):
cpu_docker_cmd = "sudo docker run -it " + \
"-v " + self.test_data_path + ":" + "/data/data " + \
"-v " + self.test_output_path + ":" + "/data/output " + \
self.user_cpu_image + " " + "/bin/bash -c " + \
"\"cd /data && /usr/bin/python " + pycmd + " " + "--work_dir=/data --data_dir=/data/data --output_dir=/data/output --log_dir=/data/output\""
return cpu_docker_cmd
def _gen_gpu_docker_cmd(self, pycmd):
gpu_docker_cmd = "sudo nvidia-docker run -it " + \
"-v " + self.test_data_path + ":" + "/data/data " + \
"-v " + self.test_output_path + ":" + "/data/output " + \
self.user_gpu_image + " " + "/bin/bash -c " + \
"\"cd /data && /usr/bin/python " + pycmd + " " + "--work_dir=/data --data_dir=/data/data --output_dir=/data/output --log_dir=/data/output\""
return gpu_docker_cmd
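    # Illustrative output (not part of the original class); paths and image
    # name are hypothetical.  With mainfile_path='train.py',
    # test_data_path='/tmp/data' and test_output_path='/tmp/out',
    # _gen_gpu_docker_cmd produces roughly:
    #   sudo nvidia-docker run -it -v /tmp/data:/data/data \
    #       -v /tmp/out:/data/output <user_gpu_image> /bin/bash -c \
    #       "cd /data && /usr/bin/python /data/train.py ... --work_dir=/data \
    #        --data_dir=/data/data --output_dir=/data/output --log_dir=/data/output"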
def _gen_run_cmd(self):
f = open(DOCKER_RUN_CMD_FILE, "w")
""" Python cmd used in deploy
"""
pycmd = self._gen_pycmd()
f.write("CMD Used for deploying: " + pycmd + "\n")
print("CMD Used for deploying: {0}".format(pycmd))
""" Cmd used in local CPU train test
"""
cpu_docker_cmd = self._gen_cpu_docker_cmd(pycmd)
f.write("CMD for CPU local test: " + cpu_docker_cmd + "\n")
print("CMD for CPU local test: {0}".format(cpu_docker_cmd))
if self.acc_id_name == 'gpu':
""" Cmd used in local GPU train test
"""
gpu_docker_cmd = self._gen_gpu_docker_cmd(pycmd)
f.write("CMD for GPU local test: " + gpu_docker_cmd + "\n")
print("CMD for GPU local test: {0}".format(gpu_docker_cmd))
f.close()
print("You can check these cmd later in file: {0}".format(DOCKER_RUN_CMD_FILE))
def _build_userimage(self):
uai_logger.info("Docker login on " + self.dcoker_register)
        retcode = subprocess.call(
            ["docker", "login", "-u", self.uhub_username, "-p", self.uhub_password, self.dcoker_register],
            stderr=subprocess.STDOUT)
if retcode != 0:
raise RuntimeError("Error login to uhub, Please check your username and password, or try with sudo")
self._build_cpu_userimage()
if self.acc_id_name == 'gpu':
self._build_gpu_userimage()
self._push_gpu_userimage()
self._gen_run_cmd()
def check_interHub(self, baseimage):
if baseimage.startswith(DOCKER_PUBLIC_REGISTRY) and self.internal_uhub is True:
baseimage = baseimage.replace(DOCKER_PUBLIC_REGISTRY, DOCKER_INTERNAL_REGISTRY, 1)
if baseimage.startswith(DOCKER_INTERNAL_REGISTRY) and self.internal_uhub is False:
baseimage = baseimage.replace(DOCKER_INTERNAL_REGISTRY, DOCKER_PUBLIC_REGISTRY, 1)
return baseimage
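    # Illustrative sketch (not part of the original class): check_interHub
    # rewrites only the registry prefix, e.g. with internal_uhub True:
    #   'uhub.ucloud.cn/foo/img:tag' -> 'uhub.service.ucloud.cn/foo/img:tag'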
def cmd_run(self, args):
if self._parse_args(args) == False:
return False
os_v = self._translate_pkg_to_id('OS', self.os_v_name)
python_v = self._translate_pkg_to_id('Python', self.python_v_name)
ai_arch_v = self._translate_pkg_to_id('AIFrame', self.ai_arch_v_name)
acc_id = self._translate_pkg_to_id('Accelerator', self.acc_id_name)
get_acc_image_op = CheckAndGetUAITrainBaseImageApiOp(
self.pub_key,
self.pri_key,
os_v,
python_v,
ai_arch_v,
acc_id,
self.project_id,
self.region,
self.zone)
        succ, result = get_acc_image_op.call_api()
        if succ is False:
            raise RuntimeError("Error get base image info from server")
        acc_image_name = result['BimgName'][0]
acc_image_name = self.check_interHub(acc_image_name)
cpu_acc_id = self._translate_pkg_to_id('Accelerator', 'cpu')
get_cpu_image_op = CheckAndGetUAITrainBaseImageApiOp(
self.pub_key,
self.pri_key,
os_v,
python_v,
ai_arch_v,
cpu_acc_id,
self.project_id,
self.region,
self.zone)
succ, result = get_cpu_image_op.call_api()
if not succ:
raise RuntimeError("Error getting CPU base image, please check your keys and params")
cpu_image_name = result['BimgName'][0]
cpu_image_name = self.check_interHub(cpu_image_name)
print(acc_image_name)
print(cpu_image_name)
self.acc_image = acc_image_name
self.cpu_image = cpu_image_name
self._build_userimage()
|
|
import os
import logging
import urllib
import csv
import json
from StringIO import StringIO
import datetime
import time
import jinja2
from flask import request, make_response, jsonify, render_template, Response
import flask.ext.assets
from webassets.ext.jinja2 import AssetsExtension
from webassets import Environment as AssetsEnvironment
from database import db_session
from models import *
from base import *
import utilities
app = utilities.init_flask(__name__)
assets = flask.ext.assets.Environment()
assets.init_app(app)
assets_env = AssetsEnvironment('./static/', '/static')
jinja_environment = jinja2.Environment(
autoescape=True,
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates")),
extensions=[AssetsExtension])
jinja_environment.assets_environment = assets_env
MINIMAL_ZOOM = 16
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
def generate_json(results, is_thin):
yield '{"markers": ['
is_first = True
for marker in results.all():
if is_first:
is_first = False
prefix = ''
else:
prefix = ','
yield prefix + json.dumps(marker.serialize(is_thin))
yield ']}'
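# The generator above lets Flask stream an arbitrarily large result set as a
# single JSON document, e.g. {"markers": [{...},{...}]}, emitting one marker
# per iteration instead of serializing everything in memory first.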
def generate_csv(results, is_thin):
output_file = StringIO()
yield output_file.getvalue()
output_file.truncate(0)
output = None
for marker in results.all():
serialized = marker.serialize(is_thin)
if not output:
output = csv.DictWriter(output_file, serialized.keys())
output.writeheader()
row = {k: v.encode('utf8')
if type(v) is unicode else v
for k, v in serialized.iteritems()}
output.writerow(row)
yield output_file.getvalue()
output_file.truncate(0)
@app.route("/markers")
@user_optional
def markers(methods=["GET", "POST"]):
logging.debug('getting markers')
if request.method == "GET":
ne_lat = float(request.values['ne_lat'])
ne_lng = float(request.values['ne_lng'])
sw_lat = float(request.values['sw_lat'])
sw_lng = float(request.values['sw_lng'])
zoom = int(request.values['zoom'])
start_date = datetime.date.fromtimestamp(int(request.values['start_date']))
end_date = datetime.date.fromtimestamp(int(request.values['end_date']))
fatal = int(request.values['show_fatal'])
severe = int(request.values['show_severe'])
light = int(request.values['show_light'])
inaccurate = int(request.values['show_inaccurate'])
logging.debug('querying markers in bounding box')
is_thin = (zoom < MINIMAL_ZOOM)
results = Marker.bounding_box_query(ne_lat, ne_lng, sw_lat, sw_lng,
start_date, end_date,
fatal, severe, light, inaccurate,
is_thin, yield_per=50)
if request.values.get('format') == 'csv':
return Response(generate_csv(results, is_thin), headers={
"Content-Type": "text/csv",
"Content-Disposition": 'attachment; filename="data.csv"'
})
else: # defaults to json
return Response(generate_json(results, is_thin), mimetype="application/json")
else:
user = get_user()  # current-user helper, see the commented-out /login handler below
data = json.loads(request.data)
marker = Marker.parse(data)
marker.user = user
marker.update_location()
marker.put()
return make_response(json.dumps(marker.serialize(user)))
@app.route("/markers/(.*)", methods=["GET", "PUT", "DELETE"])
@user_required
def marker(self, key_name):
if request.method == "GET":
marker = Marker.get_by_key_name(key_name)
return make_response(json.dumps(marker.serialize(self.user)))
elif request.method == "PUT":
marker = Marker.get_by_key_name(key_name)
data = json.loads(self.request.body)
marker.update(data, self.user)
return make_response(json.dumps(marker.serialize(self.user)))
elif request.method == "DELETE":
marker = Marker.get_by_key_name(key_name)
marker.delete()
# @app.route("/login", methods=["POST"])
# @user_optional
# def login():
# user = get_user()
# if user:
# return make_response(json.dumps(user.serialize()))
#
# if request.json:
# facebook_data = request.json
# user_id = facebook_data["userID"]
# access_token = facebook_data["accessToken"]
# user_details = json.loads(urllib.urlopen("https://graph.facebook.com/me?access_token=" + access_token).read())
# # login successful
# if user_details["id"] == user_id:
# user = User.query.filter(User.email == user_details["email"]).scalar()
# if not user:
# user = User(
# email = user_details["email"],
# first_name = user_details["first_name"],
# last_name = user_details["last_name"],
# username = user_details["username"],
# facebook_id = user_details["id"],
# facebook_url = user_details["link"],
# access_token = facebook_data["accessToken"]
# )
# else:
# user.access_token = facebook_data["accessToken"]
#
# db_session.add(user)
# set_user(user)
# return make_response(json.dumps(user.serialize()))
# else:
# raise Exception("Error in logging in.")
# else:
# raise Exception("No login data or user logged in.")
#
#
# @app.route("/logout")
# @user_required
# def do_logout():
# logout()
#
# @app.route("/follow/(.*)")
# @user_required
# def follow(key_name):
# marker = Marker.get_by_key_name(key_name)
# follower = Follower.all().filter("marker", marker).filter("user", self.user).get()
# if not follower:
# Follower(parent = marker, marker = marker, user = self.user).put()
#
# @app.route("/unfollow/(.*)")
# @user_required
# def unfollow(key_name):
# marker = Marker.get_by_key_name(key_name)
# follower = Follower.all().filter("marker", marker).filter("user", self.user).get()
# if follower:
# follower.delete()
@app.route('/', defaults={'marker_id': None})
@app.route('/<int:marker_id>')
def main(marker_id):
# at this point the marker id is just a running number, and the
# LMS is in the description and needs to be promoted to a DB
# field so we can query it. We also need to add a provider id.
context = {'minimal_zoom': MINIMAL_ZOOM, 'url': request.host}
marker = None
if 'marker' in request.values:
markers = Marker.get_marker(request.values['marker'])
if markers.count() == 1:
marker = markers[0]
context['coordinates'] = (marker.latitude, marker.longitude)
context['marker'] = marker.id
if 'start_date' in request.values:
context['start_date'] = string2timestamp(request.values['start_date'])
elif marker:
context['start_date'] = year2timestamp(marker.created.year)
if 'end_date' in request.values:
context['end_date'] = string2timestamp(request.values['end_date'])
elif marker:
context['end_date'] = year2timestamp(marker.created.year + 1)
for attr in 'show_fatal', 'show_severe', 'show_light', 'show_inaccurate',\
'zoom':
if attr in request.values:
context[attr] = request.values[attr]
if 'map_only' in request.values:
if request.values['map_only'] in ('1', 'true'):
context['map_only'] = 1
if 'lat' in request.values and 'lon' in request.values:
context['coordinates'] = (request.values['lat'], request.values['lon'])
return render_template('index.html', **context)
def string2timestamp(s):
return time.mktime(datetime.datetime.strptime(s, "%Y-%m-%d").timetuple())
def year2timestamp(y):
return time.mktime(datetime.date(y, 1, 1).timetuple())
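# Quick sanity check for the two helpers above: both expressions below
# produce the local-time timestamp of 2013-01-01 00:00:00, so
# string2timestamp("2013-01-01") == year2timestamp(2013).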
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
app.run(debug=True)
|
|
"""
The ``fluent_contents_tags`` module provides two template tags for rendering placeholders.
It can be loaded using:
.. code-block:: html+django
{% load fluent_contents_tags %}
A placeholder which is stored in a :class:`~fluent_contents.models.PlaceholderField` can
be rendered with the following syntax:
.. code-block:: html+django
{% render_placeholder someobject.placeholder %}
To support CMS interfaces, placeholder slots can be defined in the template.
This is done using the following syntax:
.. code-block:: html+django
{% page_placeholder currentpage "slotname" %}
{% page_placeholder currentpage "slotname" title="Admin title" role="main" %}
The CMS interface can scan for those tags using the :ref:`fluent_contents.analyzer` module.
"""
from django.conf import settings
from django.db.models import Manager
from django.forms import Media
from django.template import Library, TemplateSyntaxError, Variable
from tag_parser import parse_as_var, parse_token_kwargs
from tag_parser.basetags import BaseAssignmentOrOutputNode, BaseNode
from fluent_contents import appsettings, rendering
from fluent_contents.models import ImmutableMedia, Placeholder
from fluent_contents.rendering import get_cached_placeholder_output
from fluent_contents.utils.templatetags import (
extract_literal,
extract_literal_bool,
is_true,
)
register = Library()
@register.tag
def page_placeholder(parser, token):
"""
Render a placeholder for a given object. Syntax:
.. code-block:: html+django
{% page_placeholder currentpage "slotname" %}
Additionally, extra meta information can be provided for the admin interface.
.. code-block:: html+django
{% page_placeholder currentpage "slotname" title="Tab title" role="main %}
If the currentpage variable is named ``page``, it can be left out.
The extra information can be extracted with the
:func:`~PagePlaceholderNode.get_title` and :func:`~PagePlaceholderNode.get_role`
functions of the :class:`~PagePlaceholderNode` class.
Optionally, a template can be used to render the placeholder:
.. code-block:: html+django
{% page_placeholder currentpage "slotname" template="mysite/parts/slot_placeholder.html" %}
That template should loop over the content items, for example:
.. code-block:: html+django
{% for contentitem, html in contentitems %}
{% if not forloop.first %}<div class="splitter"></div>{% endif %}
{{ html }}
{% endfor %}
.. note::
When a template is used, the system assumes that the output can change per request.
Hence, the output of individual items will be cached, but the final merged output is no longer cached.
Add ``cachable=True`` to enable output caching for templates too.
"""
return PagePlaceholderNode.parse(parser, token)
class PagePlaceholderNode(BaseAssignmentOrOutputNode):
"""
The template node of the ``page_placeholder`` tag.
It renders a placeholder of a provided parent object.
The template tag can also contain additional metadata,
which can be returned by scanning for this node using the :ref:`fluent_contents.analyzer` module.
"""
allowed_kwargs = ("title", "role", "template", "cachable", "fallback")
allowed_meta_kwargs = ("title", "role")
min_args = 1
max_args = 2
def __init__(self, tag_name, as_var, parent_expr, slot_expr, **kwargs):
super(PagePlaceholderNode, self).__init__(
tag_name, as_var, parent_expr, slot_expr, **kwargs
)
self.slot_expr = slot_expr
# Move some arguments outside the regular "kwargs"
# because they don't need to be parsed as variables.
# Those are the remaining non-functional args for the CMS admin page.
self.meta_kwargs = {}
for arg in self.allowed_meta_kwargs:
try:
self.meta_kwargs[arg] = kwargs.pop(arg)
except KeyError:
pass
@classmethod
def parse(cls, parser, token):
"""
Parse the node syntax:
.. code-block:: html+django
{% page_placeholder parentobj slotname title="test" role="m" %}
"""
bits, as_var = parse_as_var(parser, token)
tag_name, args, kwargs = parse_token_kwargs(
parser,
bits,
allowed_kwargs=cls.allowed_kwargs,
compile_args=True,
compile_kwargs=True,
)
# Play with the arguments
if len(args) == 2:
parent_expr = args[0]
slot_expr = args[1]
elif len(args) == 1:
# Allow 'page' by default. Works with most CMSes, including django-fluent-pages.
parent_expr = Variable("page")
slot_expr = args[0]
else:
raise TemplateSyntaxError(
"""{0} tag allows two arguments: 'parent object' 'slot name' and optionally: title=".." role="..".""".format(
tag_name
)
)
cls.validate_args(tag_name, *args, **kwargs)
return cls(
tag_name=tag_name,
as_var=as_var,
parent_expr=parent_expr,
slot_expr=slot_expr,
**kwargs
)
def get_slot(self):
"""
Return the string literal that is used for the placeholder slot in the template.
When the variable is not a string literal, ``None`` is returned.
"""
return extract_literal(self.slot_expr)
def get_title(self):
"""
Return the string literal that is used in the template.
The title is used in the admin screens.
"""
try:
return extract_literal(self.meta_kwargs["title"])
except KeyError:
slot = self.get_slot()
if slot is not None:
return slot.replace("_", " ").title()
return None
def get_role(self):
"""
Return the string literal that is used in the template.
The role can be "main", "sidebar" or "related", or shorted to "m", "s", "r".
"""
try:
return extract_literal(self.meta_kwargs["role"])
except KeyError:
return None
def get_fallback_language(self):
"""
Return whether to use the fallback language.
"""
try:
# Note: string values are not supported yet.
return extract_literal_bool(self.kwargs["fallback"]) or None
except KeyError:
return False
def get_value(self, context, *tag_args, **tag_kwargs):
request = self.get_request(context)
output = None
# Process arguments
parent, slot = tag_args
template_name = tag_kwargs.get("template", None)
# cachable default is True unless there is a template.
cachable = is_true(tag_kwargs.get("cachable", not bool(template_name)))
fallback_language = is_true(tag_kwargs.get("fallback", False))
if template_name and cachable and not extract_literal(self.kwargs["template"]):
# If the template name originates from a variable, it can change any time.
# It's not possible to create a reliable output cache for that,
# as it would have to include any possible template name in the key.
raise TemplateSyntaxError(
"{0} tag does not allow 'cachable' for variable template names!".format(
self.tag_name
)
)
if (
appsettings.FLUENT_CONTENTS_CACHE_OUTPUT
and appsettings.FLUENT_CONTENTS_CACHE_PLACEHOLDER_OUTPUT
and cachable
):
# See if the entire placeholder output is cached,
# if so, no database queries have to be performed.
# This will be omitted when a template is used,
# because there is no way to expire that or tell whether that template is cacheable.
output = get_cached_placeholder_output(parent, slot)
if output is None:
# Get the placeholder
try:
placeholder = Placeholder.objects.get_by_slot(parent, slot)
except Placeholder.DoesNotExist:
return "<!-- placeholder '{0}' does not yet exist -->".format(slot)
output = rendering.render_placeholder(
request,
placeholder,
parent,
template_name=template_name,
cachable=cachable,
limit_parent_language=True,
fallback_language=fallback_language,
)
# Assume it doesn't hurt to register media. TODO: should this be optional?
rendering.register_frontend_media(request, output.media)
return output.html
@register.tag
def render_placeholder(parser, token):
"""
Render a shared placeholder. Syntax:
.. code-block:: html+django
{% render_placeholder someobject.placeholder %}
"""
return RenderPlaceholderNode.parse(parser, token)
class RenderPlaceholderNode(BaseAssignmentOrOutputNode):
"""
The template node of the ``render_placeholder`` tag.
It renders the provided placeholder object.
"""
min_args = 1
max_args = 1
allowed_kwargs = ("template", "cachable", "fallback")
@classmethod
def validate_args(cls, tag_name, *args, **kwargs):
if len(args) != 1:
raise TemplateSyntaxError(
"""{0} tag allows only one parameter: a placeholder object.""".format(
tag_name
)
)
super(RenderPlaceholderNode, cls).validate_args(tag_name, *args, **kwargs)
def get_value(self, context, *tag_args, **tag_kwargs):
request = self.get_request(context)
# Parse arguments
try:
placeholder = _get_placeholder_arg(self.args[0], tag_args[0])
except RuntimeWarning as e:
return u"<!-- {0} -->".format(e)
template_name = tag_kwargs.get("template", None)
# cachable default is True unless there is a template.
cachable = is_true(tag_kwargs.get("cachable", not bool(template_name)))
fallback_language = is_true(tag_kwargs.get("fallback", False))
if template_name and cachable and not extract_literal(self.kwargs["template"]):
# If the template name originates from a variable, it can change any time.
# See PagePlaceholderNode.render_tag() for why this is not allowed.
raise TemplateSyntaxError(
"{0} tag does not allow 'cachable' for variable template names!".format(
self.tag_name
)
)
# Fetching placeholder.parent should not cause queries if fetched via PlaceholderFieldDescriptor.
# See render_placeholder() for more details
output = rendering.render_placeholder(
request,
placeholder,
placeholder.parent,
template_name=template_name,
cachable=cachable,
limit_parent_language=True,
fallback_language=fallback_language,
)
# Need to track frontend media here, as the template tag can't return it.
rendering.register_frontend_media(request, output.media)
return output.html
def _get_placeholder_arg(arg_name, placeholder):
"""
Validate and return the Placeholder object that the template variable points to.
"""
if placeholder is None:
raise RuntimeWarning(u"placeholder object is None")
elif isinstance(placeholder, Placeholder):
return placeholder
elif isinstance(placeholder, Manager):
manager = placeholder
try:
parent_object = manager.instance # read RelatedManager code
except AttributeError:
parent_object = None
try:
placeholder = manager.all()[0]
if parent_object is not None:
placeholder.parent = parent_object # Fill GFK cache
return placeholder
except IndexError:
raise RuntimeWarning(
u"No placeholders found for query '{0}.all.0'".format(arg_name)
)
else:
raise ValueError(
u"The field '{0}' does not refer to a placeholder object!".format(arg_name)
)
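# Illustrative inputs for the helper above (variable names hypothetical):
#   {% render_placeholder page.placeholder %}   -> a Placeholder, returned as-is
#   {% render_placeholder page.placeholders %}  -> a RelatedManager, its first
#                                                  placeholder is used
# Any other value, such as a plain string, raises the ValueError above.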
@register.tag
def render_content_items_media(parser, token):
"""
Render the JS/CSS includes for the media which was collected during the handling of the request.
This tag should be placed at the bottom of the page.
.. code-block:: html+django
{% render_content_items_media %}
{% render_content_items_media css %}
{% render_content_items_media js %}
{% render_content_items_media js local %}
{% render_content_items_media js external %}
.. note::
The output of plugins is typically cached. Changes to the
registered media only show up after flushing the cache,
or re-saving the items (which flushes the cache).
"""
return RenderContentItemsMedia.parse(parser, token)
class RenderContentItemsMedia(BaseNode):
"""
The template node of the ``render_content_items_media`` tag.
It renders the collected media object.
"""
compile_args = False
compile_kwargs = False
min_args = 0
max_args = 2
@classmethod
def validate_args(cls, tag_name, *args, **kwargs):
super(RenderContentItemsMedia, cls).validate_args(tag_name, *args, **kwargs)
if args:
if args[0] not in ("css", "js"):
raise TemplateSyntaxError(
"'{0}' tag only supports `css` or `js` as first argument".format(
tag_name
)
)
if len(args) > 1 and args[1] not in ("local", "external"):
raise TemplateSyntaxError(
"'{0}' tag only supports `local` or `external` as second argument".format(
tag_name
)
)
def render_tag(self, context, media_type=None, domain=None):
request = self.get_request(context)
media = rendering.get_frontend_media(request)
if not media or not (media._js or media._css):
return u""
if not media_type:
return media.render()
elif media_type == "js":
if domain:
media = _split_js(media, domain)
return u"\n".join(media.render_js())
elif media_type == "css":
if domain:
media = _split_css(media, domain)
return u"\n".join(media.render_css())
else:
return ""
if settings.STATIC_URL is None:
_LOCAL_PREFIX = settings.MEDIA_URL # backwards compatibility
else:
_LOCAL_PREFIX = settings.STATIC_URL
def _is_local(url):
# URL can be http:// if that's what's also in STATIC_URL.
# Otherwise, the domain is external.
return not url.startswith(("//", "http://", "https://")) or url.startswith(
_LOCAL_PREFIX
)
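# Examples, assuming STATIC_URL = "/static/":
#   _is_local("/static/js/app.js")          -> True  (no scheme, local prefix)
#   _is_local("js/app.js")                  -> True  (relative URL)
#   _is_local("https://cdn.example.com/x")  -> False (external domain)
#   _is_local("//cdn.example.com/x")        -> False (protocol-relative)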
def _split_js(media, domain):
"""
Extract the local or external URLs from a Media object.
"""
# Read internal property without creating new Media instance.
if not media._js:
return ImmutableMedia.empty_instance
needs_local = domain == "local"
new_js = []
for url in media._js:
if needs_local == _is_local(url):
new_js.append(url)
if not new_js:
return ImmutableMedia.empty_instance
else:
return Media(js=new_js)
def _split_css(media, domain):
"""
Extract the local or external URLs from a Media object.
"""
# Read internal property without creating new Media instance.
if not media._css:
return ImmutableMedia.empty_instance
needs_local = domain == "local"
new_css = {}
for medium, url in media._css.items():
if needs_local == _is_local(url):
new_css.setdefault(medium, []).append(url)
if not new_css:
return ImmutableMedia.empty_instance
else:
return Media(css=new_css)
|
|
"""DHCPv4 address release process"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import misc
import srv_msg
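# Each test below follows the same outline: a DORA exchange
# (DISCOVER -> OFFER -> REQUEST -> ACK) leases 192.168.50.1 from a
# single-address pool, a RELEASE is sent with some combination of
# chaddr/client-id, and a final DISCOVER from a fresh client probes
# whether the address was actually freed (an OFFER of 192.168.50.1 is
# expected) or still held (no response is expected).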
@pytest.mark.v4
@pytest.mark.release
@pytest.mark.parametrize("backend", ['memfile', 'mysql', 'postgresql'])
def test_v4_release_success(backend):
misc.test_setup()
srv_control.define_temporary_lease_db_backend(backend)
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(1)
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:11:11:22')
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
misc.test_procedure()
srv_msg.client_copy_option('server_id')
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:11:11:22')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
my_lease = srv_msg.get_all_leases()
srv_msg.check_leases(my_lease, backend=backend)
misc.test_procedure()
srv_msg.client_copy_option('server_id')
srv_msg.client_sets_value('Client', 'ciaddr', '192.168.50.1')
srv_msg.client_send_msg('RELEASE')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:11')
srv_msg.client_does_include_with_value('client_id', '00010203040111')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
@pytest.mark.v4
@pytest.mark.release
def test_v4_release_success_with_additional_offer():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
misc.test_procedure()
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
misc.test_procedure()
srv_msg.client_save_option('server_id')
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
srv_msg.client_does_include_with_value('client_id', '00010203040506')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_add_saved_option(erase=True)
srv_msg.client_sets_value('Client', 'chaddr', 'default')
srv_msg.client_sets_value('Client', 'ciaddr', '192.168.50.1')
srv_msg.client_send_msg('RELEASE')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:11')
srv_msg.client_does_include_with_value('client_id', '00010203040111')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
@pytest.mark.v4
@pytest.mark.release
def test_v4_release_fail_with_different_chaddr_client_id():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:1F:D0:00:00:11')
srv_msg.client_does_include_with_value('client_id', '00001FD0040111')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_include_option(61)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
srv_msg.response_check_option_content(61, 'value', '00001FD0040111')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:1f:d0:00:00:11')
srv_msg.client_does_include_with_value('client_id', '00001FD0040111')
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_include_option(61)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
srv_msg.response_check_option_content(61, 'value', '00001FD0040111')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:1f:d0:11:22:33')
srv_msg.client_does_include_with_value('client_id', '00001FD0112233')
srv_msg.client_copy_option('server_id')
srv_msg.client_sets_value('Client', 'ciaddr', '192.168.50.1')
srv_msg.client_send_msg('RELEASE')
# address not released
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
srv_msg.client_does_include_with_value('client_id', '00010203040111')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
@pytest.mark.v4
@pytest.mark.release
def test_v4_release_fail_with_same_chaddr_different_client_id():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:1F:D0:00:00:11')
srv_msg.client_does_include_with_value('client_id', '00001FD0040111')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_include_option(61)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
srv_msg.response_check_option_content(61, 'value', '00001FD0040111')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:1f:d0:00:00:11')
srv_msg.client_does_include_with_value('client_id', '00001FD0040111')
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_include_option(61)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
srv_msg.response_check_option_content(61, 'value', '00001FD0040111')
misc.test_procedure()
# client id changed!
srv_msg.client_sets_value('Client', 'chaddr', '00:1f:d0:00:00:11')
srv_msg.client_does_include_with_value('client_id', '00001FD0112233')
srv_msg.client_copy_option('server_id')
srv_msg.client_sets_value('Client', 'ciaddr', '192.168.50.1')
srv_msg.client_send_msg('RELEASE')
# address not released
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
srv_msg.client_does_include_with_value('client_id', '00010203040111')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
@pytest.mark.v4
@pytest.mark.release
def test_v4_release_fail_with_different_chaddr_same_client_id():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:1F:D0:00:00:11')
srv_msg.client_does_include_with_value('client_id', '00001FD0040111')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_include_option(61)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
srv_msg.response_check_option_content(61, 'value', '00001FD0040111')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:1f:d0:00:00:11')
srv_msg.client_does_include_with_value('client_id', '00001FD0040111')
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_include_option(61)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
srv_msg.response_check_option_content(61, 'value', '00001FD0040111')
misc.test_procedure()
# chaddr changed!
srv_msg.client_sets_value('Client', 'chaddr', '00:1f:d0:11:11:11')
srv_msg.client_does_include_with_value('client_id', '00001FD0040111')
srv_msg.client_copy_option('server_id')
srv_msg.client_sets_value('Client', 'ciaddr', '192.168.50.1')
srv_msg.client_send_msg('RELEASE')
# address not released
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
srv_msg.client_does_include_with_value('client_id', '00010203040111')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_include_option(61)
@pytest.mark.v4
@pytest.mark.release
def test_v4_release_only_chaddr_same_chaddr():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:1F:D0:00:00:11')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:1f:d0:00:00:11')
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
misc.test_procedure()
# same chaddr, no client id: release should succeed
srv_msg.client_sets_value('Client', 'chaddr', '00:1f:d0:00:00:11')
srv_msg.client_copy_option('server_id')
srv_msg.client_sets_value('Client', 'ciaddr', '192.168.50.1')
srv_msg.client_send_msg('RELEASE')
# address released
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
@pytest.mark.v4
@pytest.mark.release
def test_v4_release_fail_only_chaddr_different_chaddr():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:1F:D0:00:00:11')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:1f:d0:00:00:11')
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(54)
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_option_content(54, 'value', '$(SRV4_ADDR)')
misc.test_procedure()
# chaddr changed!
srv_msg.client_sets_value('Client', 'chaddr', '00:1f:d0:11:11:11')
srv_msg.client_copy_option('server_id')
srv_msg.client_sets_value('Client', 'ciaddr', '192.168.50.1')
srv_msg.client_send_msg('RELEASE')
# address not released
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:00')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
@pytest.mark.v4
@pytest.mark.release
def test_v4_release_leases_expired():
misc.test_setup()
srv_control.set_time('renew-timer', 1)
srv_control.set_time('rebind-timer', 2)
srv_control.set_time('valid-lifetime', 3)
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.1-192.168.50.1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
misc.test_procedure()
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.1')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.forge_sleep(4, 'seconds')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:11')
srv_msg.client_does_include_with_value('client_id', '00010203040111')
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.1')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
|
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
from SimPEG import EM
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib
import matplotlib.gridspec as gridspec
matplotlib.rcParams['font.size'] = 12
import warnings
warnings.filterwarnings("ignore")
from ipywidgets import *
from .View import DataView
from .Base import widgetify
from .FDEMDipolarfields import *
def linefun(x1, x2, y1, y2, nx, tol=1e-3):
dx = x2-x1
dy = y2-y1
if np.abs(dx)<tol:
y = np.linspace(y1, y2,nx)
x = np.ones_like(y)*x1
elif np.abs(dy)<tol:
x = np.linspace(x1, x2, nx)
y = np.ones_like(x)*y1
else:
x = np.linspace(x1, x2, nx)
slope = (y2-y1)/(x2-x1)
y=slope*(x-x1)+y1
return x, y
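# Example: sample 5 points along the vertical segment x = 0, y in [-50, 50]:
#   x, y = linefun(0., 0., -50., 50., 5)
#   # x -> array([0., 0., 0., 0., 0.])
#   # y -> array([-50., -25., 0., 25., 50.])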
class DipoleWidgetFD(object):
"""DipoleWidget"""
x = None
y = None
z = None
func = None
# Fixed spatial range in 3D
xmin, xmax = -50., 50.
ymin, ymax = -50., 50.
zmin, zmax = -50., 50.
def __init__(self):
self.dataview = DataView()
def SetDataview(self, srcLoc, sig, f, orientation, normal, functype, na=100, nb=100, loc=0.):
self.srcLoc = srcLoc
self.sig = sig
self.f = f
self.normal = normal
self.SetGrid(normal, loc, na, nb)
self.functype = functype
self.dataview.set_xyz(self.x, self.y, self.z, normal=normal) # set plane and locations ...
if self.functype == "E_from_ED":
self.func = E_from_ElectricDipoleWholeSpace
elif self.functype == "E_from_ED_galvanic":
self.func = E_galvanic_from_ElectricDipoleWholeSpace
elif self.functype == "E_from_ED_inductive":
self.func = E_inductive_from_ElectricDipoleWholeSpace
elif self.functype == "H_from_ED":
self.func = H_from_ElectricDipoleWholeSpace
elif self.functype == "J_from_ED":
self.func = J_from_ElectricDipoleWholeSpace
elif self.functype == "E_from_MD":
self.func = E_from_MagneticDipoleWholeSpace
elif self.functype == "H_from_MD":
self.func = H_from_MagneticDipoleWholeSpace
elif self.functype == "J_from_MD":
self.func = J_from_MagneticDipoleWholeSpace
else:
raise NotImplementedError()
self.dataview.eval_2D(srcLoc, sig, f, orientation, self.func) # evaluate
def SetGrid(self, normal, loc, na, nb):
# Assume we are seeing xy plane
if normal =="X" or normal=="x":
self.x = np.r_[loc]
self.y = np.linspace(self.ymin, self.ymax, na)
self.z = np.linspace(self.zmin, self.zmax, nb)
if normal =="Y" or normal=="y":
self.x = np.linspace(self.xmin, self.xmax, na)
self.y = np.r_[loc]
self.z = np.linspace(self.zmin, self.zmax, nb)
if normal =="Z" or normal=="z":
self.x = np.linspace(self.xmin, self.xmax, na)
self.y = np.linspace(self.ymin, self.ymax, nb)
self.z = np.r_[loc]
def Dipole2Dviz(self, x1, y1, x2, y2, npts2D, npts, sig, f, srcLoc=np.r_[0., 0., 0.], orientation="x", component="real", view="x", normal="Z", functype="E_from_ED", loc=0., scale="log", dx=50., plot1D=False, plotTxProfile=False):
nx, ny = npts2D, npts2D
x, y = linefun(x1, x2, y1, y2, npts)
if scale == "log":
logamp = True
elif scale == "linear":
logamp = False
else:
raise NotImplementedError()
self.SetDataview(srcLoc, sig, f, orientation, normal, functype, na=nx, nb=ny, loc=loc)
# plot1D = False
# plotTxProfile = False
if normal =="X" or normal=="x":
if abs(loc - 50) < 1e-5:
plot1D = True
xyz_line = np.c_[np.ones_like(x)*self.x, x, y]
self.dataview.xyz_line = xyz_line
if normal =="Y" or normal=="y":
if abs(loc - 0.) < 1e-5:
plot1D = True
plotTxProfile = True
xyz_line = np.c_[x, np.ones_like(x)*self.y, y]
self.dataview.xyz_line = xyz_line
if normal =="Z" or normal=="z":
xyz_line = np.c_[x, y, np.ones_like(x)*self.z]
self.dataview.xyz_line = xyz_line
fig = plt.figure(figsize=(18*1.5,3.4*1.5))
gs1 = gridspec.GridSpec(2, 7)
gs1.update(left=0.05, right=0.48, wspace=0.05)
ax1 = plt.subplot(gs1[:2, :3])
ax1.axis("equal")
ax1, dat1 = self.dataview.plot2D_FD(ax=ax1, component=component,view=view, colorbar=False, logamp=logamp)
vmin, vmax = dat1.cvalues.min(), dat1.cvalues.max()
if scale == "log":
cb = plt.colorbar(dat1, ax=ax1, ticks=np.linspace(vmin, vmax, 5), format="$10^{%.1f}$")
elif scale == "linear":
cb = plt.colorbar(dat1, ax=ax1, ticks=np.linspace(vmin, vmax, 5), format="%.1e")
ax1.text(x[0], y[0], 'A', fontsize = 16, color='w')
ax1.text(x[-1], y[-1]-5, 'B', fontsize = 16, color='w')
tempstr = functype.split("_")
if view == "vec":
tname = "Vector "
title = tname+tempstr[0]+"-field from "+tempstr[2]
elif view== "amp":
tname = "|"
title = tname+tempstr[0]+"|-field from "+tempstr[2]
else:
if component == "real":
tname = "Re("
elif component == "imag":
tname = "Im("
elif component == "amplitude":
tname = "Amp("
elif component == "phase":
tname = "Phase("
title = tname + tempstr[0]+view+")-field from "+tempstr[2]
if tempstr[0] == "E":
unit = " (V/m)"
fieldname = "Electric field"
elif tempstr[0] == "H":
unit = " (A/m)"
fieldname = "Magnetic field"
elif tempstr[0] == "J":
unit = " (A/m$^2$) "
fieldname = "Current density"
else:
raise NotImplementedError()
if component == "phase":
unit = " (rad)"
label = fieldname + unit
label_cb = tempstr[0]+view+"-field from "+tempstr[2]
cb.set_label(label)
ax1.set_title(title)
if plotTxProfile:
ax1.plot(np.r_[-20., 80.],np.zeros(2), 'b-', lw=1)
if plot1D:
ax1.plot(x,y, 'r.', ms=4)
ax2 = plt.subplot(gs1[:, 4:6])
val_line_x, val_line_y, val_line_z = self.dataview.eval(xyz_line, srcLoc, np.r_[sig], np.r_[f], orientation, self.func)
if view =="X" or view =="x":
val_line = val_line_x
elif view =="Y" or view =="y":
val_line = val_line_y
elif view =="Z" or view =="z":
val_line = val_line_z
elif view =="vec" or "amp":
vecamp = lambda a, b, c: np.sqrt((a)**2+(b)**2+(c)**2)
if component == "real":
val_line = vecamp(val_line_x.real, val_line_y.real, val_line_z.real)
elif component == "imag":
val_line = vecamp(val_line_x.imag, val_line_y.imag, val_line_z.imag)
elif component == "amplitude":
val_line = vecamp(abs(val_line_x), abs(val_line_y), abs(val_line_z))
elif component == "phase":
val_line = vecamp(np.angle(val_line_x), np.angle(val_line_y), np.angle(val_line_z))
distance = np.sqrt((x-x1)**2+(y-y1)**2) - dx # specific purpose
if component == "real":
val_line = val_line.real
elif component == "imag":
val_line = val_line.imag
elif component == "amplitude":
val_line = abs(val_line)
elif component == "phase":
val_line = np.angle(val_line)
if scale == "log":
temp = val_line.copy()*np.nan
temp[val_line>0.] = val_line[val_line>0.]
ax2.plot(temp, distance, 'k.-')
temp = val_line.copy()*np.nan
temp[val_line<0.] = -val_line[val_line<0.]
ax2.plot(temp, distance, 'k.--')
ax2.set_xlim(abs(val_line).min(), abs(val_line).max())
ax2.set_xscale(scale)
elif scale == "linear":
ax2.plot(val_line, distance, 'k.-')
ax2.set_xlim(val_line.min(), val_line.max())
ax2.set_xscale(scale)
xticks = np.linspace(val_line.min(), val_line.max(), 3)
plt.plot(np.r_[0., 0.], np.r_[distance.min(), distance.max()], 'k-', lw=2)
ax2.xaxis.set_ticks(xticks)
ax2.xaxis.set_major_formatter(ticker.FormatStrFormatter("%.0e"))
ax2.set_ylim(distance.min(), distance.max())
ax2.set_ylabel("A-B profile (m)")
if tempstr[0] == "E":
if view == "vec" or view== "amp":
label = "|"+tempstr[0]+"|-field (V/m) "
else:
label = tname+tempstr[0]+view+")-field (V/m) "
elif tempstr[0] == "H":
if view == "vec" or view== "amp":
label = "|"+tempstr[0]+"|-field field (A/m) "
else:
label = tname+tempstr[0]+view+")-field (A/m) "
elif tempstr[0] == "J":
if view == "vec" or view== "amp":
label = "|"+tempstr[0]+"|-field field (A/m$^2$) "
else:
label = tname+tempstr[0]+view+")-field (A/m$^2$) "
else:
raise NotImplementedError()
if component == "phase":
label = tname+tempstr[0]+view+")-field (rad) "
ax2.set_title("EM data at Rx hole")
ax2.set_xlabel(label)
# ax2.text(distance.min(), val_line.max(), 'A', fontsize = 16)
# ax2.text(distance.max()*0.97, val_line.max(), 'B', fontsize = 16)
# ax2.legend((component, ), bbox_to_anchor=(0.5, -0.3))
ax2.grid(True)
plt.show()
def InteractiveDipoleBH(self, nRx=20, npts2D=50, scale="log", offset_plane=50.,\
X1=-20, X2=80, Y1=-50, Y2=50, Z1=-50, Z2=50, \
plane="YZ", SrcType="ED", fieldvalue="E", compvalue="z"):
# x1, x2, y1, y2 = offset_rx, offset_rx, Z1, Z2
self.xmin, self.xmax = X1, X2
self.ymin, self.ymax = Y1, Y2
self.zmin, self.zmax = Z1, Z2
def foo(Field, AmpDir, Component, ComplexNumber, Frequency, Sigma, Offset, Scale, Slider, FreqLog, SigLog, SrcType=SrcType):
if Slider:
f = np.r_[10**FreqLog]
sig = np.r_[10**SigLog]
else:
f = np.r_[Frequency]
sig = np.r_[Sigma]
if plane == "XZ":
normal = "Y"
self.offset_rx = 50.
elif plane == "YZ":
normal = "X"
self.offset_rx = 0.
x1, x2, y1, y2 = self.offset_rx, self.offset_rx, Z1, Z2
if ComplexNumber == "Re":
ComplexNumber = "real"
elif ComplexNumber == "Im":
ComplexNumber = "imag"
elif ComplexNumber == "Amp":
ComplexNumber = "amplitude"
elif ComplexNumber == "Phase":
ComplexNumber = "phase"
if AmpDir == "Direction":
# ComplexNumber = "real"
Component = "vec"
elif AmpDir == "Amp":
# ComplexNumber = "real"
Component = "amp"
if SrcType == "ED":
Field = Field+"_from_ED"
elif SrcType == "MD":
Field = Field+"_from_MD"
return self.Dipole2Dviz(x1, y1, x2, y2, npts2D, nRx, sig, f, srcLoc=np.r_[0., 0., 0.], orientation="z", component=ComplexNumber, view=Component, normal=normal, functype=Field, loc=Offset, scale=Scale)
out = widgetify(foo
,Field=widgets.ToggleButtons(options=["E", "H", "J"], value=fieldvalue) \
,AmpDir=widgets.ToggleButtons(options=['None','Amp','Direction'], value="Direction") \
,Component=widgets.ToggleButtons(options=['x','y','z'], value=compvalue, description='Comp.') \
,ComplexNumber=widgets.ToggleButtons(options=['Re','Im','Amp', 'Phase']) \
,Frequency=widgets.FloatText(value=0., continuous_update=False, description='f (Hz)') \
,Sigma=widgets.FloatText(value=0.01, continuous_update=False, description='$\sigma$ (S/m)') \
,Offset=widgets.FloatText(value = offset_plane, continuous_update=False) \
,Scale=widgets.ToggleButtons(options=['log','linear'], value="log") \
,Slider=widgets.widget_bool.Checkbox(value=False)\
,FreqLog=widgets.FloatSlider(min=-3, max=6, step=0.5, value=-3, continuous_update=False) \
,SigLog=widgets.FloatSlider(min=-3, max=3, step=0.5, value=-3, continuous_update=False) \
,SrcType = fixed(SrcType)
)
return out
def InteractiveDipole(self):
def foo(orientation, normal, component, view, functype, flog, siglog, x1, y1, x2, y2, npts2D, npts, loc):
f = np.r_[10**flog]
sig = np.r_[10**siglog]
return self.Dipole2Dviz(x1, y1, x2, y2, npts2D, npts, sig, f, srcLoc=np.r_[0., 0., 0.], orientation=orientation, component=component, view=view, normal=normal, functype=functype, loc=loc, dx=50.)
out = widgetify(foo
,orientation=widgets.ToggleButtons(options=['x','y','z']) \
,normal=widgets.ToggleButtons(options=['X','Y','Z'], value="Z") \
,component=widgets.ToggleButtons(options=['real','imag','amplitude', 'phase']) \
,view=widgets.ToggleButtons(options=['x','y','z', 'vec']) \
,functype=widgets.ToggleButtons(options=["E_from_ED", "H_from_ED", "E_from_ED_galvanic", "E_from_ED_inductive"]) \
,flog=widgets.FloatSlider(min=-3, max=6, step=0.5, value=-3, continuous_update=False) \
,siglog=widgets.FloatSlider(min=-3, max=3, step=0.5, value=-3, continuous_update=False) \
,loc=widgets.FloatText(value=0.01) \
,x1=widgets.FloatText(value=-10) \
,y1=widgets.FloatText(value=0.01) \
,x2=widgets.FloatText(value=10) \
,y2=widgets.FloatText(value=0.01) \
,npts2D=widgets.IntSlider(min=4,max=200,step=2,value=40) \
,npts=widgets.IntSlider(min=4,max=200,step=2,value=40)
)
return out
def DisPosNegvalues(val):
temp_p = val.copy()*np.nan
temp_p[val>0.] = val[val>0.]
temp_n = val.copy()*np.nan
temp_n[val<0.] = -val[val<0.]
return temp_p, temp_n
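# Example: DisPosNegvalues(np.r_[1., -2., 0.]) returns
#   (array([ 1., nan, nan]), array([nan,  2., nan]))
# so log-scale plots can draw the positive and negative lobes of a profile
# as separate solid/dashed curves (see the Scale == "log" branches below).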
def InteractiveDipoleProfile(self, sig, Field, Scale):
srcLoc = np.r_[0., 0., 0.]
orientation = "z"
nRx = int(100)
# def foo(Component, Profile, Scale, F1, F2, F3):
def foo(Component, ComplexNumber, Sigma, Profile, F1, F2, F3, Scale, FixedScale=False):
# Scale = "log"
orientation = "z"
vals = []
if Field =="E":
unit = " (V/m)"
elif Field =="H":
unit = " (A/m)"
elif Field =="J":
unit = " (A/m $^2$)"
if ComplexNumber == "ReIm":
headerr, headeri = "Re(", "Im("
textsep = ")"
elif ComplexNumber == "AmpPhase":
headerr, headeri = "|", "Phase("
textsep = "|"
labelr = headerr+Field+Component+textsep+"-field "+unit
if ComplexNumber == "AmpPhase":
unit = " (rad)"
labeli = headeri+Field+Component+")-field " + unit
F = [F1, F2, F3]
if Component == "x":
icomp = 0
elif Component == "y":
icomp = 1
elif Component == "z":
icomp = 2
if Profile == "TxProfile":
xyz_line = np.c_[np.linspace(-20., 80., nRx), np.zeros(nRx), np.zeros(nRx)]
r = xyz_line[:,0]
fig = plt.figure(figsize=(18*1.5,3.4*1.5))
gs1 = gridspec.GridSpec(2, 7)
gs1.update(left=0.05, right=0.48, wspace=0.05)
ax1 = plt.subplot(gs1[:2, :3])
ax2 = ax1.twinx()
else:
if Profile == "Rxhole":
xyz_line = self.dataview.xyz_line.copy()
elif Profile == "Txhole":
xyz_line = self.dataview.xyz_line.copy()
xyz_line[:,0] = 0.
else:
raise NotImplementedError()
r = xyz_line[:,2]
fig = plt.figure(figsize=(18*1.0,3.4*1.5))
gs1 = gridspec.GridSpec(2, 7)
gs1.update(left=0.05, right=0.48, wspace=0.05)
ax1 = plt.subplot(gs1[:2, :3])
ax2 = ax1.twiny()
for ifreq, f in enumerate(F):
Frequency = f
vals.append(self.dataview.eval(xyz_line, srcLoc, np.r_[Sigma], np.r_[f], orientation, self.dataview.func2D))
# for ifreq, f in enumerate(F):
if ComplexNumber == "ReIm":
valr = vals[ifreq][icomp].real.flatten()
vali = vals[ifreq][icomp].imag.flatten()
elif ComplexNumber == "AmpPhase":
valr = abs(vals[ifreq][icomp]).flatten()
vali = np.angle(vals[ifreq][icomp]).flatten()
if Scale == "log":
valr_p, valr_n = DisPosNegvalues(valr)
vali_p, vali_n = DisPosNegvalues(vali)
if Profile == "Rxhole" or Profile == "Txhole" :
ax1.plot(valr_p, r, 'k-')
ax1.plot(valr_n, r, 'k--')
if Frequency > 0.:
ax2.plot(vali_p, r, 'r-')
ax2.plot(vali_n, r, 'r--')
elif Profile == "TxProfile":
ax1.plot(r, valr_p, 'k-')
ax1.plot(r, valr_n, 'k--')
if Frequency > 0.:
ax2.plot(r, vali_p, 'r-')
ax2.plot(r, vali_n, 'r--')
elif Scale == "linear":
if Profile == "Rxhole" or Profile == "Txhole" :
ax1.plot(valr, r, 'k-')
if Frequency > 0.:
ax1.plot(vali, r, 'r-')
elif Profile == "TxProfile":
ax1.plot(r, valr, 'k-')
if Frequency > 0.:
ax1.plot(r, vali, 'r-')
if Profile == "Rxhole" or Profile == "Txhole" :
ax1.set_xscale(Scale)
ax1.set_ylim(-50, 50)
if Frequency > 0.:
ax2.set_xscale(Scale)
if FixedScale:
vmin1, vmax1 = ax1.get_xlim()
vmin2, vmax2 = ax2.get_xlim()
vmin = min(vmin1, vmin2)
vmax = max(vmax1, vmax2)
ax1.set_xlim(vmin, vmax)
ax2.set_xlim(vmin, vmax)
ax2.set_xlabel(labeli, color='r')
ax1.set_xlabel(labelr, color='k')
ax1.set_ylabel("Z (m)")
elif Profile == "TxProfile":
ax1.set_yscale(Scale)
ax1.set_xlim(-20, 80)
if Frequency > 0.:
ax2.set_yscale(Scale)
if FixedScale:
vmin1, vmax1 = ax1.get_ylim()
vmin2, vmax2 = ax2.get_ylim()
vmin = min(vmin1, vmin2)
vmax = max(vmax1, vmax2)
ax1.set_ylim(vmin, vmax)
ax2.set_ylim(vmin, vmax)
ax2.set_ylabel(labeli, color='r')
ax1.set_ylabel(labelr, color='k')
ax1.set_xlabel("X (m)")
if Scale == "linear":
if Profile == "Rxhole" or Profile == "Txhole" :
# xticksa = np.linspace(valr.min(), valr.max(), 3)
x = ax1.xaxis.get_majorticklocs()
xticksa = np.linspace(x.min(), x.max(), 3)
ax1.xaxis.set_ticks(xticksa)
ax1.xaxis.set_major_formatter(ticker.FormatStrFormatter("%.0e"))
if Frequency > 0.:
if FixedScale is not True:
x = ax2.xaxis.get_majorticklocs()
for tl in ax2.get_yticklabels():
tl.set_color('r')
xticksb = np.linspace(x.min(), x.max(), 3)
ax2.xaxis.set_ticks(xticksb)
ax2.xaxis.set_major_formatter(ticker.FormatStrFormatter("%.0e"))
elif Profile == "TxProfile":
# yticksa = np.linspace(valr.min(), valr.max(), 3)
y = ax1.yaxis.get_majorticklocs()
yticksa = np.linspace(y.min(), y.max(), 3)
ax1.yaxis.set_ticks(yticksa)
ax1.yaxis.set_major_formatter(ticker.FormatStrFormatter("%.0e"))
if Frequency > 0.:
if FixedScale is not True:
y = ax2.yaxis.get_majorticklocs()
yticksb = np.linspace(y.min(), y.max(), 3)
ax2.yaxis.set_ticks(yticksb)
ax2.yaxis.set_major_formatter(ticker.FormatStrFormatter("%.0e"))
if Frequency > 0.:
if Profile == "Rxhole" or Profile == "Txhole":
for tl in ax2.get_xticklabels():
tl.set_color('r')
elif Profile == "TxProfile":
for tl in ax2.get_yticklabels():
tl.set_color('r')
else:
raise NotImplementedError()
ax1.grid(True)
Q2 = widgetify(foo
,Profile=widgets.ToggleButtons(options=['Rxhole','Txhole','TxProfile'], value='Rxhole')
,Component=widgets.ToggleButtons(options=['x','y','z'], value='z', description='Comp.') \
,ComplexNumber=widgets.ToggleButtons(options=['ReIm','AmpPhase']) \
,Sigma=widgets.FloatText(value=sig, continuous_update=False, description='$\sigma$ (S/m)') \
,Scale=widgets.ToggleButtons(options=['log','linear'], value=Scale) \
,FixedScale=widgets.widget_bool.Checkbox(value=False, description='Fixed')
,F1=widgets.FloatText(value=0.1, continuous_update=False, description='$f_1$ (Hz)')
,F2=widgets.FloatText(value=100, continuous_update=False, description='$f_2$ (Hz)')\
,F3=widgets.FloatText(value=1000, continuous_update=False, description='$f_3$ (Hz)'))
return Q2
|
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.ip_messaging.v1.service.channel import ChannelList
from twilio.rest.ip_messaging.v1.service.role import RoleList
from twilio.rest.ip_messaging.v1.service.user import UserList
class ServiceList(ListResource):
def __init__(self, version):
"""
Initialize the ServiceList
:param Version version: Version that contains the resource
:returns: twilio.rest.ip_messaging.v1.service.ServiceList
:rtype: twilio.rest.ip_messaging.v1.service.ServiceList
"""
super(ServiceList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Services'.format(**self._solution)
def create(self, friendly_name):
"""
Create a new ServiceInstance
:param unicode friendly_name: The friendly_name
:returns: Newly created ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
data = values.of({
'FriendlyName': friendly_name,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return ServiceInstance(
self._version,
payload,
)
def stream(self, limit=None, page_size=None):
"""
Streams ServiceInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.ip_messaging.v1.service.ServiceInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists ServiceInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.ip_messaging.v1.service.ServiceInstance]
"""
return list(self.stream(
limit=limit,
page_size=page_size,
))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of ServiceInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServicePage
"""
params = values.of({
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return ServicePage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a ServiceContext
:param sid: The sid
:returns: twilio.rest.ip_messaging.v1.service.ServiceContext
:rtype: twilio.rest.ip_messaging.v1.service.ServiceContext
"""
return ServiceContext(
self._version,
sid=sid,
)
def __call__(self, sid):
"""
Constructs a ServiceContext
:param sid: The sid
:returns: twilio.rest.ip_messaging.v1.service.ServiceContext
:rtype: twilio.rest.ip_messaging.v1.service.ServiceContext
"""
return ServiceContext(
self._version,
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.IpMessaging.V1.ServiceList>'
class ServicePage(Page):
def __init__(self, version, response, solution):
"""
Initialize the ServicePage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.ip_messaging.v1.service.ServicePage
:rtype: twilio.rest.ip_messaging.v1.service.ServicePage
"""
super(ServicePage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ServiceInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.ip_messaging.v1.service.ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
return ServiceInstance(
self._version,
payload,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.IpMessaging.V1.ServicePage>'
class ServiceContext(InstanceContext):
def __init__(self, version, sid):
"""
Initialize the ServiceContext
:param Version version: Version that contains the resource
:param sid: The sid
:returns: twilio.rest.ip_messaging.v1.service.ServiceContext
:rtype: twilio.rest.ip_messaging.v1.service.ServiceContext
"""
super(ServiceContext, self).__init__(version)
# Path Solution
self._solution = {
'sid': sid,
}
self._uri = '/Services/{sid}'.format(**self._solution)
# Dependents
self._channels = None
self._roles = None
self._users = None
def fetch(self):
"""
Fetch a ServiceInstance
:returns: Fetched ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return ServiceInstance(
self._version,
payload,
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the ServiceInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def update(self, friendly_name=values.unset,
default_service_role_sid=values.unset,
default_channel_role_sid=values.unset,
default_channel_creator_role_sid=values.unset,
read_status_enabled=values.unset, reachability_enabled=values.unset,
typing_indicator_timeout=values.unset,
consumption_report_interval=values.unset,
notifications_new_message_enabled=values.unset,
notifications_new_message_template=values.unset,
notifications_added_to_channel_enabled=values.unset,
notifications_added_to_channel_template=values.unset,
notifications_removed_from_channel_enabled=values.unset,
notifications_removed_from_channel_template=values.unset,
notifications_invited_to_channel_enabled=values.unset,
notifications_invited_to_channel_template=values.unset,
pre_webhook_url=values.unset, post_webhook_url=values.unset,
webhook_method=values.unset, webhook_filters=values.unset,
webhooks_on_message_send_url=values.unset,
webhooks_on_message_send_method=values.unset,
webhooks_on_message_send_format=values.unset,
webhooks_on_message_update_url=values.unset,
webhooks_on_message_update_method=values.unset,
webhooks_on_message_update_format=values.unset,
webhooks_on_message_remove_url=values.unset,
webhooks_on_message_remove_method=values.unset,
webhooks_on_message_remove_format=values.unset,
webhooks_on_channel_add_url=values.unset,
webhooks_on_channel_add_method=values.unset,
webhooks_on_channel_add_format=values.unset,
webhooks_on_channel_destroy_url=values.unset,
webhooks_on_channel_destroy_method=values.unset,
webhooks_on_channel_destroy_format=values.unset,
webhooks_on_channel_update_url=values.unset,
webhooks_on_channel_update_method=values.unset,
webhooks_on_channel_update_format=values.unset,
webhooks_on_member_add_url=values.unset,
webhooks_on_member_add_method=values.unset,
webhooks_on_member_add_format=values.unset,
webhooks_on_member_remove_url=values.unset,
webhooks_on_member_remove_method=values.unset,
webhooks_on_member_remove_format=values.unset,
webhooks_on_message_sent_url=values.unset,
webhooks_on_message_sent_method=values.unset,
webhooks_on_message_sent_format=values.unset,
webhooks_on_message_updated_url=values.unset,
webhooks_on_message_updated_method=values.unset,
webhooks_on_message_updated_format=values.unset,
webhooks_on_message_removed_url=values.unset,
webhooks_on_message_removed_method=values.unset,
webhooks_on_message_removed_format=values.unset,
webhooks_on_channel_added_url=values.unset,
webhooks_on_channel_added_method=values.unset,
webhooks_on_channel_added_format=values.unset,
webhooks_on_channel_destroyed_url=values.unset,
webhooks_on_channel_destroyed_method=values.unset,
webhooks_on_channel_destroyed_format=values.unset,
webhooks_on_channel_updated_url=values.unset,
webhooks_on_channel_updated_method=values.unset,
webhooks_on_channel_updated_format=values.unset,
webhooks_on_member_added_url=values.unset,
webhooks_on_member_added_method=values.unset,
webhooks_on_member_added_format=values.unset,
webhooks_on_member_removed_url=values.unset,
webhooks_on_member_removed_method=values.unset,
webhooks_on_member_removed_format=values.unset,
limits_channel_members=values.unset,
limits_user_channels=values.unset):
"""
Update the ServiceInstance
:param unicode friendly_name: The friendly_name
:param unicode default_service_role_sid: The default_service_role_sid
:param unicode default_channel_role_sid: The default_channel_role_sid
:param unicode default_channel_creator_role_sid: The default_channel_creator_role_sid
:param bool read_status_enabled: The read_status_enabled
:param bool reachability_enabled: The reachability_enabled
:param unicode typing_indicator_timeout: The typing_indicator_timeout
:param unicode consumption_report_interval: The consumption_report_interval
:param bool notifications_new_message_enabled: The notifications.new_message.enabled
:param unicode notifications_new_message_template: The notifications.new_message.template
:param bool notifications_added_to_channel_enabled: The notifications.added_to_channel.enabled
:param unicode notifications_added_to_channel_template: The notifications.added_to_channel.template
:param bool notifications_removed_from_channel_enabled: The notifications.removed_from_channel.enabled
:param unicode notifications_removed_from_channel_template: The notifications.removed_from_channel.template
:param bool notifications_invited_to_channel_enabled: The notifications.invited_to_channel.enabled
:param unicode notifications_invited_to_channel_template: The notifications.invited_to_channel.template
:param unicode pre_webhook_url: The pre_webhook_url
:param unicode post_webhook_url: The post_webhook_url
:param unicode webhook_method: The webhook_method
:param unicode webhook_filters: The webhook_filters
:param unicode webhooks_on_message_send_url: The webhooks.on_message_send.url
:param unicode webhooks_on_message_send_method: The webhooks.on_message_send.method
:param unicode webhooks_on_message_send_format: The webhooks.on_message_send.format
:param unicode webhooks_on_message_update_url: The webhooks.on_message_update.url
:param unicode webhooks_on_message_update_method: The webhooks.on_message_update.method
:param unicode webhooks_on_message_update_format: The webhooks.on_message_update.format
:param unicode webhooks_on_message_remove_url: The webhooks.on_message_remove.url
:param unicode webhooks_on_message_remove_method: The webhooks.on_message_remove.method
:param unicode webhooks_on_message_remove_format: The webhooks.on_message_remove.format
:param unicode webhooks_on_channel_add_url: The webhooks.on_channel_add.url
:param unicode webhooks_on_channel_add_method: The webhooks.on_channel_add.method
:param unicode webhooks_on_channel_add_format: The webhooks.on_channel_add.format
:param unicode webhooks_on_channel_destroy_url: The webhooks.on_channel_destroy.url
:param unicode webhooks_on_channel_destroy_method: The webhooks.on_channel_destroy.method
:param unicode webhooks_on_channel_destroy_format: The webhooks.on_channel_destroy.format
:param unicode webhooks_on_channel_update_url: The webhooks.on_channel_update.url
:param unicode webhooks_on_channel_update_method: The webhooks.on_channel_update.method
:param unicode webhooks_on_channel_update_format: The webhooks.on_channel_update.format
:param unicode webhooks_on_member_add_url: The webhooks.on_member_add.url
:param unicode webhooks_on_member_add_method: The webhooks.on_member_add.method
:param unicode webhooks_on_member_add_format: The webhooks.on_member_add.format
:param unicode webhooks_on_member_remove_url: The webhooks.on_member_remove.url
:param unicode webhooks_on_member_remove_method: The webhooks.on_member_remove.method
:param unicode webhooks_on_member_remove_format: The webhooks.on_member_remove.format
:param unicode webhooks_on_message_sent_url: The webhooks.on_message_sent.url
:param unicode webhooks_on_message_sent_method: The webhooks.on_message_sent.method
:param unicode webhooks_on_message_sent_format: The webhooks.on_message_sent.format
:param unicode webhooks_on_message_updated_url: The webhooks.on_message_updated.url
:param unicode webhooks_on_message_updated_method: The webhooks.on_message_updated.method
:param unicode webhooks_on_message_updated_format: The webhooks.on_message_updated.format
:param unicode webhooks_on_message_removed_url: The webhooks.on_message_removed.url
:param unicode webhooks_on_message_removed_method: The webhooks.on_message_removed.method
:param unicode webhooks_on_message_removed_format: The webhooks.on_message_removed.format
:param unicode webhooks_on_channel_added_url: The webhooks.on_channel_added.url
:param unicode webhooks_on_channel_added_method: The webhooks.on_channel_added.method
:param unicode webhooks_on_channel_added_format: The webhooks.on_channel_added.format
:param unicode webhooks_on_channel_destroyed_url: The webhooks.on_channel_destroyed.url
:param unicode webhooks_on_channel_destroyed_method: The webhooks.on_channel_destroyed.method
:param unicode webhooks_on_channel_destroyed_format: The webhooks.on_channel_destroyed.format
:param unicode webhooks_on_channel_updated_url: The webhooks.on_channel_updated.url
:param unicode webhooks_on_channel_updated_method: The webhooks.on_channel_updated.method
:param unicode webhooks_on_channel_updated_format: The webhooks.on_channel_updated.format
:param unicode webhooks_on_member_added_url: The webhooks.on_member_added.url
:param unicode webhooks_on_member_added_method: The webhooks.on_member_added.method
:param unicode webhooks_on_member_added_format: The webhooks.on_member_added.format
:param unicode webhooks_on_member_removed_url: The webhooks.on_member_removed.url
:param unicode webhooks_on_member_removed_method: The webhooks.on_member_removed.method
:param unicode webhooks_on_member_removed_format: The webhooks.on_member_removed.format
:param unicode limits_channel_members: The limits.channel_members
:param unicode limits_user_channels: The limits.user_channels
:returns: Updated ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'DefaultServiceRoleSid': default_service_role_sid,
'DefaultChannelRoleSid': default_channel_role_sid,
'DefaultChannelCreatorRoleSid': default_channel_creator_role_sid,
'ReadStatusEnabled': read_status_enabled,
'ReachabilityEnabled': reachability_enabled,
'TypingIndicatorTimeout': typing_indicator_timeout,
'ConsumptionReportInterval': consumption_report_interval,
'Notifications.NewMessage.Enabled': notifications_new_message_enabled,
'Notifications.NewMessage.Template': notifications_new_message_template,
'Notifications.AddedToChannel.Enabled': notifications_added_to_channel_enabled,
'Notifications.AddedToChannel.Template': notifications_added_to_channel_template,
'Notifications.RemovedFromChannel.Enabled': notifications_removed_from_channel_enabled,
'Notifications.RemovedFromChannel.Template': notifications_removed_from_channel_template,
'Notifications.InvitedToChannel.Enabled': notifications_invited_to_channel_enabled,
'Notifications.InvitedToChannel.Template': notifications_invited_to_channel_template,
'PreWebhookUrl': pre_webhook_url,
'PostWebhookUrl': post_webhook_url,
'WebhookMethod': webhook_method,
'WebhookFilters': webhook_filters,
'Webhooks.OnMessageSend.Url': webhooks_on_message_send_url,
'Webhooks.OnMessageSend.Method': webhooks_on_message_send_method,
'Webhooks.OnMessageSend.Format': webhooks_on_message_send_format,
'Webhooks.OnMessageUpdate.Url': webhooks_on_message_update_url,
'Webhooks.OnMessageUpdate.Method': webhooks_on_message_update_method,
'Webhooks.OnMessageUpdate.Format': webhooks_on_message_update_format,
'Webhooks.OnMessageRemove.Url': webhooks_on_message_remove_url,
'Webhooks.OnMessageRemove.Method': webhooks_on_message_remove_method,
'Webhooks.OnMessageRemove.Format': webhooks_on_message_remove_format,
'Webhooks.OnChannelAdd.Url': webhooks_on_channel_add_url,
'Webhooks.OnChannelAdd.Method': webhooks_on_channel_add_method,
'Webhooks.OnChannelAdd.Format': webhooks_on_channel_add_format,
'Webhooks.OnChannelDestroy.Url': webhooks_on_channel_destroy_url,
'Webhooks.OnChannelDestroy.Method': webhooks_on_channel_destroy_method,
'Webhooks.OnChannelDestroy.Format': webhooks_on_channel_destroy_format,
'Webhooks.OnChannelUpdate.Url': webhooks_on_channel_update_url,
'Webhooks.OnChannelUpdate.Method': webhooks_on_channel_update_method,
'Webhooks.OnChannelUpdate.Format': webhooks_on_channel_update_format,
'Webhooks.OnMemberAdd.Url': webhooks_on_member_add_url,
'Webhooks.OnMemberAdd.Method': webhooks_on_member_add_method,
'Webhooks.OnMemberAdd.Format': webhooks_on_member_add_format,
'Webhooks.OnMemberRemove.Url': webhooks_on_member_remove_url,
'Webhooks.OnMemberRemove.Method': webhooks_on_member_remove_method,
'Webhooks.OnMemberRemove.Format': webhooks_on_member_remove_format,
'Webhooks.OnMessageSent.Url': webhooks_on_message_sent_url,
'Webhooks.OnMessageSent.Method': webhooks_on_message_sent_method,
'Webhooks.OnMessageSent.Format': webhooks_on_message_sent_format,
'Webhooks.OnMessageUpdated.Url': webhooks_on_message_updated_url,
'Webhooks.OnMessageUpdated.Method': webhooks_on_message_updated_method,
'Webhooks.OnMessageUpdated.Format': webhooks_on_message_updated_format,
'Webhooks.OnMessageRemoved.Url': webhooks_on_message_removed_url,
'Webhooks.OnMessageRemoved.Method': webhooks_on_message_removed_method,
'Webhooks.OnMessageRemoved.Format': webhooks_on_message_removed_format,
'Webhooks.OnChannelAdded.Url': webhooks_on_channel_added_url,
'Webhooks.OnChannelAdded.Method': webhooks_on_channel_added_method,
'Webhooks.OnChannelAdded.Format': webhooks_on_channel_added_format,
'Webhooks.OnChannelDestroyed.Url': webhooks_on_channel_destroyed_url,
'Webhooks.OnChannelDestroyed.Method': webhooks_on_channel_destroyed_method,
'Webhooks.OnChannelDestroyed.Format': webhooks_on_channel_destroyed_format,
'Webhooks.OnChannelUpdated.Url': webhooks_on_channel_updated_url,
'Webhooks.OnChannelUpdated.Method': webhooks_on_channel_updated_method,
'Webhooks.OnChannelUpdated.Format': webhooks_on_channel_updated_format,
'Webhooks.OnMemberAdded.Url': webhooks_on_member_added_url,
'Webhooks.OnMemberAdded.Method': webhooks_on_member_added_method,
'Webhooks.OnMemberAdded.Format': webhooks_on_member_added_format,
'Webhooks.OnMemberRemoved.Url': webhooks_on_member_removed_url,
'Webhooks.OnMemberRemoved.Method': webhooks_on_member_removed_method,
'Webhooks.OnMemberRemoved.Format': webhooks_on_member_removed_format,
'Limits.ChannelMembers': limits_channel_members,
'Limits.UserChannels': limits_user_channels,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return ServiceInstance(
self._version,
payload,
sid=self._solution['sid'],
)
@property
def channels(self):
"""
Access the channels
:returns: twilio.rest.ip_messaging.v1.service.channel.ChannelList
:rtype: twilio.rest.ip_messaging.v1.service.channel.ChannelList
"""
if self._channels is None:
self._channels = ChannelList(
self._version,
service_sid=self._solution['sid'],
)
return self._channels
@property
def roles(self):
"""
Access the roles
:returns: twilio.rest.ip_messaging.v1.service.role.RoleList
:rtype: twilio.rest.ip_messaging.v1.service.role.RoleList
"""
if self._roles is None:
self._roles = RoleList(
self._version,
service_sid=self._solution['sid'],
)
return self._roles
@property
def users(self):
"""
Access the users
:returns: twilio.rest.ip_messaging.v1.service.user.UserList
:rtype: twilio.rest.ip_messaging.v1.service.user.UserList
"""
if self._users is None:
self._users = UserList(
self._version,
service_sid=self._solution['sid'],
)
return self._users
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.IpMessaging.V1.ServiceContext {}>'.format(context)
class ServiceInstance(InstanceResource):
def __init__(self, version, payload, sid=None):
"""
Initialize the ServiceInstance
:returns: twilio.rest.ip_messaging.v1.service.ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
super(ServiceInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload['sid'],
'account_sid': payload['account_sid'],
'friendly_name': payload['friendly_name'],
'date_created': deserialize.iso8601_datetime(payload['date_created']),
'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
'default_service_role_sid': payload['default_service_role_sid'],
'default_channel_role_sid': payload['default_channel_role_sid'],
'default_channel_creator_role_sid': payload['default_channel_creator_role_sid'],
'read_status_enabled': payload['read_status_enabled'],
'reachability_enabled': payload['reachability_enabled'],
'typing_indicator_timeout': deserialize.integer(payload['typing_indicator_timeout']),
'consumption_report_interval': deserialize.integer(payload['consumption_report_interval']),
'limits': payload['limits'],
'webhooks': payload['webhooks'],
'pre_webhook_url': payload['pre_webhook_url'],
'post_webhook_url': payload['post_webhook_url'],
'webhook_method': payload['webhook_method'],
'webhook_filters': payload['webhook_filters'],
'notifications': payload['notifications'],
'url': payload['url'],
'links': payload['links'],
}
# Context
self._context = None
self._solution = {
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ServiceContext for this ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceContext
"""
if self._context is None:
self._context = ServiceContext(
self._version,
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: The sid
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def friendly_name(self):
"""
:returns: The friendly_name
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def default_service_role_sid(self):
"""
:returns: The default_service_role_sid
:rtype: unicode
"""
return self._properties['default_service_role_sid']
@property
def default_channel_role_sid(self):
"""
:returns: The default_channel_role_sid
:rtype: unicode
"""
return self._properties['default_channel_role_sid']
@property
def default_channel_creator_role_sid(self):
"""
:returns: The default_channel_creator_role_sid
:rtype: unicode
"""
return self._properties['default_channel_creator_role_sid']
@property
def read_status_enabled(self):
"""
:returns: The read_status_enabled
:rtype: bool
"""
return self._properties['read_status_enabled']
@property
def reachability_enabled(self):
"""
:returns: The reachability_enabled
:rtype: bool
"""
return self._properties['reachability_enabled']
@property
def typing_indicator_timeout(self):
"""
:returns: The typing_indicator_timeout
:rtype: unicode
"""
return self._properties['typing_indicator_timeout']
@property
def consumption_report_interval(self):
"""
:returns: The consumption_report_interval
:rtype: unicode
"""
return self._properties['consumption_report_interval']
@property
def limits(self):
"""
:returns: The limits
:rtype: dict
"""
return self._properties['limits']
@property
def webhooks(self):
"""
:returns: The webhooks
:rtype: dict
"""
return self._properties['webhooks']
@property
def pre_webhook_url(self):
"""
:returns: The pre_webhook_url
:rtype: unicode
"""
return self._properties['pre_webhook_url']
@property
def post_webhook_url(self):
"""
:returns: The post_webhook_url
:rtype: unicode
"""
return self._properties['post_webhook_url']
@property
def webhook_method(self):
"""
:returns: The webhook_method
:rtype: unicode
"""
return self._properties['webhook_method']
@property
def webhook_filters(self):
"""
:returns: The webhook_filters
:rtype: unicode
"""
return self._properties['webhook_filters']
@property
def notifications(self):
"""
:returns: The notifications
:rtype: dict
"""
return self._properties['notifications']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: The links
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch a ServiceInstance
:returns: Fetched ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the ServiceInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def update(self, friendly_name=values.unset,
default_service_role_sid=values.unset,
default_channel_role_sid=values.unset,
default_channel_creator_role_sid=values.unset,
read_status_enabled=values.unset, reachability_enabled=values.unset,
typing_indicator_timeout=values.unset,
consumption_report_interval=values.unset,
notifications_new_message_enabled=values.unset,
notifications_new_message_template=values.unset,
notifications_added_to_channel_enabled=values.unset,
notifications_added_to_channel_template=values.unset,
notifications_removed_from_channel_enabled=values.unset,
notifications_removed_from_channel_template=values.unset,
notifications_invited_to_channel_enabled=values.unset,
notifications_invited_to_channel_template=values.unset,
pre_webhook_url=values.unset, post_webhook_url=values.unset,
webhook_method=values.unset, webhook_filters=values.unset,
webhooks_on_message_send_url=values.unset,
webhooks_on_message_send_method=values.unset,
webhooks_on_message_send_format=values.unset,
webhooks_on_message_update_url=values.unset,
webhooks_on_message_update_method=values.unset,
webhooks_on_message_update_format=values.unset,
webhooks_on_message_remove_url=values.unset,
webhooks_on_message_remove_method=values.unset,
webhooks_on_message_remove_format=values.unset,
webhooks_on_channel_add_url=values.unset,
webhooks_on_channel_add_method=values.unset,
webhooks_on_channel_add_format=values.unset,
webhooks_on_channel_destroy_url=values.unset,
webhooks_on_channel_destroy_method=values.unset,
webhooks_on_channel_destroy_format=values.unset,
webhooks_on_channel_update_url=values.unset,
webhooks_on_channel_update_method=values.unset,
webhooks_on_channel_update_format=values.unset,
webhooks_on_member_add_url=values.unset,
webhooks_on_member_add_method=values.unset,
webhooks_on_member_add_format=values.unset,
webhooks_on_member_remove_url=values.unset,
webhooks_on_member_remove_method=values.unset,
webhooks_on_member_remove_format=values.unset,
webhooks_on_message_sent_url=values.unset,
webhooks_on_message_sent_method=values.unset,
webhooks_on_message_sent_format=values.unset,
webhooks_on_message_updated_url=values.unset,
webhooks_on_message_updated_method=values.unset,
webhooks_on_message_updated_format=values.unset,
webhooks_on_message_removed_url=values.unset,
webhooks_on_message_removed_method=values.unset,
webhooks_on_message_removed_format=values.unset,
webhooks_on_channel_added_url=values.unset,
webhooks_on_channel_added_method=values.unset,
webhooks_on_channel_added_format=values.unset,
webhooks_on_channel_destroyed_url=values.unset,
webhooks_on_channel_destroyed_method=values.unset,
webhooks_on_channel_destroyed_format=values.unset,
webhooks_on_channel_updated_url=values.unset,
webhooks_on_channel_updated_method=values.unset,
webhooks_on_channel_updated_format=values.unset,
webhooks_on_member_added_url=values.unset,
webhooks_on_member_added_method=values.unset,
webhooks_on_member_added_format=values.unset,
webhooks_on_member_removed_url=values.unset,
webhooks_on_member_removed_method=values.unset,
webhooks_on_member_removed_format=values.unset,
limits_channel_members=values.unset,
limits_user_channels=values.unset):
"""
Update the ServiceInstance
:param unicode friendly_name: The friendly_name
:param unicode default_service_role_sid: The default_service_role_sid
:param unicode default_channel_role_sid: The default_channel_role_sid
:param unicode default_channel_creator_role_sid: The default_channel_creator_role_sid
:param bool read_status_enabled: The read_status_enabled
:param bool reachability_enabled: The reachability_enabled
:param unicode typing_indicator_timeout: The typing_indicator_timeout
:param unicode consumption_report_interval: The consumption_report_interval
:param bool notifications_new_message_enabled: The notifications.new_message.enabled
:param unicode notifications_new_message_template: The notifications.new_message.template
:param bool notifications_added_to_channel_enabled: The notifications.added_to_channel.enabled
:param unicode notifications_added_to_channel_template: The notifications.added_to_channel.template
:param bool notifications_removed_from_channel_enabled: The notifications.removed_from_channel.enabled
:param unicode notifications_removed_from_channel_template: The notifications.removed_from_channel.template
:param bool notifications_invited_to_channel_enabled: The notifications.invited_to_channel.enabled
:param unicode notifications_invited_to_channel_template: The notifications.invited_to_channel.template
:param unicode pre_webhook_url: The pre_webhook_url
:param unicode post_webhook_url: The post_webhook_url
:param unicode webhook_method: The webhook_method
:param unicode webhook_filters: The webhook_filters
:param unicode webhooks_on_message_send_url: The webhooks.on_message_send.url
:param unicode webhooks_on_message_send_method: The webhooks.on_message_send.method
:param unicode webhooks_on_message_send_format: The webhooks.on_message_send.format
:param unicode webhooks_on_message_update_url: The webhooks.on_message_update.url
:param unicode webhooks_on_message_update_method: The webhooks.on_message_update.method
:param unicode webhooks_on_message_update_format: The webhooks.on_message_update.format
:param unicode webhooks_on_message_remove_url: The webhooks.on_message_remove.url
:param unicode webhooks_on_message_remove_method: The webhooks.on_message_remove.method
:param unicode webhooks_on_message_remove_format: The webhooks.on_message_remove.format
:param unicode webhooks_on_channel_add_url: The webhooks.on_channel_add.url
:param unicode webhooks_on_channel_add_method: The webhooks.on_channel_add.method
:param unicode webhooks_on_channel_add_format: The webhooks.on_channel_add.format
:param unicode webhooks_on_channel_destroy_url: The webhooks.on_channel_destroy.url
:param unicode webhooks_on_channel_destroy_method: The webhooks.on_channel_destroy.method
:param unicode webhooks_on_channel_destroy_format: The webhooks.on_channel_destroy.format
:param unicode webhooks_on_channel_update_url: The webhooks.on_channel_update.url
:param unicode webhooks_on_channel_update_method: The webhooks.on_channel_update.method
:param unicode webhooks_on_channel_update_format: The webhooks.on_channel_update.format
:param unicode webhooks_on_member_add_url: The webhooks.on_member_add.url
:param unicode webhooks_on_member_add_method: The webhooks.on_member_add.method
:param unicode webhooks_on_member_add_format: The webhooks.on_member_add.format
:param unicode webhooks_on_member_remove_url: The webhooks.on_member_remove.url
:param unicode webhooks_on_member_remove_method: The webhooks.on_member_remove.method
:param unicode webhooks_on_member_remove_format: The webhooks.on_member_remove.format
:param unicode webhooks_on_message_sent_url: The webhooks.on_message_sent.url
:param unicode webhooks_on_message_sent_method: The webhooks.on_message_sent.method
:param unicode webhooks_on_message_sent_format: The webhooks.on_message_sent.format
:param unicode webhooks_on_message_updated_url: The webhooks.on_message_updated.url
:param unicode webhooks_on_message_updated_method: The webhooks.on_message_updated.method
:param unicode webhooks_on_message_updated_format: The webhooks.on_message_updated.format
:param unicode webhooks_on_message_removed_url: The webhooks.on_message_removed.url
:param unicode webhooks_on_message_removed_method: The webhooks.on_message_removed.method
:param unicode webhooks_on_message_removed_format: The webhooks.on_message_removed.format
:param unicode webhooks_on_channel_added_url: The webhooks.on_channel_added.url
:param unicode webhooks_on_channel_added_method: The webhooks.on_channel_added.method
:param unicode webhooks_on_channel_added_format: The webhooks.on_channel_added.format
:param unicode webhooks_on_channel_destroyed_url: The webhooks.on_channel_destroyed.url
:param unicode webhooks_on_channel_destroyed_method: The webhooks.on_channel_destroyed.method
:param unicode webhooks_on_channel_destroyed_format: The webhooks.on_channel_destroyed.format
:param unicode webhooks_on_channel_updated_url: The webhooks.on_channel_updated.url
:param unicode webhooks_on_channel_updated_method: The webhooks.on_channel_updated.method
:param unicode webhooks_on_channel_updated_format: The webhooks.on_channel_updated.format
:param unicode webhooks_on_member_added_url: The webhooks.on_member_added.url
:param unicode webhooks_on_member_added_method: The webhooks.on_member_added.method
:param unicode webhooks_on_member_added_format: The webhooks.on_member_added.format
:param unicode webhooks_on_member_removed_url: The webhooks.on_member_removed.url
:param unicode webhooks_on_member_removed_method: The webhooks.on_member_removed.method
:param unicode webhooks_on_member_removed_format: The webhooks.on_member_removed.format
:param unicode limits_channel_members: The limits.channel_members
:param unicode limits_user_channels: The limits.user_channels
:returns: Updated ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
default_service_role_sid=default_service_role_sid,
default_channel_role_sid=default_channel_role_sid,
default_channel_creator_role_sid=default_channel_creator_role_sid,
read_status_enabled=read_status_enabled,
reachability_enabled=reachability_enabled,
typing_indicator_timeout=typing_indicator_timeout,
consumption_report_interval=consumption_report_interval,
notifications_new_message_enabled=notifications_new_message_enabled,
notifications_new_message_template=notifications_new_message_template,
notifications_added_to_channel_enabled=notifications_added_to_channel_enabled,
notifications_added_to_channel_template=notifications_added_to_channel_template,
notifications_removed_from_channel_enabled=notifications_removed_from_channel_enabled,
notifications_removed_from_channel_template=notifications_removed_from_channel_template,
notifications_invited_to_channel_enabled=notifications_invited_to_channel_enabled,
notifications_invited_to_channel_template=notifications_invited_to_channel_template,
pre_webhook_url=pre_webhook_url,
post_webhook_url=post_webhook_url,
webhook_method=webhook_method,
webhook_filters=webhook_filters,
webhooks_on_message_send_url=webhooks_on_message_send_url,
webhooks_on_message_send_method=webhooks_on_message_send_method,
webhooks_on_message_send_format=webhooks_on_message_send_format,
webhooks_on_message_update_url=webhooks_on_message_update_url,
webhooks_on_message_update_method=webhooks_on_message_update_method,
webhooks_on_message_update_format=webhooks_on_message_update_format,
webhooks_on_message_remove_url=webhooks_on_message_remove_url,
webhooks_on_message_remove_method=webhooks_on_message_remove_method,
webhooks_on_message_remove_format=webhooks_on_message_remove_format,
webhooks_on_channel_add_url=webhooks_on_channel_add_url,
webhooks_on_channel_add_method=webhooks_on_channel_add_method,
webhooks_on_channel_add_format=webhooks_on_channel_add_format,
webhooks_on_channel_destroy_url=webhooks_on_channel_destroy_url,
webhooks_on_channel_destroy_method=webhooks_on_channel_destroy_method,
webhooks_on_channel_destroy_format=webhooks_on_channel_destroy_format,
webhooks_on_channel_update_url=webhooks_on_channel_update_url,
webhooks_on_channel_update_method=webhooks_on_channel_update_method,
webhooks_on_channel_update_format=webhooks_on_channel_update_format,
webhooks_on_member_add_url=webhooks_on_member_add_url,
webhooks_on_member_add_method=webhooks_on_member_add_method,
webhooks_on_member_add_format=webhooks_on_member_add_format,
webhooks_on_member_remove_url=webhooks_on_member_remove_url,
webhooks_on_member_remove_method=webhooks_on_member_remove_method,
webhooks_on_member_remove_format=webhooks_on_member_remove_format,
webhooks_on_message_sent_url=webhooks_on_message_sent_url,
webhooks_on_message_sent_method=webhooks_on_message_sent_method,
webhooks_on_message_sent_format=webhooks_on_message_sent_format,
webhooks_on_message_updated_url=webhooks_on_message_updated_url,
webhooks_on_message_updated_method=webhooks_on_message_updated_method,
webhooks_on_message_updated_format=webhooks_on_message_updated_format,
webhooks_on_message_removed_url=webhooks_on_message_removed_url,
webhooks_on_message_removed_method=webhooks_on_message_removed_method,
webhooks_on_message_removed_format=webhooks_on_message_removed_format,
webhooks_on_channel_added_url=webhooks_on_channel_added_url,
webhooks_on_channel_added_method=webhooks_on_channel_added_method,
webhooks_on_channel_added_format=webhooks_on_channel_added_format,
webhooks_on_channel_destroyed_url=webhooks_on_channel_destroyed_url,
webhooks_on_channel_destroyed_method=webhooks_on_channel_destroyed_method,
webhooks_on_channel_destroyed_format=webhooks_on_channel_destroyed_format,
webhooks_on_channel_updated_url=webhooks_on_channel_updated_url,
webhooks_on_channel_updated_method=webhooks_on_channel_updated_method,
webhooks_on_channel_updated_format=webhooks_on_channel_updated_format,
webhooks_on_member_added_url=webhooks_on_member_added_url,
webhooks_on_member_added_method=webhooks_on_member_added_method,
webhooks_on_member_added_format=webhooks_on_member_added_format,
webhooks_on_member_removed_url=webhooks_on_member_removed_url,
webhooks_on_member_removed_method=webhooks_on_member_removed_method,
webhooks_on_member_removed_format=webhooks_on_member_removed_format,
limits_channel_members=limits_channel_members,
limits_user_channels=limits_user_channels,
)
@property
def channels(self):
"""
Access the channels
:returns: twilio.rest.ip_messaging.v1.service.channel.ChannelList
:rtype: twilio.rest.ip_messaging.v1.service.channel.ChannelList
"""
return self._proxy.channels
@property
def roles(self):
"""
Access the roles
:returns: twilio.rest.ip_messaging.v1.service.role.RoleList
:rtype: twilio.rest.ip_messaging.v1.service.role.RoleList
"""
return self._proxy.roles
@property
def users(self):
"""
Access the users
:returns: twilio.rest.ip_messaging.v1.service.user.UserList
:rtype: twilio.rest.ip_messaging.v1.service.user.UserList
"""
return self._proxy.users
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.IpMessaging.V1.ServiceInstance {}>'.format(context)
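
if __name__ == '__main__':
    # A minimal usage sketch, not part of the generated module. It assumes the
    # twilio package is installed and that TWILIO_ACCOUNT_SID and
    # TWILIO_AUTH_TOKEN (illustrative variable names) hold valid credentials.
    # client.ip_messaging.v1.services resolves to the ServiceList defined above.
    import os
    from twilio.rest import Client

    client = Client(os.environ['TWILIO_ACCOUNT_SID'],
                    os.environ['TWILIO_AUTH_TOKEN'])
    service = client.ip_messaging.v1.services.create(friendly_name='demo')
    for record in client.ip_messaging.v1.services.stream(limit=10):
        print('%s %s' % (record.sid, record.friendly_name))
    # ServiceList.__call__ constructs a ServiceContext, which exposes delete().
    client.ip_messaging.v1.services(service.sid).delete()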
|
|
"""
jTDS/MSSQL database backend for Django.
Django uses this if the DATABASE_ENGINE setting is empty (None or empty string).
Each of these API functions, except connection.close(), raises
ImproperlyConfigured.
"""
try:
# Force the database driver to load
from java.lang import Class
cls = Class.forName("net.sourceforge.jtds.jdbc.Driver").newInstance()
from pool import ManualPoolingDriver
from com.ziclix.python.sql import zxJDBC as Database
from com.ziclix.python.sql import zxJDBC
from com.ziclix.python.sql import PyStatement, PyExtendedCursor, PyCursor, PyConnection
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading zxJDBC module: %s" % e)
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import *
from django.db.backends import BaseDatabaseFeatures, BaseDatabaseValidation
from django.conf import settings
from pool import ManualPoolingDriver
from doj.backends.zxjdbc.common import zxJDBCDatabaseWrapper
from operations import DatabaseOperations
from introspection import DatabaseIntrospection
from creation import DatabaseCreation
if not hasattr(settings, "DATABASE_COLLATION"):
settings.DATABASE_COLLATION = 'Latin1_General_CI_AS'
def complain(*args, **kwargs):
raise ImproperlyConfigured, "You haven't set the DATABASE_ENGINE setting yet."
DatabaseError = zxJDBC.DatabaseError
IntegrityError = zxJDBC.IntegrityError
class DatabaseClient(BaseDatabaseClient):
runshell = complain
class DatabaseWrapper(zxJDBCDatabaseWrapper):
jdbc_url_pattern = "jdbc:jtds:sqlserver://%(DATABASE_HOST)s%(DATABASE_PORT)s/%(DATABASE_NAME)s"
driver_class_name = "net.sourceforge.jtds.jdbc.Driver"
operators = {
        # Since '=' is used not only for string comparison there is no way
        # to make it case (in)sensitive. It will simply fall back to the
        # database collation.
'exact': '= %s ',
'iexact': "= UPPER(%s) ",
'contains': "LIKE %s ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
        'icontains': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
'endswith': "LIKE %s ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
'istartswith': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
'iendswith': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
}
def _register_driver(self):
# Configure the pooled connection driver
if self._LAST_DATABASE_NAME == settings.DATABASE_NAME:
return "jdbc_pool_%s" % self._db_count
self._db_count += 1
pool_name = "jdbc_pool_%s" % self._db_count
db_dict = {
'DATABASE_NAME': settings.DATABASE_NAME,
'DATABASE_HOST': settings.DATABASE_HOST or 'localhost',
'DATABASE_PORT': settings.DATABASE_PORT or 1433,
}
self.driver = ManualPoolingDriver("jdbc:jtds:sqlserver://%(DATABASE_HOST)s:%(DATABASE_PORT)s/%(DATABASE_NAME)s" % db_dict,
settings.DATABASE_USER,
settings.DATABASE_PASSWORD,
pool_name,
)
self._LAST_DATABASE_NAME = settings.DATABASE_NAME
return pool_name
def _cursor(self, settings):
        """
        Implementation-specific cursor.
        """
new_conn = False
if self.connection is None:
# TODO: Refactor this DBCP pool setup to zxJDBCCursorWrapper
new_conn = True
self.connection = self.new_jndi_connection()
if self.connection is None:
pool_name = self._register_driver()
if not settings.DATABASE_NAME:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You need to specify DATABASE_NAME in your Django settings file.")
url='jdbc:apache:commons:dbcp:%s' % pool_name
self.connection = Database.connect(url, None, None, 'org.apache.commons.dbcp.PoolingDriver')
cursor = self.connection.cursor()
if new_conn:
cursor.execute("SET DATEFORMAT ymd")
            # SQL Server violates the SQL standard w.r.t. handling NULL values in
            # UNIQUE columns. We work around this by creating schema-bound views
            # on tables with nullable unique columns, but we need to modify the
            # cursor to abort if the view has problems.
# See http://blogs.msdn.com/sqlcat/archive/2005/12/20/506138.aspx
cursor.execute("SET ARITHABORT ON")
cursor.execute("SET CONCAT_NULL_YIELDS_NULL ON")
cursor.execute("SET QUOTED_IDENTIFIER ON")
cursor.execute("SET ANSI_NULLS ON")
cursor.execute("SET ANSI_PADDING ON")
cursor.execute("SET ANSI_WARNINGS ON")
cursor.execute("SET NUMERIC_ROUNDABORT OFF")
cursor.execute("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE")
            # jTDS can't execute some SQL, like CREATE DATABASE, inside a
            # multi-statement batch, so the SET statements above have to be
            # committed separately to avoid that.
return CursorWrapper(cursor)
def __init__(self, autocommit=False, **kwargs):
super(DatabaseWrapper, self).__init__(autocommit=autocommit, **kwargs)
self._LAST_DATABASE_NAME = None
self.connection = None
self._db_count = 0
self.features = DatabaseFeatures()
self.ops = DatabaseOperations()
self.client = DatabaseClient() # XXX: No client is supported yet
self.creation = DatabaseCreation(self) # Basic type declarations for creating tables
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation() # XXX: No real database validation yet
class DatabaseFeatures(BaseDatabaseFeatures):
uses_custom_query_class = True
class CursorWrapper(object):
"""
A wrapper around the zxJDBC's cursor that takes in account some zxJDBC
DB-API 2.0 implementation and common ODBC driver particularities.
"""
def __init__(self, cursor):
self.cursor = cursor
def format_sql(self, sql):
# zxjdbc uses '?' instead of '%s' as parameter placeholder.
if "%s" in sql:
sql = sql.replace('%s', '?')
return sql
def format_params(self, params):
fp = []
for p in params:
p = coerce_sql2k_type(p)
fp.append(p)
return tuple(fp)
def execute(self, sql, params=()):
sql = self.format_sql(sql)
params = self.format_params(params)
return self.cursor.execute(sql, params)
def executemany(self, sql, params_list):
sql = self.format_sql(sql)
        # zxjdbc's cursor.executemany() doesn't support an empty params_list
        if not params_list:
            # Parameterized SQL with nothing to bind: there is nothing to run.
            if '?' in sql:
                return
        else:
            params_list = [self.format_params(p) for p in params_list]
return self.cursor.executemany(sql, params_list)
    def format_results(self, row):
        """
        Decode data coming from the database if needed and convert the row
        to a tuple (zxjdbc Rows are not sliceable).
        """
        return tuple(row)
def fetchone(self):
row = self.cursor.fetchone()
if row is not None:
return self.format_results(row)
return row
def fetchmany(self, chunk):
return [self.format_results(row) for row in self.cursor.fetchmany(chunk)]
def fetchall(self):
return [self.format_results(row) for row in self.cursor.fetchall()]
    def __getattr__(self, attr):
        # __getattr__ is only consulted after normal attribute lookup has
        # failed, so delegate straight to the underlying zxJDBC cursor.
        return getattr(self.cursor, attr)
def coerce_sql2k_type(p):
    """
    Coerce some Python types to jTDS-friendly types so that
    PreparedStatement::setObject() can work properly.
    """
    if isinstance(p, bool):
        if p:
            return 1
        else:
            return 0
    elif isinstance(p, long):
        # zxJDBC doesn't like injecting long types, or maybe it
        # actually depends on the underlying SQL datatype.
        # Need to figure out a better fix for this.
        if p == int(p):
            return int(p)
        else:
            raise RuntimeError, "SQL Server 2000 + jTDS can't seem to handle long values. Found: [%s]" % p
return p
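
if __name__ == '__main__':
    # Quick sanity checks for coerce_sql2k_type. This module is Python 2 /
    # Jython only (note the `raise X, msg` syntax above), so the 5L long
    # literal below matches its dialect.
    assert coerce_sql2k_type(True) == 1
    assert coerce_sql2k_type(False) == 0
    assert coerce_sql2k_type(5L) == 5
    assert isinstance(coerce_sql2k_type(5L), int)
    assert coerce_sql2k_type('abc') == 'abc'
    print 'coerce_sql2k_type OK'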
|
|
# -*- coding: utf-8 -*-
import authorize
from authorize.exceptions import AuthorizeInvalidError, \
AuthorizeResponseError
from trytond.pool import PoolMeta, Pool
from trytond.pyson import Eval
from trytond.model import fields
__all__ = [
'PaymentGatewayAuthorize', 'AddPaymentProfile',
'AuthorizeNetTransaction', 'Party', 'Address', 'PaymentProfile'
]
__metaclass__ = PoolMeta
class PaymentGatewayAuthorize:
"Authorize.net Gateway Implementation"
__name__ = 'payment_gateway.gateway'
authorize_net_login = fields.Char(
'API Login', states={
'required': Eval('provider') == 'authorize_net',
'invisible': Eval('provider') != 'authorize_net',
'readonly': ~Eval('active', True),
}, depends=['provider', 'active']
)
authorize_net_transaction_key = fields.Char(
'Transaction Key', states={
'required': Eval('provider') == 'authorize_net',
'invisible': Eval('provider') != 'authorize_net',
'readonly': ~Eval('active', True),
}, depends=['provider', 'active']
)
@classmethod
def view_attributes(cls):
return super(PaymentGatewayAuthorize, cls).view_attributes() + [
('//notebook/page[@id="authorize_net"]', 'states', {
'invisible': Eval('provider') != 'authorize_net'
})]
@classmethod
def get_providers(cls, values=None):
"""
Downstream modules can add to the list
"""
rv = super(PaymentGatewayAuthorize, cls).get_providers()
authorize_record = ('authorize_net', 'Authorize.net')
if authorize_record not in rv:
rv.append(authorize_record)
return rv
def get_methods(self):
if self.provider == 'authorize_net':
return [
('credit_card', 'Credit Card - Authorize.net'),
]
return super(PaymentGatewayAuthorize, self).get_methods()
def get_authorize_client(self):
"""
Return an authenticated authorize.net client.
"""
assert self.provider == 'authorize_net', 'Invalid provider'
authorize.Configuration.configure(
authorize.Environment.TEST if self.test else authorize.Environment.PRODUCTION, # noqa
self.authorize_net_login,
self.authorize_net_transaction_key,
)
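
# Usage sketch (illustrative, commented out so nothing runs at import): a
# gateway record with provider 'authorize_net' configures the py-authorize
# library globally, after which the transaction methods below can call
# authorize.Transaction directly. The credential values are placeholders.
#
#     gateway.authorize_net_login = 'api-login-id'
#     gateway.authorize_net_transaction_key = 'transaction-key'
#     gateway.test = True
#     gateway.get_authorize_client()   # points py-authorize at the sandbox
#     result = authorize.Transaction.auth({'amount': '9.99'})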
class AuthorizeNetTransaction:
"""
Implement the authorize and capture methods
"""
__name__ = 'payment_gateway.transaction'
@classmethod
def __setup__(cls):
super(AuthorizeNetTransaction, cls).__setup__()
        cls._error_messages.update({
            'cancel_only_authorized':
                'Only authorized transactions can be cancelled.',
        })
def authorize_authorize_net(self, card_info=None):
"""
Authorize using authorize.net for the specific transaction.
"""
TransactionLog = Pool().get('payment_gateway.transaction.log')
# Initialize authorize client
self.gateway.get_authorize_client()
auth_data = self.get_authorize_net_request_data()
if card_info:
billing_address = self.address.get_authorize_address(
card_info.owner)
shipping_address = {}
if self.shipping_address:
shipping_address = self.shipping_address.get_authorize_address(
card_info.owner)
auth_data.update({
'email': self.party.email,
'credit_card': {
'card_number': card_info.number,
'card_code': str(card_info.csc),
'expiration_date': "%s/%s" % (
card_info.expiry_month, card_info.expiry_year
),
},
'billing': billing_address,
'shipping': shipping_address,
})
elif self.payment_profile:
if self.shipping_address:
if self.shipping_address.authorize_id:
address_id = self.shipping_address.authorize_id
else:
address_id = self.shipping_address.send_to_authorize(
self.payment_profile.authorize_profile_id)
else:
if self.address.authorize_id:
address_id = self.address.authorize_id
else:
address_id = self.address.send_to_authorize(
self.payment_profile.authorize_profile_id)
auth_data.update({
'customer_id': self.payment_profile.authorize_profile_id,
'payment_id': self.payment_profile.provider_reference,
'shipping_id': address_id,
})
else:
self.raise_user_error('no_card_or_profile')
try:
result = authorize.Transaction.auth(auth_data)
except AuthorizeResponseError, exc:
self.state = 'failed'
self.save()
TransactionLog.serialize_and_create(self, exc.full_response)
else:
# Following response codes are given:
# 1 -- Approved
# 2 -- Declined
# 3 -- Error
# 4 -- Held for Review
self.provider_reference = str(result.transaction_response.trans_id)
self.last_four_digits = card_info.number[-4:] if card_info else \
self.payment_profile.last_4_digits
if result.transaction_response.response_code == '1':
self.state = 'authorized'
elif result.transaction_response.response_code == '4':
self.state = 'in-progress'
else:
self.state = 'failed'
self.save()
TransactionLog.serialize_and_create(self, result)
def settle_authorize_net(self):
"""
Settles this transaction if it is a previous authorization.
"""
TransactionLog = Pool().get('payment_gateway.transaction.log')
# Initialize authorize.net client
self.gateway.get_authorize_client()
try:
result = authorize.Transaction.settle(
self.provider_reference, self.amount
)
except AuthorizeResponseError, exc:
self.state = 'failed'
self.save()
TransactionLog.serialize_and_create(self, exc.full_response)
else:
# Following response codes are given:
# 1 -- Approved
# 2 -- Declined
# 3 -- Error
# 4 -- Held for Review
self.provider_reference = str(result.transaction_response.trans_id)
if result.transaction_response.response_code == '1':
self.state = 'completed'
elif result.transaction_response.response_code == '4':
self.state = 'in-progress'
else:
self.state = 'failed'
self.save()
TransactionLog.serialize_and_create(self, result)
if self.state == 'completed':
self.safe_post()
def capture_authorize_net(self, card_info=None):
"""
Capture using authorize.net for the specific transaction.
"""
TransactionLog = Pool().get('payment_gateway.transaction.log')
# Initialize authorize client
self.gateway.get_authorize_client()
capture_data = self.get_authorize_net_request_data()
if card_info:
billing_address = self.address.get_authorize_address(
card_info.owner)
shipping_address = {}
if self.shipping_address:
shipping_address = self.shipping_address.get_authorize_address(
card_info.owner)
capture_data.update({
'email': self.party.email,
'credit_card': {
'card_number': card_info.number,
'card_code': str(card_info.csc),
'expiration_date': "%s/%s" % (
card_info.expiry_month, card_info.expiry_year
),
},
'billing': billing_address,
'shipping': shipping_address,
})
elif self.payment_profile:
if self.shipping_address:
if self.shipping_address.authorize_id:
address_id = self.shipping_address.authorize_id
else:
address_id = self.shipping_address.send_to_authorize(
self.payment_profile.authorize_profile_id)
else:
if self.address.authorize_id:
address_id = self.address.authorize_id
else:
address_id = self.address.send_to_authorize(
self.payment_profile.authorize_profile_id)
capture_data.update({
'customer_id': self.payment_profile.authorize_profile_id,
'payment_id': self.payment_profile.provider_reference,
'shipping_id': address_id,
})
else:
self.raise_user_error('no_card_or_profile')
try:
result = authorize.Transaction.sale(capture_data)
except AuthorizeResponseError, exc:
self.state = 'failed'
self.save()
TransactionLog.serialize_and_create(self, exc.full_response)
else:
# Following response codes are given:
# 1 -- Approved
# 2 -- Declined
# 3 -- Error
# 4 -- Held for Review
self.provider_reference = str(result.transaction_response.trans_id)
self.last_four_digits = card_info.number[-4:] if card_info else \
self.payment_profile.last_4_digits
if result.transaction_response.response_code == '1':
self.state = 'completed'
elif result.transaction_response.response_code == '4':
self.state = 'in-progress'
else:
self.state = 'failed'
self.save()
TransactionLog.serialize_and_create(self, result)
if self.state == 'completed':
self.safe_post()
    def retry_authorize_net(self, credit_card=None):  # pragma: no cover
        """
        Retry the transaction using Authorize.net.
        :param credit_card: An instance of CreditCardView
        """
        self.raise_user_error('feature_not_available')
def update_authorize_net(self): # pragma: no cover
"""
Update the status of the transaction from Authorize.net
"""
TransactionLog = Pool().get('payment_gateway.transaction.log')
result = authorize.Transaction.details(self.provider_reference)
if result.transaction.response_code == '1':
if result.transaction.transaction_type in (
'authCaptureTransaction', 'priorAuthCaptureTransaction'
):
self.state = 'completed'
elif result.transaction.transaction_type == 'authorizeOnlyTransaction': # noqa
self.state = 'authorized'
elif result.transaction.response_code == '4':
pass
else:
self.state = 'failed'
self.save()
TransactionLog.serialize_and_create(self, result)
if self.state == 'completed':
self.safe_post()
def cancel_authorize_net(self):
"""
Cancel this authorization or request
"""
TransactionLog = Pool().get('payment_gateway.transaction.log')
if self.state != 'authorized':
self.raise_user_error('cancel_only_authorized')
        # Initialize authorize.net client
self.gateway.get_authorize_client()
# Try to void the transaction
try:
result = authorize.Transaction.void(self.provider_reference)
except AuthorizeResponseError, exc:
TransactionLog.serialize_and_create(self, exc.full_response)
else:
self.state = 'cancel'
self.save()
TransactionLog.serialize_and_create(self, result)
def get_authorize_net_request_data(self):
"""
Downstream modules can modify this method to send extra data to
authorize.net
Ref: http://vcatalano.github.io/py-authorize/transaction.html
"""
return {
'amount': self.amount
}
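    # Downstream override sketch (hypothetical subclass and field names), e.g.
    # to pass an invoice number along with the amount; the 'order' key follows
    # the py-authorize transaction schema linked above:
    #
    #     def get_authorize_net_request_data(self):
    #         data = super(MyTransaction, self).get_authorize_net_request_data()
    #         data['order'] = {'invoice_number': self.reference}
    #         return data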
def refund_authorize_net(self):
TransactionLog = Pool().get('payment_gateway.transaction.log')
# Initialize authorize.net client
self.gateway.get_authorize_client()
try:
result = authorize.Transaction.refund({
'amount': self.amount,
'last_four': self.last_four_digits,
'transaction_id': self.origin.provider_reference,
})
except AuthorizeResponseError, exc:
self.state = 'failed'
self.save()
TransactionLog.serialize_and_create(self, exc.full_response)
else:
self.state = 'completed'
self.save()
TransactionLog.serialize_and_create(self, result)
self.safe_post()
class AddPaymentProfile:
"""
Add a payment profile
"""
__name__ = 'party.party.payment_profile.add'
def transition_add_authorize_net(self):
"""
        Handle the case where the profile should be added for authorize.net
"""
card_info = self.card_info
# Initialize authorize.net client
card_info.gateway.get_authorize_client()
customer_id = card_info.party._get_authorize_net_customer_id(
card_info.gateway.id
)
        # Create a new customer profile if one does not already exist
if not customer_id:
customer_id = self.create_auth_customer_profile()
        # Now create the new credit card and associate it with the
        # customer created above
credit_card_data = {
'card_number': card_info.number,
'card_code': str(card_info.csc),
'expiration_date': "%s/%s" % (
card_info.expiry_month, card_info.expiry_year
),
'billing': card_info.address.get_authorize_address(card_info.owner)
}
for try_count in range(2):
try:
credit_card = authorize.CreditCard.create(
customer_id, credit_card_data
)
# Validate newly created credit card
authorize.CreditCard.validate(
customer_id, credit_card.payment_id, {
'card_code': credit_card_data['card_code'],
'validationMode': 'testMode' if card_info.gateway.test
else 'liveMode'
}
)
break
            except AuthorizeInvalidError as exc:
self.raise_user_error(unicode(exc))
            except AuthorizeResponseError as exc:
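                # E00039 is authorize.net's "duplicate record" error,
                # raised when an equivalent payment profile already
                # exists on the customer profile.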
if try_count == 0 and 'E00039' in unicode(exc):
# Delete all unused payment profiles on authorize.net
customer_details = authorize.Customer.details(customer_id)
auth_payment_ids = set([
p.payment_id for p in customer_details.profile.payments
])
if card_info.party.payment_profiles:
local_payment_ids = set([
p.provider_reference for p in card_info.party.payment_profiles # noqa
])
ids_to_delete = auth_payment_ids.difference(
local_payment_ids
)
else:
ids_to_delete = auth_payment_ids
if ids_to_delete:
for payment_id in ids_to_delete:
authorize.CreditCard.delete(customer_id, payment_id)
continue
self.raise_user_error(unicode(exc.message))
return self.create_profile(
credit_card.payment_id,
authorize_profile_id=customer_id
)
def create_auth_customer_profile(self):
"""
        Creates a customer profile on authorize.net and returns the
        created profile's ID.
"""
customer_party = self.card_info.party
try:
customer = authorize.Customer.create({
'description': customer_party.name,
'email': customer_party.email,
})
        except AuthorizeInvalidError as exc:
self.raise_user_error(unicode(exc))
return customer.customer_id
class Party:
__name__ = 'party.party'
def _get_authorize_net_customer_id(self, gateway_id):
"""
        Extracts and returns the customer id from the party's payment
        profiles. Returns None if no customer id is found.
        :param gateway_id: The gateway ID with which the customer id is
            associated.
"""
PaymentProfile = Pool().get('party.payment_profile')
payment_profiles = PaymentProfile.search([
('party', '=', self.id),
('authorize_profile_id', '!=', None),
('gateway', '=', gateway_id),
])
if payment_profiles:
return payment_profiles[0].authorize_profile_id
return None
class Address:
__name__ = 'party.address'
authorize_id = fields.Char(
'Authorize.net ID', readonly=True
)
def send_to_authorize(self, profile_id):
"""
        Helper method which creates a new address record on
        authorize.net servers and returns its ID.
        :param profile_id: The profile_id of the customer profile under
            which the address should be created.
"""
Address = Pool().get('party.address')
for try_count in range(2):
try:
address = authorize.Address.create(
profile_id, self.get_authorize_address()
)
break
            except AuthorizeResponseError as exc:
if try_count == 0 and 'E00039' in unicode(exc):
# Delete all addresses on authorize.net
self.delete_authorize_addresses(profile_id)
continue
self.raise_user_error(unicode(exc.message))
            except AuthorizeInvalidError as exc:
self.raise_user_error(unicode(exc))
address_id = address.address_id
Address.write([self], {
'authorize_id': address_id,
})
return address_id
def get_authorize_address(self, name=None):
"""
Returns address as a dictionary to send to authorize.net
        :param name: Name to split into first and last name for the
            address. Defaults to the address name, then the party's name.
"""
name = name or self.name or self.party.name
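        # authorize.net takes separate first and last names: split on the
        # first space, treating single-word names as first name only.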
try:
first_name, last_name = name.split(" ", 1)
except ValueError:
first_name = name
last_name = ""
return {
'first_name': first_name,
'last_name': last_name,
'company': self.party.name,
'address': '\n'.join(filter(None, [self.street, self.streetbis])),
'city': self.city,
'state': self.subdivision and self.subdivision.code,
'zip': self.zip,
'country': self.country and self.country.code,
'phone_number': self.party.phone,
'fax_number': self.party.fax,
}
def delete_authorize_addresses(self, profile_id):
"""
Delete all shipping addresses for customer on authorize.net
"""
Address = Pool().get('party.address')
customer_details = authorize.Customer.details(profile_id)
address_ids = [
a.address_id for a in customer_details.profile.addresses
]
for address_id in address_ids:
authorize.Address.delete(profile_id, address_id)
        # Set authorize_id to None for all party addresses
Address.write(list(self.party.addresses), {
'authorize_id': None,
})
class PaymentProfile:
__name__ = 'party.payment_profile'
authorize_profile_id = fields.Char(
'Authorize.net Profile ID', readonly=True
)
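
    # Note: this field stores the authorize.net CIM customer profile id;
    # the id of the individual payment profile is kept in the profile's
    # ``provider_reference`` (see AddPaymentProfile above).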