gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
import logging
import os
from google.appengine.ext import webapp
from google.appengine.api import users
from google.appengine.ext.webapp import template
import oauth
from stores import check_valid_callback
from utils import initialize_server_request, send_oauth_error
from decorators import oauth_required
from consts import OUT_OF_BAND
# Module-wide root logger; DEBUG level so the "!!!" trace lines emitted by
# the handlers below actually show up in the App Engine logs.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class RequestTokenHandler(webapp.RequestHandler):
"""HTTP request handler with OAuth support."""
def get(self, *args):
logger.warning("!!!START REQUEST!!!")
"""Handler method for OAuth GET requests."""
logger.warning("!!!Req URL: %s"%self.request.url)
logger.warning("!!!Entering REQUEST_TOKEN_URL")
oauth_server, oauth_request = initialize_server_request(self.request)
if oauth_server is None:
send_oauth_error(oauth.OAuthError('Invalid request parameters.'), self.response)
return
else:
logger.warning("!!!OAuth Params: %s"%oauth_request.parameters)
try:
# create a request token
token = oauth_server.fetch_request_token(oauth_request)
# return the token
self.response.set_status(200, 'OK')
self.response.out.write(token.to_string())
except oauth.OAuthError, err:
logger.exception("Error when trying to do a request_token")
send_oauth_error(err, self.response)
return
logger.warning("!!!End request")
return
def post(self, *args):
"""Handler method for OAuth POST requests."""
return self.get()
class AuthorizeHandler(webapp.RequestHandler):
"""HTTP request handler with OAuth support."""
def get(self, *args):
logger.warning("!!!START REQUEST!!!")
"""Handler method for OAuth GET requests."""
logger.warning("!!!Req URL: %s"%self.request.url)
# user authorization
#TODO: put up a screen explaining what this authorization is for before
#approving the request_token, and allowing the user to decide if they
#want to proceed- now it just approves right away. If the user rejects
#the approval , redirect to the callback with an error parameter
logger.warning("!!!Entering AUTHORIZATION_URL")
# get the request token
oauth_server, oauth_request = initialize_server_request(self.request)
if oauth_server is None:
return send_oauth_error(oauth.OAuthError('Invalid request parameters.'), self.response)
else:
logger.warning("!!!OAuth Params: %s"%oauth_request.parameters)
try:
# get the request token
token = oauth_server.fetch_request_token(oauth_request)
except oauth.OAuthError, err:
logger.exception("Failed accessing request token")
return send_oauth_error(err, self.response)
try:
# get the request callback, though there might not be one if this is OAuth 1.0a
callback = oauth_server.get_callback(oauth_request)
# OAuth 1.0a: this parameter should not be present on this version
if token.callback_confirmed:
return send_oauth_error(oauth.OAuthError("Cannot specify oauth_callback at authorization step for 1.0a protocol"), self.response)
if not check_valid_callback(callback):
return send_oauth_error(oauth.OAuthError("Invalid callback URL"), self.response)
except oauth.OAuthError,err:
callback = None
# OAuth 1.0a: use the token's callback if confirmed
if token.callback_confirmed:
callback = token.callback
if callback == OUT_OF_BAND:
callback = None
logger.warning("!!!Callback : %s"%callback)
try:
user = users.get_current_user()
logger.warning("!!!User logged in ")
if user:
if self.request.get('authorize_access'):
if int(self.request.get('authorize_access'))==1:
logger.warning("User has clicked authorize_access")
#check if they want to :s`authorize the token
token = oauth_server.authorize_token(token, user)
# return the token key
args = { 'token': token }
elif int(self.request.get('authorize_access')) == 0:
args = { 'error': _('Access not granted by user.') }
if callback:
if "?" in callback:
url_delimiter = "&"
else:
url_delimiter = "?"
if 'token' in args:
query_args = args['token'].to_string(only_key=True)
else: # access is not authorized i.e. error
query_args = 'error=%s' % args['error']
logger.warning('Redirecting to: %s%s%s' % (callback, url_delimiter, query_args))
self.redirect(('%s%s%s' % (callback, url_delimiter, query_args)))
else:
self.response.set_status(200, 'OK')
self.response.out.write("Successfully authorised : %s"%token.to_string(only_key=True))
else:
logger.warning("User has logged in but not authorized_access yet")
#display the authorize view
path = os.path.join(os.path.dirname(__file__),'templates',
'authorize.html')
self.response.out.write(template.render(path,
{'token':token}))
else:
logger.warning("!!!User not logged in - fwd to login page ")
#handle the fact that this might be a POST request and the
#required oauth_token (and possibly oauth_callback for
# OAuth 1.0 requests) will not be on the request.uri
#Hence we add it to it before redirecting to the login page
request_uri = self.request.uri
if 'oauth_token' not in request_uri and '?' not in request_uri:
request_uri = '%s?%s' % (request_uri,token.to_string(only_key=True))
elif 'oauth_token' not in request_uri and '?' in request_uri:
request_uri = '%s&%s' % (request_uri,token.to_string(only_key=True))
if not token.callback_confirmed and 'oauth_callback' not in request_uri and '?' not in request_uri:
request_uri = '%s?oauth_callback=%s' % (request_uri,callback)
elif not token.callback_confirmed and 'oauth_callback' not in request_uri and '?' in request_uri:
request_uri = '%s&oauth_callback=%s' % (request_uri,callback)
self.redirect(users.create_login_url(request_uri))
except oauth.OAuthError, err:
logger.exception("Error when trying to do an authorization")
send_oauth_error(err, self.response)
logger.warning("!!!End request")
return
def post(self, *args):
"""Handler method for OAuth POST requests."""
return self.get()
class AccessTokenHandler(webapp.RequestHandler):
"""HTTP request handler with OAuth support."""
def get(self, *args):
logger.warning("!!!START REQUEST!!!")
"""Handler method for OAuth GET requests."""
logger.warning("!!!Req URL: %s"%self.request.url)
# access token
logger.warning("!!!Entering ACESS_TOKEN_URL")
oauth_server, oauth_request = initialize_server_request(self.request)
if oauth_server is None:
return send_oauth_error(oauth.OAuthError('Invalid request parameters.'), self.response)
else:
logger.warning("!!!OAuth Params: %s"%oauth_request.parameters)
try:
# create an access token
token = oauth_server.fetch_access_token(oauth_request)
if token == None:
logger.warning("!!! oauth_server.fetch_access_token returning None")
send_oauth_error(oauth.OAuthError("Cannot find corresponding access token."), self.response)
return
# send okay response
self.response.set_status(200, 'OK')
# return the token
self.response.out.write(token.to_string())
except oauth.OAuthError, err:
send_oauth_error(err, self.response)
logger.warning("!!!End request")
return
def post(self, *args):
"""Handler method for OAuth POST requests."""
return self.get()
class AuthorizationView(webapp.RequestHandler):
    """This class provides a basic authorization view that is served up to the
    user upon logging in, verifying if they indeed do want to authorize the
    consumer"""

    def get(self):
        """Render a minimal HTML form asking the user to confirm access."""
        logger.warning("!!!Start TomboyPublicApi request")
        # BUG FIX: the template below contains no %s placeholders, yet the
        # old code applied "% (hostname, hostname, hostname, hostname,
        # hostname)" to it, which raises "TypeError: not all arguments
        # converted during string formatting" on every request.  The
        # hostname was never interpolated, so the formatting (and the
        # now-unused host header lookup) is removed.
        self.response.out.write("""
        <html>
        <form action="" method="post">
        Authorize access to this applications data ?
        <input type="Submit">
        </form>
        </html>
        """)
        logger.warning("!!!End TomboyApi request")
        return
class ProtectedResource(webapp.RequestHandler):
    """Example resource that is only reachable with a valid OAuth access
    token (enforced by the oauth_required decorator)."""

    @oauth_required
    def get(self):
        # Only runs when the decorator accepted the caller's access token.
        self.response.out.write('Protected Resource access!')
        return

    def post(self, *args):
        """Handler method for OAuth POST requests."""
        return self.get()
def application():
    """Build the WSGI application mapping URLs to handlers (used by tests)."""
    routes = [
        ('/request_token', RequestTokenHandler),
        ('/access_token', AccessTokenHandler),
        ('/authorize', AuthorizeHandler),
        ('/protected', ProtectedResource),
    ]
    return webapp.WSGIApplication(routes, debug=True)
| |
# -*- coding: utf-8 -*-
from nose.tools import * # noqa
import mock
import httpretty
from tests.base import OsfTestCase
from tests.factories import AuthUserFactory, ProjectFactory
import urlparse
from framework.auth import authenticate, Auth
from website.addons.mendeley.tests.factories import (
MendeleyAccountFactory,
MendeleyUserSettingsFactory,
MendeleyNodeSettingsFactory
)
from website.util import api_url_for
from website.addons.mendeley.serializer import MendeleySerializer
from website.addons.mendeley import views
from utils import mock_responses
# Base URL of the Mendeley REST API; httpretty registers mocked endpoints
# relative to this root in the citation-list tests below.
API_URL = 'https://api.mendeley.com'
class MockNode(object):
    """Minimal stand-in for a project node that exposes a single addon."""

    # The addon instance returned by get_addon('mendeley'); tests set this.
    addon = None

    @property
    def is_deleted(self):
        """A mock node is never deleted."""
        return False

    @property
    def is_public(self):
        """A mock node is always public."""
        return True

    def get_addon(self, name):
        """Return the attached addon for 'mendeley'; None for anything else."""
        return self.addon if name == 'mendeley' else None
class MockFolder(object):
    """Simple attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **kwargs):
        # items() instead of the Python-2-only iteritems() so this helper
        # also works under Python 3; behavior is identical on Python 2.
        for key, value in kwargs.items():
            setattr(self, key, value)
class MendeleyViewsTestCase(OsfTestCase):
    """View tests for the Mendeley addon against a fake project/node."""

    def setUp(self):
        # Build a user who owns a Mendeley external account, a project
        # created by that user, and a node addon authorized against the
        # account; self.node wraps the addon in a MockNode for widget calls.
        super(MendeleyViewsTestCase, self).setUp()
        self.account = MendeleyAccountFactory()
        self.user = AuthUserFactory(external_accounts=[self.account])
        self.account.display_name = self.user.fullname
        self.account.save()
        self.user_addon = MendeleyUserSettingsFactory(owner=self.user, external_account=self.account)
        self.project = ProjectFactory(creator=self.user)
        self.node_addon = MendeleyNodeSettingsFactory(owner=self.project)
        self.node_addon.set_auth(external_account=self.account, user=self.user)
        #self.user_addon.grant_oauth_access(self.node_addon, self.account, metadata={'lists': 'list'})
        self.node = MockNode()
        self.node.addon = self.node_addon
        # Patch the OAuth client credentials so no real Mendeley app is
        # needed while these tests run.
        self.id_patcher = mock.patch('website.addons.mendeley.model.Mendeley.client_id')
        self.secret_patcher = mock.patch('website.addons.mendeley.model.Mendeley.client_secret')
        self.id_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
        self.secret_patcher.__get__ = mock.Mock(return_value='1234567890asdf')
        self.id_patcher.start()
        self.secret_patcher.start()

    def tearDown(self):
        # Undo the credential patches installed in setUp.
        self.id_patcher.stop()
        self.secret_patcher.stop()

    def test_serialize_settings_authorizer(self):
        #"""dict: a serialized version of user-specific addon settings"""
        # The authorizing user sees full auth/ownership flags and URLs.
        res = self.app.get(
            self.project.api_url_for('mendeley_get_config'),
            auth=self.user.auth,
        )
        result = res.json['result']
        assert_true(result['nodeHasAuth'])
        assert_true(result['userHasAuth'])
        assert_true(result['userIsOwner'])
        assert_equal(result['folder'], {'name': ''})
        assert_equal(result['ownerName'], self.user.fullname)
        assert_true(result['urls']['auth'])
        assert_true(result['urls']['config'])
        assert_true(result['urls']['deauthorize'])
        assert_true(result['urls']['folders'])
        assert_true(result['urls']['importAuth'])
        assert_true(result['urls']['settings'])

    def test_serialize_settings_non_authorizer(self):
        #"""dict: a serialized version of user-specific addon settings"""
        # A contributor without their own auth sees nodeHasAuth but not
        # userHasAuth/userIsOwner.
        non_authorizing_user = AuthUserFactory()
        self.project.add_contributor(non_authorizing_user, save=True)
        res = self.app.get(
            self.project.api_url_for('mendeley_get_config'),
            auth=non_authorizing_user.auth,
        )
        result = res.json['result']
        assert_true(result['nodeHasAuth'])
        assert_false(result['userHasAuth'])
        assert_false(result['userIsOwner'])
        assert_equal(result['folder'], {'name': ''})
        assert_equal(result['ownerName'], self.user.fullname)
        assert_true(result['urls']['auth'])
        assert_true(result['urls']['config'])
        assert_true(result['urls']['deauthorize'])
        assert_true(result['urls']['folders'])
        assert_true(result['urls']['importAuth'])
        assert_true(result['urls']['settings'])

    def test_set_auth(self):
        # Importing the user's account onto the node links user settings
        # and external account to the node addon.
        res = self.app.put_json(
            self.project.api_url_for('mendeley_add_user_auth'),
            {
                'external_account_id': self.account._id,
            },
            auth=self.user.auth,
        )
        assert_equal(
            res.status_code,
            200
        )
        assert_true(res.json['result']['userHasAuth'])
        assert_equal(
            self.node_addon.user_settings,
            self.user_addon
        )
        assert_equal(
            self.node_addon.external_account,
            self.account
        )

    def test_remove_user_auth(self):
        # Deauthorizing clears both user settings and external account.
        self.node_addon.set_auth(self.account, self.user)
        res = self.app.delete_json(
            self.project.api_url_for('mendeley_remove_user_auth'),
            {
                'external_account_id': self.account._id,
            },
            auth=self.user.auth,
        )
        assert_equal(
            res.status_code,
            200
        )
        self.node_addon.reload()
        assert_is_none(self.node_addon.user_settings)
        assert_is_none(self.node_addon.external_account)

    @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')
    def test_set_config_owner(self, mock_metadata):
        mock_metadata.return_value = MockFolder(name='Fake Folder')
        # Settings config updates node settings
        self.node_addon.associated_user_settings = []
        self.node_addon.save()
        res = self.app.put_json(
            self.project.api_url_for('mendeley_set_config'),
            {
                'external_account_id': self.account._id,
                'external_list_id': 'list',
            },
            auth=self.user.auth,
        )
        self.node_addon.reload()
        assert_equal(self.user_addon, self.node_addon.user_settings)
        serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=self.user_addon)
        expected = {
            'result': serializer.serialized_node_settings
        }
        assert_equal(res.json, expected)

    @mock.patch('website.addons.mendeley.model.Mendeley._folder_metadata')
    def test_set_config_not_owner(self, mock_metadata):
        # A non-owning contributor can set config; the serialized response
        # carries no user_settings for them.
        mock_metadata.return_value = MockFolder(name='Fake Folder')
        user = AuthUserFactory()
        user.add_addon('mendeley')
        self.project.add_contributor(user)
        self.project.save()
        res = self.app.put_json(
            self.project.api_url_for('mendeley_set_config'),
            {
                'external_account_id': self.account._id,
                'external_list_id': 'list',
            },
            auth=user.auth,
        )
        self.node_addon.reload()
        assert_equal(self.user_addon, self.node_addon.user_settings)
        serializer = MendeleySerializer(node_settings=self.node_addon, user_settings=None)
        expected = {
            'result': serializer.serialized_node_settings
        }
        assert_equal(res.json, expected)

    def test_mendeley_widget_view_complete(self):
        # JSON: everything a widget needs
        assert_false(self.node_addon.complete)
        assert_equal(self.node_addon.mendeley_list_id, None)
        self.node_addon.set_target_folder('ROOT-ID', 'ROOT', auth=Auth(user=self.user))
        res = views.mendeley_widget(node_addon=self.node_addon,
                                    project=self.project,
                                    node=self.node,
                                    nid=self.node_addon._id,
                                    pid=self.project._id,
                                    auth=self.user.auth)
        assert_true(res['complete'])
        assert_equal(res['list_id'], 'ROOT-ID')

    def test_widget_view_incomplete(self):
        # JSON: tell the widget when it hasn't been configured
        assert_false(self.node_addon.complete)
        assert_equal(self.node_addon.mendeley_list_id, None)
        res = views.mendeley_widget(node_addon=self.node_addon,
                                    project=self.project,
                                    node=self.node,
                                    nid=self.node_addon._id,
                                    pid=self.project._id,
                                    auth=self.user.auth)
        assert_false(res['complete'])
        assert_is_none(res['list_id'])

    @httpretty.activate
    def test_mendeley_citation_list_root(self):
        # Mock the Mendeley folders endpoint and list the root contents.
        httpretty.register_uri(
            httpretty.GET,
            urlparse.urljoin(API_URL, 'folders'),
            body=mock_responses['folders'],
            content_type='application/json'
        )
        res = self.app.get(
            self.project.api_url_for('mendeley_citation_list'),
            auth=self.user.auth
        )
        root = res.json['contents'][0]
        assert_equal(root['kind'], 'folder')
        assert_equal(root['id'], 'ROOT')
        assert_equal(root['parent_list_id'], '__')

    @httpretty.activate
    def test_mendeley_citation_list_non_root(self):
        # Inside a folder both sub-folders and documents are listed.
        httpretty.register_uri(
            httpretty.GET,
            urlparse.urljoin(API_URL, 'folders'),
            body=mock_responses['folders'],
            content_type='application/json'
        )
        httpretty.register_uri(
            httpretty.GET,
            urlparse.urljoin(API_URL, 'documents'),
            body=mock_responses['documents'],
            content_type='application/json'
        )
        res = self.app.get(
            self.project.api_url_for('mendeley_citation_list', mendeley_list_id='ROOT'),
            auth=self.user.auth
        )
        children = res.json['contents']
        assert_equal(len(children), 7)
        assert_equal(children[0]['kind'], 'folder')
        assert_equal(children[1]['kind'], 'file')
        assert_true(children[1].get('csl') is not None)

    @httpretty.activate
    def test_mendeley_citation_list_non_linked_or_child_non_authorizer(self):
        # A contributor without auth may not list folders outside the
        # node's linked folder subtree: expect 403.
        non_authorizing_user = AuthUserFactory()
        self.project.add_contributor(non_authorizing_user, save=True)
        self.node_addon.mendeley_list_id = 'e843da05-8818-47c2-8c37-41eebfc4fe3f'
        self.node_addon.save()
        httpretty.register_uri(
            httpretty.GET,
            urlparse.urljoin(API_URL, 'folders'),
            body=mock_responses['folders'],
            content_type='application/json'
        )
        httpretty.register_uri(
            httpretty.GET,
            urlparse.urljoin(API_URL, 'documents'),
            body=mock_responses['documents'],
            content_type='application/json'
        )
        res = self.app.get(
            self.project.api_url_for('mendeley_citation_list', mendeley_list_id='ROOT'),
            auth=non_authorizing_user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 403)
| |
"""
* NCtoolKit (C) 2007-2017 Luis F.Loureiro, under zlib software license.
* code.google.com/p/nctoolkit
*
 * Toolkit Python build script
"""
import subprocess
import os,sys
from stat import *
import datetime
# g++ optimisation level passed to every compile: '-O0' for debuggable
# builds; switch to '-O3' for release builds.
compileOptimization = '-O0' # '-O3'
def PrepareCompiteToObject(incfolders, srcfile, outfile):
    """Start compiling srcfile into outfile with g++ if the object is stale.

    The object is considered stale when it does not exist or when the
    source file is newer.  Returns the Popen handle of the launched
    compiler, or None when nothing had to be done.
    """
    needs_build = True
    if os.path.exists(outfile):
        src_mtime = os.stat(srcfile)[ST_MTIME]
        obj_mtime = os.stat(outfile)[ST_MTIME]
        needs_build = src_mtime > obj_mtime
    if not needs_build:
        return None
    print("Compiling : " + srcfile)
    command = ['g++', srcfile, '-c', '-msse3', '-mfpmath=sse', '-g', '-ggdb',
               compileOptimization, '-o', outfile]
    if incfolders is not None:
        for include_path in incfolders:
            command.append('-I')
            command.append(include_path)
    return subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def WaitForCompile(process):
    """Wait for a compiler process and report whether it succeeded.

    Prints anything the compiler wrote to stderr (warnings are shown even
    on success).  Returns False when the compiler exited with a non-zero
    status, True otherwise.
    """
    process.wait()
    stderr_msg = process.stderr.read()
    # BUG FIX: the old check was `returncode == 1`, which treated every
    # other non-zero g++ exit status (e.g. internal errors) as success.
    if process.returncode != 0:
        print(stderr_msg)
        return False
    elif stderr_msg:
        print(stderr_msg)
    return True
def CompileToOBJ(incfolders, srcfile, outfile):
    """Compile a single source file and block until g++ finishes."""
    compiler = PrepareCompiteToObject(incfolders, srcfile, outfile)
    return WaitForCompile(compiler)
def LinkFromOBJ(objfolder, objfiles, appfile, libraries):
    """Link the object files in objfolder into appfile with g++ if stale.

    Relinks when appfile is missing, or when any object file is missing
    or newer than appfile.  Returns False when the linker exits with a
    non-zero status, True otherwise (including when nothing had to be
    done).
    """
    link_needed = not os.path.exists(appfile)
    if not link_needed:
        app_mtime = os.stat(appfile)[ST_MTIME]
        for o in objfiles:
            obj_path = objfolder + o
            if not os.path.exists(obj_path) or os.stat(obj_path)[ST_MTIME] > app_mtime:
                link_needed = True
                break
    if not link_needed:
        # BUG FIX: previously the linker result (`res`) could be consulted
        # even when no link was launched; bail out early instead.
        return True
    print("Linking : " + appfile)
    linkerArgs = ['g++']
    for o in objfiles:
        linkerArgs.append(objfolder + o)
    if libraries is not None:
        for lib in libraries:
            linkerArgs.append("-" + lib)
    linkerArgs.append('-o')
    linkerArgs.append(appfile)
    res = subprocess.Popen(linkerArgs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    res.wait()
    stderr_msg = res.stderr.read()
    # BUG FIX: any non-zero exit status is a link failure, not just 1.
    if res.returncode != 0:
        print(stderr_msg)
        return False
    if stderr_msg:
        print(stderr_msg)
    return True
def CompileSource(ROOT_SRC_FOLDER, OUT_FOLDER):
    """Compile every toolkit source file into OUT_FOLDER.

    Iterates the SRC_FILES table below, where each entry is
    [subfolder, [source files], [include subfolders or None]].  Compiles
    are pipelined: while one file compiles, the previous one is waited
    on, so at most two compiler processes overlap.  Returns True when
    every compile succeeded.
    """
    retvalue = True
    # Table of [source subfolder, file list, include-folder list] entries.
    SRC_FILES = [
        #xml
        ["xml",["tinystr.cpp","tinyxml.cpp","tinyxmlerror.cpp","tinyxmlparser.cpp"],None],
        #core objects
        ["core",["nckChronometer.cpp","nckPoint.cpp","nckQueueBuffer.cpp","nckDate.cpp",
        "nckDataIO.cpp","nckImage.cpp","nckThread.cpp","nckException.cpp","nckUtils.cpp","nckWindow.cpp",
        "linux/nckWindow_linux.cpp"],None],
        #bxon
        ["bxon",["bxon.cpp","bxonDataIO.cpp"],["core","math"]],
        #math objects
        ["math",["nckBoundBox.cpp","nckColor4.cpp","nckFrustum.cpp","nckLine.cpp","nckMat44.cpp","nckMathUtils.cpp",
        "nckPlane.cpp","nckQuadtree.cpp","nckQuat.cpp","nckTransform.cpp","nckTriangle.cpp","nckVec2.cpp","nckVec3.cpp","nckVec4.cpp"],None],
        #graphics objects
        ["graphics",["nckGraphics.cpp","nckShaderParser.cpp","nckTextureCache.cpp","gl2/nckExtensions_gl2.cpp","gl2/nckGraphics_gl2.cpp",
        "gl2/nckMesh_gl2.cpp","gl2/nckShader_gl2.cpp","gl2/nckTexture_gl2.cpp","gl2/nckProxy_gl2.cpp"],["math","core"]],
        #audio objects
        ["audio",["nckAudioDevice.cpp","alsa/nckDeviceAlsa.cpp","nckOggStream.cpp"],["core"]],
        #scene objects
        ["scene",["nckAnimation.cpp","nckArmature.cpp","nckCamera.cpp","nckCompound.cpp","nckDatablock.cpp",
        "nckLamp.cpp","nckMaterial.cpp","nckModel.cpp","nckObject.cpp","nckTexture.cpp","nckMarchingCubes.cpp",
        "nckCurve.cpp","nckGeometry.cpp"],["math","core","graphics","bxon"]],
        #gui objects
        ["gui",["nckFont.cpp","nckShapeRenderer.cpp","nckWidget.cpp","nckWidgetRenderer.cpp"],["math","core","graphics"]],
        #io objects
        ["io",["nckSerialPort.cpp"],["core"]],
        #network objects
        ["network",["nckHttpServer.cpp"],["core"]],
        #video objects
        ["video",["nckCameraDevice.cpp","nckColorSpace.cpp"],["core"]],
        #app
        ["apps",["main.cpp","nckDemo.cpp","nckDemo_Selector.cpp","demos/nckDemo_Webcam.cpp","demos/nckDemo_Triangles.cpp",
        "demos/nckDemo_TextureWrapping.cpp","demos/nckDemo_TextureNPT.cpp","demos/nckDemo_TextureFiltering.cpp",
        "demos/nckDemo_TextureCubemap.cpp","demos/nckDemo_Texture3D.cpp","demos/nckDemo_Texture2D.cpp","demos/nckDemo_Shadows.cpp",
        "demos/nckDemo_ShaderProgram.cpp","demos/nckDemo_Serial.cpp","demos/nckDemo_Quadtree.cpp","demos/nckDemo_Particles.cpp",
        "demos/nckDemo_OcclusionQuery.cpp","demos/nckDemo_MultiCamera.cpp","demos/nckDemo_MotionBlur.cpp","demos/nckDemo_Model.cpp",
        "demos/nckDemo_Metaballs.cpp","demos/nckDemo_Material.cpp","demos/nckDemo_HttpStream.cpp","demos/nckDemo_HttpServer.cpp",
        "demos/nckDemo_GUI.cpp","demos/nckDemo_FBO.cpp","demos/nckDemo_Curves.cpp","demos/nckDemo_CubemapFBO.cpp",
        "demos/nckDemo_Compound.cpp","demos/nckDemo_Bumpmap.cpp","demos/nckDemo_AudioOut.cpp","demos/nckDemo_AudioOgg.cpp",
        "demos/nckDemo_AudioFFT.cpp","demos/nckDemo_Armature.cpp","demos/nckDemo_Animation.cpp"],["bxon","xml","math","core","graphics","gui","scene","network","audio","video","bxporter","io"]]
    ]
    previous = None
    for entry in SRC_FILES:
        src_fld = entry[0]+"/";
        inc_fld = None;
        # Expand the entry's include subfolders to absolute source paths.
        if not entry[2] == None:
            for inc_entry in entry[2]:
                if inc_fld == None:
                    inc_fld = []
                inc_fld.append(ROOT_SRC_FOLDER+inc_entry)
        for src in entry[1]:
            # Launch this compile, then wait on the previous one so two
            # compiler processes are kept in flight.
            current = PrepareCompiteToObject(inc_fld,ROOT_SRC_FOLDER+src_fld+src,OUT_FOLDER + os.path.basename(src.split('.')[0] + ".o"))
            if previous!=None:
                if(not WaitForCompile(previous)):
                    retvalue = False
            previous = current
    # Drain the last in-flight compile.
    if previous!=None:
        if(not WaitForCompile(previous)):
            retvalue = False
    return retvalue
def LinkExporter(OBJ_FOLDER, OUTPUT_BINARY, LIBS):
    """Link all toolkit object files in OBJ_FOLDER into OUTPUT_BINARY.

    Returns the result of the link step.  (The old code ignored the
    LinkFromOBJ return value and always reported True, hiding link
    failures from the caller.)
    """
    LIB_OBJ_FILES =[
        # xml
        "tinystr.o","tinyxml.o","tinyxmlerror.o","tinyxmlparser.o",
        #bxon
        "bxon.o","bxonDataIO.o",
        # core objs
        "nckChronometer.o","nckPoint.o","nckQueueBuffer.o","nckDataIO.o","nckDate.o","nckImage.o","nckThread.o","nckUtils.o","nckWindow.o","nckWindow_linux.o","nckException.o",
        # math objs
        "nckBoundBox.o","nckColor4.o","nckFrustum.o","nckLine.o","nckMat44.o","nckMathUtils.o",
        "nckPlane.o","nckQuadtree.o","nckQuat.o","nckTransform.o","nckTriangle.o","nckVec2.o","nckVec3.o","nckVec4.o",
        # graphics objs
        "nckGraphics.o","nckTextureCache.o","nckShaderParser.o","nckExtensions_gl2.o","nckGraphics_gl2.o",
        "nckMesh_gl2.o","nckShader_gl2.o","nckTexture_gl2.o","nckProxy_gl2.o",
        # scene objs
        "nckAnimation.o","nckArmature.o","nckCamera.o","nckCompound.o","nckDatablock.o",
        "nckLamp.o","nckMaterial.o","nckModel.o","nckObject.o","nckTexture.o","nckMarchingCubes.o","nckGeometry.o","nckCurve.o",
        # gui objs
        "nckFont.o","nckShapeRenderer.o","nckWidget.o","nckWidgetRenderer.o",
        # network obs
        "nckHttpServer.o",
        # audio objs
        "nckAudioDevice.o","nckDeviceAlsa.o","nckOggStream.o",
        # video objs
        "nckCameraDevice.o","nckColorSpace.o",
        # IO
        "nckSerialPort.o",
        # main
        "main.o","nckDemo.o","nckDemo_Selector.o","nckDemo_Webcam.o","nckDemo_Triangles.o",
        "nckDemo_TextureWrapping.o","nckDemo_TextureNPT.o","nckDemo_TextureFiltering.o",
        "nckDemo_TextureCubemap.o","nckDemo_Texture3D.o","nckDemo_Texture2D.o","nckDemo_Shadows.o",
        "nckDemo_ShaderProgram.o","nckDemo_Serial.o","nckDemo_Quadtree.o","nckDemo_Particles.o",
        "nckDemo_OcclusionQuery.o","nckDemo_MultiCamera.o","nckDemo_MotionBlur.o","nckDemo_Model.o",
        "nckDemo_Metaballs.o","nckDemo_Material.o","nckDemo_HttpStream.o","nckDemo_HttpServer.o",
        "nckDemo_GUI.o","nckDemo_FBO.o","nckDemo_Curves.o","nckDemo_CubemapFBO.o",
        "nckDemo_Compound.o","nckDemo_Bumpmap.o","nckDemo_AudioOut.o","nckDemo_AudioOgg.o",
        "nckDemo_AudioFFT.o","nckDemo_Armature.o","nckDemo_Animation.o"]
    # BUG FIX: propagate the link result instead of discarding it.
    retvalue = LinkFromOBJ(OBJ_FOLDER, LIB_OBJ_FILES, OUTPUT_BINARY, LIBS)
    return retvalue
# --- Build configuration -------------------------------------------------
source_folder = "../../source/"
output_folder = "obj/"
output_binary = "nctoolkit"
# Linker flags; each entry is prefixed with '-' by LinkFromOBJ.
linker_libs = ["lGLU","lGL","lpng","ljpeg","lz","lX11","lasound","logg","lvorbisfile","lvorbis","lpthread"]
if not os.path.isdir(source_folder):
    print("Source folder not found !")
    # BUG FIX: exit with a non-zero status so callers (make, CI) can see
    # the failure; the old code exited with 0 on this error.
    exit(1)
if not os.path.isdir(output_folder):
    os.mkdir(output_folder)
    print("Output folder created !")
if CompileSource(source_folder, output_folder) and LinkExporter(output_folder, output_binary, linker_libs):
    print("nctoolkit compiled & linked, done !")
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
from heat.engine import translation
class Net(neutron.NeutronResource):
"""A resource for managing Neutron net.
A network is a virtual isolated layer-2 broadcast domain which is typically
reserved to the tenant who created it, unless the network has been
explicitly configured to be shared.
"""
entity = 'network'
PROPERTIES = (
NAME, VALUE_SPECS, ADMIN_STATE_UP, TENANT_ID, SHARED,
DHCP_AGENT_IDS, PORT_SECURITY_ENABLED, QOS_POLICY,
DNS_DOMAIN, TAGS,
) = (
'name', 'value_specs', 'admin_state_up', 'tenant_id', 'shared',
'dhcp_agent_ids', 'port_security_enabled', 'qos_policy',
'dns_domain', 'tags',
)
ATTRIBUTES = (
STATUS, NAME_ATTR, SUBNETS, ADMIN_STATE_UP_ATTR, TENANT_ID_ATTR,
PORT_SECURITY_ENABLED_ATTR, MTU_ATTR, QOS_POLICY_ATTR, L2_ADJACENCY,
SEGMENTS,
) = (
"status", "name", "subnets", "admin_state_up", "tenant_id",
"port_security_enabled", "mtu", 'qos_policy_id', 'l2_adjacency',
'segments',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('A string specifying a symbolic name for the network, which is '
'not required to be unique.'),
update_allowed=True
),
VALUE_SPECS: properties.Schema(
properties.Schema.MAP,
_('Extra parameters to include in the request. Parameters are '
'often specific to installed hardware or extensions.'),
default={},
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('A boolean value specifying the administrative status of the '
'network.'),
default=True,
update_allowed=True
),
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the tenant which will own the network. Only '
'administrative users can set the tenant identifier; this '
'cannot be changed using authorization policies.')
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether this network should be shared across all tenants. '
'Note that the default policy setting restricts usage of this '
'attribute to administrative users only.'),
default=False,
update_allowed=True
),
DHCP_AGENT_IDS: properties.Schema(
properties.Schema.LIST,
_('The IDs of the DHCP agent to schedule the network. Note that '
'the default policy setting in Neutron restricts usage of this '
'property to administrative users only.'),
update_allowed=True
),
PORT_SECURITY_ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('Flag to enable/disable port security on the network. It '
'provides the default value for the attribute of the ports '
'created on this network.'),
update_allowed=True,
support_status=support.SupportStatus(version='5.0.0')
),
QOS_POLICY: properties.Schema(
properties.Schema.STRING,
_('The name or ID of QoS policy to attach to this network.'),
constraints=[
constraints.CustomConstraint('neutron.qos_policy')
],
update_allowed=True,
support_status=support.SupportStatus(version='6.0.0')
),
DNS_DOMAIN: properties.Schema(
properties.Schema.STRING,
_('DNS domain associated with this network.'),
constraints=[
constraints.CustomConstraint('dns_domain')
],
update_allowed=True,
support_status=support.SupportStatus(version='7.0.0')
),
TAGS: properties.Schema(
properties.Schema.LIST,
_('The tags to be added to the network.'),
schema=properties.Schema(properties.Schema.STRING),
update_allowed=True,
support_status=support.SupportStatus(version='9.0.0')
),
}
attributes_schema = {
STATUS: attributes.Schema(
_("The status of the network."),
type=attributes.Schema.STRING
),
NAME_ATTR: attributes.Schema(
_("The name of the network."),
type=attributes.Schema.STRING
),
SUBNETS: attributes.Schema(
_("Subnets of this network."),
type=attributes.Schema.LIST
),
ADMIN_STATE_UP_ATTR: attributes.Schema(
_("The administrative status of the network."),
type=attributes.Schema.STRING
),
TENANT_ID_ATTR: attributes.Schema(
_("The tenant owning this network."),
type=attributes.Schema.STRING
),
PORT_SECURITY_ENABLED_ATTR: attributes.Schema(
_("Port security enabled of the network."),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.BOOLEAN
),
MTU_ATTR: attributes.Schema(
_("The maximum transmission unit size(in bytes) for the network."),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.INTEGER
),
QOS_POLICY_ATTR: attributes.Schema(
_("The QoS policy ID attached to this network."),
type=attributes.Schema.STRING,
support_status=support.SupportStatus(version='6.0.0'),
),
L2_ADJACENCY: attributes.Schema(
_("A boolean value for L2 adjacency, True means that you can "
"expect L2 connectivity throughout the Network."),
type=attributes.Schema.BOOLEAN,
support_status=support.SupportStatus(version='9.0.0'),
),
SEGMENTS: attributes.Schema(
_("The segments of this network."),
type=attributes.Schema.LIST,
support_status=support.SupportStatus(version='11.0.0'),
),
}
def translation_rules(self, properties):
return [translation.TranslationRule(
properties,
translation.TranslationRule.RESOLVE,
[self.QOS_POLICY],
client_plugin=self.client_plugin(),
finder='get_qos_policy_id')]
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
dhcp_agent_ids = props.pop(self.DHCP_AGENT_IDS, None)
qos_policy = props.pop(self.QOS_POLICY, None)
tags = props.pop(self.TAGS, [])
if qos_policy:
props['qos_policy_id'] = qos_policy
net = self.client().create_network({'network': props})['network']
self.resource_id_set(net['id'])
if dhcp_agent_ids:
self._replace_dhcp_agents(dhcp_agent_ids)
if tags:
self.set_tags(tags)
def check_create_complete(self, *args):
attributes = self._show_resource()
self._store_config_default_properties(attributes)
return self.is_built(attributes)
def handle_delete(self):
try:
self.client().delete_network(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.prepare_update_properties(prop_diff)
if self.DHCP_AGENT_IDS in prop_diff:
dhcp_agent_ids = prop_diff.pop(self.DHCP_AGENT_IDS) or []
self._replace_dhcp_agents(dhcp_agent_ids)
if self.QOS_POLICY in prop_diff:
qos_policy = prop_diff.pop(self.QOS_POLICY)
prop_diff[
'qos_policy_id'] = self.client_plugin().get_qos_policy_id(
qos_policy) if qos_policy else None
if self.TAGS in prop_diff:
self.set_tags(prop_diff.pop(self.TAGS))
if prop_diff:
self.client().update_network(self.resource_id,
{'network': prop_diff})
def check_update_complete(self, *args):
attributes = self._show_resource()
return self.is_built(attributes)
def _replace_dhcp_agents(self, dhcp_agent_ids):
ret = self.client().list_dhcp_agent_hosting_networks(
self.resource_id)
old = set([agent['id'] for agent in ret['agents']])
new = set(dhcp_agent_ids)
for dhcp_agent_id in new - old:
try:
self.client().add_network_to_dhcp_agent(
dhcp_agent_id, {'network_id': self.resource_id})
except Exception as ex:
# if 409 is happened, the agent is already associated.
if not self.client_plugin().is_conflict(ex):
raise
for dhcp_agent_id in old - new:
try:
self.client().remove_network_from_dhcp_agent(
dhcp_agent_id, self.resource_id)
except Exception as ex:
# assume 2 patterns about status_code following:
# 404: the network or agent is already gone
# 409: the network isn't scheduled by the dhcp_agent
if not (self.client_plugin().is_conflict(ex) or
self.client_plugin().is_not_found(ex)):
raise
def parse_live_resource_data(self, resource_properties, resource_data):
    """Translate live Neutron network data into resource properties."""
    props = super(Net, self).parse_live_resource_data(
        resource_properties, resource_data)
    # SHARED is not reflected back into the live state.
    props.pop(self.SHARED)
    props[self.QOS_POLICY] = resource_data.get('qos_policy_id')
    try:
        hosting = self.client().list_dhcp_agent_hosting_networks(
            self.resource_id)
        props[self.DHCP_AGENT_IDS] = list(
            {agent['id'] for agent in hosting['agents']})
    except self.client_plugin().exceptions.Forbidden:
        # Listing DHCP agents may require admin rights; just omit the
        # dhcp_agent_ids property if we're not allowed to see them.
        pass
    return props
def _resolve_attribute(self, name):
    """Resolve an attribute value, with special handling for segments."""
    if self.resource_id is None:
        return None
    if name == self.SEGMENTS:
        sdk_net = self.client('openstack').network
        segs = [seg.to_dict()
                for seg in list(sdk_net.segments(network_id=self.resource_id))]
        # Sort segments without name attribute first.
        # See bug: https://bugs.launchpad.net/tripleo/+bug/1894920
        segs.sort(key=lambda s: s['name'] is not None)
        return segs
    return self._show_resource()[name]
def resource_mapping():
    """Map the Heat resource type name to its implementing class."""
    mapping = {'OS::Neutron::Net': Net}
    return mapping
| |
from __future__ import division
import calendar
import copy
import math
import time
import re
import urllib
from django.http import HttpResponse
from django_conneg.decorators import renderer
from django_conneg.http import HttpResponseSeeOther
from django_conneg.views import HTMLView, JSONPView
import rdflib
from humfrey.sparql.views import StoreView
from humfrey.sparql.utils import get_labels
from humfrey.linkeddata.uri import doc_forwards
from humfrey.linkeddata.views import MappingView
from humfrey.utils.namespaces import expand, contract
from humfrey.utils import json
from .forms import SearchForm
from .query import ElasticSearchEndpoint
from .opensearch import OpenSearchView
class ElasticSearchView(HTMLView, JSONPView, MappingView, StoreView):
    """Runs an ElasticSearch query and renders the hits as HTML or JSON."""

    class MissingQuery(Exception):
        # BUG FIX: this class was referenced as self.MissingQuery below but
        # never defined here (only on SearchView), causing AttributeError.
        pass

    def get(self, request):
        query = self.get_query()
        if query is not None:
            results = self.get_results(query)
            self.context.update(results)
        self.finalize_context()
        return self.render()

    @property
    def search_endpoint(self):
        """Lazily built endpoint, optionally scoped to a safe ?type= name."""
        try:
            return self._search_endpoint
        except AttributeError:
            # Only honour 'type' when it looks like a plain index name.
            type_name = (self.request.GET['type']
                         if re.match(r'^[a-z\-\d]+$',
                                     self.request.GET.get('type', ''))
                         else None)
            self._search_endpoint = ElasticSearchEndpoint(self.store, type_name)
            return self._search_endpoint

    def get_query(self):
        # Subclasses override this to supply a query body; None means
        # "no search performed".
        return None

    def get_results(self, query):
        """Run the query and attach a document URL to every hit."""
        results = self.search_endpoint.query(query)
        results['q'] = query
        for hit in results['hits']['hits']:
            # KeyError from a hit without a URI propagates, as before.
            hit['_url'] = doc_forwards(hit['_source']['uri'])[None]
        return results

    @classmethod
    def strip_underscores(cls, value):
        """Recursively rename leading-underscore keys ('_id' -> 'id')."""
        if isinstance(value, dict):
            # Python 2 items() returns a copy, so popping while
            # iterating is safe here.
            for key, subvalue in value.items():
                if key.startswith('_'):
                    value[key[1:]] = value.pop(key)
                cls.strip_underscores(subvalue)
        if isinstance(value, list):
            for subvalue in value:
                cls.strip_underscores(subvalue)

    def finalize_context(self):
        # Hook for subclasses to post-process self.context.
        pass

    @renderer(format="html", mimetypes=('text/html', 'application/xhtml+xml'),
              priority=1, name='HTML')
    def render_html(self, request, context, template_name):
        if 'hits' in context:
            self.strip_underscores(context['hits'])
        return super(ElasticSearchView, self).render_html(
            request, context, template_name)

    @renderer(format="autocomplete", name="Autocomplete JSON")
    def render_autocomplete(self, request, context, template_name):
        """Render hits as the compact JSON used by autocomplete widgets."""
        if not context.get('hits'):
            raise self.MissingQuery()
        context = [{'value': hit['_source']['uri'],
                    'altNames': '\t'.join(
                        l for l in hit['_source'].get('altLabel', [])
                        + hit['_source'].get('hiddenLabel', [])),
                    'label': hit['_source']['label']}
                   for hit in context['hits']['hits']]
        content, mimetype = json.dumps(context), 'application/json'
        if 'callback' in request.GET:
            # JSONP: wrap the payload in the requested callback.
            content = [request.GET['callback'], '(', content, ');']
            mimetype = 'application/javascript'
        return HttpResponse(content, mimetype=mimetype)

    def error(self, request, exception, args, kwargs, status_code):
        if isinstance(exception, self.MissingQuery):
            return self.error_view(request,
                                   {'error': {'status_code': 400,
                                              'message': "Missing 'q' parameter."}},
                                   'elasticsearch/bad_request')
        # BUG FIX: this previously delegated via super(SearchView, self),
        # which raises TypeError because self is not a SearchView instance.
        return super(ElasticSearchView, self).error(
            request, exception, args, kwargs, status_code)
class SearchView(HTMLView, JSONPView, MappingView, OpenSearchView, StoreView):
    """Paginated, facetted free-text search view backed by ElasticSearch."""

    # Number of hits per results page.
    page_size = 10

    # e.g. {'filter.category.uri': ('filter.subcategory.uri',)}
    dependent_parameters = {}

    # Default facet definitions, keyed by facet name.
    facets = {'type': {'terms': {'field': 'type.uri',
                                 'size': 20}}}

    template_name = 'elasticsearch/search'
    default_search_item_template_name = 'elasticsearch/search_item'
    # Optional sequence of type names used to restrict results when the
    # request does not name a type itself.
    default_types = None

    class MissingQuery(Exception):
        """Raised when the mandatory 'q' parameter is absent."""
        pass

    @classmethod
    def strip_underscores(cls, value):
        """Recursively rename leading-underscore keys ('_id' -> 'id')."""
        if isinstance(value, dict):
            # Python 2 items() returns a copy, so popping while
            # iterating is safe here.
            for key, subvalue in value.items():
                if key.startswith('_'):
                    value[key[1:]] = value.pop(key)
                cls.strip_underscores(subvalue)
        if isinstance(value, list):
            for subvalue in value:
                cls.strip_underscores(subvalue)

    @property
    def search_endpoint(self):
        """Lazily constructed endpoint, optionally scoped to a ?type= name."""
        try:
            return self._search_endpoint
        except AttributeError:
            # Only honour 'type' when it looks like a plain index name.
            type_name = self.request.GET['type'] if re.match(r'^[a-z\-\d]+$', self.request.GET.get('type', '')) else None
            self._search_endpoint = ElasticSearchEndpoint(self.store, type_name)
            return self._search_endpoint

    def get(self, request):
        """Validate the search form, run the query, and render results."""
        context = self.context
        form = SearchForm(request.GET or None)
        context.update({'form': form,
                        'base_url': request.build_absolute_uri(),
                        'dependent_parameters': self.dependent_parameters,
                        'default_search_item_template_name': self.default_search_item_template_name})
        if form.is_valid():
            self.context.update(self.get_results(dict((k, request.GET[k]) for k in request.GET),
                                                 form.cleaned_data))
            # If the requested page is past the end, redirect to the
            # last available page instead of showing an empty one.
            if context['page'] > context['page_count']:
                query = copy.copy(request.GET)
                query['page'] = context['page_count']
                query = urllib.urlencode(query)
                return HttpResponseSeeOther('{path}?{query}'.format(path=request.path,
                                                                    query=query))
        context = self.finalize_context(request, context)
        return self.render()

    def finalize_context(self, request, context):
        # Hook for subclasses to post-process the template context.
        return context

    def get_query(self, parameters, cleaned_data, start, page_size):
        """Build the ElasticSearch query body from the request parameters.

        Recognises parameters of the form 'filter.FIELD', 'not.FIELD'
        and 'gt/gte/lt/lte.FIELD', and attaches facet definitions with
        per-facet filters.
        """
        default_operator = parameters.get('default_operator', '').upper()
        if default_operator not in ('AND', 'OR'):
            default_operator = 'AND'
        query = {
            'query': {'query_string': {'query': cleaned_data['q'],
                                       'default_operator': default_operator}},
            'from': start,
            'size': page_size,
            # A blank conjunctive filter. We'll remove this later if necessary.
            'filter': {'and': []},
        }
        # Parse query parameters of the form 'FTYPE.FIELDNAME'.
        filter_fields = set()
        for key, values in self.request.GET.lists():
            if '.' not in key:
                continue
            ftype, field = key.split('.', 1)
            filters = []
            for value in values:
                if not value:
                    continue
                if ftype == 'filter':
                    if value == '-':
                        # 'filter.FIELD=-' selects documents missing FIELD.
                        filter = {'missing': {'field': field}}
                    else:
                        # CURIE values on *.uri fields are expanded to
                        # full URIs.
                        if field.endswith('.uri') and ':' in value:
                            value = expand(value)
                        filter = {'term': {field: value}}
                elif ftype == 'not':
                    if field.endswith('.uri') and ':' in value:
                        value = expand(value)
                    filter = {'not': {'term': {field: value}}}
                elif ftype in ('gt', 'gte', 'lt', 'lte'):
                    if value == 'now':
                        # Current time as a millisecond timestamp.
                        value = int(calendar.timegm(time.gmtime()) * 1000)
                    filter = {'range': {field : {ftype: value}}}
                else:
                    continue
                filters.append(filter)
            if len(filters) == 1:
                query['filter']['and'].append(filters[0])
            elif len(filters) > 1:
                # Multiple values for the same field are disjunctive.
                query['filter']['and'].append({'or': filters})
            else:
                continue
            filter_fields.add(field)
        if self.facets:
            # Copy the facet definitions as we'll be playing with them shortly.
            facets = copy.deepcopy(self.facets)
            # Add facet filters for all active filters except any acting on this
            # particular facet.
            if 'filter' in query:
                for facet in facets.itervalues():
                    for filter in query['filter']['and']:
                        if facet['terms']['field'] not in filter_fields:
                            if 'facet_filter' not in facet:
                                facet['facet_filter'] = {'and': []}
                            facet['facet_filter']['and'].append(filter)
            query['facets'] = facets
        # If default_types set, add a filter to restrict the results.
        if self.default_types and 'type' not in self.request.GET:
            query['filter']['and'].append({'or': [{'type': {'value': t}} for t in self.default_types]})
        # Drop the blank conjunctive filter added above if it stayed empty.
        if not query['filter']['and']:
            del query['filter']['and']
        if not query['filter']:
            del query['filter']
        return query

    def get_results(self, parameters, cleaned_data):
        """Execute the search and annotate hits, facets and pagination."""
        page = cleaned_data.get('page') or 1
        page_size = cleaned_data.get('page_size') or self.page_size
        start = (page - 1) * page_size
        query = self.get_query(parameters, cleaned_data, start, page_size)
        # If there aren't any filters defined, we don't want a filter part of
        # our query.
        if 'filter' in query:
            if 'and' in query['filter'] and not query['filter']['and']:
                del query['filter']['and']
            if not query['filter']:
                del query['filter']
        results = self.search_endpoint.query(query)
        results.update(self.get_pagination(page_size, page, start, results))
        results['q'] = cleaned_data['q']
        facet_labels = set()
        for key in query['facets']:
            # Attach the facet definition and the current filter state so
            # the template can render facet controls.
            meta = results['facets'][key]['meta'] = query['facets'][key]
            filter_value = parameters.get('filter.%s' % query['facets'][key]['terms']['field'])
            results['facets'][key]['filter'] = {'present': filter_value is not None,
                                                'value': filter_value}
            if meta['terms']['field'].endswith('.uri'):
                for term in results['facets'][key]['terms']:
                    facet_labels.add(term['term'])
                    term['value'] = contract(term['term'])
            else:
                for term in results['facets'][key]['terms']:
                    term['value'] = term['term']
        # Resolve human-readable labels for URI-valued facet terms.
        labels = get_labels(map(rdflib.URIRef, facet_labels), endpoint=self.endpoint)
        for key in query['facets']:
            if results['facets'][key]['meta']['terms']['field'].endswith('.uri'):
                for term in results['facets'][key]['terms']:
                    uri = rdflib.URIRef(term['term'])
                    if uri in labels:
                        term['label'] = unicode(labels[uri])
        # Attach a document URL to every hit.
        for hit in results['hits']['hits']:
            try:
                hit['_url'] = doc_forwards(hit['_source']['uri'])[None]
            except KeyError:
                raise
        return results

    def get_pagination(self, page_size, page, start, results):
        """Compute the page-number strip (with None gaps) for the pager."""
        # True division is in effect (from __future__ import division).
        page_count = int(math.ceil(results['hits']['total'] / page_size))
        # Always include the first and last pages, plus a window of five
        # pages either side of the current one.
        pages = set([1, page_count])
        pages.update(p for p in range(page-5, page+6) if 1 <= p <= page_count)
        pages = sorted(pages)
        pages_out = []
        for p in pages:
            # None marks a gap ('...') between non-adjacent page numbers.
            if pages_out and pages_out[-1] != p - 1:
                pages_out.append(None)
            pages_out.append(p)
        return {'page_count': max(1, page_count),
                'start': start + 1,
                'pages': pages_out,
                'page': page,
                'page_size': page_size}

    @renderer(format="html", mimetypes=('text/html', 'application/xhtml+xml'), priority=1, name='HTML')
    def render_html(self, request, context, template_name):
        if 'hits' in context:
            self.strip_underscores(context['hits'])
        return super(SearchView, self).render_html(request, context, template_name)

    @renderer(format="autocomplete", name="Autocomplete JSON")
    def render_autocomplete(self, request, context, template_name):
        """Render hits as the compact JSON used by autocomplete widgets."""
        if not context.get('hits'):
            raise self.MissingQuery()
        context = [{'value': hit['_source']['uri'],
                    'altNames': '\t'.join(l for l in hit['_source'].get('altLabel', []) + hit['_source'].get('hiddenLabel', [])),
                    'label': hit['_source']['label']} for hit in context['hits']['hits']]
        content, mimetype = json.dumps(context), 'application/json'
        if 'callback' in request.GET:
            # JSONP: wrap the payload in the requested callback.
            content, mimetype = [request.GET['callback'], '(', content, ');'], 'application/javascript'
        return HttpResponse(content, mimetype=mimetype)

    def error(self, request, exception, args, kwargs, status_code):
        if isinstance(exception, self.MissingQuery):
            return self.error_view(request,
                                   {'error': {'status_code': 400,
                                              'message': "Missing 'q' parameter."}},
                                   'elasticsearch/bad_request')
        else:
            return super(SearchView, self).error(request, exception, args, kwargs, status_code)
| |
#!/usr/bin/env python
"""Test for client."""
import array
import pdb
import StringIO
import time
import urllib2
from M2Crypto import X509
import logging
from grr.client import actions
from grr.client import client
from grr.client import comms
from grr.lib import aff4
from grr.lib import communicator
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import queues
from grr.lib import rdfvalue
from grr.lib import stats
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.flows.general import ca_enroller
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
from grr.lib.rdfvalues import flows as rdf_flows
# pylint: mode=test
class ServerCommunicatorFake(flow.ServerCommunicator):
  """A fake communicator to initialize the ServerCommunicator."""

  # For tests we bypass loading of the server certificate.
  def _LoadOurCertificate(self):
    # Skip ServerCommunicator's own loading logic and call the base
    # Communicator implementation directly.
    return communicator.Communicator._LoadOurCertificate(self)
class ClientCommsTest(test_lib.GRRBaseTest):
  """Test the communicator."""

  def setUp(self):
    """Set up communicator tests."""
    test_lib.GRRBaseTest.setUp(self)
    self.client_private_key = config_lib.CONFIG["Client.private_key"]
    self.server_serial_number = 0
    self.server_certificate = config_lib.CONFIG["Frontend.certificate"]
    self.server_private_key = config_lib.CONFIG["PrivateKeys.server_key"]
    self.client_communicator = comms.ClientCommunicator(
        private_key=self.client_private_key)
    self.client_communicator.LoadServerCertificate(
        server_certificate=self.server_certificate,
        ca_certificate=config_lib.CONFIG["CA.certificate"])
    self.server_communicator = ServerCommunicatorFake(
        certificate=self.server_certificate,
        private_key=self.server_private_key,
        token=self.token)
    self.last_urlmock_error = None

  def ClientServerCommunicate(self, timestamp=None):
    """Tests the end to end encrypted communicators."""
    message_list = rdf_flows.MessageList()
    for i in range(1, 11):
      message_list.job.Append(
          session_id=rdfvalue.SessionID(base="aff4:/flows",
                                        queue=queues.FLOWS,
                                        flow_name=i),
          name="OMG it's a string")
    result = rdf_flows.ClientCommunication()
    timestamp = self.client_communicator.EncodeMessages(message_list, result,
                                                        timestamp=timestamp)
    # Keep the ciphertext around so replay/compression tests can reuse it.
    self.cipher_text = result.SerializeToString()
    (decoded_messages, source, client_timestamp) = (
        self.server_communicator.DecryptMessage(self.cipher_text))
    self.assertEqual(source, self.client_communicator.common_name)
    self.assertEqual(client_timestamp, timestamp)
    self.assertEqual(len(decoded_messages), 10)
    for i in range(1, 11):
      self.assertEqual(decoded_messages[i-1].session_id,
                       rdfvalue.SessionID(base="aff4:/flows",
                                          queue=queues.FLOWS,
                                          flow_name=i))
    return decoded_messages

  def testCommunications(self):
    """Test that messages from unknown clients are tagged unauthenticated."""
    decoded_messages = self.ClientServerCommunicate()
    for i in range(len(decoded_messages)):
      self.assertEqual(decoded_messages[i].auth_state,
                       rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED)

  def MakeClientAFF4Record(self):
    """Make a client in the data store."""
    cert = self.ClientCertFromPrivateKey(self.client_private_key)
    client_cert = rdf_crypto.RDFX509Cert(cert.as_pem())
    new_client = aff4.FACTORY.Create(client_cert.common_name, "VFSGRRClient",
                                     token=self.token)
    new_client.Set(new_client.Schema.CERT, client_cert)
    new_client.Close()

  def testKnownClient(self):
    """Test that messages from known clients are authenticated."""
    self.MakeClientAFF4Record()
    # Now the server should know about it
    decoded_messages = self.ClientServerCommunicate()
    for i in range(len(decoded_messages)):
      self.assertEqual(decoded_messages[i].auth_state,
                       rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)

  def testServerReplayAttack(self):
    """Test that replaying encrypted messages to the server invalidates them."""
    self.MakeClientAFF4Record()
    # First send some messages to the server
    decoded_messages = self.ClientServerCommunicate()
    self.assertEqual(decoded_messages[0].auth_state,
                     rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
    # Now replay the last message to the server
    (decoded_messages, _, _) = self.server_communicator.DecryptMessage(
        self.cipher_text)
    # Messages should now be tagged as desynced
    self.assertEqual(decoded_messages[0].auth_state,
                     rdf_flows.GrrMessage.AuthorizationState.DESYNCHRONIZED)

  def testCompression(self):
    """Tests that the compression works."""
    config_lib.CONFIG.Set("Network.compression", "UNCOMPRESSED")
    self.testCommunications()
    uncompressed_len = len(self.cipher_text)
    # If the client compresses, the server should still be able to
    # parse it:
    config_lib.CONFIG.Set("Network.compression", "ZCOMPRESS")
    self.testCommunications()
    compressed_len = len(self.cipher_text)
    self.assert_(compressed_len < uncompressed_len)
    # If we chose a crazy compression scheme, the client should not
    # compress.
    config_lib.CONFIG.Set("Network.compression", "SOMECRAZYCOMPRESSION")
    self.testCommunications()
    compressed_len = len(self.cipher_text)
    self.assertEqual(compressed_len, uncompressed_len)

  def testX509Verify(self):
    """X509 Verify can have several failure paths."""
    # This is a successful verify.
    with utils.Stubber(X509.X509, "verify", lambda self, pkey=None: 1):
      self.client_communicator.LoadServerCertificate(
          self.server_certificate, config_lib.CONFIG["CA.certificate"])
    # Mock the verify function to simulate certificate failures.
    # NOTE(review): these direct assignments are never restored, so
    # X509.X509.verify stays patched after this test - confirm intended.
    X509.X509.verify = lambda self, pkey=None: 0
    self.assertRaises(
        IOError, self.client_communicator.LoadServerCertificate,
        self.server_certificate, config_lib.CONFIG["CA.certificate"])
    # Verification can also fail with a -1 error.
    X509.X509.verify = lambda self, pkey=None: -1
    self.assertRaises(
        IOError, self.client_communicator.LoadServerCertificate,
        self.server_certificate, config_lib.CONFIG["CA.certificate"])

  def testErrorDetection(self):
    """Tests the end to end encrypted communicators."""
    # Install the client - now we can verify its signed messages
    self.MakeClientAFF4Record()
    # Make something to send
    message_list = rdf_flows.MessageList()
    for i in range(0, 10):
      message_list.job.Append(session_id=str(i))
    result = rdf_flows.ClientCommunication()
    self.client_communicator.EncodeMessages(message_list, result)
    cipher_text = result.SerializeToString()
    # Depending on this modification several things may happen:
    # 1) The padding may not match which will cause a decryption exception.
    # 2) The protobuf may fail to decode causing a decoding exception.
    # 3) The modification may affect the signature resulting in UNAUTHENTICATED
    #    messages.
    # 4) The modification may have no effect on the data at all.
    for x in range(0, len(cipher_text), 50):
      # Futz with the cipher text (Make sure it's really changed)
      mod_cipher_text = (cipher_text[:x] +
                         chr((ord(cipher_text[x]) % 250) + 1) +
                         cipher_text[x + 1:])
      try:
        decoded, client_id, _ = self.server_communicator.DecryptMessage(
            mod_cipher_text)
        for i, message in enumerate(decoded):
          # If the message is actually authenticated it must not be changed!
          if message.auth_state == message.AuthorizationState.AUTHENTICATED:
            self.assertEqual(message.source, client_id)
            # These fields are set by the decoder and are not present in the
            # original message - so we clear them before comparison.
            message.auth_state = None
            message.source = None
            self.assertRDFValueEqual(message, message_list.job[i])
          else:
            logging.debug("Message %s: Authstate: %s", i, message.auth_state)
      except communicator.DecodingError as e:
        logging.debug("Detected alteration at %s: %s", x, e)

  def testEnrollingCommunicator(self):
    """Test that the ClientCommunicator generates good keys."""
    self.client_communicator = comms.ClientCommunicator(
        certificate="")
    self.client_communicator.LoadServerCertificate(
        self.server_certificate, config_lib.CONFIG["CA.certificate"])
    req = X509.load_request_string(self.client_communicator.GetCSR())
    # Verify that the CN is of the correct form
    public_key = req.get_pubkey().get_rsa().pub()[1]
    cn = rdf_client.ClientURN.FromPublicKey(public_key)
    self.assertEqual(cn, req.get_subject().CN)
class HTTPClientTests(test_lib.GRRBaseTest):
  """Test the http communicator."""

  def setUp(self):
    """Set up communicator tests."""
    super(HTTPClientTests, self).setUp()
    self.certificate = self.ClientCertFromPrivateKey(
        config_lib.CONFIG["Client.private_key"]).as_pem()
    self.server_serial_number = 0
    self.server_private_key = config_lib.CONFIG["PrivateKeys.server_key"]
    self.server_certificate = config_lib.CONFIG["Frontend.certificate"]
    self.client_cn = rdf_crypto.RDFX509Cert(self.certificate).common_name
    # Make a new client
    self.CreateNewClientObject()
    # The housekeeper threads of the time based caches also call time.time and
    # interfere with some tests so we disable them here.
    utils.InterruptableThread.exit = True
    # The same also applies to the StatsCollector thread.
    stats.StatsCollector.exit = True
    # Make a client mock
    self.client = aff4.FACTORY.Create(self.client_cn, "VFSGRRClient", mode="rw",
                                      token=self.token)
    self.client.Set(self.client.Schema.CERT(self.certificate))
    self.client.Flush()
    # Stop the client from actually processing anything
    config_lib.CONFIG.Set("Client.max_out_queue", 0)
    # And cache it in the server
    self.CreateNewServerCommunicator()
    # Route all urllib2 traffic through the in-process server mock;
    # restored in tearDown.
    self.urlopen = urllib2.urlopen
    urllib2.urlopen = self.UrlMock
    self.messages = []
    ca_enroller.enrolment_cache.Flush()
    # Response to send back to clients.
    self.server_response = dict(session_id="aff4:/W:session", name="Echo",
                                response_id=2)
def CreateNewServerCommunicator(self):
  """Create a fresh server communicator that already knows the client."""
  self.server_communicator = ServerCommunicatorFake(
      certificate=self.server_certificate,
      private_key=self.server_private_key,
      token=self.token)
  self.server_communicator.client_cache.Put(
      self.client_cn, self.client)

def tearDown(self):
  # Restore the real urllib2.urlopen patched out in setUp.
  urllib2.urlopen = self.urlopen
  super(HTTPClientTests, self).tearDown()

def CreateNewClientObject(self):
  """Create a fresh client communicator with preloaded server certs."""
  self.client_communicator = comms.GRRHTTPClient(
      ca_cert=config_lib.CONFIG["CA.certificate"],
      worker=comms.GRRClientWorker)
  # Disable stats collection for tests.
  self.client_communicator.client_worker.last_stats_sent_time = (
      time.time() + 3600)
  # Build a client context with preloaded server certificates
  self.client_communicator.communicator.LoadServerCertificate(
      self.server_certificate, config_lib.CONFIG["CA.certificate"])
def UrlMock(self, req, num_messages=10, **kwargs):
  """A mock for url handler processing from the server's POV."""
  # Serving the server certificate is a plain file fetch.
  if "server.pem" in req.get_full_url():
    return StringIO.StringIO(config_lib.CONFIG["Frontend.certificate"])
  _ = kwargs
  try:
    self.client_communication = rdf_flows.ClientCommunication(req.data)
    # Decrypt incoming messages
    self.messages, source, ts = self.server_communicator.DecodeMessages(
        self.client_communication)
    # Make sure the messages are correct
    self.assertEqual(source, self.client_cn)
    for i, message in enumerate(self.messages):
      # Do not check any status messages.
      if message.request_id:
        self.assertEqual(message.response_id, i)
        self.assertEqual(message.request_id, 1)
        self.assertEqual(message.session_id, "aff4:/W:session")
    # Now prepare a response
    response_comms = rdf_flows.ClientCommunication()
    message_list = rdf_flows.MessageList()
    for i in range(0, num_messages):
      message_list.job.Append(request_id=i, **self.server_response)
    # Preserve the timestamp as a nonce
    self.server_communicator.EncodeMessages(
        message_list, response_comms, destination=source,
        timestamp=ts, api_version=self.client_communication.api_version)
    return StringIO.StringIO(response_comms.SerializeToString())
  except communicator.UnknownClientCert:
    # 406 tells the client it needs to enroll.
    raise urllib2.HTTPError(url=None, code=406, msg=None, hdrs=None, fp=None)
  except Exception as e:
    # Record the failure so tests can assert on it, then report a 500.
    logging.info("Exception in mock urllib2.Open: %s.", e)
    self.last_urlmock_error = e
    if flags.FLAGS.debug:
      pdb.post_mortem()
    raise urllib2.HTTPError(url=None, code=500, msg=None, hdrs=None, fp=None)
def CheckClientQueue(self):
  """Checks that the client context received all server messages."""
  # Check the incoming messages
  self.assertEqual(self.client_communicator.client_worker.InQueueSize(), 10)
  for i, message in enumerate(
      self.client_communicator.client_worker._in_queue):
    # This is the common name embedded in the certificate.
    self.assertEqual(message.source, "aff4:/GRR Test Server")
    self.assertEqual(message.response_id, 2)
    self.assertEqual(message.request_id, i)
    self.assertEqual(message.session_id, "aff4:/W:session")
    self.assertEqual(message.auth_state,
                     rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
  # Clear the queue
  self.client_communicator.client_worker._in_queue = []

def SendToServer(self):
  """Schedule some packets from client to server."""
  # Generate some client traffic
  for i in range(0, 10):
    self.client_communicator.client_worker.SendReply(
        rdf_flows.GrrStatus(),
        session_id=rdfvalue.SessionID("W:session"),
        response_id=i, request_id=1)
def testInitialEnrollment(self):
  """If the client has no certificate initially it should enroll."""
  # Clear the certificate so we can generate a new one.
  config_lib.CONFIG.Set("Client.private_key", "")
  self.CreateNewClientObject()
  # Client should get a new Common Name.
  self.assertNotEqual(self.client_cn,
                      self.client_communicator.communicator.common_name)
  self.client_cn = self.client_communicator.communicator.common_name
  # Now communicate with the server.
  status = self.client_communicator.RunOnce()
  # The server does not know this client yet, so it answers 406.
  self.assertEqual(status.code, 406)
  # The client should now send an enrollment request.
  status = self.client_communicator.RunOnce()
  # Client should generate enrollment message by itself.
  self.assertEqual(len(self.messages), 1)
  self.assertEqual(self.messages[0].session_id,
                   ca_enroller.Enroler.well_known_session_id)

def testEnrollment(self):
  """Test the http response to unknown clients."""
  # We start off with the server not knowing about the client at all.
  self.server_communicator.client_cache.Flush()
  # Assume we do not know the client yet by clearing its certificate.
  self.client = aff4.FACTORY.Create(self.client_cn, "VFSGRRClient", mode="rw",
                                    token=self.token)
  self.client.DeleteAttribute(self.client.Schema.CERT)
  self.client.Flush()
  # Now communicate with the server.
  self.SendToServer()
  status = self.client_communicator.RunOnce()
  # We expect to receive a 406 and all client messages will be tagged as
  # UNAUTHENTICATED.
  self.assertEqual(status.code, 406)
  self.assertEqual(len(self.messages), 10)
  self.assertEqual(self.messages[0].auth_state,
                   rdf_flows.GrrMessage.AuthorizationState.UNAUTHENTICATED)
  # The next request should be an enrolling request.
  status = self.client_communicator.RunOnce()
  self.assertEqual(len(self.messages), 11)
  self.assertEqual(self.messages[-1].session_id,
                   ca_enroller.Enroler.well_known_session_id)
  # Now we manually run the enroll well known flow with the enrollment
  # request. This will start a new flow for enrolling the client, sign the
  # cert and add it to the data store.
  flow_obj = aff4.Enroler(aff4.Enroler.well_known_session_id, mode="rw",
                          token=self.token)
  flow_obj.ProcessMessage(self.messages[-1])
  # The next client communication should be enrolled now.
  status = self.client_communicator.RunOnce()
  self.assertEqual(status.code, 200)
  # There should be a cert for the client right now.
  self.client = aff4.FACTORY.Create(self.client_cn, "VFSGRRClient", mode="rw",
                                    token=self.token)
  self.assertTrue(self.client.Get(self.client.Schema.CERT))
  # Now communicate with the server once again.
  self.SendToServer()
  status = self.client_communicator.RunOnce()
  self.assertEqual(status.code, 200)
def testReboots(self):
  """Test the http communication with reboots."""
  # Now we add the new client record to the server cache
  self.SendToServer()
  self.client_communicator.RunOnce()
  self.CheckClientQueue()
  # Simulate the client rebooted
  self.CreateNewClientObject()
  self.SendToServer()
  self.client_communicator.RunOnce()
  self.CheckClientQueue()
  # Simulate the server rebooting
  self.CreateNewServerCommunicator()
  self.server_communicator.client_cache.Put(
      self.client_cn, self.client)
  self.SendToServer()
  self.client_communicator.RunOnce()
  self.CheckClientQueue()

def _CheckFastPoll(self, require_fastpoll, expected_sleeptime):
  """Run one poll cycle and verify the resulting sleep interval."""
  sleeptime = []
  def RecordSleep(_, interval, **unused_kwargs):
    # Capture the interval the client would have slept for.
    sleeptime.append(interval)
  self.server_response = dict(session_id="aff4:/W:session", name="Echo",
                              response_id=2, priority="LOW_PRIORITY",
                              require_fastpoll=require_fastpoll)
  # Make sure we don't have any output messages that might override the
  # fastpoll setting from the input messages we send
  self.assertEqual(self.client_communicator.client_worker.OutQueueSize(), 0)
  status = self.client_communicator.RunOnce()
  self.assertEqual(status.received_count, 10)
  self.assertEqual(status.require_fastpoll, require_fastpoll)
  with utils.Stubber(comms.GRRHTTPClient, "Sleep", RecordSleep):
    self.client_communicator.Wait(status)
  self.assertEqual(len(sleeptime), 1)
  self.assertEqual(sleeptime[0], expected_sleeptime)
  self.CheckClientQueue()
def testNoFastPoll(self):
  """Test that fast poll False is respected on input messages.

  Also make sure we wait the correct amount of time before next poll.
  """
  self._CheckFastPoll(False, config_lib.CONFIG["Client.poll_max"])

def testFastPoll(self):
  """Test that fast poll True is respected on input messages.

  Also make sure we wait the correct amount of time before next poll.
  """
  self._CheckFastPoll(True, config_lib.CONFIG["Client.poll_min"])

def testCachedRSAOperations(self):
  """Make sure that expensive RSA operations are cached."""
  # First time fill the cache.
  self.SendToServer()
  self.client_communicator.RunOnce()
  self.CheckClientQueue()
  metric_value = stats.STATS.GetMetricValue("grr_rsa_operations")
  self.assert_(metric_value > 0)
  for _ in range(100):
    self.SendToServer()
    self.client_communicator.RunOnce()
    self.CheckClientQueue()
  # There should not have been any expensive operations any more
  self.assertEqual(stats.STATS.GetMetricValue("grr_rsa_operations"),
                   metric_value)
def testCorruption(self):
  """Simulate corruption of the http payload."""
  self.corruptor_field = None
  def Corruptor(req, **_):
    """Futz with some of the fields."""
    self.client_communication = rdf_flows.ClientCommunication(req.data)
    if self.corruptor_field:
      field_data = getattr(self.client_communication, self.corruptor_field)
      modified_data = array.array("c", field_data)
      # Flip one byte in the middle of the field.
      offset = len(field_data) / 2
      modified_data[offset] = chr((ord(field_data[offset]) % 250) + 1)
      setattr(self.client_communication, self.corruptor_field,
              str(modified_data))
      # Make sure we actually changed the data.
      self.assertNotEqual(field_data, modified_data)
    req.data = self.client_communication.SerializeToString()
    return self.UrlMock(req)
  with utils.Stubber(urllib2, "urlopen", Corruptor):
    # No corruption yet - the round trip should succeed.
    self.SendToServer()
    status = self.client_communicator.RunOnce()
    self.assertEqual(status.code, 200)
    for field in ["packet_iv", "encrypted"]:
      # Corrupting each field should result in HMAC verification errors.
      self.corruptor_field = field
      self.SendToServer()
      status = self.client_communicator.RunOnce()
      self.assertEqual(status.code, 500)
      self.assertTrue(
          "HMAC verification failed" in str(self.last_urlmock_error))
    # Corruption of these fields will likely result in RSA errors, since we do
    # the RSA operations before the HMAC verification (in order to recover the
    # hmac key):
    for field in ["encrypted_cipher", "encrypted_cipher_metadata"]:
      # Corrupting each field should result in HMAC verification errors.
      self.corruptor_field = field
      self.SendToServer()
      status = self.client_communicator.RunOnce()
      self.assertEqual(status.code, 500)
def testClientRetransmission(self):
  """Test that client retransmits failed messages."""
  fail = True
  num_messages = 10
  def FlakyServer(req):
    # Fail with a 500 until the test flips `fail` off.
    if not fail:
      return self.UrlMock(req, num_messages=num_messages)
    raise urllib2.HTTPError(url=None, code=500, msg=None, hdrs=None, fp=None)
  with utils.Stubber(urllib2, "urlopen", FlakyServer):
    self.SendToServer()
    status = self.client_communicator.RunOnce()
    self.assertEqual(status.code, 500)
    # Server should not receive anything.
    self.assertEqual(len(self.messages), 0)
    # Try to send these messages again.
    fail = False
    self.assertEqual(self.client_communicator.client_worker.InQueueSize(), 0)
    status = self.client_communicator.RunOnce()
    self.assertEqual(status.code, 200)
    # We have received 10 client messages.
    self.assertEqual(self.client_communicator.client_worker.InQueueSize(), 10)
    self.CheckClientQueue()
    # But we don't send anything to the server on the first successful
    # connection.
    self.assertEqual(len(self.messages), 0)
    # There are no more messages coming in from the server.
    num_messages = 0
    status = self.client_communicator.RunOnce()
    self.assertEqual(status.code, 200)
    self.assertEqual(self.client_communicator.client_worker.InQueueSize(), 0)
    # Server should have received 10 messages this time.
    self.assertEqual(len(self.messages), 10)
  def testClientStatsCollection(self):
    """Tests that the client stats are collected automatically."""
    now = 1000000
    # Pretend we have already sent stats.
    self.client_communicator.client_worker.last_stats_sent_time = (
        rdfvalue.RDFDatetime().FromSecondsFromEpoch(now))
    with test_lib.FakeTime(now):
      self.client_communicator.client_worker.CheckStats()
    runs = []
    action_cls = actions.ActionPlugin.classes.get("GetClientStatsAuto")
    # Stub out the stats action; every collection appends to `runs`.
    with utils.Stubber(action_cls, "Run", lambda cls, _: runs.append(1)):
      # No stats collection after 10 minutes.
      with test_lib.FakeTime(now + 600):
        self.client_communicator.client_worker.CheckStats()
        self.assertEqual(len(runs), 0)
      # Let one hour pass.
      with test_lib.FakeTime(now + 3600):
        self.client_communicator.client_worker.CheckStats()
        # This time the client should collect stats.
        self.assertEqual(len(runs), 1)
      # Let one hour and ten minutes pass.
      with test_lib.FakeTime(now + 3600 + 600):
        self.client_communicator.client_worker.CheckStats()
        # Again, there should be no stats collection, as last collection
        # happened less than an hour ago.
        self.assertEqual(len(runs), 1)
  def testClientStatsCollectionHappensEveryMinuteWhenClientIsBusy(self):
    """Tests that client stats are collected more often when client is busy."""
    now = 1000000
    # Pretend we have already sent stats.
    self.client_communicator.client_worker.last_stats_sent_time = (
        rdfvalue.RDFDatetime().FromSecondsFromEpoch(now))
    # Mark the worker busy so the shorter (one-minute) interval applies.
    self.client_communicator.client_worker._is_active = True
    with test_lib.FakeTime(now):
      self.client_communicator.client_worker.CheckStats()
    runs = []
    action_cls = actions.ActionPlugin.classes.get("GetClientStatsAuto")
    # Stub out the stats action; every collection appends to `runs`.
    with utils.Stubber(action_cls, "Run", lambda cls, _: runs.append(1)):
      # No stats collection after 30 seconds.
      with test_lib.FakeTime(now + 30):
        self.client_communicator.client_worker.CheckStats()
        self.assertEqual(len(runs), 0)
      # Let 61 seconds pass.
      with test_lib.FakeTime(now + 61):
        self.client_communicator.client_worker.CheckStats()
        # This time the client should collect stats.
        self.assertEqual(len(runs), 1)
      # No stats collection within one minute from the last time.
      with test_lib.FakeTime(now + 61 + 59):
        self.client_communicator.client_worker.CheckStats()
        self.assertEqual(len(runs), 1)
      # Stats collection happens as more than one minute has passed since the
      # last one.
      with test_lib.FakeTime(now + 61 + 61):
        self.client_communicator.client_worker.CheckStats()
        self.assertEqual(len(runs), 2)
  def testClientStatsCollectionAlwaysHappensAfterHandleMessage(self):
    """Tests that client stats are collected after a message was handled."""
    now = 1000000
    # Pretend we have already sent stats.
    self.client_communicator.client_worker.last_stats_sent_time = (
        rdfvalue.RDFDatetime().FromSecondsFromEpoch(now))
    with test_lib.FakeTime(now):
      self.client_communicator.client_worker.CheckStats()
    runs = []
    action_cls = actions.ActionPlugin.classes.get("GetClientStatsAuto")
    # Stub out the stats action; every collection appends to `runs`.
    with utils.Stubber(action_cls, "Run", lambda cls, _: runs.append(1)):
      # No stats collection after 30 seconds.
      with test_lib.FakeTime(now + 30):
        self.client_communicator.client_worker.CheckStats()
        self.assertEqual(len(runs), 0)
      # Handling a message makes the worker "busy" for stats purposes.
      self.client_communicator.client_worker.HandleMessage(
          rdf_flows.GrrMessage())
      # HandleMessage was called, but one minute hasn't passed, so
      # stats should not be sent.
      with test_lib.FakeTime(now + 59):
        self.client_communicator.client_worker.CheckStats()
        self.assertEqual(len(runs), 0)
      # HandleMessage was called more than one minute ago, so stats
      # should be sent.
      with test_lib.FakeTime(now + 61):
        self.client_communicator.client_worker.CheckStats()
        self.assertEqual(len(runs), 1)
  def RaiseError(self, request, timeout=0):
    # Stand-in for urllib2.urlopen that simulates an unreachable server.
    raise urllib2.URLError("Not a real connection.")
  def testClientConnectionErrors(self):
    """Tests that the client gives up after too many connection errors."""
    client_obj = client.GRRClient()
    # Make the connection unavailable and skip the retry interval.
    with utils.MultiStubber((urllib2, "urlopen", self.RaiseError),
                            (time, "sleep", lambda s: None)):
      # Simulate a client run but keep control.
      generator = client_obj.client.Run()
      for _ in range(config_lib.CONFIG["Client.connection_error_limit"]):
        generator.next()
      # One too many connection errors, this should raise.
      self.assertRaises(RuntimeError, generator.next)
def main(argv):
  # Delegate to the shared GRR test entry point.
  test_lib.main(argv)
# Script entry point: let the flags library parse argv and invoke main().
if __name__ == "__main__":
  flags.StartMain(main)
| |
"""Models for the aps_production app."""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import timedelta
from django.utils.translation import ugettext_lazy as _
class Error(models.Model):
    """
    An actual error that occurred during a production run.

    :order_run: on which order run this error occurred
    :error_bin: which error bin this belongs to
    :quantity: how often this error occurred during the run
    :comment: additional notes
    """
    order_run = models.ForeignKey(
        'aps_production.OrderRun',
        verbose_name=_('order run'),
        related_name='errors',
    )
    error_bin = models.ForeignKey(
        'aps_production.ErrorBin',
        verbose_name=_('error bin'),
        related_name='errors',
    )
    quantity = models.PositiveIntegerField(
        verbose_name=_('quantity'),
    )
    comment = models.TextField(
        verbose_name=_('comment'),
        blank=True,
    )
class ErrorBin(models.Model):
    """
    Master data containing all possible errors.

    :technology: the technology the error is related to
    :error_code: an identifier for this error
    :description: a further description about this error
    :picture: optional image stored under ``error_bin_pictures``
        (exact purpose unconfirmed -- TODO document)
    """
    technology = models.ForeignKey(
        'aps_bom.Technology',
        verbose_name=_('technology'),
        related_name='error_bins',
    )
    error_code = models.CharField(
        verbose_name=_('error code'),
        max_length=64,
    )
    description = models.TextField(
        verbose_name=_('description'),
        blank=True,
    )
    picture = models.ImageField(
        verbose_name=_('picture'),
        blank=True, null=True,
        upload_to='error_bin_pictures',
    )
    class Meta:
        ordering = ('error_code', )
@python_2_unicode_compatible
class Order(models.Model):
    """
    An order placed by a customer.

    :order_number: unique string identifier for this order
    :company: the company, that placed the order
    :date_created: the date, this order was created
    :customer_po_number: the customer's PO number
        (presumably "purchase order" -- TODO confirm)
    :customer_po_date: the date of the customer's PO -- TODO confirm
    """
    order_number = models.CharField(
        verbose_name=_('order number'),
        max_length=64,
        unique=True,
    )
    company = models.ForeignKey(
        'aps_bom.Company',
        verbose_name=_('company'),
        related_name='orders',
    )
    date_created = models.DateTimeField(
        verbose_name=_('date created'),
        auto_now_add=True,
    )
    customer_po_number = models.CharField(
        verbose_name=_('customer po number'),
        max_length=30,
    )
    customer_po_date = models.DateTimeField(
        verbose_name=_('customer po date'),
    )
    def __str__(self):
        return self.order_number
    class Meta:
        ordering = ('order_number', )
class OrderLine(models.Model):
    """
    Represents one line of an ``Order``.

    :order: The order this line belongs to.
    :line_no: the line number as entered by the planner
    :product: the product, this line is about
    :quantity_ordered: how many of this product, the customer orders
    :date_requested: when the customer requested the product

    computed:
    :date_shipped: When *all* items are shipped
    :date_delivered: The date of the last shipment plus the days it needs to
      ship to the customer
    """
    order = models.ForeignKey(
        Order,
        verbose_name=_('order'),
        related_name='order_lines',
    )
    line_no = models.CharField(
        verbose_name=_('line no'),
        max_length=4,
    )
    product = models.ForeignKey(
        'aps_bom.Product',
        verbose_name=_('product'),
        related_name='order_lines',
    )
    quantity_ordered = models.PositiveIntegerField(
        verbose_name=_('quantity ordered'),
    )
    date_requested = models.DateTimeField(
        verbose_name=_('date requested'),
    )

    @property
    def date_shipped(self):
        """
        Date of the last shipment, or ``None`` while not fully shipped.

        NOTE: this runs several queries on every access.
        """
        shipments = Shipment.objects.filter(
            order_run__in=self.order_runs.all(),
            # order_run__is_open=False,
        )
        # Nothing shipped yet, or at least one run still open -> not shipped.
        if not shipments or shipments.filter(order_run__is_open=True).exists():
            return None
        # if the sum of all shipped items for this line is equal to all
        # actually produced items, we can consider it fully shipped
        if shipments.aggregate(models.Sum('quantity'))['quantity__sum'] == (
            self.order_runs.all().aggregate(models.Sum('quantity_out'))[
                'quantity_out__sum'
            ]
        ):
            return shipments.order_by('-date_shipped')[0].date_shipped
        return None

    @property
    def date_delivered(self):
        """``date_shipped`` plus the company's shipping days, or ``None``."""
        shipped = self.date_shipped
        shipping_days = self.order.company.shipping_days
        if not shipped or not shipping_days:
            return None
        # Reuse the locals computed above: ``date_shipped`` is an expensive
        # property (multiple queries), so don't evaluate it a second time.
        return shipped + timedelta(days=shipping_days)

    class Meta:
        ordering = ('order', 'line_no')
        unique_together = ('order', 'line_no')
class OrderRun(models.Model):
    """
    One production run for a line of an order.

    :order_line: the order line, for which this run is run
    :run_number: the identifying number of this run
    :parent: reference to another order run
    :ipn: what IPN this goes under
    :quantity_started: what number of items we start the run with
    :quantity_dest_out: how many items have been destroyed in tests
    :quantity_out: how many items this run yielded. This plus the items
      destroyed in tests subtracted from the quantity we started with, equals
      the amount of items lost through errors.
    :is_open: if the run is still in progress. Defaults to True
    :comment: comments on this run for additional notes
    """
    order_line = models.ForeignKey(
        OrderLine,
        verbose_name=_('order line'),
        related_name='order_runs',
    )
    run_number = models.CharField(
        verbose_name=_('run number'),
        max_length=64,
    )
    parent = models.ForeignKey(
        'self',
        verbose_name=_('parent'),
        related_name='order_runs',
        blank=True, null=True,
    )
    ipn = models.ForeignKey(
        'aps_bom.IPN',
        verbose_name=_('ipn'),
        related_name='order_runs',
    )
    quantity_started = models.PositiveIntegerField(
        verbose_name=_('quantity started'),
    )
    quantity_dest_out = models.PositiveIntegerField(
        verbose_name=_('quantity dest out'),
        blank=True, null=True,
    )
    quantity_out = models.PositiveIntegerField(
        verbose_name=_('quantity out'),
        blank=True, null=True,
    )
    is_open = models.BooleanField(
        verbose_name=_('Is open'),
        default=True,
    )
    comment = models.TextField(
        verbose_name=_('comment'),
        blank=True,
    )
class Shipment(models.Model):
    """
    Stores data about when an order run is shipped.

    :order_run: the ``OrderRun`` this shipment belongs to
    :quantity: how many items were shipped
    :date_shipped: when it was shipped
    """
    order_run = models.ForeignKey(
        OrderRun,
        verbose_name=_('order run'),
        related_name='shipments',
    )
    quantity = models.PositiveIntegerField(
        verbose_name=_('quantity'),
    )
    date_shipped = models.DateTimeField(
        verbose_name=_('date shipped'),
    )
| |
# pylint: disable=invalid-name, unused-import
"""Symbolic graph construction API.
This namespace contains most of the registered operators.
For detailed list of operators, checkout ``Core Tensor Operators``
"""
from __future__ import absolute_import as _abs
import sys as _sys
import os as _os
import ctypes as _ctypes
from numbers import Number as _Number
from . import _base
from ._base import _LIB, check_call as _check_call, _FFI_MODE
from .attribute import AttrScope
from . import _symbol_internal as _internal
# Use different verison of SymbolBase
# When possible, use cython to speedup part of computation.
IMPORT_EXCEPT = RuntimeError if _FFI_MODE == "cython" else ImportError
try:
if _FFI_MODE == "ctypes":
raise ImportError()
if _sys.version_info >= (3, 0):
from ._cy3.symbol import SymbolBase, _init_symbol_module
else:
from ._cy2.symbol import SymbolBase, _init_symbol_module
except IMPORT_EXCEPT:
# pylint: disable=wrong-import-position
from ._ctypes.symbol import SymbolBase, _init_symbol_module
class Symbol(SymbolBase):
    """Symbol is basic operation unit for symbolic graph composition."""
    # disable dictionary storage, also do not have parent type.
    __slots__ = []
    # Type code identifying a Symbol when passed through the TVM FFI.
    _tvm_tcode = 16
    @property
    def _tvm_handle(self):
        # Raw integer value of the underlying C symbol handle.
        return self.handle.value
    # NOTE(review): the __*_symbol__/__*_scalar__ helpers used by the operator
    # overloads below are not defined in this file; presumably they are
    # injected into the module globals by _init_symbol_module -- confirm.
    def __add__(self, other):
        if isinstance(other, Symbol):
            return __add_symbol__(self, other)
        elif isinstance(other, _Number):
            return __add_scalar__(self, scalar=other)
        else:
            raise TypeError("type %s not supported" % str(type(other)))
    def __radd__(self, other):
        return self.__add__(other)
    def __sub__(self, other):
        if isinstance(other, Symbol):
            return __sub_symbol__(self, other)
        if isinstance(other, _Number):
            return __sub_scalar__(self, scalar=other)
        else:
            raise TypeError('type %s not supported' % str(type(other)))
    def __rsub__(self, other):
        if isinstance(other, _Number):
            return __rsub_scalar__(self, scalar=other)
        else:
            raise TypeError('type %s not supported' % str(type(other)))
    def __mul__(self, other):
        if isinstance(other, Symbol):
            return __mul_symbol__(self, other)
        if isinstance(other, _Number):
            return __mul_scalar__(self, scalar=other)
        else:
            raise TypeError('type %s not supported' % str(type(other)))
    def __rmul__(self, other):
        return self.__mul__(other)
    def __div__(self, other):
        if isinstance(other, Symbol):
            return __div_symbol__(self, other)
        if isinstance(other, _Number):
            return __div_scalar__(self, scalar=other)
        else:
            raise TypeError('type %s not supported' % str(type(other)))
    def __rdiv__(self, other):
        if isinstance(other, _Number):
            return __rdiv_scalar__(self, scalar=other)
        else:
            raise TypeError('type %s not supported' % str(type(other)))
    # Python 3 uses __truediv__/__rtruediv__; delegate to the py2 names.
    def __truediv__(self, other):
        return self.__div__(other)
    def __rtruediv__(self, other):
        return self.__rdiv__(other)
    def __pow__(self, other):
        if isinstance(other, Symbol):
            return __pow_symbol__(self, other)
        if isinstance(other, _Number):
            return __pow_scalar__(self, scalar=other)
        else:
            raise TypeError('type %s not supported' % str(type(other)))
    def __rpow__(self, other):
        if isinstance(other, _Number):
            return __rpow_scalar__(self, scalar=other)
        else:
            raise TypeError('type %s not supported' % str(type(other)))
    def __neg__(self):
        # Negation is implemented as multiplication by -1.
        return self.__mul__(-1.0)
    def __copy__(self):
        return self.__deepcopy__()
    def __deepcopy__(self, _=None):
        # Copy the underlying graph node via the C API.
        handle = _base.SymbolHandle()
        _base.check_call(_LIB.NNSymbolCopy(self.handle,
                                           _ctypes.byref(handle)))
        return Symbol(handle)
    def __getitem__(self, index):
        # Fetch the i-th output; a string is resolved against output names.
        if isinstance(index, _base.string_types):
            idx = None
            for i, name in enumerate(self.list_output_names()):
                if name == index:
                    if idx is not None:
                        raise ValueError('There are multiple outputs with name \"%s\"' % index)
                    idx = i
            if idx is None:
                raise ValueError('Cannot find output that matches name \"%s\"' % index)
            index = idx
        if not isinstance(index, int):
            raise TypeError('Symbol only support integer index to fetch i-th output')
        handle = _base.SymbolHandle()
        _check_call(_LIB.NNSymbolGetOutput(
            self.handle, _base.nn_uint(index), _ctypes.byref(handle)))
        return Symbol(handle=handle)
    def attr(self, key):
        """Get attribute string from the symbol, this function only works for non-grouped symbol.

        Parameters
        ----------
        key : str
            The key to get attribute from.

        Returns
        -------
        value : str
            The attribute value of the key, returns None if attribute do not exist.
        """
        ret = _ctypes.c_char_p()
        success = _ctypes.c_int()
        _check_call(_LIB.NNSymbolGetAttr(
            self.handle, _base.c_str(key), _ctypes.byref(ret), _ctypes.byref(success)))
        # `success` is an out-flag: non-zero means the attribute exists.
        if success.value != 0:
            return _base.py_str(ret.value)
        return None
    def list_attr(self, recursive=False):
        """Get all attributes from the symbol.

        Parameters
        ----------
        recursive : bool
            Default `False`. When `recursive` is `True`, list recursively all the
            attributes in the descendents. The attribute names are pre-pended with
            the symbol names to avoid conflicts. If `False`, then only attributes
            that belongs to this symbol is returned, and the attribute names will
            **not** be pre-pended with the symbol name.
        """
        size = _base.nn_uint()
        pairs = _ctypes.POINTER(_ctypes.c_char_p)()
        # C API option: 0 = recursive, 1 = this symbol only.
        option = _ctypes.c_int(0) if recursive else _ctypes.c_int(1)
        _check_call(_LIB.NNSymbolListAttrs(
            self.handle, option, _ctypes.byref(size), _ctypes.byref(pairs)))
        # `pairs` is a flat key/value array: even indices keys, odd values.
        return {_base.py_str(pairs[i*2]): _base.py_str(pairs[i*2+1]) for i in range(size.value)}
    def get_internals(self):
        """Get a new grouped symbol whose output contains all the internal outputs of this symbol.

        Returns
        -------
        sgroup : Symbol
            The internal of the symbol.
        """
        handle = _base.SymbolHandle()
        _check_call(_LIB.NNSymbolGetInternals(
            self.handle, _ctypes.byref(handle)))
        return Symbol(handle=handle)
    def _get_list_copt(self, option):
        """internal function to get list option"""
        if option == 'all':
            return _ctypes.c_int(0)
        elif option == 'read_only':
            return _ctypes.c_int(1)
        elif option == 'aux_state':
            return _ctypes.c_int(2)
        else:
            raise ValueError("option need to be in {'all', 'read_only, 'aux_state'}")
    def list_input_variables(self, option='all'):
        """List all the input variables in the symbol.

        Parameters
        ----------
        option : {'all', 'read_only', 'aux_state'}, optional
            The listing option
            - 'all' will list all the arguments.
            - 'read_only' lists arguments that are readed by the graph.
            - 'aux_state' lists arguments that are mutated by the graph as state.

        Returns
        -------
        vars : list of symbol
            List of all the variables
        """
        size = _ctypes.c_uint()
        sarr = _ctypes.POINTER(_base.SymbolHandle)()
        _check_call(_LIB.NNSymbolListInputVariables(
            self.handle, self._get_list_copt(option),
            _ctypes.byref(size), _ctypes.byref(sarr)))
        return [Symbol(_base.SymbolHandle(sarr[i])) for i in range(size.value)]
    def list_input_names(self, option='all'):
        """List all the inputs in the symbol.

        Parameters
        ----------
        option : {'all', 'read_only', 'aux_state'}, optional
            The listing option
            - 'all' will list all the arguments.
            - 'read_only' lists arguments that are readed by the graph.
            - 'aux_state' lists arguments that are mutated by the graph as state.

        Returns
        -------
        args : list of string
            List of all the arguments.
        """
        size = _ctypes.c_uint()
        sarr = _ctypes.POINTER(_ctypes.c_char_p)()
        _check_call(_LIB.NNSymbolListInputNames(
            self.handle, self._get_list_copt(option),
            _ctypes.byref(size), _ctypes.byref(sarr)))
        return [_base.py_str(sarr[i]) for i in range(size.value)]
    def list_output_names(self):
        """List all outputs in the symbol.

        Returns
        -------
        returns : list of string
            List of all the outputs.
        """
        size = _ctypes.c_uint()
        sarr = _ctypes.POINTER(_ctypes.c_char_p)()
        _check_call(_LIB.NNSymbolListOutputNames(
            self.handle, _ctypes.byref(size), _ctypes.byref(sarr)))
        return [_base.py_str(sarr[i]) for i in range(size.value)]
    def debug_str(self):
        """Get a debug string.

        Returns
        -------
        debug_str : string
            Debug string of the symbol.
        """
        debug_str = _ctypes.c_char_p()
        _check_call(_LIB.NNSymbolPrint(
            self.handle, _ctypes.byref(debug_str)))
        return _base.py_str(debug_str.value)
    def _add_control_deps(self, deps):
        """Add control flow dependencies.
        This makes current op depend on the deps.
        Only use when necessary,
        this function mutate the current symbol node.

        Parameters
        ----------
        deps : Symbol or list of Symbol
            The dependencies
        """
        if isinstance(deps, list):
            deps = Group(deps)
        _check_call(_LIB.NNAddControlDeps(
            self.handle, deps.handle))
def Variable(name, **kwargs):
    """Create a symbolic variable with specified name.

    Parameters
    ----------
    name : str
        Name of the variable.
    kwargs : dict of string -> string
        Additional attributes to set on the variable.

    Returns
    -------
    variable : Symbol
        The created variable symbol.
    """
    if not isinstance(name, _base.string_types):
        raise TypeError('Expect a string for variable `name`')
    # Ask the C API for a fresh variable node.
    var_handle = _base.SymbolHandle()
    _base.check_call(_LIB.NNSymbolCreateVariable(
        _base.c_str(name), _ctypes.byref(var_handle)))
    variable = Symbol(var_handle)
    # Merge in attributes from the ambient AttrScope, if any apply.
    scoped_attrs = AttrScope.current.get(kwargs)
    if scoped_attrs:
        variable._set_attr(**scoped_attrs)
    return variable
def Group(symbols):
    """Create a symbol that groups symbols together.

    Parameters
    ----------
    symbols : list
        List of symbols to be grouped.

    Returns
    -------
    sym : Symbol
        The created group symbol.
    """
    # Validate the members first, then collect their C handles.
    for member in symbols:
        if not isinstance(member, Symbol):
            raise TypeError('Expect Symbols in the list input')
    member_handles = [member.handle for member in symbols]
    group_handle = _base.SymbolHandle()
    _check_call(_LIB.NNSymbolCreateGroup(
        _base.nn_uint(len(member_handles)),
        _base.c_array(_base.SymbolHandle, member_handles),
        _ctypes.byref(group_handle)))
    return Symbol(group_handle)
# Set the real symbol class to Symbol.
# NOTE(review): presumably this also generates and registers the operator
# functions (e.g. __add_symbol__) used by Symbol's overloads -- confirm in
# the _ctypes/_cy* backends.
_init_symbol_module(Symbol, "nnvm")
| |
from __future__ import annotations
from abc import (
ABC,
abstractmethod,
)
from collections import abc
import functools
from io import StringIO
from itertools import islice
from typing import (
Any,
Callable,
Mapping,
)
import numpy as np
import pandas._libs.json as json
from pandas._libs.tslibs import iNaT
from pandas._typing import (
CompressionOptions,
DtypeArg,
IndexLabel,
JSONSerializable,
StorageOptions,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
deprecate_kwarg,
deprecate_nonkeyword_arguments,
doc,
)
from pandas.core.dtypes.common import (
ensure_str,
is_period_dtype,
)
from pandas import (
DataFrame,
MultiIndex,
Series,
isna,
notna,
to_datetime,
)
from pandas.core import generic
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.generic import NDFrame
from pandas.core.reshape.concat import concat
from pandas.io.common import (
IOHandles,
file_exists,
get_handle,
is_fsspec_url,
is_url,
stringify_path,
)
from pandas.io.json._normalize import convert_to_line_delimits
from pandas.io.json._table_schema import (
build_table_schema,
parse_table_schema,
)
from pandas.io.parsers.readers import validate_integer
loads = json.loads
dumps = json.dumps
TABLE_SCHEMA_VERSION = "0.20.0"
# interface to/from
def to_json(
    path_or_buf,
    obj: NDFrame,
    orient: str | None = None,
    date_format: str = "epoch",
    double_precision: int = 10,
    force_ascii: bool = True,
    date_unit: str = "ms",
    default_handler: Callable[[Any], JSONSerializable] | None = None,
    lines: bool = False,
    compression: CompressionOptions = "infer",
    index: bool = True,
    indent: int = 0,
    storage_options: StorageOptions = None,
):
    """Serialize ``obj`` to JSON; write to ``path_or_buf`` or return the text."""
    # Guard clauses: reject option combinations the writers cannot honour.
    if not index and orient not in ["split", "table"]:
        raise ValueError(
            "'index=False' is only valid when 'orient' is 'split' or 'table'"
        )
    if lines and orient != "records":
        raise ValueError("'lines' keyword only valid when 'orient' is records")

    # Table-schema serialization operates on frames; promote a Series first.
    if orient == "table" and isinstance(obj, Series):
        obj = obj.to_frame(name=obj.name or "values")

    # Pick the writer implementation matching orient and object type.
    writer_cls: type[Writer]
    if orient == "table" and isinstance(obj, DataFrame):
        writer_cls = JSONTableWriter
    elif isinstance(obj, Series):
        writer_cls = SeriesWriter
    elif isinstance(obj, DataFrame):
        writer_cls = FrameWriter
    else:
        raise NotImplementedError("'obj' should be a Series or a DataFrame")

    json_text = writer_cls(
        obj,
        orient=orient,
        date_format=date_format,
        double_precision=double_precision,
        ensure_ascii=force_ascii,
        date_unit=date_unit,
        default_handler=default_handler,
        index=index,
        indent=indent,
    ).write()

    if lines:
        json_text = convert_to_line_delimits(json_text)

    if path_or_buf is None:
        # No destination given: hand the serialized text back to the caller.
        return json_text
    # apply compression and byte/text conversion
    with get_handle(
        path_or_buf, "w", compression=compression, storage_options=storage_options
    ) as handles:
        handles.handle.write(json_text)
class Writer(ABC):
    """Base class serializing a pandas object to JSON via ``dumps``."""
    # Orient used when the caller passes ``orient=None``; set by subclasses.
    _default_orient: str
    def __init__(
        self,
        obj,
        orient: str | None,
        date_format: str,
        double_precision: int,
        ensure_ascii: bool,
        date_unit: str,
        index: bool,
        default_handler: Callable[[Any], JSONSerializable] | None = None,
        indent: int = 0,
    ):
        self.obj = obj
        if orient is None:
            orient = self._default_orient
        self.orient = orient
        self.date_format = date_format
        self.double_precision = double_precision
        self.ensure_ascii = ensure_ascii
        self.date_unit = date_unit
        self.default_handler = default_handler
        self.index = index
        self.indent = indent
        self.is_copy = None
        # Let the subclass validate axes (e.g. uniqueness) up front.
        self._format_axes()
    def _format_axes(self):
        # Subclass responsibility; abstract via AbstractMethodError.
        raise AbstractMethodError(self)
    def write(self):
        """Serialize ``obj_to_write`` with the configured options."""
        iso_dates = self.date_format == "iso"
        return dumps(
            self.obj_to_write,
            orient=self.orient,
            double_precision=self.double_precision,
            ensure_ascii=self.ensure_ascii,
            date_unit=self.date_unit,
            iso_dates=iso_dates,
            default_handler=self.default_handler,
            indent=self.indent,
        )
    @property
    @abstractmethod
    def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
        """Object to write in JSON format."""
        pass
class SeriesWriter(Writer):
    """Writer for :class:`Series` objects."""

    _default_orient = "index"

    @property
    def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
        if self.orient == "split" and not self.index:
            # "split" without the index: emit only the name and raw values.
            return {"name": self.obj.name, "data": self.obj.values}
        return self.obj

    def _format_axes(self):
        """Fail fast when an 'index'-oriented Series has duplicate labels."""
        if self.orient == "index" and not self.obj.index.is_unique:
            raise ValueError(f"Series index must be unique for orient='{self.orient}'")
class FrameWriter(Writer):
    """Writer for :class:`DataFrame` objects."""

    _default_orient = "columns"

    @property
    def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
        if self.orient == "split" and not self.index:
            # "split" without the index: serialize and drop the index entry.
            payload = self.obj.to_dict(orient="split")
            del payload["index"]
            return payload
        return self.obj

    def _format_axes(self):
        """
        Try to format axes if they are datelike.
        """
        # Duplicate labels would collide when used as JSON object keys.
        if self.orient in ("index", "columns") and not self.obj.index.is_unique:
            raise ValueError(
                f"DataFrame index must be unique for orient='{self.orient}'."
            )
        if self.orient in ("index", "columns", "records") and (
            not self.obj.columns.is_unique
        ):
            raise ValueError(
                f"DataFrame columns must be unique for orient='{self.orient}'."
            )
class JSONTableWriter(FrameWriter):
    _default_orient = "records"
    def __init__(
        self,
        obj,
        orient: str | None,
        date_format: str,
        double_precision: int,
        ensure_ascii: bool,
        date_unit: str,
        index: bool,
        default_handler: Callable[[Any], JSONSerializable] | None = None,
        indent: int = 0,
    ):
        """
        Adds a `schema` attribute with the Table Schema, resets
        the index (can't do in caller, because the schema inference needs
        to know what the index is), forces orient to records, and forces
        date_format to 'iso'.
        """
        super().__init__(
            obj,
            orient,
            date_format,
            double_precision,
            ensure_ascii,
            date_unit,
            index,
            default_handler=default_handler,
            indent=indent,
        )
        if date_format != "iso":
            msg = (
                "Trying to write with `orient='table'` and "
                f"`date_format='{date_format}'`. Table Schema requires dates "
                "to be formatted with `date_format='iso'`"
            )
            raise ValueError(msg)
        # Infer the schema before the index is reset below.
        self.schema = build_table_schema(obj, index=self.index)
        # NotImplemented on a column MultiIndex
        if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
            raise NotImplementedError(
                "orient='table' is not supported for MultiIndex columns"
            )
        # TODO: Do this timedelta properly in objToJSON.c See GH #15137
        if (
            (obj.ndim == 1)
            and (obj.name in set(obj.index.names))
            or len(obj.columns.intersection(obj.index.names))
        ):
            msg = "Overlapping names between the index and columns"
            raise ValueError(msg)
        # Work on a copy: the conversions below mutate columns/index.
        obj = obj.copy()
        timedeltas = obj.select_dtypes(include=["timedelta"]).columns
        if len(timedeltas):
            obj[timedeltas] = obj[timedeltas].applymap(lambda x: x.isoformat())
        # Convert PeriodIndex to datetimes before serializing
        if is_period_dtype(obj.index.dtype):
            obj.index = obj.index.to_timestamp()
        # exclude index from obj if index=False
        if not self.index:
            self.obj = obj.reset_index(drop=True)
        else:
            self.obj = obj.reset_index(drop=False)
        # Table Schema mandates these, regardless of what the caller passed.
        self.date_format = "iso"
        self.orient = "records"
        self.index = index
    @property
    def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
        return {"schema": self.schema, "data": self.obj}
@doc(storage_options=generic._shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="numpy", new_arg_name=None)
@deprecate_nonkeyword_arguments(
    version="2.0", allowed_args=["path_or_buf"], stacklevel=3
)
def read_json(
    path_or_buf=None,
    orient=None,
    typ="frame",
    dtype: DtypeArg | None = None,
    convert_axes=None,
    convert_dates=True,
    keep_default_dates: bool = True,
    numpy: bool = False,
    precise_float: bool = False,
    date_unit=None,
    encoding=None,
    encoding_errors: str | None = "strict",
    lines: bool = False,
    chunksize: int | None = None,
    compression: CompressionOptions = "infer",
    nrows: int | None = None,
    storage_options: StorageOptions = None,
):
    """
    Convert a JSON string to pandas object.

    Parameters
    ----------
    path_or_buf : a valid JSON str, path object or file-like object
        Any valid string path is acceptable. The string could be a URL. Valid
        URL schemes include http, ftp, s3, and file. For file URLs, a host is
        expected. A local file could be:
        ``file://localhost/path/to/table.json``.
        If you want to pass in a path object, pandas accepts any
        ``os.PathLike``.
        By file-like object, we refer to objects with a ``read()`` method,
        such as a file handle (e.g. via builtin ``open`` function)
        or ``StringIO``.
    orient : str
        Indication of expected JSON string format.
        Compatible JSON strings can be produced by ``to_json()`` with a
        corresponding orient value.
        The set of possible orients is:
        - ``'split'`` : dict like
          ``{{index -> [index], columns -> [columns], data -> [values]}}``
        - ``'records'`` : list like
          ``[{{column -> value}}, ... , {{column -> value}}]``
        - ``'index'`` : dict like ``{{index -> {{column -> value}}}}``
        - ``'columns'`` : dict like ``{{column -> {{index -> value}}}}``
        - ``'values'`` : just the values array
        The allowed and default values depend on the value
        of the `typ` parameter.
        * when ``typ == 'series'``,
          - allowed orients are ``{{'split','records','index'}}``
          - default is ``'index'``
          - The Series index must be unique for orient ``'index'``.
        * when ``typ == 'frame'``,
          - allowed orients are ``{{'split','records','index',
            'columns','values', 'table'}}``
          - default is ``'columns'``
          - The DataFrame index must be unique for orients ``'index'`` and
            ``'columns'``.
          - The DataFrame columns must be unique for orients ``'index'``,
            ``'columns'``, and ``'records'``.
    typ : {{'frame', 'series'}}, default 'frame'
        The type of object to recover.
    dtype : bool or dict, default None
        If True, infer dtypes; if a dict of column to dtype, then use those;
        if False, then don't infer dtypes at all, applies only to the data.
        For all ``orient`` values except ``'table'``, default is True.
        .. versionchanged:: 0.25.0
           Not applicable for ``orient='table'``.
    convert_axes : bool, default None
        Try to convert the axes to the proper dtypes.
        For all ``orient`` values except ``'table'``, default is True.
        .. versionchanged:: 0.25.0
           Not applicable for ``orient='table'``.
    convert_dates : bool or list of str, default True
        If True then default datelike columns may be converted (depending on
        keep_default_dates).
        If False, no dates will be converted.
        If a list of column names, then those columns will be converted and
        default datelike columns may also be converted (depending on
        keep_default_dates).
    keep_default_dates : bool, default True
        If parsing dates (convert_dates is not False), then try to parse the
        default datelike columns.
        A column label is datelike if
        * it ends with ``'_at'``,
        * it ends with ``'_time'``,
        * it begins with ``'timestamp'``,
        * it is ``'modified'``, or
        * it is ``'date'``.
    numpy : bool, default False
        Direct decoding to numpy arrays. Supports numeric data only, but
        non-numeric column and index labels are supported. Note also that the
        JSON ordering MUST be the same for each term if numpy=True.
        .. deprecated:: 1.0.0
    precise_float : bool, default False
        Set to enable usage of higher precision (strtod) function when
        decoding string to double values. Default (False) is to use fast but
        less precise builtin functionality.
    date_unit : str, default None
        The timestamp unit to detect if converting dates. The default behaviour
        is to try and detect the correct precision, but if this is not desired
        then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
        milliseconds, microseconds or nanoseconds respectively.
    encoding : str, default is 'utf-8'
        The encoding to use to decode py3 bytes.
    encoding_errors : str, optional, default "strict"
        How encoding errors are treated. `List of possible values
        <https://docs.python.org/3/library/codecs.html#error-handlers>`_ .
        .. versionadded:: 1.3.0
    lines : bool, default False
        Read the file as a json object per line.
    chunksize : int, optional
        Return JsonReader object for iteration.
        See the `line-delimited json docs
        <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_
        for more information on ``chunksize``.
        This can only be passed if `lines=True`.
        If this is None, the file will be read into memory all at once.
        .. versionchanged:: 1.2
           ``JsonReader`` is a context manager.
    compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
        For on-the-fly decompression of on-disk data. If 'infer', then use
        gzip, bz2, zip or xz if path_or_buf is a string ending in
        '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression
        otherwise. If using 'zip', the ZIP file must contain only one data
        file to be read in. Set to None for no decompression.
    nrows : int, optional
        The number of lines from the line-delimited jsonfile that has to be read.
        This can only be passed if `lines=True`.
        If this is None, all the rows will be returned.
        .. versionadded:: 1.1
    {storage_options}
        .. versionadded:: 1.2.0
    Returns
    -------
    Series or DataFrame
        The type returned depends on the value of `typ`.
    See Also
    --------
    DataFrame.to_json : Convert a DataFrame to a JSON string.
    Series.to_json : Convert a Series to a JSON string.
    json_normalize : Normalize semi-structured JSON data into a flat table.
    Notes
    -----
    Specific to ``orient='table'``, if a :class:`DataFrame` with a literal
    :class:`Index` name of `index` gets written with :func:`to_json`, the
    subsequent read operation will incorrectly set the :class:`Index` name to
    ``None``. This is because `index` is also used by :func:`DataFrame.to_json`
    to denote a missing :class:`Index` name, and the subsequent
    :func:`read_json` operation cannot distinguish between the two. The same
    limitation is encountered with a :class:`MultiIndex` and any names
    beginning with ``'level_'``.
    Examples
    --------
    >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
    ...                   index=['row 1', 'row 2'],
    ...                   columns=['col 1', 'col 2'])
    Encoding/decoding a Dataframe using ``'split'`` formatted JSON:
    >>> df.to_json(orient='split')
    '\
{{\
"columns":["col 1","col 2"],\
"index":["row 1","row 2"],\
"data":[["a","b"],["c","d"]]\
}}\
'
    >>> pd.read_json(_, orient='split')
          col 1 col 2
    row 1     a     b
    row 2     c     d
    Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
    >>> df.to_json(orient='index')
    '{{"row 1":{{"col 1":"a","col 2":"b"}},"row 2":{{"col 1":"c","col 2":"d"}}}}'
    >>> pd.read_json(_, orient='index')
          col 1 col 2
    row 1     a     b
    row 2     c     d
    Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
    Note that index labels are not preserved with this encoding.
    >>> df.to_json(orient='records')
    '[{{"col 1":"a","col 2":"b"}},{{"col 1":"c","col 2":"d"}}]'
    >>> pd.read_json(_, orient='records')
      col 1 col 2
    0     a     b
    1     c     d
    Encoding with Table Schema
    >>> df.to_json(orient='table')
    '\
{{"schema":{{"fields":[\
{{"name":"index","type":"string"}},\
{{"name":"col 1","type":"string"}},\
{{"name":"col 2","type":"string"}}],\
"primaryKey":["index"],\
"pandas_version":"0.20.0"}},\
"data":[\
{{"index":"row 1","col 1":"a","col 2":"b"}},\
{{"index":"row 2","col 1":"c","col 2":"d"}}]\
}}\
'
    """
    # 'table' orient carries its own schema, so caller-supplied dtype /
    # convert_axes would conflict with it.
    if orient == "table" and dtype:
        raise ValueError("cannot pass both dtype and orient='table'")
    if orient == "table" and convert_axes:
        raise ValueError("cannot pass both convert_axes and orient='table'")
    # Fill in the documented defaults (True) for non-'table' orients.
    if dtype is None and orient != "table":
        # error: Incompatible types in assignment (expression has type "bool", variable
        # has type "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float],
        # Type[int], Type[complex], Type[bool], Type[object], Dict[Hashable,
        # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float],
        # Type[int], Type[complex], Type[bool], Type[object]]], None]")
        dtype = True  # type: ignore[assignment]
    if convert_axes is None and orient != "table":
        convert_axes = True
    json_reader = JsonReader(
        path_or_buf,
        orient=orient,
        typ=typ,
        dtype=dtype,
        convert_axes=convert_axes,
        convert_dates=convert_dates,
        keep_default_dates=keep_default_dates,
        numpy=numpy,
        precise_float=precise_float,
        date_unit=date_unit,
        encoding=encoding,
        lines=lines,
        chunksize=chunksize,
        compression=compression,
        nrows=nrows,
        storage_options=storage_options,
        encoding_errors=encoding_errors,
    )
    if chunksize:
        # Defer reading: the caller iterates the JsonReader chunk by chunk.
        return json_reader
    with json_reader:
        return json_reader.read()
class JsonReader(abc.Iterator):
    """
    JsonReader provides an interface for reading in a JSON file.

    If initialized with ``lines=True`` and ``chunksize``, can be iterated over
    ``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the
    whole document.
    """

    def __init__(
        self,
        filepath_or_buffer,
        orient,
        typ,
        dtype,
        convert_axes,
        convert_dates,
        keep_default_dates: bool,
        numpy: bool,
        precise_float: bool,
        date_unit,
        encoding,
        lines: bool,
        chunksize: int | None,
        compression: CompressionOptions,
        nrows: int | None,
        storage_options: StorageOptions = None,
        encoding_errors: str | None = "strict",
    ):
        # Parser options are stored verbatim and forwarded to the
        # Frame/Series parsers by _get_object_parser.
        self.orient = orient
        self.typ = typ
        self.dtype = dtype
        self.convert_axes = convert_axes
        self.convert_dates = convert_dates
        self.keep_default_dates = keep_default_dates
        self.numpy = numpy
        self.precise_float = precise_float
        self.date_unit = date_unit
        self.encoding = encoding
        self.compression = compression
        self.storage_options = storage_options
        self.lines = lines
        self.chunksize = chunksize
        self.nrows_seen = 0
        self.nrows = nrows
        self.encoding_errors = encoding_errors
        # set by _get_data_from_filepath when *we* open the stream; close()
        # only closes handles we own
        self.handles: IOHandles | None = None

        # chunksize (>= 1) and nrows (>= 0) only make sense for
        # line-delimited JSON
        if self.chunksize is not None:
            self.chunksize = validate_integer("chunksize", self.chunksize, 1)
            if not self.lines:
                raise ValueError("chunksize can only be passed if lines=True")
        if self.nrows is not None:
            self.nrows = validate_integer("nrows", self.nrows, 0)
            if not self.lines:
                raise ValueError("nrows can only be passed if lines=True")

        data = self._get_data_from_filepath(filepath_or_buffer)
        self.data = self._preprocess_data(data)

    def _preprocess_data(self, data):
        """
        At this point, the data either has a `read` attribute (e.g. a file
        object or a StringIO) or is a string that is a JSON document.

        If self.chunksize, we prepare the data for the `__next__` method.
        Otherwise, we read it into memory for the `read` method.
        """
        # whole-document mode: slurp file-likes into one string, closing any
        # handle this reader opened (via the context-manager protocol)
        if hasattr(data, "read") and not (self.chunksize or self.nrows):
            with self:
                data = data.read()
        # streaming mode: wrap plain strings so lines can be pulled lazily
        if not hasattr(data, "read") and (self.chunksize or self.nrows):
            data = StringIO(data)

        return data

    def _get_data_from_filepath(self, filepath_or_buffer):
        """
        The function read_json accepts three input types:
            1. filepath (string-like)
            2. file-like object (e.g. open file object, StringIO)
            3. JSON string

        This method turns (1) into (2) to simplify the rest of the processing.
        It returns input types (2) and (3) unchanged.
        """
        # if it is a string but the file does not exist, it might be a JSON string
        filepath_or_buffer = stringify_path(filepath_or_buffer)
        if (
            not isinstance(filepath_or_buffer, str)
            or is_url(filepath_or_buffer)
            or is_fsspec_url(filepath_or_buffer)
            or file_exists(filepath_or_buffer)
        ):
            # open the stream ourselves; remember the handles so close()
            # can release them later
            self.handles = get_handle(
                filepath_or_buffer,
                "r",
                encoding=self.encoding,
                compression=self.compression,
                storage_options=self.storage_options,
                errors=self.encoding_errors,
            )
            filepath_or_buffer = self.handles.handle

        return filepath_or_buffer

    def _combine_lines(self, lines) -> str:
        """
        Combines a list of JSON objects into one JSON object.
        """
        # strip each line and drop empties, then wrap in a JSON array
        return (
            f'[{",".join((line for line in (line.strip() for line in lines) if line))}]'
        )

    def read(self):
        """
        Read the whole JSON input into a pandas object.
        """
        if self.lines:
            if self.chunksize:
                # iterate self in chunks and stitch the pieces together
                obj = concat(self)
            elif self.nrows:
                # only the first nrows lines are consumed
                lines = list(islice(self.data, self.nrows))
                lines_json = self._combine_lines(lines)
                obj = self._get_object_parser(lines_json)
            else:
                data = ensure_str(self.data)
                data_lines = data.split("\n")
                obj = self._get_object_parser(self._combine_lines(data_lines))
        else:
            obj = self._get_object_parser(self.data)
        self.close()
        return obj

    def _get_object_parser(self, json):
        """
        Parses a json document into a pandas object.
        """
        typ = self.typ
        dtype = self.dtype
        kwargs = {
            "orient": self.orient,
            "dtype": self.dtype,
            "convert_axes": self.convert_axes,
            "convert_dates": self.convert_dates,
            "keep_default_dates": self.keep_default_dates,
            "numpy": self.numpy,
            "precise_float": self.precise_float,
        }
        obj = None
        if typ == "frame":
            obj = FrameParser(json, **kwargs).parse()

        # fall back to a Series when typ == "series" or frame parsing
        # produced nothing
        if typ == "series" or obj is None:
            if not isinstance(dtype, bool):
                kwargs["dtype"] = dtype
            obj = SeriesParser(json, **kwargs).parse()

        return obj

    def close(self):
        """
        If we opened a stream earlier, in _get_data_from_filepath, we should
        close it.

        If an open stream or file was passed, we leave it open.
        """
        if self.handles is not None:
            self.handles.close()

    def __next__(self):
        # honor the nrows cap across chunks
        if self.nrows:
            if self.nrows_seen >= self.nrows:
                self.close()
                raise StopIteration

        lines = list(islice(self.data, self.chunksize))
        if lines:
            lines_json = self._combine_lines(lines)
            obj = self._get_object_parser(lines_json)

            # Make sure that the returned objects have the right index.
            obj.index = range(self.nrows_seen, self.nrows_seen + len(obj))
            self.nrows_seen += len(obj)

            return obj

        # input exhausted
        self.close()
        raise StopIteration

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
class Parser:
    """Base class for converting a parsed JSON document into a pandas object."""

    # declared by subclasses: keys allowed for orient="split", and the orient
    # assumed when the caller passes None
    _split_keys: tuple[str, ...]
    _default_orient: str

    _STAMP_UNITS = ("s", "ms", "us", "ns")
    # 365 days expressed in each unit; numeric values must exceed this (or be
    # NaN/iNaT) for a column to be considered epoch timestamps in
    # _try_convert_to_date
    _MIN_STAMPS = {
        "s": 31536000,
        "ms": 31536000000,
        "us": 31536000000000,
        "ns": 31536000000000000,
    }

    def __init__(
        self,
        json,
        orient,
        dtype: DtypeArg | None = None,
        convert_axes=True,
        convert_dates=True,
        keep_default_dates=False,
        numpy=False,
        precise_float=False,
        date_unit=None,
    ):
        self.json = json

        if orient is None:
            orient = self._default_orient
        self.orient = orient

        self.dtype = dtype

        # the numpy-accelerated loads path does not support orient="split"
        if orient == "split":
            numpy = False

        if date_unit is not None:
            date_unit = date_unit.lower()
            if date_unit not in self._STAMP_UNITS:
                raise ValueError(f"date_unit must be one of {self._STAMP_UNITS}")
            self.min_stamp = self._MIN_STAMPS[date_unit]
        else:
            self.min_stamp = self._MIN_STAMPS["s"]

        self.numpy = numpy
        self.precise_float = precise_float
        self.convert_axes = convert_axes
        self.convert_dates = convert_dates
        self.date_unit = date_unit
        self.keep_default_dates = keep_default_dates
        # populated by the subclass _parse_* hooks
        self.obj: DataFrame | Series | None = None

    def check_keys_split(self, decoded):
        """
        Checks that dict has only the appropriate keys for orient='split'.
        """
        bad_keys = set(decoded.keys()).difference(set(self._split_keys))
        if bad_keys:
            bad_keys_joined = ", ".join(bad_keys)
            raise ValueError(f"JSON data had unexpected key(s): {bad_keys_joined}")

    def parse(self):
        """Parse self.json and return the resulting object (or None)."""

        # try numpy
        numpy = self.numpy
        if numpy:
            self._parse_numpy()
        else:
            self._parse_no_numpy()

        if self.obj is None:
            return None
        if self.convert_axes:
            self._convert_axes()
        self._try_convert_types()
        return self.obj

    def _parse_numpy(self):
        raise AbstractMethodError(self)

    def _parse_no_numpy(self):
        raise AbstractMethodError(self)

    def _convert_axes(self):
        """
        Try to convert axes.
        """
        obj = self.obj
        assert obj is not None  # for mypy
        for axis_name in obj._AXIS_ORDERS:
            # use_dtypes=False: user-supplied dtypes apply to data, not axes
            new_axis, result = self._try_convert_data(
                name=axis_name,
                data=obj._get_axis(axis_name),
                use_dtypes=False,
                convert_dates=True,
            )
            if result:
                setattr(self.obj, axis_name, new_axis)

    def _try_convert_types(self):
        raise AbstractMethodError(self)

    def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True):
        """
        Try to parse a ndarray like into a column by inferring dtype.

        Returns the (possibly converted) data and a bool flagging whether a
        conversion took place.
        """
        # don't try to coerce, unless a force conversion
        if use_dtypes:
            if not self.dtype:
                if all(notna(data)):
                    return data, False
                return data.fillna(np.nan), True

            # error: Non-overlapping identity check (left operand type:
            # "Union[ExtensionDtype, str, dtype[Any], Type[object],
            # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
            # Type[str], Type[float], Type[int], Type[complex], Type[bool],
            # Type[object]]]]", right operand type: "Literal[True]")
            elif self.dtype is True:  # type: ignore[comparison-overlap]
                pass
            else:
                # dtype to force
                dtype = (
                    self.dtype.get(name) if isinstance(self.dtype, dict) else self.dtype
                )
                if dtype is not None:
                    try:
                        # error: Argument 1 to "dtype" has incompatible type
                        # "Union[ExtensionDtype, str, dtype[Any], Type[object]]";
                        # expected "Type[Any]"
                        dtype = np.dtype(dtype)  # type: ignore[arg-type]
                        return data.astype(dtype), True
                    except (TypeError, ValueError):
                        return data, False

        if convert_dates:
            new_data, result = self._try_convert_to_date(data)
            if result:
                return new_data, True

        if data.dtype == "object":

            # try float
            try:
                data = data.astype("float64")
            except (TypeError, ValueError):
                pass

        if data.dtype.kind == "f":

            if data.dtype != "float64":

                # coerce floats to 64
                try:
                    data = data.astype("float64")
                except (TypeError, ValueError):
                    pass

        # don't coerce 0-len data
        if len(data) and (data.dtype == "float" or data.dtype == "object"):

            # coerce ints if we can
            try:
                new_data = data.astype("int64")
                if (new_data == data).all():
                    data = new_data
            except (TypeError, ValueError, OverflowError):
                pass

        # coerce ints to 64
        if data.dtype == "int":

            # coerce floats to 64
            try:
                data = data.astype("int64")
            except (TypeError, ValueError):
                pass

        # if we have an index, we want to preserve dtypes
        if name == "index" and len(data):
            if self.orient == "split":
                return data, False

        return data, True

    def _try_convert_to_date(self, data):
        """
        Try to parse a ndarray like into a date column.

        Try to coerce object in epoch/iso formats and integer/float in epoch
        formats. Return a boolean if parsing was successful.
        """
        # no conversion on empty
        if not len(data):
            return data, False

        new_data = data
        if new_data.dtype == "object":
            try:
                new_data = data.astype("int64")
            except (TypeError, ValueError, OverflowError):
                pass

        # ignore numbers that are out of range
        if issubclass(new_data.dtype.type, np.number):
            # values at/below min_stamp are assumed not to be epoch stamps;
            # NaN and iNaT sentinels are always allowed through
            in_range = (
                isna(new_data._values)
                | (new_data > self.min_stamp)
                | (new_data._values == iNaT)
            )
            if not in_range.all():
                return data, False

        date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
        for date_unit in date_units:
            try:
                new_data = to_datetime(new_data, errors="raise", unit=date_unit)
            except (ValueError, OverflowError, TypeError):
                continue
            return new_data, True
        return data, False

    def _try_convert_dates(self):
        raise AbstractMethodError(self)
class SeriesParser(Parser):
    """Parser subclass producing a Series."""

    _default_orient = "index"
    _split_keys = ("name", "index", "data")

    def _parse_no_numpy(self):
        """Build the Series via the pure-Python loads path."""
        raw = loads(self.json, precise_float=self.precise_float)

        if self.orient != "split":
            self.obj = create_series_with_explicit_dtype(raw, dtype_if_empty=object)
            return

        # orient="split": keys must be exactly name/index/data
        parts = {str(key): value for key, value in raw.items()}
        self.check_keys_split(parts)
        self.obj = create_series_with_explicit_dtype(**parts)

    def _parse_numpy(self):
        """Build the Series via the numpy-accelerated loads path."""
        load_kwargs = {
            "dtype": None,
            "numpy": True,
            "precise_float": self.precise_float,
        }
        labelled = self.orient in ["columns", "index"]
        if labelled:
            load_kwargs["labelled"] = True
        raw = loads(self.json, **load_kwargs)

        if self.orient == "split":
            parts = {str(key): value for key, value in raw.items()}
            self.check_keys_split(parts)
            self.obj = create_series_with_explicit_dtype(**parts)
        elif labelled:
            # error: "create_series_with_explicit_dtype"
            # gets multiple values for keyword argument "dtype_if_empty
            self.obj = create_series_with_explicit_dtype(
                *raw, dtype_if_empty=object
            )  # type:ignore[misc]
        else:
            self.obj = create_series_with_explicit_dtype(raw, dtype_if_empty=object)

    def _try_convert_types(self):
        """Coerce the parsed Series dtype in place (dates/numerics)."""
        if self.obj is None:
            return
        converted, changed = self._try_convert_data(
            "data", self.obj, convert_dates=self.convert_dates
        )
        if changed:
            self.obj = converted
class FrameParser(Parser):
    """Parser subclass producing a DataFrame."""

    _default_orient = "columns"
    _split_keys = ("columns", "index", "data")

    def _parse_numpy(self):
        json = self.json
        orient = self.orient

        if orient == "columns":
            args = loads(
                json,
                dtype=None,
                numpy=True,
                labelled=True,
                precise_float=self.precise_float,
            )
            if len(args):
                # reorder/transpose the labelled loads output so it lines up
                # with DataFrame(data, index, columns)
                # NOTE(review): presumably loads returns (values, columns,
                # index) here — confirm against the ujson labelled output
                args = (args[0].T, args[2], args[1])
            self.obj = DataFrame(*args)
        elif orient == "split":
            decoded = loads(
                json, dtype=None, numpy=True, precise_float=self.precise_float
            )
            # normalize keys to str so check_keys_split / **decoded work
            decoded = {str(k): v for k, v in decoded.items()}
            self.check_keys_split(decoded)
            self.obj = DataFrame(**decoded)
        elif orient == "values":
            self.obj = DataFrame(
                loads(json, dtype=None, numpy=True, precise_float=self.precise_float)
            )
        else:
            self.obj = DataFrame(
                *loads(
                    json,
                    dtype=None,
                    numpy=True,
                    labelled=True,
                    precise_float=self.precise_float,
                )
            )

    def _parse_no_numpy(self):
        json = self.json
        orient = self.orient

        if orient == "columns":
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None
            )
        elif orient == "split":
            decoded = {
                str(k): v
                for k, v in loads(json, precise_float=self.precise_float).items()
            }
            self.check_keys_split(decoded)
            self.obj = DataFrame(dtype=None, **decoded)
        elif orient == "index":
            self.obj = DataFrame.from_dict(
                loads(json, precise_float=self.precise_float),
                dtype=None,
                orient="index",
            )
        elif orient == "table":
            self.obj = parse_table_schema(json, precise_float=self.precise_float)
        else:
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None
            )

    def _process_converter(self, f, filt=None):
        """
        Take a conversion function and possibly recreate the frame.

        ``f(col, values) -> (new_values, bool)`` is applied to every column
        passing ``filt``; a new frame is built only if something changed.
        """
        if filt is None:
            filt = lambda col, c: True

        obj = self.obj
        assert obj is not None  # for mypy

        needs_new_obj = False
        new_obj = {}
        # key by position, not label, so duplicate column labels survive
        for i, (col, c) in enumerate(obj.items()):
            if filt(col, c):
                new_data, result = f(col, c)
                if result:
                    c = new_data
                    needs_new_obj = True
            new_obj[i] = c

        if needs_new_obj:

            # possibly handle dup columns
            new_frame = DataFrame(new_obj, index=obj.index)
            new_frame.columns = obj.columns
            self.obj = new_frame

    def _try_convert_types(self):
        if self.obj is None:
            return
        if self.convert_dates:
            self._try_convert_dates()

        self._process_converter(
            lambda col, c: self._try_convert_data(col, c, convert_dates=False)
        )

    def _try_convert_dates(self):
        if self.obj is None:
            return

        # our columns to parse
        convert_dates = self.convert_dates
        if convert_dates is True:
            convert_dates = []
        convert_dates = set(convert_dates)

        def is_ok(col) -> bool:
            """
            Return if this col is ok to try for a date parse.
            """
            if not isinstance(col, str):
                return False

            col_lower = col.lower()
            # name-based heuristic for likely datetime columns
            if (
                col_lower.endswith("_at")
                or col_lower.endswith("_time")
                or col_lower == "modified"
                or col_lower == "date"
                or col_lower == "datetime"
                or col_lower.startswith("timestamp")
            ):
                return True
            return False

        self._process_converter(
            lambda col, c: self._try_convert_to_date(c),
            lambda col, c: (
                (self.keep_default_dates and is_ok(col)) or col in convert_dates
            ),
        )
| |
#!/usr/bin/python
#
# Useful information can be found at https://svn.planet-lab.org/wiki/NodeManager
#
# Faiyaz Ahmed <faiyaza at cs dot princeton dot edu>
# Copyright (C) 2008 The Trustees of Princeton University
"""Node Manager"""
import optparse
import time
#import xmlrpclib
#import socket
import os
import sys
#import glob
import pickle
import random
#import resource
import logger
import tools
from config import Config
from plcapi import PLCAPI
from node_config import Node_Config
import commands
class NodeManager:
    """PlanetLab node-manager daemon (Python 2).

    Parses command-line options, loads the configured plugin modules,
    authenticates against PLC, then loops forever: each period it fetches
    GetSlivers() from PLC, reconciles the on-disk slice map with the fetched
    data (the "wangyang" additions), pushes the result to the router-side
    helpers, and finally triggers every loaded module's GetSlivers callback.
    """

    PLUGIN_PATH = "/usr/share/NodeManager/plugins"

    # pickle files used to persist state between runs
    DB_FILE = "/var/lib/nodemanager/getslivers.pickle"
    MAP_FILE = "/var/lib/nodemanager/slicemap.pickle"
    ROUTER_FILE = "/var/lib/nodemanager/router.pickle"
    VSLIVER_FILE = "/var/lib/nodemanager/vsliver.pickle"
    VIP_FILE = "/var/lib/nodemanager/vip.pickle"
    VMAC_FILE = "/var/lib/nodemanager/vmac.pickle"
    VLANID_FILE = "/var/lib/nodemanager/vlanid.pickle"
    PEARL_DEFAULT_CONFIG = "/etc/planetlab/pearl_default_config.xml"
    #PEARL_DPID = 1
    #NODE_ID = 32
    #PEARL_API_URL = "http://192.168.1.40:8080?wsdl"
    #PEARL_DEFAULT_CONFIG = "/etc/planetlab/pearl_default_config.xml"

    # the modules in this directory that need to be run
    # NOTE: modules listed here will also be loaded in this order
    # once loaded, they get re-ordered after their priority (lower comes first)
    # for determining the runtime order
    ###core_modules=['net', 'conf_files', 'slivermanager', 'bwmon']
    #['net', 'conf_files', 'sliverauth', 'vsys_privs', 'rawdisk', 'privatebridge',
    #'interfaces', 'hostmap', 'sfagids', 'syndicate', 'codemux', 'vsys',
    #'specialaccounts', 'omf_resctl', 'reservation']
    core_modules=['conf_files']

    default_period=600      # base polling interval (seconds)
    default_random=301      # upper bound for the extra random delay (seconds)
    default_priority=100    # priority used when a module declares none

    def __init__ (self):
        """Parse command-line options and read the node type from Node_Config."""
        parser = optparse.OptionParser()
        parser.add_option('-d', '--daemon', action='store_true', dest='daemon', default=False,
                          help='run daemonized')
        parser.add_option('-f', '--config', action='store', dest='config', default='/etc/planetlab/plc_config',
                          help='PLC configuration file')
        parser.add_option('-k', '--session', action='store', dest='session', default='/etc/planetlab/session',
                          help='API session key (or file)')
        parser.add_option('-p', '--period', action='store', dest='period', default=NodeManager.default_period,
                          help='Polling interval (sec) - default %d'%NodeManager.default_period)
        parser.add_option('-r', '--random', action='store', dest='random', default=NodeManager.default_random,
                          help='Range for additional random polling interval (sec) -- default %d'%NodeManager.default_random)
        parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
                          help='more verbose log')
        parser.add_option('-P', '--path', action='store', dest='path', default=NodeManager.PLUGIN_PATH,
                          help='Path to plugins directory')
        # NOTE: BUG the 'help' for this parser.add_option() wont list plugins from the --path argument
        parser.add_option('-m', '--module', action='store', dest='user_module', default='', help='run a single module')
        (self.options, args) = parser.parse_args()

        # no positional arguments are accepted
        if len(args) != 0:
            parser.print_help()
            sys.exit(1)

        # determine the modules to be run
        self.modules = NodeManager.core_modules
        node_config = Node_Config()
        self.NODE_TYPE = node_config.NODE_TYPE
        logger.log("node type is %s"%self.NODE_TYPE)

    #wangyang,get slice map from date fetched from myplc
    def getslicemap(self,last_data):
        """Build a slice map ({'slivers': [...]}) from the GetSlivers payload.

        Only slivers with slice_id > 4 are considered, and of those only
        slice_id 130 (ict_gateway) is actually kept; everything else is
        logged as forbidden. On any error the map built so far is returned.
        """
        slicemap = {}
        slivers = []
        #slivernum = 0
        try:
            for sliver in last_data['slivers']:
                #slivernum += 1
                #if self.NODE_TYPE == "exclusive" and slivernum >1:
                #    return 0
                slices = {}
                if sliver['slice_id'] > 4:
                    logfile = '/var/log/slice/getmap'
                    logger.logslice("---get slice %s from myplc"%sliver['slice_id'],logfile)
                    #wangyang,what things do we need to focus on , add them here!After this ,we should delete the db file(*.pickle)
                    #wangyang,get vlanid from myplc,vlanid of slivers in one slice should be same
                    #wangyang,get vip and vmac from myplc,vip and vmac of slivers in one slice should be different,just a global controller can make sure of this.
                    slices['slice_name'] = sliver['name']
                    slices['slice_id'] = sliver['slice_id']
                    slices['status'] = 'none'
                    slices['port'] = 0
                    slices['keys'] = sliver['keys']
                    if (slices['slice_id'] == 130):
                        #wangyang,just slice(name:ict_gateway id =130) can be deployed on this node
                        slivers.append(slices)
                        logger.logslice("---get slice %s successfully,"%slivers,logfile)
                    else:
                        logger.log("slice %s is forbidden to deploy on this node,"%slices['slice_name'])
            slicemap['slivers'] = slivers
            return slicemap
        except:
            # best-effort: return whatever was assembled before the failure
            return slicemap

    #wangyang,compare date from myplc and db,update status
    def handlemap(self,slicemap,slicemapdb):
        """Merge the fresh map from PLC into the map loaded from disk.

        Existing entries get status 'update' (keys changed, update pushed
        immediately) or 'none'; unknown entries are appended as 'new'.
        Entries present only in the db keep the 'delete' status set by
        loadmap. Returns the merged db map.
        """
        for sliver in slicemap['slivers']:
            isnewslice = 1
            if sliver['slice_id'] > 4:
                for sliverdb in slicemapdb['slivers']:
                    if sliverdb['slice_id'] == sliver['slice_id']:
                        logger.logslice("keys: %s"%sliver['keys'],'/var/log/slice/key1')
                        logger.logslice("keys: %s"%sliverdb['keys'],'/var/log/slice/key2')
                        if sliverdb['keys'] != sliver['keys']:
                            # ssh keys changed: push the update right away
                            oldkeys = sliverdb['keys']
                            sliverdb['keys'] = sliver['keys']
                            sliverdb['status'] = 'update'
                            self.rupdatesliver(sliverdb,oldkeys)
                        else:
                            sliverdb['status'] = 'none'
                        isnewslice = 0
                        break
                if isnewslice == 1:
                    sliver['status'] = 'new'
                    slicemapdb['slivers'].append(sliver)
            sliverdb = {}
            sliver = {}
        return slicemapdb

    #wangyang, delete or add or update slivers
    def updatetoRouter(self,slicemapdb,plc):
        """Apply the per-sliver status decisions ('delete'/'new'/'update').

        Iterates over a copy of the list so removals are safe; every
        surviving entry ends up with status 'none'.
        """
        logfile = '/var/log/slice/log'
        logger.logslice("************************",logfile)
        for sliver in slicemapdb['slivers'][:]:
            if sliver['slice_id'] > 4:
                if sliver['status'] == 'delete':
                    self.rdeletesliver(sliver)
                    slicemapdb['slivers'].remove(sliver)
                elif sliver['status'] == 'new':
                    flag = self.rcreatesliver(sliver,plc)
                    if flag == 0:
                        # creation failed: roll back and retry on a later pass
                        slicemapdb['slivers'].remove(sliver)
                        self.rdeletesliver(sliver)
                        logger.log ("nodemanager: Create this Virtual Router next time")
                elif sliver['status'] == 'update':
                    #do nothing,this work has been done in function "handlemap()"
                    pass
                sliver['status'] = 'none'
            sliver = {}
        return slicemapdb

    #wangyang,add,delete,update sliver to router,here just write log
    def rcreatesliver(self,sliver,plc):
        """Create the local unix account backing a sliver, then install keys.

        Always returns 1 (success) — the useradd status is only logged.
        """
        logger.log ("nodemanager: prepare to create slice,slice is %d - end"%sliver['slice_id'])
        # NOTE(review): the slice name is passed both as -p (password hash)
        # and as the login name — confirm that is intentional
        (flag,output) = commands.getstatusoutput('useradd -m -s /bin/bash -p "%s" %s'%(sliver['slice_name'],sliver['slice_name']))
        #if (flag != 0):
        logger.log ("command output:%s"%output)
        self.rupdatesliver(sliver,oldkeys=[])
        return 1

    def addkeys(self,sliver,keys):
        """Recreate ~<slice>/.ssh/authorized_keys and append every key in *keys*."""
        (flag,output) = commands.getstatusoutput('mkdir /home/%s/.ssh'%sliver['slice_name'])
        logger.log ("command output:%s"%output)
        (flag,output) = commands.getstatusoutput('touch /home/%s/.ssh/authorized_keys'%sliver['slice_name'])
        logger.log ("command output:%s"%output)
        (flag,output) = commands.getstatusoutput('chown %s:%s /home/%s/.ssh/authorized_keys'%(sliver['slice_name'],sliver['slice_name'],sliver['slice_name']))
        logger.log ("command output:%s"%output)
        (flag,output) = commands.getstatusoutput('chmod 600 /home/%s/.ssh/authorized_keys'%sliver['slice_name'])
        logger.log ("command output:%s"%output)
        logger.log ("keys:%s"%keys)
        for keydata in keys:
            (flag,output) = commands.getstatusoutput('echo "%s" >>/home/%s/.ssh/authorized_keys'%(keydata,sliver['slice_name']))
            logger.log ("command output:%s"%output)

    def rdeletesliver(self,sliver):
        """Remove the unix account (and home dir) backing a sliver."""
        logger.log ("nodemanager: delete slice %s" %(sliver['slice_name']))
        (flag,output) = commands.getstatusoutput('userdel -fr %s'%sliver['slice_name'])
        logger.log ("command output:%s"%output)
        #if (flag != 0):
        #    logger.log ("nodemanager: Slice %s delete failed ,not exists"%sliver['slice_name'])

    def rupdatesliver(self,sliver,oldkeys):
        """Replace the sliver's authorized_keys: keep keys that were added
        out-of-band (not in *oldkeys*) and add the current sliver keys."""
        #logger.logslice("slicename: %s"%sliver['name'],logfile)
        #call router API

        # update the user keys to vm
        allkeys = []
        try:
            file = open("/home/%s/.ssh/authorized_keys"%sliver['slice_name'])
            for line in file:
                allkeys.append(line)
        except:
            # no existing key file: start from scratch
            pass
        newkey_list = []
        oldkey_list = []
        for key1 in sliver['keys']:
            newkey_list.append(key1['key'])
        for key2 in oldkeys:
            oldkey_list.append(key2['key'])
        # keys present on disk but not previously managed by us
        otherkeys = [val for val in allkeys if val not in oldkey_list]
        addkeys = otherkeys + newkey_list
        (flag,output) = commands.getstatusoutput('rm -r /home/%s/.ssh'%sliver['slice_name'])
        logger.log ("command output:%s"%output)
        self.addkeys(sliver,addkeys)

    def GetSlivers(self, config, plc):
        """Retrieves GetSlivers at PLC and triggers callbacks defined in modules/plugins"""
        try:
            logger.log("nodemanager: Syncing w/ PLC")
            # retrieve GetSlivers from PLC
            data = plc.GetSlivers()
            # logger.log("call Reportliver")
            #plc.ReportSliverPort({'sliver_port':8765},{'node_id':1,'slice_id':33})
            # use the magic 'default' slice to retrieve system-wide defaults
            self.getPLCDefaults(data, config)
            # tweak the 'vref' attribute from GetSliceFamily
            self.setSliversVref (data)
            # dump it too, so it can be retrieved later in case of comm. failure
            self.dumpSlivers(data)
            # log it for debug purposes, no matter what verbose is
            logger.log_slivers(data)
            logger.verbose("nodemanager: Sync w/ PLC done")
            #last_data=data
            logger.log("*************************************************")
            logger.log("we should provide these information to PEARL TEAM")
            logger.log_map({},"******************************************")
            #wangyang,get slice map from date fetched from myplc
            slicemap = self.getslicemap(data)
            logger.log_map(slicemap,"slicemap")
            #wangyang,get slice map from db
            slicemapdb = self.loadmap(slicemap)
            logger.log_map(slicemapdb,"slicedb")
            #wangyang,compare two files
            slicemapdb = self.handlemap(slicemap,slicemapdb)
            logger.log_map(slicemapdb,"dbafter compare")
            #wangyang,update to router
            slicemapdb = self.updatetoRouter(slicemapdb,plc)
            logger.log_map(slicemapdb,"db after update")
            #wangyang,update to router
            self.savemap(slicemapdb)
            #wangyang,write into txt
            logger.log_map(slicemapdb,"write to db")
        except:
            logger.log_exc("nodemanager: failed in GetSlivers")
            # XXX So some modules can at least bootstrap.
            logger.log("nodemanager: Can't contact PLC to GetSlivers(). Continuing.")
            data = {}
        # for modules that request it though the 'persistent_data' property
        #last_data=self.loadSlivers()
        '''
        for sliver in last_data['slivers']:
            logger.log("sliceid is %s"%sliver['slice_id'])
            if sliver['slice_id'] > 4:
                logfile = '/var/log/slice/slice.'+sliver['name']
                #logger.logslice("slicename: %s"%sliver['name'],logfile)
                logger.logslice("sliceid: %s"%sliver['slice_id'],logfile)
                vmid=self.createslver(sliver['slice_id'])
                logger.log("vmid is %s"%vmid)
                logger.logmap(sliver['slice_id'],vmid)
                #logger.logslice("keys: %s"%sliver['keys'],logfile)
        '''
        logger.log("*************************************************")
        # Invoke GetSlivers() functions from the callback modules
        for module in self.loaded_modules:
            logger.verbose('nodemanager: triggering %s.GetSlivers'%module.__name__)
            try:
                callback = getattr(module, 'GetSlivers')
                #module_data=data
                #if getattr(module,'persistent_data',False):
                #    module_data=last_data
                callback(data, config, plc)
            except:
                logger.log_exc("nodemanager: GetSlivers failed to run callback for module %r"%module)

    def getPLCDefaults(self, data, config):
        """
        Get PLC wide defaults from _default system slice.  Adds them to config class.
        """
        for slice in data.get('slivers'):
            if slice['name'] == config.PLC_SLICE_PREFIX+"_default":
                attr_dict = {}
                for attr in slice.get('attributes'): attr_dict[attr['tagname']] = attr['value']
                if len(attr_dict):
                    logger.verbose("nodemanager: Found default slice overrides.\n %s" % attr_dict)
                    config.OVERRIDES = attr_dict
                    return
        # NOTE: if an _default slice existed, it would have been found above and
        # the routine would return. Thus, if we've gotten here, then no default
        # slice is bound to this node.
        if 'OVERRIDES' in dir(config): del config.OVERRIDES

    def setSliversVref (self, data):
        """
        Tweak the 'vref' attribute in all slivers based on the 'GetSliceFamily' key
        """
        # GetSlivers exposes the result of GetSliceFamily() as an separate key in data
        # It is safe to override the attributes with this, as this method has the right logic
        for sliver in data.get('slivers'):
            try:
                slicefamily=sliver.get('GetSliceFamily')
                for att in sliver['attributes']:
                    if att['tagname']=='vref':
                        att['value']=slicefamily
                        continue
                # NOTE(review): the 'continue' above does not prevent this
                # append, so a sliver that already had a 'vref' attribute
                # gets a second one appended — looks like a break (or a
                # found-flag) was intended; confirm upstream behavior
                sliver['attributes'].append({ 'tagname':'vref','value':slicefamily})
            except:
                logger.log_exc("nodemanager: Could not overwrite 'vref' attribute from 'GetSliceFamily'",name=sliver['name'])

    def dumpSlivers (self, slivers):
        """Pickle the raw GetSlivers payload to DB_FILE for later recovery."""
        f = open(NodeManager.DB_FILE, "w")
        logger.log ("nodemanager: saving successfully fetched GetSlivers in %s" % NodeManager.DB_FILE)
        pickle.dump(slivers, f)
        f.close()

    #wangyang,save sliver map to db
    def savemap (self, slicemap):
        """Pickle the reconciled slice map to MAP_FILE and log it."""
        f = open(NodeManager.MAP_FILE, "w")
        logger.log ("nodemanager: saving successfully fetched slicemap in %s" % NodeManager.MAP_FILE)
        pickle.dump(slicemap, f)
        f.close()
        logger.log_slicemap(slicemap)

    #def savevlanid (self, vid):
    #    f = open(NodeManager.VLANID_FILE, "w")
    #    logger.log ("nodemanager: saving successfully vlan id in %s" % NodeManager.VLANID_FILE)
    #    pickle.dump(vid, f)
    #    f.close()
    #    logger.log_router(vid,"This is writed to db")

    def loadSlivers (self):
        """Unpickle the last known GetSlivers payload, or {} on any failure."""
        try:
            f = open(NodeManager.DB_FILE, "r+")
            logger.log("nodemanager: restoring latest known GetSlivers from %s" % NodeManager.DB_FILE)
            slivers = pickle.load(f)
            f.close()
            return slivers
        except:
            logger.log("Could not restore GetSlivers from %s" % NodeManager.DB_FILE)
            return {}

    #wangyang,load sliver map from db,otherwise return default config
    def loadmap (self,slicemap):
        """Unpickle the persisted slice map and pre-mark every entry 'delete'.

        handlemap() later flips entries still reported by PLC back to
        'none'/'update'; anything left as 'delete' is gone from PLC and will
        be removed by updatetoRouter. Falls back to an empty map on failure.
        """
        try:
            f = open(NodeManager.MAP_FILE, "r+")
            logger.log("nodemanager: restoring latest known slicemap from %s" % NodeManager.MAP_FILE)
            slicemapdb = pickle.load(f)
            f.close()
            for sliver in slicemapdb['slivers']:
                if sliver['slice_id'] > 4:
                    sliver['status']='delete'
            return slicemapdb
        except:
            # NOTE(review): message names DB_FILE but this reads MAP_FILE
            logger.log("Could not restore sliver map from %s" % NodeManager.DB_FILE)
            slicemapdb = {}
            slicemapdb['slivers'] = []
            return slicemapdb

    #wangyang,load routerid from db,otherwise return default config
    '''
    def loadvlanid(self):
        try:
            f = open(NodeManager.VLANID_FILE, "r+")
            logger.log("nodemanager: restoring latest known vlanid from %s" % NodeManager.VLANID_FILE)
            vids = pickle.load(f)
            f.close()
            return vids
        except:
            logger.log("Could not restore vip from %s" % NodeManager.VLANID_FILE)
            vids = []
            for i in range(2000, 4095):
                vid = {}
                vid['vlanid'] = str(i)
                vid['status'] = 'available'
                vids.append(vid)
            return vids
    '''

    def run(self):
        """Daemon entry point: initialize, load modules, authenticate, poll forever."""
        # make sure to create /etc/planetlab/virt so others can read that
        # used e.g. in vsys-scripts's sliceip
        tools.get_node_virt()
        try:
            if self.options.daemon: tools.daemon()

            # set log level
            if (self.options.verbose):
                logger.set_level(logger.LOG_VERBOSE)

            # Load /etc/planetlab/plc_config
            config = Config(self.options.config)

            # refuse to run twice: the pid file tells us about a live instance
            try:
                other_pid = tools.pid_file()
                if other_pid != None:
                    print """There might be another instance of the node manager running as pid %d.
If this is not the case, please remove the pid file %s. -- exiting""" % (other_pid, tools.PID_FILE)
                    return
            except OSError, err:
                print "Warning while writing PID file:", err

            # load modules
            self.loaded_modules = []
            for module in self.modules:
                try:
                    m = __import__(module)
                    logger.verbose("nodemanager: triggering %s.start"%m.__name__)
                    m.start()
                    self.loaded_modules.append(m)
                except ImportError, err:
                    logger.log_exc ("ERROR while loading module %s - skipping:" % module)
                    # if we fail to load any of these, it's really no need to go on any further
                    if module in NodeManager.core_modules:
                        logger.log("FATAL : failed to load core module %s"%module)
                except AttributeError, err:
                    # triggered when module doesn't have a 'start' method
                    logger.log_exc ("ERROR while starting module %s - skipping:" % module)
                    # if we fail to load any of these, it's really no need to go on any further
                    if module in NodeManager.core_modules:
                        logger.log("FATAL : failed to start core module %s"%module)

            # sort on priority (lower first)
            def sort_module_priority (m1,m2):
                return getattr(m1,'priority',NodeManager.default_priority) - getattr(m2,'priority',NodeManager.default_priority)
            self.loaded_modules.sort(sort_module_priority)

            logger.log('ordered modules:')
            for module in self.loaded_modules:
                logger.log ('%s: %s'%(getattr(module,'priority',NodeManager.default_priority),module.__name__))

            # Load /etc/planetlab/session
            if os.path.exists(self.options.session):
                session = file(self.options.session).read().strip()
            else:
                session = None

            # get random periods
            iperiod=int(self.options.period)
            irandom=int(self.options.random)

            # Initialize XML-RPC client
            plc = PLCAPI(config.plc_api_uri, config.cacert, session, timeout=iperiod/2)

            #check auth
            logger.log("nodemanager: Checking Auth.")
            while plc.check_authentication() != True:
                try:
                    plc.update_session()
                    logger.log("nodemanager: Authentication Failure. Retrying")
                except Exception,e:
                    logger.log("nodemanager: Retry Failed. (%r); Waiting.."%e)
                time.sleep(iperiod)
            logger.log("nodemanager: Authentication Succeeded!")

            # NOTE(review): return value is discarded — presumably this just
            # forces the XML-RPC proxy to resolve 'GetSlices'; confirm intent
            plc.__getattr__("GetSlices")

            while True:
                # Main nodemanager Loop
                work_beg=time.time()
                logger.log('nodemanager: mainloop - calling GetSlivers - period=%d random=%d'%(iperiod,irandom))
                self.GetSlivers(config, plc)
                # sleep the base period plus a random jitter
                delay=iperiod + random.randrange(0,irandom)
                work_end=time.time()
                work_duration=int(work_end-work_beg)
                logger.log('nodemanager: mainloop has worked for %s s - sleeping for %d s'%(work_duration,delay))
                time.sleep(delay)
        except: logger.log_exc("nodemanager: failed in run")
def run():
    """Module-level entry point: log startup, then run the manager's main loop."""
    logger.log("======================================== Entering nodemanager.py")
    manager = NodeManager()
    manager.run()
if __name__ == '__main__':
    run()
else:
    # This is for debugging purposes. Open a copy of Python and import nodemanager:
    # the import itself then starts the manager loop in a background daemon thread.
    tools.as_daemon_thread(run)
| |
#: processes accounting transactions from text file, and prints the balance
#:
#: usage: python p.py [<options>] <file>
#: options:
#: -i : initial balance file
#: -d : date min/max/month/year
#: examples: 2016, 2016-04, 2016-04:2016-05, 2016-04-02:2016-04-08
#:
#: can be called from any folder, and can use wrapper script p.sh
#:
#: file contains accounting transactions with format ...
from __future__ import print_function
import sys
import getopt
from datetime import datetime, timedelta
# TODO : only define in 1 place valid account types (use dict in balance)
# TODO : process more than 1 movs file
# TODO : make mov date optional
# TODO : be less strict with lengths
class Acct :
    """Processes accounting transactions and keeps running balances per account.

    Transaction lines name a debit account, a credit account, an amount and a
    date; balances are accumulated in ``mDictSaldo`` and reported by
    :meth:`balance`.  Command line options are stored as class attributes by
    :meth:`checkOptions`.
    """
    # fixed column widths expected in input lines
    W_ACCT = 12
    W_AMOUNT = 12
    W_DATE = 10
    W_LINE = 2 * W_ACCT + W_AMOUNT + W_DATE
    W_BAL_LINE = 24
    # valid account name types prefixes
    gssTypes = ( 'S_', 'D_', 'C_', 'P_', 'F_', 'X_' )
    gsFmtYear = "%Y"
    gsFmtMonth = "%Y-%m"
    gsFmtDate = "%Y-%m-%d"
    gsFmtDateUI = "yyyy-mm-dd"
    # defaults, so the class works even when checkOptions() was not called
    gTupDateRange = None
    gsFileInitialBalance = None
    gsFiles = []
    @staticmethod
    def usage() :
        """Print the usage header: the leading '#:' comment lines of this script."""
        print( "" )
        print( sys.argv[ 0 ] )
        with open( sys.argv[ 0 ], 'r' ) as lF :
            for lsL in lF :
                if lsL.startswith( "#:" ) :
                    print( lsL[ 3 : -1 ] )   # strip the '#: ' prefix and the newline
                else :
                    break
    def __init__( self ) :
        self.miMov = 0        # transactions read from file
        self.miMovAcct = 0    # transactions actually accounted (inside date range)
        self.mDictSaldo = {}  # account name -> running balance
        self.mDateIni = None  # earliest accounted date
        self.mDateFin = None  # latest accounted date
    @staticmethod
    def eprint( sErrorMsg ) :
        """Print an error message to stderr."""
        print( sErrorMsg, file=sys.stderr )
    @staticmethod
    def valAcct( sVal ) :
        """Validate an account name; return 0 if OK, 1 on error (reported to stderr)."""
        liRet = 0 # OK, default
        liLen = len( sVal )
        if liLen > Acct.W_ACCT :
            liRet = 1
            Acct.eprint( 'account name %s too long (%d), max is %d' % ( sVal, liLen, Acct.W_ACCT ) )
        elif sVal[ : 2 ] not in Acct.gssTypes :
            liRet = 1
            Acct.eprint( 'account name %s not valid, must start by %s' % ( sVal, Acct.gssTypes ) )
        return liRet
    @staticmethod
    def valAmnt( sVal ) :
        """Validate an amount; return 0 if OK, 1 on error (reported to stderr)."""
        liRet = 0 # OK, default
        liLen = len( sVal )
        if liLen > Acct.W_AMOUNT :
            liRet = 1
            Acct.eprint( 'amount %s too long (%d), max is %d' % ( sVal, liLen, Acct.W_AMOUNT ) )
        try :
            float( sVal )
        except ValueError :
            liRet = 1
            Acct.eprint( 'amount %s is not a floating point number' % ( sVal ) )
        return liRet
    @staticmethod
    def valDate( sVal ) :
        """Validate an ISO date; return 0 if OK, >0 on error (reported to stderr)."""
        liRet = 0 # OK, default
        liLen = len( sVal )
        if liLen > Acct.W_DATE :
            liRet += 1
            # BUGFIX: the message used to report W_AMOUNT as the date limit
            Acct.eprint( 'date %s too long (%d), max is %d' % ( sVal, liLen, Acct.W_DATE ) )
        try :
            datetime.strptime( sVal, Acct.gsFmtDate )
        except ValueError :
            Acct.eprint( "%s not a valid date in %s format" % ( sVal, Acct.gsFmtDateUI ) )
            # BUGFIX: used to sys.exit(2) here, bypassing the caller's error
            # report; now consistently signalled through the return value
            liRet += 1
        return liRet
    def parseBalLine( self, sLine0 ) :
        """Parse one initial-balance line 'account : amount'; return 0/1 (lines with errors)."""
        liRet = 0 # OK
        if sLine0[ 0 ] == "=" : # separator, discard this line
            return liRet
        # discard comment
        sLine = sLine0.split( '#' )[ 0 ]
        liLen = len( sLine )
        if liLen <= 1 : pass # OK, empty or comment line
        elif liLen < Acct.W_BAL_LINE :
            print( "line length %d not enough, minimum %d" % ( liLen, Acct.W_BAL_LINE ) )
            liRet = 1
        else : # now a real line
            lss = sLine.split( ":" )
            lsAcctDeb = lss[ 0 ].strip()
            lsAmount = lss[ 1 ].strip()
            liRet += Acct.valAcct( lsAcctDeb )
            liRet += Acct.valAmnt( lsAmount )
            if liRet > 0 :
                # BUGFIX: the %d placeholder was never filled in
                Acct.eprint( "FATAL. rejected balance line, contains %d format/content errors" % liRet )
                sys.exit( 1 )
            else :
                self.accountIni( lsAcctDeb, lsAmount )
        return liRet
    def parseLine( self, sLine0 ) :
        """Parse one transaction line 'debit credit amount date'; return 0/1 (lines with errors)."""
        liRet = 0 # OK
        # discard comment
        sLine = sLine0.split( '#' )[ 0 ]
        liLen = len( sLine )
        if liLen <= 1 : pass # OK, empty or comment line
        elif liLen < Acct.W_LINE :
            print( "line length %d not enough, minimum %d" % ( liLen, Acct.W_LINE ) )
            liRet = 1
        else : # now a real line
            lss = sLine.split()
            lsAcctDeb = lss[ 0 ]
            lsAcctCre = lss[ 1 ]
            lsAmount = lss[ 2 ]
            lsDate = lss[ 3 ]
            liRet += Acct.valAcct( lsAcctDeb )
            liRet += Acct.valAcct( lsAcctCre )
            liRet += Acct.valAmnt( lsAmount )
            liRet += Acct.valDate( lsDate )
            if liRet > 0 :
                # BUGFIX: the %d placeholder was never filled in
                Acct.eprint( "FATAL. rejected line, contains %d format/content errors" % liRet )
                sys.exit( 1 )
            self.account( lsAcctDeb, lsAcctCre, lsAmount, lsDate )
        return liRet
    def accountIni( self, sAcctDeb, sAmount ) :
        """Set the initial balance of one account; each account at most once."""
        print( "INI amount :%s:, on %s" % ( sAmount, sAcctDeb ) )
        lfAmount = float( sAmount ) # checked before
        # BUGFIX: the old try/except version crashed inside the duplicate branch
        # (eprint takes a single argument) and the bare except then silently
        # accepted the duplicate; test membership explicitly instead
        if sAcctDeb in self.mDictSaldo :
            Acct.eprint( "FATAL, duplicate initial for account %s" % sAcctDeb )
            sys.exit( 1 )
        self.mDictSaldo[ sAcctDeb ] = lfAmount
        print( "%12s : %9.2f " % ( sAcctDeb, lfAmount ) )
    def account( self, sAcctDeb, sAcctCre, sAmount, sDate ) :
        """Apply one transaction: add the amount to the debit account, subtract from the credit one."""
        self.miMov += 1
        print( "validating mov %d on file ..." % ( self.miMov ) )
        # sDate is OK, was checked before
        lDate = datetime.strptime( sDate, Acct.gsFmtDate )
        # range is [start, end): the second limit is excluded
        if Acct.gTupDateRange is not None :
            if lDate < Acct.gTupDateRange[ 0 ] or lDate >= Acct.gTupDateRange[ 1 ] :
                print( "mov with date %s, outside date range limits" % sDate )
                print( "-" )
                return
        print( "accounting mov %d" % ( self.miMovAcct ) )
        # track earliest/latest accounted dates for the final report
        if self.mDateIni is None or lDate < self.mDateIni : self.mDateIni = lDate
        if self.mDateFin is None or lDate > self.mDateFin : self.mDateFin = lDate
        print( "amount %s, on %s(D) - %s(H)" % ( sAmount, sAcctDeb, sAcctCre ) )
        lfAmount = float( sAmount ) # checked before
        lfSaldo = self.mDictSaldo.get( sAcctDeb, 0.0 )
        lfSaldo2 = lfSaldo + lfAmount
        self.mDictSaldo[ sAcctDeb ] = lfSaldo2
        print( "%12s : %9.2f => %9.2f" % ( sAcctDeb, lfSaldo, lfSaldo2 ) )
        lfSaldo = self.mDictSaldo.get( sAcctCre, 0.0 )
        lfSaldo2 = lfSaldo - lfAmount
        self.mDictSaldo[ sAcctCre ] = lfSaldo2
        print( "%12s : %9.2f => %9.2f" % ( sAcctCre, lfSaldo, lfSaldo2 ) )
        self.miMovAcct += 1
        print( "--" )
    def balance( self, byHolder = 1 ) :
        """Print per-account balances, totals per account type and, when byHolder != 0, per holder."""
        if byHolder != 0 : lDict = {}
        # per-type accumulators
        lfGent = .0
        lfGone = .0
        lfCash = .0
        lfPend = .0
        lfStok = .0
        lfExtl = .0
        lsSepTitle = "========="
        lsSepSaldo = "========================"
        print( lsSepTitle[ : 4 ] ) # trick piped sed
        print( lsSepTitle )
        print( "BALANCES:" )
        print( lsSepTitle )
        print( "total read file transactions = %d" % self.miMov )
        print( "total accounted transactions = %d" % self.miMovAcct )
        if Acct.gTupDateRange is None :
            print( "NO date limit for transactions set" )
        else :
            # upper bound is exclusive: report the last *included* day
            lsDate1 = Acct.gTupDateRange[ 0 ].strftime( Acct.gsFmtDate )
            lsDate2 = (Acct.gTupDateRange[ 1 ] - timedelta( days = 1 )).strftime( Acct.gsFmtDate )
            print( "date limit for transactions : %s - %s" % ( lsDate1, lsDate2 ) )
        if self.miMovAcct > 0 :
            lsDateIni = self.mDateIni.strftime( Acct.gsFmtDate )
            lsDateFin = self.mDateFin.strftime( Acct.gsFmtDate )
            print( "found transactions from %s to %s" % ( lsDateIni, lsDateFin ) )
        else :
            print( "NO accounted transactions" )
        print( lsSepSaldo )
        for lsAcct in sorted( self.mDictSaldo.keys() ) :
            lfSaldo = self.mDictSaldo[ lsAcct ]
            print( "%-12s : %9.2f" % ( lsAcct, lfSaldo ) )
            lsAcctType = lsAcct[ 0 : 2 ]
            # TODO : use a dictionary for this
            if lsAcctType == "C_" : # caixa
                lfCash += lfSaldo
            elif lsAcctType == "D_" : # despesa
                lfGone += lfSaldo
            elif lsAcctType == "P_" : # people
                lfGent += lfSaldo
            elif lsAcctType == "F_" : # factures
                lfPend += lfSaldo
            elif lsAcctType == "S_" : # capital
                lfStok += lfSaldo
            elif lsAcctType == "X_" : # eXternal
                lfExtl += lfSaldo
            else :
                Acct.eprint( "unknown acct type '%s'" % lsAcctType )
                sys.exit( 1 )
            if byHolder != 0 :
                lsAcctHolder = lsAcct[ 2 : ]
                lDict[ lsAcctHolder ] = lDict.get( lsAcctHolder, 0.0 ) + lfSaldo
        print( lsSepSaldo )
        print( "%-12s : %9.2f" % ( "total stock", lfStok ) )
        print( "%-12s : %9.2f" % ( "total people", lfGent ) )
        print( "%-12s : %9.2f" % ( "total xtrnal", lfExtl ) )
        print( "%-12s : %9.2f" % ( "total desp", lfGone ) )
        print( "%-12s : %9.2f" % ( "total credit", lfPend ) )
        print( "%-12s : %9.2f" % ( "total cash", lfCash ) )
        if byHolder != 0 :
            print( lsSepSaldo )
            for sHolder in lDict.keys() :
                print( "%-12s : %9.2f" % ( sHolder, lDict[ sHolder ] ) )
    def readMovs( self, sFile = None ) :
        """Read and process transaction lines from sFile (stdin when None)."""
        if sFile is None :
            self.mFile = sys.stdin
            print( "reading from stdin" )
        else :
            print( "reading from file %s" % sFile )
            try :
                self.mFile = open( sFile )
            except IOError :
                print( "FATAL, could not open file " + sFile )
                sys.exit( 1 )
        liErrors = 0
        print( "----------" )
        for lsLine in self.mFile :
            liErrors += self.parseLine( lsLine )
        print( "file processed, lines with errors: %d" % liErrors )
        if sFile is not None :
            self.mFile.close()
    def readBalance( self, sFile ) :
        """Read and process the initial-balance file sFile."""
        print( "----" )
        print( "reading balance file " + sFile )
        try :
            lFile = open( sFile, "r" )
        except IOError :
            # BUGFIX: the handler used to retry open() (raising again) instead
            # of reporting the fatal error
            print( "FATAL, could not open balance file " + sFile )
            sys.exit( 1 )
        liErrors = 0
        for lsLine in lFile :
            liErrors += self.parseBalLine( lsLine )
        print( "balance file processed, lines with errors: %d" % liErrors )
        lFile.close()
    @staticmethod
    def parseFlexDate( sDate ) :
        """Parse a date given as year, year-month or full date; return a datetime or None."""
        lDate = None
        # check in this order! (most specific format first)
        for lsFmt in ( Acct.gsFmtDate, Acct.gsFmtMonth, Acct.gsFmtYear ) :
            try :
                lDate = datetime.strptime( sDate, lsFmt )
                print( "%s : is a %s date! => %s" % ( sDate, lsFmt, str( lDate ) ) )
                break
            except ValueError :
                print( "%s : not a %s date" % ( sDate, lsFmt ) )
        return lDate
    @staticmethod
    def parseDates( sDates ) :
        """Parse a 'date' or 'date:date' option value into a (start, end) tuple.

        The end of the returned tuple is EXCLUSIVE: one day/month/year past
        the last date given, depending on its format.  Returns None on error.
        """
        lDates = None # return value, default
        lList = []
        lss = sDates.split( ':' )
        print( lss )
        liS = len( lss )
        if liS == 1 : # 1 date (year/month/day): range covers that whole period
            lDate = Acct.parseFlexDate( lss[ 0 ] )
            if lDate is not None :
                lList.append( lDate )
                lList.append( lDate )
        elif liS == 2 : # 2 dates (year/month/day)
            if len( lss[ 0 ] ) == len( lss[ 1 ] ) :
                for lsDate in lss :
                    lDate = Acct.parseFlexDate( lsDate )
                    if lDate is not None :
                        lList.append( lDate )
            else :
                Acct.eprint( "FATAL: date range is not formed by 2 equal-length/format" )
        else :
            Acct.eprint( "FATAL: more than 2 dates in range" )
        if len( lList ) == 2 :
            lDate0 = lList[ 0 ]
            lDate = lList[ 1 ]
            liLen = len( lss[ -1 ] )
            if liLen == 10 : # full date: end is the next day
                lDate2 = lDate + timedelta( days = 1 )
            elif liLen == 7 : # year-month: end is the next month
                liMonth = lDate.month + 1
                liYear = lDate.year
                if liMonth > 12 :
                    liMonth -= 12
                    # BUGFIX: the year increment was computed but never applied,
                    # so a December end date wrapped back to January of the SAME year
                    liYear += 1
                lDate2 = lDate.replace( year = liYear, month = liMonth )
            elif liLen == 4 : # year: end is the next year
                lDate2 = lDate.replace( year = lDate.year + 1 )
            else :
                # BUGFIX: non-canonical lengths (e.g. '2016-4-2') used to leave
                # lDate2 unbound and crash; reject them explicitly
                Acct.eprint( "FATAL: ambiguous date format, use zero-padded %s" % Acct.gsFmtDateUI )
                return None
            if lDate0 >= lDate2 :
                Acct.eprint( "FATAL: date range start is later than end" )
            else :
                lDates = tuple( ( lDate0, lDate2 ) )
        return lDates
    @staticmethod
    def checkOptions( pListParams ) :
        """Parse command line options; results are stored as Acct class attributes."""
        print( "checkOptions, args:", pListParams )
        try:
            lOptList, lList = getopt.getopt( pListParams, 'd:i:' )
        except getopt.GetoptError:
            Acct.eprint( "FATAL : error analyzing command line options" )
            Acct.eprint( "" )
            Acct.usage()
            sys.exit( 1 )
        Acct.gsFileInitialBalance = None
        lDateRange = None
        for lOpt in lOptList :
            if lOpt[ 0 ] == '-d' :
                lDateRange = Acct.parseDates( lOpt[ 1 ] )
                if lDateRange is None :
                    Acct.eprint( 'FATAL: Invalid date/date range' )
                    Acct.usage()
                    sys.exit( 1 )
                print( "date range: %s - %s" % ( lDateRange[ 0 ], lDateRange[ 1 ] ) )
            if lOpt[ 0 ] == '-i' :
                Acct.gsFileInitialBalance = lOpt[ 1 ]
                print( "initial balance file : %s" % Acct.gsFileInitialBalance )
            # BUGFIX: removed the old '-M' branch: it was unreachable ('M' is
            # not in the getopt spec) and referenced 'self' in a @staticmethod
        Acct.gsFiles = lList
        Acct.gTupDateRange = lDateRange
if __name__ == "__main__" :
    # parse options; results are stored as Acct class attributes
    Acct.checkOptions( sys.argv[ 1 : ] )
    lAcct = Acct()
    # optional initial balances file (-i)
    if not Acct.gsFileInitialBalance == None :
        lAcct.readBalance( Acct.gsFileInitialBalance )
    if len( Acct.gsFiles ) == 0 :
        # use stdin
        lAcct.readMovs()
    else :
        # process every transactions file given on the command line
        for lsFile in Acct.gsFiles :
            lAcct.readMovs( lsFile )
    # print the final balance report
    lAcct.balance()
| |
from __future__ import division, print_function, absolute_import
import string
import warnings
from scipy import stats
from scipy.stats import norm, multivariate_normal
from scipy.linalg import toeplitz, pinv
from sklearn import cluster
from sklearn.utils import check_random_state
import autograd.numpy as np
from autograd import grad, value_and_grad
from scipy.optimize import minimize
from scipy.special import gamma
import statsmodels.api as smapi
from statsmodels.tsa.tsatools import lagmat
from .ar import ARTHMM
# Public API of this module.
__all__ = ['STUDENT']
ZEROLOGPROB = -1e200  # NOTE(review): presumably a log-probability floor; not used in this chunk -- confirm against base class
EPS = np.finfo(float).eps  # machine epsilon for float64
NEGINF = -np.inf
decoder_algorithms = frozenset(("viterbi", "map"))  # NOTE(review): consumed by base-class decoding, not referenced here
class STUDENT(ARTHMM):
    """Hidden Markov Model with tied states and autoregressive observations
    drawn from a student t distribution
    Parameters
    ----------
    n_unique : int
        Number of unique components.
    n_tied : int
        Number of tied states for each component.
    n_features : int
        Number of features.
    algorithm : string
        Decoding algorithm.
    params : string
        Controls which parameters are updated in the training
        process.  Defaults to all parameters.
    init_params : string
        Controls which parameters are initialized prior to
        training.  Defaults to all parameters.
    startprob_init : array, shape (``n_unique``)
        Initial state occupation distribution.
    startprob_prior : array, shape (``n_unique``)
        Pseudo-observations (counts).
    transmat_init : array, shape (``n_unique``, ``n_unique``)
        Matrix of transition probabilities between states.
    transmat_prior : array, shape (``n_unique``, ``n_unique``)
        Pseudo-observations (counts).
    mu_init : array, shape (``n_unique``, ``n_features``)
        Initial mean parameters for each state.
    mu_weight : int
        Weight of mu prior, shared across components.
    mu_prior : array, shape (``n_unique``, ``n_features``)
        Prior on mu.
    precision_init : array, shape (``n_unique``, ``n_features``, ``n_features``)
        Initial precision (inverse variance) parameters for each state.
        This is the final precision, will NOT be multiplied by scale factor
    precision_weight : int
        Weight of precision (inverse variance) prior.
    precision_prior : array, shape (``n_unique``, ``n_features``, ``n_features``)
        Prior on precision (inverse variance).
    tol : float
        Convergence threshold, below which EM will stop.
    n_iter : int
        Number of iterations to perform maximally.
    n_iter_min : int
        Number of iterations to perform minimally.
    n_iter_update : int
        Number of iterations per M-Step.
    random_state : int
        Sets seed.
    verbose : bool
        When ``True`` convergence reports are printed.
    n_lags : int
        Number of lags (order of AR).
    shared_alpha : bool
        If set to true, alpha is shared across states.
    alpha_init : array, shape (``n_components``, ``n_lags``)
        Initial alpha parameter per state.
    mu_bounds : array, shape (``2``)
        Upper and lower bound for mu [lower bound, upper bound].
    precision_bounds : array, shape (``2``)
        Upper and lower bound for precision [lower bound, upper bound].
    alpha_bounds : array, shape(``2``)
        Upper and lower bound for alpha [lower bound, upper bound].
    degree_freedom : int
        Degrees of freedom
    Attributes
    ----------
    n_components : int
        Number of total components
    mu_ : array, shape (``n_unique``, ``n_features``)
    precision_ : array, shape (``n_unique``, ``n_features``, ``n_features``)
    transmat_ : array, shape (``n_unique``, ``n_unique``)
    startprob_ : array, shape (``n_unique``, ``n_unique``)
    n_lags : int
    n_inputs : int
    alpha_ : array, shape (``n_components``, ``n_lags``)
    degree_freedom : int, degrees of freedom
    """
    def __init__(self, n_unique=2, n_lags=1, n_tied=0, n_features=1,
                 startprob_init=None, transmat_init=None, startprob_prior=1.0,
                 transmat_prior=None, algorithm="viterbi", random_state=None,
                 n_iter=25, n_iter_min=2, tol=1e-4,
                 params=string.ascii_letters,
                 init_params=string.ascii_letters, alpha_init=None,
                 mu_init=None, precision_init=None,
                 precision_prior=None, precision_weight=0.0, mu_prior=None,
                 mu_weight=0.0, shared_alpha=True,
                 n_iter_update=1, verbose=False,
                 mu_bounds=np.array([-1.0e5, 1.0e5]),
                 precision_bounds=np.array([-1.0e5, 1.0e5]),
                 alpha_bounds=np.array([-1.0e5, 1.0e5]),
                 degree_freedom=5):
        super(STUDENT, self).__init__(n_unique=n_unique, n_tied=n_tied,
                                      n_lags=n_lags,
                                      n_features=n_features,
                                      algorithm=algorithm,
                                      params=params, init_params=init_params,
                                      startprob_init=startprob_init,
                                      startprob_prior=startprob_prior,
                                      transmat_init=transmat_init,
                                      transmat_prior=transmat_prior,
                                      mu_init=mu_init, mu_weight=mu_weight,
                                      mu_prior=mu_prior,
                                      shared_alpha=shared_alpha,
                                      precision_init=precision_init,
                                      precision_weight=precision_weight,
                                      precision_prior=precision_prior,
                                      tol=tol, n_iter=n_iter,
                                      n_iter_min=n_iter_min,
                                      n_iter_update=n_iter_update,
                                      random_state=random_state,
                                      verbose=verbose, mu_bounds=mu_bounds,
                                      precision_bounds=precision_bounds,
                                      alpha_bounds=alpha_bounds)
        # the t distribution has finite variance only for df > 2, which the
        # precision scaling in _init_params relies on
        if degree_freedom <= 2:
            raise ValueError('Degrees of freedom has to be > 2')
        else:
            self.degree_freedom = degree_freedom
    def _ll(self, m, p, a, xn, xln, **kwargs):
        """Computation of log likelihood
        Dimensions
        ----------
        m : n_unique x n_features
        p : n_unique x n_features x n_features
        a : n_unique x n_lags (shared_alpha=F)
            OR 1 x n_lags (shared_alpha=T)
        xn: N x n_features
        xln: N x n_features x n_lags
        """
        samples = xn.shape[0]
        xn = xn.reshape(samples, 1, self.n_features)
        m = m.reshape(1, self.n_unique, self.n_features)
        # determinant of the covariance (inverse of the precision), per state
        det = np.linalg.det(np.linalg.inv(p))
        det = det.reshape(1, self.n_unique)
        lagged = np.dot(xln, a.T)  # NFU
        lagged = np.swapaxes(lagged, 1, 2)  # NUF
        # residual after removing the AR prediction and the state mean
        xm = xn-(lagged + m)
        # quadratic form (x-mu)' P (x-mu) for every sample/state pair
        tem = np.einsum('NUF,UFX,NUX->NU', xm, p, xm)
        # log density of the multivariate Student t
        # TODO division in gamma function
        res = np.log(gamma((self.degree_freedom + self.n_features)/2)) - \
            np.log(gamma(self.degree_freedom/2)) - (self.n_features/2.0) * \
            np.log(self.degree_freedom) - \
            (self.n_features/2.0) * np.log(np.pi) - 0.5 * np.log(det) - \
            ((self.degree_freedom + self.n_features) / 2.0) * \
            np.log(1 + (1/self.degree_freedom) * tem)
        return res
    def _init_params(self, data, lengths=None, params='stmpaw'):
        """Initialize parameters, rescaling the precision so the t variance matches."""
        super(STUDENT, self)._init_params(data, lengths, params)
        if 'p' in params:
            # variance of a t distribution is df/(df-2) * scale, so scale the
            # precision accordingly (requires df > 2, enforced in __init__)
            self.precision_ = self.precision_ * \
                              ((self.degree_freedom - 2)/self.degree_freedom)
    # Adapted from: https://github.com/statsmodels/
    # statsmodels/blob/master/statsmodels/sandbox/distributions/multivariate.py
    #written by Enzo Michelangeli, style changes by josef-pktd
    # Student's T random variable
    def multivariate_t_rvs(self, m, S, random_state = None):
        '''generate one random draw of a multivariate t distribution
        Parameters
        ----------
        m : array_like
            mean of random variable, length determines dimension of random variable
        S : array_like
            square array of covariance matrix
        random_state : RandomState
            source of randomness; degrees of freedom and dimension are taken
            from self.degree_freedom and self.n_features, one observation is drawn
        Returns
        -------
        rvs : ndarray, (1, len(m))
            an independent draw of a multivariate t distributed
            random variable
        '''
        # BUGFIX: removed stray np.random.rand(9) / np.random.rand(90) calls
        # that consumed the GLOBAL RNG and discarded the result, defeating the
        # reproducibility promised by the random_state argument
        m = np.asarray(m)
        d = self.n_features
        df = self.degree_freedom
        n = 1
        if df == np.inf:
            x = 1.
        else:
            # chi-square mixing variable that turns the normal into a t
            x = random_state.chisquare(df, n)/df
        z = random_state.multivariate_normal(np.zeros(d), S, (n,))
        return m + z/np.sqrt(x)[:,None]   # same output format as random.multivariate_normal
    def sample(self, n_samples=2000, observed_states=None,
               init_samples=None, init_state=None, random_state=None):
        """Generate random samples from the self.
        Parameters
        ----------
        n : int
            Number of samples to generate.
        observed_states : array
            If provided, states are not sampled.
        random_state: RandomState or an int seed
            A random number generator instance. If None is given, the
            object's random_state is used
        init_state : int
            If provided, initial state is not sampled.
        init_samples : array, default: None
            If provided, initial samples (for AR) are not sampled.
        E : array-like, shape (n_samples, n_inputs)
            Feature matrix of individual inputs.
        Returns
        -------
        samples : array_like, length (``n_samples``, ``n_features``)
            List of samples
        states : array_like, shape (``n_samples``)
            List of hidden states (accounting for tied states by giving
            them the same index)
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)
        samples = np.zeros((n_samples, self.n_features))
        states = np.zeros(n_samples)
        order = self.n_lags
        if init_state is None:
            # draw the first state from the start distribution
            startprob_pdf = np.exp(np.copy(self._log_startprob))
            start_dist = stats.rv_discrete(name='custm',
                                      values=(np.arange(startprob_pdf.shape[0]),
                                                        startprob_pdf),
                                      seed=random_state)
            start_state = start_dist.rvs(size=1)[0]
        else:
            start_state = init_state
        if self.n_lags > 0:
            if init_samples is None:
                init_samples = 0.01*np.ones((self.n_lags, self.n_features))  # TODO: better init
        if observed_states is None:
            # sample the state sequence by inverting the transition CDF
            transmat_pdf = np.exp(np.copy(self._log_transmat))
            transmat_cdf = np.cumsum(transmat_pdf, 1)
            states[0] = (transmat_cdf[start_state] >
                         random_state.rand()).argmax()
            transmat_pdf = np.exp(self._log_transmat)
            transmat_cdf = np.cumsum(transmat_pdf, 1)
            nrand = random_state.rand(n_samples)
            for idx in range(1,n_samples):
                # BUGFIX: states holds floats; indexing an ndarray with a float
                # raises IndexError in modern NumPy, so cast like the loop below
                newstate = (transmat_cdf[int(states[idx-1])] > nrand[idx-1]).argmax()
                states[idx] = newstate
        else:
            states = observed_states
        precision = np.copy(self._precision_)
        for idx in range(n_samples):
            state_ = int(states[idx])
            covar_ = np.linalg.inv(precision[state_])
            if self.n_lags == 0:
                mean_ = np.copy(self._mu_[state_])
            else:
                mean_ = np.copy(self._mu_[state_])
                # add the AR contribution of the last `order` samples
                # (falling back to init_samples before the sequence start)
                for lag in range(1, order+1):
                    if idx < lag:
                        prev_ = init_samples[len(init_samples)-lag]
                    else:
                        prev_ = samples[idx-lag]
                    mean_ += np.copy(self._alpha_[state_, lag-1])*prev_
            samples[idx] = self.multivariate_t_rvs(mean_, covar_,
                                                   random_state)
        states = self._process_sequence(states)
        return samples, states
| |
"""Copyright 2012 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
# Module metadata for the Phidgets Python bindings.
__author__ = 'Adam Stelmack'
__version__ = '2.1.8'
__date__ = 'May 17 2010'
import threading
from ctypes import *
from Phidgets.PhidgetLibrary import PhidgetLibrary
from Phidgets.Phidget import Phidget
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
from Phidgets.Events.Events import EncoderPositionChangeEventArgs, InputChangeEventArgs
import sys
class Encoder(Phidget):
    """This class represents a Phidget Encoder. All methods to read encoder data from an encoder are implemented in this class.
    Phidget Encoder boards generally support 1 or more encoders with 0 or more digital inputs. Both high speed optical and low speed mechanical encoders are supported with this API.
    See your device's User Guide for more specific API details, technical information, and revision details.
    The User Guide, along with other resources, can be found on the product page for your device.
    Extends:
        Phidget
    """
    def __init__(self):
        """The Constructor Method for the Encoder Class
        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened.
        """
        Phidget.__init__(self)
        # user callbacks and their ctypes wrappers (kept alive as attributes
        # so they are not garbage collected while registered with the dll)
        self.__inputChange = None
        self.__positionChange = None
        self.__onInputChange = None
        self.__onPositionChange = None
        try:
            PhidgetLibrary.getDll().CPhidgetEncoder_create(byref(self.handle))
        except RuntimeError:
            raise
        if sys.platform == 'win32':
            self.__INPUTCHANGEHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int)
            self.__POSITIONCHANGEHANDLER = WINFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int, c_int)
        # BUGFIX: Python 3 reports 'linux' (Python 2 reported 'linux2'), so an
        # exact 'linux2' comparison left the handler factories undefined there
        elif sys.platform == 'darwin' or sys.platform.startswith('linux'):
            self.__INPUTCHANGEHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int)
            self.__POSITIONCHANGEHANDLER = CFUNCTYPE(c_int, c_void_p, c_void_p, c_int, c_int, c_int)
    def __del__(self):
        """The Destructor Method for the Encoder Class
        """
        Phidget.dispose(self)
    def getPosition(self, index):
        """Returns the position of an encoder.
        This is an absolute position as calculated since the encoder was plugged in.
        This value can be reset to anything using setPosition.
        Parameters:
            index<int>: index of the encoder.
        Returns:
            The position of the encoder <int>.
        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if the index is out of range.
        """
        position = c_int()
        try:
            result = PhidgetLibrary.getDll().CPhidgetEncoder_getPosition(self.handle, c_int(index), byref(position))
        except RuntimeError:
            raise
        if result > 0:
            raise PhidgetException(result)
        else:
            return position.value
    def setPosition(self, index, position):
        """Sets the position of a specific encoder.
        This resets the internal position count for an encoder.
        This call in no way actually sends information to the device, as an absolute position is maintained only in the library.
        After this call, position changes from the encoder will use the new value to calculate absolute position as reported by the change handler.
        Parameters:
            index<int>: index of the encoder.
            position<position>: new position for this encoder.
        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if the index is out of range.
        """
        try:
            result = PhidgetLibrary.getDll().CPhidgetEncoder_setPosition(self.handle, c_int(index), c_int(position))
        except RuntimeError:
            raise
        if result > 0:
            raise PhidgetException(result)
    def getIndexPosition(self, index):
        """Gets the index position for an encoder that supports index.
        For encoders that support index position, this function will return the index position for the encoder connected at the provided index.
        Parameters:
            index<int>: index of the encoder.
        Returns:
            The index position of the encoder <int>.
        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if the index is out of range.
        """
        indexPosition = c_int()
        try:
            result = PhidgetLibrary.getDll().CPhidgetEncoder_getIndexPosition(self.handle, c_int(index), byref(indexPosition))
        except RuntimeError:
            raise
        if result > 0:
            raise PhidgetException(result)
        else:
            return indexPosition.value
    def getEnabled(self, index):
        """Gets the enabled state of an encoder.
        Returns whether power to the encoder is enabled or disabled.
        Parameters:
            index<int>: index of the encoder.
        Returns:
            The enabled state of the encoder <boolean>.
        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if the index is out of range.
        """
        enabledState = c_int()
        try:
            result = PhidgetLibrary.getDll().CPhidgetEncoder_getEnabled(self.handle, c_int(index), byref(enabledState))
        except RuntimeError:
            raise
        if result > 0:
            raise PhidgetException(result)
        else:
            return enabledState.value == 1
    def setEnabled(self, index, state):
        """Sets the enabled state of an encoder.
        The enabled state controls whether to enable or disable power to the encoder.
        Parameters:
            index<int>: Index of the motor.
            state<boolean>: State to set the enabled state for this encoder to.
        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if the index is out of range.
        """
        value = 1 if state == True else 0
        try:
            result = PhidgetLibrary.getDll().CPhidgetEncoder_setEnabled(self.handle, c_int(index), c_int(value))
        except RuntimeError:
            raise
        if result > 0:
            raise PhidgetException(result)
    def getInputState(self, index):
        """Returns the state of a digital input.
        On the mechanical encoder this refers to the pushbutton.
        The high speed encoder does not have any digital inputs. A value of true means that the input is active(the button is pushed).
        Parameters:
            index<int>: index of the input.
        Returns:
            The state of the input <boolean>.
        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached, or if the index is out of range.
        """
        inputState = c_int()
        try:
            result = PhidgetLibrary.getDll().CPhidgetEncoder_getInputState(self.handle, c_int(index), byref(inputState))
        except RuntimeError:
            raise
        if result > 0:
            raise PhidgetException(result)
        else:
            return inputState.value == 1
    def getEncoderCount(self):
        """Returns number of encoders.
        All current encoder boards support one encoder.
        Returns:
            The number of encoders <int>.
        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached.
        """
        encoderCount = c_int()
        try:
            result = PhidgetLibrary.getDll().CPhidgetEncoder_getEncoderCount(self.handle, byref(encoderCount))
        except RuntimeError:
            raise
        if result > 0:
            raise PhidgetException(result)
        else:
            return encoderCount.value
    def getInputCount(self):
        """Returns number of digital inputs.
        On the mechanical encoder this refers to the pushbutton.
        The high speed encoder does not have any digital inputs.
        Returns:
            The number of inputs <int>.
        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException: If this Phidget is not opened and attached.
        """
        inputCount = c_int()
        try:
            result = PhidgetLibrary.getDll().CPhidgetEncoder_getInputCount(self.handle, byref(inputCount))
        except RuntimeError:
            raise
        if result > 0:
            raise PhidgetException(result)
        else:
            return inputCount.value
    def __nativeInputChangeEvent(self, handle, usrptr, index, value):
        # trampoline called from C: translate the int flag and forward to the
        # user's Python callback, if one is registered
        if self.__inputChange is not None:
            self.__inputChange(InputChangeEventArgs(self, index, value == 1))
        return 0
    def setOnInputChangeHandler(self, inputChangeHandler):
        """Sets the input change event handler.
        The input change handler is a method that will be called when an input on this Encoder board has changed.
        Parameters:
            inputChangeHandler: hook to the inputChangeHandler callback function.
        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException
        """
        if inputChangeHandler is None:
            self.__inputChange = None
            self.__onInputChange = None
        else:
            self.__inputChange = inputChangeHandler
            self.__onInputChange = self.__INPUTCHANGEHANDLER(self.__nativeInputChangeEvent)
        try:
            result = PhidgetLibrary.getDll().CPhidgetEncoder_set_OnInputChange_Handler(self.handle, self.__onInputChange, None)
        except RuntimeError:
            # registration failed: drop the half-installed handler references
            self.__inputChange = None
            self.__onInputChange = None
            raise
        if result > 0:
            raise PhidgetException(result)
    def __nativePositionChangeEvent(self, handle, usrptr, index, time, position):
        # trampoline called from C: forward to the user's Python callback
        if self.__positionChange is not None:
            self.__positionChange(EncoderPositionChangeEventArgs(self, index, time, position))
        return 0
    def setOnPositionChangeHandler(self, positionChangeHandler):
        """Sets the position change event handler.
        The position change handler is a method that will be called when the position of an encoder changes.
        The position change event provides data about how many ticks have occurred, and how much time has passed since the last position change event,
        but does not contain an absolute position.
        This can be obtained from getEncoderPosition.
        Parameters:
            positionChangeHandler: hook to the positionChangeHandler callback function.
        Exceptions:
            RuntimeError - If current platform is not supported/phidget c dll cannot be found
            PhidgetException
        """
        if positionChangeHandler is None:
            self.__positionChange = None
            self.__onPositionChange = None
        else:
            self.__positionChange = positionChangeHandler
            self.__onPositionChange = self.__POSITIONCHANGEHANDLER(self.__nativePositionChangeEvent)
        try:
            result = PhidgetLibrary.getDll().CPhidgetEncoder_set_OnPositionChange_Handler(self.handle, self.__onPositionChange, None)
        except RuntimeError:
            # registration failed: drop the half-installed handler references
            self.__positionChange = None
            self.__onPositionChange = None
            raise
        if result > 0:
            raise PhidgetException(result)
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2013, First Party Software
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from datetime import timedelta
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.contrib.sites.models import Site
from django.contrib.sites.managers import CurrentSiteManager
from django.core.urlresolvers import reverse
from servo import defaults
from servo.models.common import Location
class Queue(models.Model):
    """A per-site work queue for Service Orders.

    A queue carries its own print templates, a GSX Sold-To account, and a
    set of optional automatic status transitions (the status_* fields)
    that are applied when the corresponding event happens to an order in
    this queue.
    """
    site = models.ForeignKey(
        Site,
        editable=False,
        default=defaults.site_id
    )
    title = models.CharField(
        max_length=255,
        default=_('New Queue'),
        verbose_name=_('Title')
    )
    keywords = models.TextField(
        default='',
        blank=True,
        help_text=_('Orders with devices matching these keywords will be automatically assigned to this queue')
    )
    locations = models.ManyToManyField(
        Location,
        verbose_name=_('locations'),
        help_text=_("Pick the locations you want this queue to appear in.")
    )
    description = models.TextField(
        blank=True,
        verbose_name=_('description')
    )
    # Priority choices, stored as an integer.
    PRIO_LOW = 0
    PRIO_NORMAL = 1
    PRIO_HIGH = 2
    PRIORITIES = (
        (PRIO_HIGH, _("High")),
        (PRIO_NORMAL, _("Normal")),
        (PRIO_LOW, _("Low"))
    )
    priority = models.IntegerField(
        default=PRIO_NORMAL,
        choices=PRIORITIES,
        verbose_name=_("priority")
    )
    # Automatic status transitions: the QueueStatus an order gets when the
    # corresponding event occurs.  All are optional.
    status_created = models.ForeignKey(
        'QueueStatus',
        null=True,
        blank=True,
        related_name='+',
        verbose_name=_(u'Order Created'),
        # typo fix: "has ben" -> "has been"
        help_text=_("Order has been placed to a queue")
    )
    status_assigned = models.ForeignKey(
        'QueueStatus',
        null=True,
        blank=True,
        related_name='+',
        verbose_name=_(u'Order Assigned'),
        # typo fix: "has ben" -> "has been"
        help_text=_("Order has been assigned to a user")
    )
    status_products_ordered = models.ForeignKey(
        'QueueStatus',
        null=True,
        blank=True,
        related_name='+',
        verbose_name=_("Products Ordered"),
        help_text=_("Purchase Order for this Service Order has been submitted")
    )
    status_products_received = models.ForeignKey(
        'QueueStatus',
        null=True,
        blank=True,
        related_name='+',
        verbose_name=_("Products Received"),
        help_text=_("Products have been received")
    )
    status_repair_completed = models.ForeignKey(
        'QueueStatus',
        null=True,
        blank=True,
        related_name='+',
        verbose_name=_("Repair Completed"),
        help_text=_("GSX repair completed")
    )
    status_dispatched = models.ForeignKey(
        'QueueStatus',
        null=True,
        blank=True,
        related_name='+',
        verbose_name=_("Order Dispatched")
    )
    status_closed = models.ForeignKey(
        'QueueStatus',
        null=True,
        blank=True,
        related_name='+',
        verbose_name=_("Order Closed")
    )
    gsx_soldto = models.CharField(
        blank=True,
        default='',
        max_length=10,
        verbose_name=_("Sold-To"),
        help_text=_("GSX queries of an order in this queue will be made using this Sold-To")
    )
    # HTML templates used when printing the various documents for orders
    # in this queue.
    order_template = models.FileField(
        null=True,
        blank=True,
        upload_to="templates",
        verbose_name=_("order template"),
        help_text=_("HTML template for Service Order/Work Confirmation")
    )
    quote_template = models.FileField(
        null=True,
        blank=True,
        upload_to="templates",
        verbose_name=_("quote template"),
        help_text=_("HTML template for cost estimate")
    )
    receipt_template = models.FileField(
        null=True,
        blank=True,
        upload_to="templates",
        verbose_name=_("receipt template"),
        help_text=_("HTML template for Sales Order Receipt")
    )
    dispatch_template = models.FileField(
        null=True,
        blank=True,
        upload_to="templates",
        verbose_name=_("dispatch template"),
        help_text=_("HTML template for dispatched order")
    )
    # Default manager is site-aware: queries only return this site's queues.
    objects = CurrentSiteManager()

    def get_admin_url(self):
        """Return the admin edit URL for this queue."""
        return reverse('admin-edit_queue', args=[self.pk])

    def get_order_count(self, max_state=2):
        """Return the number of orders in this queue with state < max_state.

        Returns '' instead of 0 so templates can render an empty badge.
        """
        count = self.order_set.filter(state__lt=max_state).count()
        return count if count > 0 else ''

    def __unicode__(self):
        return self.title

    class Meta:
        ordering = ['title']
        app_label = "servo"
        verbose_name = _("Queue")
        verbose_name_plural = _("Queues")
        unique_together = ('title', 'site',)
class Status(models.Model):
    """A workflow status with green/yellow time limits.

    FACTORS maps a human-readable time unit to its length in seconds;
    limit_* values are multiplied by the chosen factor to get the actual
    time limit.
    """
    site = models.ForeignKey(
        Site,
        editable=False,
        default=defaults.site_id
    )
    # (seconds, label) choices for the time-limit unit.
    FACTORS = (
        (60, _('Minutes')),
        (3600, _('Hours')),
        (86400, _('Days')),
        (604800, _('Weeks')),
        (2419200, _('Months')),
    )
    title = models.CharField(
        max_length=255,
        default=_(u'New Status'),
        verbose_name=_(u'name')
    )
    description = models.TextField(
        null=True,
        blank=True,
        verbose_name=_(u'description')
    )
    limit_green = models.IntegerField(
        default=1,
        verbose_name=_(u'green limit')
    )
    limit_yellow = models.IntegerField(
        default=15,
        verbose_name=_(u'yellow limit')
    )
    limit_factor = models.IntegerField(
        choices=FACTORS,
        # Bug fix: the default of an IntegerField must be the stored value,
        # not the (value, label) choice tuple.  FACTORS[0] was the tuple
        # (60, _('Minutes')); use FACTORS[0][0] (minutes) as QueueStatus does.
        default=FACTORS[0][0],
        verbose_name=_(u'time unit')
    )
    queue = models.ManyToManyField(
        Queue,
        editable=False,
        through='QueueStatus'
    )

    def is_enabled(self, queue):
        """Return True if this status is enabled for the given queue.

        NOTE(review): queue.queuestatus_set contains QueueStatus rows, not
        Status rows, so `self in ...` looks like it can never be True —
        verify against callers before relying on this.
        """
        return self in queue.queuestatus_set.all()

    def get_admin_url(self):
        """Return the admin edit URL for this status."""
        return reverse('admin-edit_status', args=[self.pk])

    def __unicode__(self):
        return self.title

    class Meta:
        app_label = 'servo'
        ordering = ('title',)
        verbose_name = _('Status')
        verbose_name_plural = _('Statuses')
        unique_together = ('title', 'site',)
class QueueStatus(models.Model):
    """
    A status bound to a queue.
    This allows us to set time limits for each status per individual queue.
    """
    queue = models.ForeignKey(Queue)
    status = models.ForeignKey(Status)
    limit_green = models.IntegerField(default=1, verbose_name=_(u'green limit'))
    limit_yellow = models.IntegerField(default=15, verbose_name=_(u'yellow limit'))
    limit_factor = models.IntegerField(
        # FACTORS is a class attribute; access it on the class instead of
        # instantiating a throwaway Status() model just to read it.
        choices=Status.FACTORS,
        verbose_name=_(u'time unit'),
        default=Status.FACTORS[0][0]
    )

    def get_green_limit(self):
        """
        Gets the green time limit for this QS
        (now + limit_green in the configured time unit).
        """
        return timezone.now() + timedelta(seconds=self.limit_green*self.limit_factor)

    def get_yellow_limit(self):
        """Gets the yellow time limit for this QS."""
        return timezone.now() + timedelta(seconds=self.limit_yellow*self.limit_factor)

    def __unicode__(self):
        return self.status.title

    class Meta:
        app_label = 'servo'
        # A status should only be defined once per queue
        unique_together = ('queue', 'status',)
| |
#!/usr/bin/env python
# geo2d1.py - Tier 1 code to export geonetwork metadata into a DataONE GMN.
# The International Arctic Research Center (IARC) Data Archive uses the xml
# form of ISO 19115 metadata, ISO 19139. Not all researchers fill out
# metadata that validates, however, so this code xslt transforms the public
# ISO 19139 metadata records from IARC's geonetwork OAI-PMH endpoint into
# dublin core extended (dcx), and inserts the corresponding geonetwork page
# under the 'source' element. It then uploads the validated dcx into IARC's
# DataONE GMN as metadata, while the original ISO 19139 metadata is uploaded
# as data (text/xml). A resource map relating the two completes the package.
# Each time this script runs, it checks to see if a package update is required:
# the current GMN ISO 19139 data object (xml) is compared with the downloaded
# OAI-PMH version, and if different, triggers the package update.
# takes an optional argument, the record number to start with
# (default = 1), useful if the script needs to be restarted
# FORCE_UPDATE can also be set to True from False to force updates, useful in a
# situation where the XSLT transform changes, etc.
# requires python version < 2.7.9 if the GMN has no/invalid site certificate.
# Copyright (C) 2015, University of Alaska Fairbanks
# International Arctic Research Center
# Author: James Long
#-------------------------------------------------------------------------------
# GEO2D1 BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# # Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# # Neither the name of the University of Alaska Fairbanks nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# stdlib
#import logging
import hashlib
import lxml.etree as et
import os
import StringIO
from datetime import datetime
from sys import argv
from time import sleep
from urllib2 import urlopen
# 3rd party
import pyxb
# DataONE
import d1_common.types.generated.dataoneTypes as dataoneTypes
import d1_common.const
import d1_client.data_package
import d1_client.mnclient
import d1_common.types.exceptions
# Source OAI-PMH endpoint (geonetwork) and destination DataONE GMN.
GEO_URL = 'http://climate.iarc.uaf.edu/geonetwork/srv/en/main.home/oaipmh'
GMN_URL = 'https://trusty.iarc.uaf.edu/mn'
# When True, every package is re-uploaded even if the ISO metadata is
# unchanged (useful after e.g. an XSLT change).
FORCE_UPDATE = False
# Client certificate/key used to authenticate create/update/delete calls.
CERTIFICATE_FOR_CREATE = '/home/jlong/d1/keys/jl_cert.pem'
CERTIFICATE_FOR_CREATE_KEY = '/home/jlong/d1/keys/jl_key.pem'
SYSMETA_RIGHTSHOLDER = 'CN=jlong,O=International Arctic Research Center,ST=AK,C=US'
# Object format types. A complete list of valid formatIds
# can be found at https://cn.dataone.org/cn/v1/formats
DATA_FORMAT_ID = 'text/xml'
META_FORMAT_ID = 'http://ns.dataone.org/metadata/schema/onedcx/v1.0'
RMAP_FORMAT_ID = 'http://www.openarchives.org/ore/terms'
def main():
    """Mirror geonetwork ISO 19139 records into the DataONE GMN.

    Harvests every identifier from the OAI-PMH endpoint, transforms each
    ISO 19139 record to Dublin Core extended (dcx), and creates or updates
    the corresponding GMN package (dcx metadata + ISO xml data + resource
    map).  Optional argv[1] is the 1-based record number to start from, so
    an interrupted run can be resumed.
    """
    #logging.basicConfig()
    #logging.getLogger('').setLevel(logging.DEBUG)
    # validate the optional starting-record argument
    if len(argv) > 1 and not argv[1].isdigit():
        print "the argument " + argv[1] + " is not composed of all digits, returning..."
        return
    if len(argv) > 1 and int(argv[1]) < 1:
        print "the argument " + argv[1] + " is less than 1, returning..."
        return
    # get the list of ISO 19139 identifiers (fileIDs)
    print "Downloading list of Identifiers from " + GEO_URL + "..."
    try:
        fo = urlopen(GEO_URL + "?verb=ListIdentifiers&metadataPrefix=iso19139")
    except:
        print "URL open failure for " + GEO_URL + ", halting (try running this script again)..."
        return
    try:
        xmlDoc = fo.read()
    except:
        print "file read failure at " + GEO_URL + ", halting (try running this script again)..."
        return
    else:
        root = et.fromstring(xmlDoc)
        fileIDs = [ i.text for i in root.findall("./{http://www.openarchives.org/OAI/2.0/}ListIdentifiers/{http://www.openarchives.org/OAI/2.0/}header/{http://www.openarchives.org/OAI/2.0/}identifier") ]
        rt = root.findall("./{http://www.openarchives.org/OAI/2.0/}ListIdentifiers/{http://www.openarchives.org/OAI/2.0/}resumptionToken")
        print "downloading..."
    if len(rt)==0:
        print "Error retrieving ListIdentifiers on " + GEO_URL + " (check the OAI-PMH server), exiting..."
        return
    # follow OAI-PMH resumption tokens until the identifier list is complete
    while rt[0].text:
        sleep(0.2)
        try:
            fo = urlopen(GEO_URL + "?verb=ListIdentifiers&resumptionToken=" + rt[0].text)
        except:
            print "URL open failure for " + GEO_URL + ", halting (try running this script again)..."
            return
        try:
            xmlDoc = fo.read()
        except:
            print "file read failure at " + GEO_URL + ", halting (try running this script again)..."
            return
        else:
            root = et.fromstring(xmlDoc)
            fileIDs = fileIDs + [ i.text for i in root.findall("./{http://www.openarchives.org/OAI/2.0/}ListIdentifiers/{http://www.openarchives.org/OAI/2.0/}header/{http://www.openarchives.org/OAI/2.0/}identifier") ]
            print "downloading..."
            rt = root.findall("./{http://www.openarchives.org/OAI/2.0/}ListIdentifiers/{http://www.openarchives.org/OAI/2.0/}resumptionToken")
    # uniq the list
    fileIDs = list(set(fileIDs))
    if len(argv) > 1 and int(argv[1]) > len(fileIDs):
        print "the argument " + argv[1] + " is larger than the number of records, " + str(len(fileIDs)) + ","
        print "returning..."
        return
    print "number of unique Identifiers = ", len(fileIDs)
    # xsl doc to xslt transform OAI-PMH ISO 19139 record to dcx
    # test this on the command line by saving it in file 'test.xsl', and running
    # $ xsltproc test.xsl <xml file to transform>
    xslDoc = et.XML('''\
<?xml version="1.0" encoding="UTF-8"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"
xmlns:gco="http://www.isotc211.org/2005/gco"
xmlns:gmd="http://www.isotc211.org/2005/gmd"
xmlns:gml="http://www.opengis.net/gml">
<xsl:output
indent="yes"
method="xml"
version="1.0"
/>
<xsl:template match="gmd:MD_Metadata">
<xsl:value-of select="concat('', ' ')"/>
<metadata xmlns="http://ns.dataone.org/metadata/schema/onedcx/v1.0"
xmlns:dc="http://purl.org/dc/terms/"
xmlns:dcterms="http://purl.org/dc/terms/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://ns.dataone.org/metadata/schema/onedcx/v1.0 http://ns.dataone.org/metadata/schema/onedcx/v1.0/onedcx_v1.0.xsd">
<simpleDc>
<xsl:for-each select="gmd:fileIdentifier">
<dc:identifier><xsl:value-of select="gco:CharacterString"/></dc:identifier>
<dc:source>http://climate.iarc.uaf.edu/geonetwork/srv/en/main.home?uuid=<xsl:value-of select="gco:CharacterString"/></dc:source>
</xsl:for-each>
<!-- DataIdentification - - - - - - - - - - - - - - - - - - - - - -->
<xsl:for-each select="gmd:identificationInfo/gmd:MD_DataIdentification">
<xsl:for-each select="gmd:citation/gmd:CI_Citation">
<xsl:for-each select="gmd:title/gco:CharacterString">
<dc:title><xsl:value-of select="."/></dc:title>
</xsl:for-each>
<xsl:for-each select="gmd:citedResponsibleParty/gmd:CI_ResponsibleParty[gmd:role/gmd:CI_RoleCode/@codeListValue='originator']/gmd:organisationName/gco:CharacterString">
<dc:creator><xsl:value-of select="."/></dc:creator>
</xsl:for-each>
<xsl:for-each select="gmd:citedResponsibleParty/gmd:CI_ResponsibleParty[gmd:role/gmd:CI_RoleCode/@codeListValue='publisher']/gmd:organisationName/gco:CharacterString">
<dc:publisher><xsl:value-of select="."/></dc:publisher>
</xsl:for-each>
<xsl:for-each select="gmd:citedResponsibleParty/gmd:CI_ResponsibleParty[gmd:role/gmd:CI_RoleCode/@codeListValue='author']/gmd:organisationName/gco:CharacterString">
<dc:contributor><xsl:value-of select="."/></dc:contributor>
</xsl:for-each>
</xsl:for-each>
<!-- subject -->
<xsl:for-each select="gmd:descriptiveKeywords/gmd:MD_Keywords/gmd:keyword/gco:CharacterString">
<dc:subject><xsl:value-of select="."/></dc:subject>
</xsl:for-each>
<!-- language -->
<xsl:for-each select="gmd:language/gco:CharacterString">
<dc:language><xsl:value-of select="."/></dc:language>
</xsl:for-each>
</xsl:for-each>
<!-- Type - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
<xsl:for-each select="gmd:hierarchyLevel/gmd:MD_ScopeCode/@codeListValue">
<dc:type><xsl:value-of select="."/></dc:type>
</xsl:for-each>
<!-- Distribution - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
<xsl:for-each select="gmd:distributionInfo/gmd:MD_Distribution">
<xsl:for-each select="gmd:distributionFormat/gmd:MD_Format/gmd:name/gco:CharacterString">
<dc:format><xsl:value-of select="."/></dc:format>
</xsl:for-each>
</xsl:for-each>
</simpleDc>
<dcTerms>
<dcterms:modified><xsl:value-of select="gmd:dateStamp/gco:DateTime"/></dcterms:modified>
<!-- DataIdentification - - - - - - - - - - - - - - - - - - - - - -->
<xsl:for-each select="gmd:identificationInfo/gmd:MD_DataIdentification">
<xsl:for-each select="gmd:citation/gmd:CI_Citation">
<xsl:for-each select="gmd:date/gmd:CI_Date[gmd:dateType/gmd:CI_DateTypeCode/@codeListValue='creation']/gmd:date/gco:DateTime">
<dcterms:created><xsl:value-of select="."/></dcterms:created>
</xsl:for-each>
<xsl:for-each select="gmd:date/gmd:CI_Date[gmd:dateType/gmd:CI_DateTypeCode/@codeListValue='publication']/gmd:date/gco:DateTime">
<dcterms:created><xsl:value-of select="."/></dcterms:created>
</xsl:for-each>
</xsl:for-each>
<!-- description -->
<xsl:for-each select="gmd:abstract/gco:CharacterString">
<dcterms:abstract><xsl:value-of select="."/></dcterms:abstract>
</xsl:for-each>
<!-- rights -->
<xsl:for-each select="gmd:resourceConstraints/gmd:MD_LegalConstraints">
<xsl:for-each select="*/gmd:MD_RestrictionCode/@codeListValue">
<dcterms:accessRights><xsl:value-of select="."/></dcterms:accessRights>
</xsl:for-each>
<xsl:for-each select="gmd:otherConstraints/gco:CharacterString">
<dcterms:accessRights><xsl:value-of select="."/></dcterms:accessRights>
</xsl:for-each>
</xsl:for-each>
<!-- bounding box -->
<xsl:for-each select="gmd:extent/gmd:EX_Extent/gmd:geographicElement/gmd:EX_GeographicBoundingBox">
<dcterms:spatial xsi:type="dcterms:Box">
<xsl:value-of select="concat('northlimit=', gmd:northBoundLatitude/gco:Decimal, '; ')"/>
<xsl:value-of select="concat('southlimit=', gmd:southBoundLatitude/gco:Decimal, '; ')"/>
<xsl:value-of select="concat('eastlimit=' , gmd:eastBoundLongitude/gco:Decimal, '; ')"/>
<xsl:value-of select="concat('westlimit=' , gmd:westBoundLongitude/gco:Decimal)"/>
</dcterms:spatial>
</xsl:for-each>
<!-- temporal bounds -->
<xsl:for-each select="gmd:extent/gmd:EX_Extent/gmd:temporalElement/gmd:EX_TemporalExtent/gmd:extent/gml:TimePeriod">
<dcterms:temporal>
<xsl:value-of select="concat('Begin ', gml:beginPosition, '; ')"/>
<xsl:value-of select="concat('End ' , gml:endPosition)"/>
</dcterms:temporal>
</xsl:for-each>
</xsl:for-each>
</dcTerms>
<!--
<otherElements>
</otherElements>
-->
</metadata>
</xsl:template>
<xsl:template match="*">
<xsl:apply-templates select="*"/>
</xsl:template>
</xsl:stylesheet>''')
    try:
        transform = et.XSLT(xslDoc)
    except:
        print "unable to generate transform, exiting..."
        return
    print ""
    # client to interact with GMN
    client = d1_client.mnclient.MemberNodeClient(
        GMN_URL,
        cert_path=CERTIFICATE_FOR_CREATE,
        key_path=CERTIFICATE_FOR_CREATE_KEY)
    # get the list of objects on the GMN
    try:
        # first call discovers the total count; second fetches them all
        objs = client.listObjects(
            count=1,
            objectFormat=RMAP_FORMAT_ID,
            replicaStatus=False)
        tot = objs.total
        sleep(0.1)
        objs = client.listObjects(
            count=tot,
            objectFormat=RMAP_FORMAT_ID,
            replicaStatus=False)
    except d1_common.types.exceptions.DataONEException as e:
        print "listObjects() failed with exception:"
        raise
    # generate a list of pid strings
    objStrings = [ obj.identifier.value() for obj in objs.objectInfo ]
    # for each fileID, get the latest resource map
    sleep(0.1)
    count = 0
    for fileID in fileIDs:
        count += 1
        # start at a higher number?
        if len(argv) > 1 and int(argv[1]) > 0 and count < int(argv[1]):
            continue
        print "record number: " + str(count)
        # get the ISO 19139 record
        try:
            sleep(0.2)
            fo = urlopen(GEO_URL + "?verb=GetRecord&metadataPrefix=iso19139&identifier=" + str(fileID))
        except:
            print "error opening " + str(fileID)
            print "continuing..."
            continue
        else:
            # ingest metadata & xslt transform to dcx, the metadata format used on the GMN
            isoDoc = et.parse(fo)
            dcxDoc = transform(isoDoc)
            dcxString = et.tostring(dcxDoc)
            dcxString = '<?xml version="1.0" encoding="UTF-8"?>' + dcxString
            #print dcxString
            # validate dcxDoc, assumes onedcx_v1.0.xsd and associated xsd
            # files are in same directory where this script is run:
            #   dcmitype.xsd
            #   dcterms.xsd  <--- original, not LoC version
            #   dc.xsd
            #   onedcx_v1.0.xsd
            #   xml.xsd
            dcxXsd = et.XMLSchema(et.parse("onedcx_v1.0.xsd"))
            if dcxXsd.validate(dcxDoc):
                # extract original ISO metadata from OAI-PMH wrapper to upload as data
                isoXML = et.tostring(isoDoc.find("//{http://www.isotc211.org/2005/gmd}MD_Metadata"))
                isoXML = '<?xml version="1.0" encoding="UTF-8"?>\n' + isoXML.replace("\n ","\n")
                #print isoXML
                # the package will consist of dcx metadata, with pid
                #   "dcx_" + fileID + "_" + version
                # ISO 19139 metadata stored as data, text/xml, with pid
                #   "iso19139_" + fileID + "_" + version
                # data, and a resource map tying the two together, with pid
                #   fileID + "_" + version
                # the following assumes we keep all versions of the resource map objects
                # so that we can walk the chain from the first one to the most recent.
                # when SIDs become available in DataONE API v2, we'll use those to get to
                # the most recent version, i.e. SID will equal fileID w/o version suffix.
                # the whole reason to walk the chain is to get the latest index (idx),
                # so that an update can have idx = idx + 1
                idx = 0
                packageJustCreated = 0
                while True:
                    rmap = fileID + "_" + str(idx)
                    if rmap in objStrings:
                        idx += 1
                        continue
                    else:
                        if idx==0: # initial package creation
                            sleep(0.1)
                            if not createInitialPackage(dcxString, isoXML, fileID, client):
                                print "package creation failure for " + fileID + "_" + str(idx)
                                print "halting; either there is a network problem (try running this script again),"
                                print "and/or the package already exists (please investigate)..."
                                return
                            else:
                                packageJustCreated = 1
                                sleep(0.1)
                                break
                        else:
                            # idx walked one past the newest existing map; step back
                            idx -= 1
                            break
                # check if update required: get the latest ISO 19139 data object, compare
                # with downloaded OAI-PMH version, and update package if different
                if not packageJustCreated:
                    sleep(0.1)
                    try:
                        isoDO = client.get("iso19139_" + fileID + "_" + str(idx)).read()
                    except:
                        print "ISO metadata retrieval error for iso19139_" + fileID + "_" + str(idx)
                        print "halting; probably a network problem (try running this script again)."
                        return
                    else:
                        if isoDO != isoXML:
                            print "changes in " + "iso19139_" + fileID + "_" + str(idx) + " detected,"
                            print "updating package, new index is " + "_" + str(idx+1)
                            if not updatePackage(dcxString, isoXML, fileID, idx, client):
                                print "package update failure for " + fileID + "_" + str(idx)
                                print "halting; either there is a network problem (try running this script again),"
                                print "and/or the package already exists (please investigate)..."
                                return
                        elif FORCE_UPDATE:
                            print "update forced for " + "iso19139_" + fileID + "_" + str(idx)
                            print "new index is " + "_" + str(idx+1)
                            if not updatePackage(dcxString, isoXML, fileID, idx, client):
                                print "package update failure for " + fileID + "_" + str(idx)
                                print "halting; either there is a network problem (try running this script again),"
                                print "and/or the package already exists (please investigate)..."
                                return
                        else:
                            print "no update required for " + fileID + "_" + str(idx)
                            sleep(0.1)
            else:
                print str(fileID) + " did not validate for dcx, skipping..."
                sleep(0.1)
            print ""
    return
## end main()
def createInitialPackage(dcxString, isoXML, fileID, client):
now = datetime.now()
# create metadata object
pids = ["dcx_" + fileID]
print "creating metadata object " + pids[0] + "_0"
sysMeta = create_sys_meta(
pids[0],
META_FORMAT_ID,
0,
len(dcxString),
dataoneTypes.checksum(hashlib.sha1(dcxString).hexdigest()),
now)
pids[0] = pids[0] + "_0"
try:
sleep(0.1)
client.create(pids[0], StringIO.StringIO(dcxString), sysMeta)
except:
print "creation of metadata object " + pids[0] + " failed"
return False
# create data object, the ISO 19139 metadata xml
pids = pids + ["iso19139_" + fileID]
print "creating data object " + pids[-1] + "_0"
sysMeta = create_sys_meta(
pids[-1],
DATA_FORMAT_ID,
0,
len(isoXML),
dataoneTypes.checksum(hashlib.sha1(isoXML).hexdigest()),
now)
pids[-1] = pids[-1] + "_0"
try:
sleep(0.1)
client.create(pids[-1], StringIO.StringIO(isoXML), sysMeta)
except:
print "creation of data object " + pids[-1] + " failed"
print "rolling back..."
try:
sleep(0.1)
client.delete(pids[0])
print "rollback deletion of metadata object " + pids[0] + " succeeded"
except:
try: # again
sleep(0.1)
client.delete(pids[0])
print "rollback deletion of metadata object " + pids[0] + " succeeded"
except:
print "rollback deletion of metadata object " + pids[0] + " failed"
print "manual intervention required to delete object."
return False
# create resource map
pid = fileID + "_0"
print "creating resource map " + pid
rmapGenerator = d1_client.data_package.ResourceMapGenerator()
rmap = rmapGenerator.simple_generate_resource_map(pid, pids[0], pids[1:])
sysMeta = create_sys_meta(
fileID,
RMAP_FORMAT_ID,
0,
len(rmap),
dataoneTypes.checksum(hashlib.sha1(rmap).hexdigest()),
now)
try:
sleep(0.1)
client.create(pid, StringIO.StringIO(rmap), sysMeta)
except:
print "creation of resource map " + pid + " failed"
print "rolling back..."
try:
sleep(0.1)
client.delete(pids[-1])
print "rollback deletion of data object " + pids[-1] + " succeeded"
except:
try: # again
sleep(0.1)
client.delete(pids[-1])
print "rollback deletion of data object " + pids[-1] + " succeeded"
except:
print "rollback deletion of data object " + pids[-1] + " failed"
print "manual intervention required to delete object."
try:
sleep(0.1)
client.delete(pids[0])
print "rollback deletion of metadata object " + pids[0] + " succeeded"
except:
try: # again
sleep(0.1)
client.delete(pids[0])
print "rollback deletion of metadata object " + pids[0] + " succeeded"
except:
print "rollback deletion of metadata object " + pids[0] + " failed"
print "manual intervention required to delete object."
return False
# creation of resource map succeeded
else:
print "package creation for " + pid + " successful."
return True
def create_sys_meta(pid, format_id, idx, size, sha1, when):
    """Assemble DataONE system metadata for object "<pid>_<idx>".

    The supplied checksum object is annotated as SHA-1; public read access
    and the default replication policy are attached.
    """
    meta = dataoneTypes.systemMetadata()
    meta.serialVersion = idx
    meta.identifier = "%s_%s" % (pid, idx)
    meta.formatId = format_id
    meta.size = size
    meta.rightsHolder = SYSMETA_RIGHTSHOLDER
    meta.checksum = sha1
    meta.checksum.algorithm = 'SHA-1'
    # upload and modification timestamps are identical for a fresh object
    meta.dateUploaded = when
    meta.dateSysMetadataModified = when
    meta.accessPolicy = generate_public_access_policy()
    meta.replicationPolicy = generate_replication_policy()
    return meta
def generate_public_access_policy():
    """Return an access policy granting 'read' to the public subject."""
    rule = dataoneTypes.AccessRule()
    rule.subject.append(d1_common.const.SUBJECT_PUBLIC)
    rule.permission.append(dataoneTypes.Permission('read'))
    policy = dataoneTypes.accessPolicy()
    policy.append(rule)
    return policy
def generate_replication_policy():
    """Return a policy allowing the default number of replicas."""
    policy = dataoneTypes.replicationPolicy()
    policy.numberReplicas = d1_common.const.DEFAULT_NUMBER_OF_REPLICAS
    policy.replicationAllowed = True
    return policy
def updatePackage(dcxString, isoXML, fileID, idx, client):
now = datetime.now()
# update metadata object
pids = ["dcx_" + fileID]
print "updating: " + pids[0] + "_" + str(idx)
sysMeta = create_sys_meta(
pids[0],
META_FORMAT_ID,
idx+1,
len(dcxString),
dataoneTypes.checksum(hashlib.sha1(dcxString).hexdigest()),
now)
oldpid = pids[0] + "_" + str(idx)
pids[0] = pids[0] + "_" + str(idx+1)
try:
sleep(0.1)
client.update(oldpid, StringIO.StringIO(dcxString), pids[0], sysMeta)
except d1_common.types.exceptions.DataONEException as e:
print "update of " + oldpid + " failed with exception:"
raise
else:
print "update of " + oldpid + " succeeded"
# update data object, the ISO 19139 metadata xml
pids = pids + ["iso19139_" + fileID]
print "updating: " + pids[-1] + "_" + str(idx)
sysMeta = create_sys_meta(
pids[-1],
DATA_FORMAT_ID,
idx+1,
len(isoXML),
dataoneTypes.checksum(hashlib.sha1(isoXML).hexdigest()),
now)
oldpid = pids[-1] + "_" + str(idx)
pids[-1] = pids[-1] + "_" + str(idx+1)
try:
sleep(0.1)
client.update(oldpid, StringIO.StringIO(isoXML), pids[-1], sysMeta)
except d1_common.types.exceptions.DataONEException as e:
print "manual intervention required due to inconsistent package state:"
print pids[0] + "has obsoleted dcx_" + fileID + str(idx) + ", but"
print "update of " + oldpid + " failed with exception:"
raise
else:
print "update of " + oldpid + " succeeded"
# update resource map
oldpid = fileID + "_" + str(idx)
newpid = fileID + "_" + str(idx+1)
print "updating: " + oldpid
rmapGenerator = d1_client.data_package.ResourceMapGenerator()
rmap = rmapGenerator.simple_generate_resource_map(newpid, pids[0], pids[1:])
sysMeta = create_sys_meta(
fileID,
RMAP_FORMAT_ID,
idx+1,
len(rmap),
dataoneTypes.checksum(hashlib.sha1(rmap).hexdigest()),
now)
try:
sleep(0.1)
client.update(oldpid, StringIO.StringIO(rmap), newpid, sysMeta)
except d1_common.types.exceptions.DataONEException as e:
print "manual intervention required due to inconsistent package state:"
print pids[0] + "has obsoleted dcx_" + fileID + str(idx) + ", and"
print pids[-1] + "has obsoleted iso19139_" + fileID + str(idx) + ", but"
print "update of " + oldpid + " failed with exception:"
raise
else:
print "update of " + oldpid + " succeeded"
print "package update for " + oldpid + " successful."
return True
# Script entry point; allows the module to be imported without side effects.
if __name__ == '__main__':
    main()
| |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Load fonts and render text.
This is a fairly-low level interface to text rendering. Obtain a font using
`load`::
from pyglet import font
arial = font.load('Arial', 14, bold=True, italic=False)
pyglet will load any system-installed fonts. You can add additional fonts
(for example, from your program resources) using `add_file` or
`add_directory`.
Obtain a list of `Glyph` objects for a string of text using the `Font`
object::
text = 'Hello, world!'
glyphs = arial.get_glyphs(text)
The most efficient way to render these glyphs is with a `GlyphString`::
glyph_string = GlyphString(text, glyphs)
glyph_string.draw()
There are also a variety of methods in both `Font` and
`GlyphString` to facilitate word-wrapping.
A convenient way to render a string of text is with a `Text`::
text = Text(font, text)
text.draw()
See the `pyglet.font.base` module for documentation on the base classes used
by this package.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import sys
import os
import math
import weakref
import pyglet
from pyglet.gl import *
from pyglet import gl
from pyglet import image
from pyglet import window
class GlyphString(object):
'''An immutable string of glyphs that can be rendered quickly.
This class is ideal for quickly rendering single or multi-line strings
of text that use the same font. To wrap text using a glyph string,
call `get_break_index` to find the optimal breakpoint for each line,
then repeatedly call `draw` for each breakpoint.
:deprecated: Use `pyglet.text.layout` classes.
'''
def __init__(self, text, glyphs, x=0, y=0):
'''Create a glyph string.
The `text` string is used to determine valid breakpoints; all glyphs
must have already been determined using
`pyglet.font.base.Font.get_glyphs`. The string
will be positioned with the baseline of the left-most glyph at the
given coordinates.
:Parameters:
`text` : str or unicode
String to represent.
`glyphs` : list of `pyglet.font.base.Glyph`
Glyphs representing `text`.
`x` : float
X coordinate of the left-side bearing of the left-most glyph.
`y` : float
Y coordinate of the baseline.
'''
# Create an interleaved array in GL_T2F_V3F format and determine
# state changes required.
lst = []
texture = None
self.text = text
self.states = []
self.cumulative_advance = [] # for fast post-string breaking
state_from = 0
state_length = 0
for i, glyph in enumerate(glyphs):
if glyph.owner != texture:
if state_length:
self.states.append((state_from, state_length, texture))
texture = glyph.owner
state_from = i
state_length = 0
state_length += 1
t = glyph.tex_coords
lst += [t[0], t[1], t[2], 1.,
x + glyph.vertices[0], y + glyph.vertices[1], 0., 1.,
t[3], t[4], t[5], 1.,
x + glyph.vertices[2], y + glyph.vertices[1], 0., 1.,
t[6], t[7], t[8], 1.,
x + glyph.vertices[2], y + glyph.vertices[3], 0., 1.,
t[9], t[10], t[11], 1.,
x + glyph.vertices[0], y + glyph.vertices[3], 0., 1.]
x += glyph.advance
self.cumulative_advance.append(x)
self.states.append((state_from, state_length, texture))
self.array = (c_float * len(lst))(*lst)
self.width = x
def get_break_index(self, from_index, width):
'''Find a breakpoint within the text for a given width.
Returns a valid breakpoint after `from_index` so that the text
between `from_index` and the breakpoint fits within `width` pixels.
This method uses precomputed cumulative glyph widths to give quick
answer, and so is much faster than
`pyglet.font.base.Font.get_glyphs_for_width`.
:Parameters:
`from_index` : int
Index of text to begin at, or 0 for the beginning of the
string.
`width` : float
Maximum width to use.
:rtype: int
:return: the index of text which will be used as the breakpoint, or
`from_index` if there is no valid breakpoint.
'''
to_index = from_index
if from_index >= len(self.text):
return from_index
if from_index:
width += self.cumulative_advance[from_index-1]
for i, (c, w) in enumerate(
zip(self.text[from_index:],
self.cumulative_advance[from_index:])):
if c in u'\u0020\u200b':
to_index = i + from_index + 1
if c == '\n':
return i + from_index + 1
if w > width:
return to_index
return to_index
def get_subwidth(self, from_index, to_index):
'''Return the width of a slice of this string.
:Parameters:
`from_index` : int
The start index of the string to measure.
`to_index` : int
The end index (exclusive) of the string to measure.
:rtype: float
'''
if to_index <= from_index:
return 0
width = self.cumulative_advance[to_index-1]
if from_index:
width -= self.cumulative_advance[from_index-1]
return width
def draw(self, from_index=0, to_index=None):
'''Draw a region of the glyph string.
Assumes texture state is enabled. To enable the texture state::
from pyglet.gl import *
glEnable(GL_TEXTURE_2D)
:Parameters:
`from_index` : int
Start index of text to render.
`to_index` : int
End index (exclusive) of text to render.
'''
if from_index >= len(self.text) or \
from_index == to_index or \
not self.text:
return
# XXX Safe to assume all required textures will use same blend state I
# think. (otherwise move this into loop)
self.states[0][2].apply_blend_state()
if from_index:
glPushMatrix()
glTranslatef(-self.cumulative_advance[from_index-1], 0, 0)
if to_index is None:
to_index = len(self.text)
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
glInterleavedArrays(GL_T4F_V4F, 0, self.array)
for state_from, state_length, texture in self.states:
if state_from + state_length < from_index:
continue
state_from = max(state_from, from_index)
state_length = min(state_length, to_index - state_from)
if state_length <= 0:
break
glBindTexture(GL_TEXTURE_2D, texture.id)
glDrawArrays(GL_QUADS, state_from * 4, state_length * 4)
glPopClientAttrib()
if from_index:
glPopMatrix()
class _TextZGroup(pyglet.graphics.Group):
    # Graphics group that renders its children translated by `z` along the
    # z axis; used by Text to honour its `z` constructor argument.
    z = 0

    def set_state(self):
        glTranslatef(0, 0, self.z)

    def unset_state(self):
        # Undo the translation applied in set_state.
        glTranslatef(0, 0, -self.z)
class Text(object):
    '''Simple displayable text.

    This is a convenience class for rendering strings of text.  It takes
    care of caching the vertices so the text can be rendered every frame with
    little performance penalty.

    Text can be word-wrapped by specifying a `width` to wrap into.  If the
    width is not specified, it gives the width of the text as laid out.

    :Ivariables:
        `x` : int
            X coordinate of the text
        `y` : int
            Y coordinate of the text

    :deprecated: Use `pyglet.text.Label`.
    '''

    # Alignment constants

    #: Align the left edge of the text to the given X coordinate.
    LEFT = 'left'
    #: Align the horizontal center of the text to the given X coordinate.
    CENTER = 'center'
    #: Align the right edge of the text to the given X coordinate.
    RIGHT = 'right'
    #: Align the bottom of the descender of the final line of text with the
    #: given Y coordinate.
    BOTTOM = 'bottom'
    #: Align the baseline of the first line of text with the given Y
    #: coordinate.
    BASELINE = 'baseline'
    #: Align the top of the ascender of the first line of text with the given
    #: Y coordinate.
    TOP = 'top'

    # None: no multiline
    # 'width': multiline, wrapped to width
    # 'multiline': multiline, no wrap
    _wrap = None

    # Internal bookkeeping for wrap only.
    _width = None

    def __init__(self, font, text='', x=0, y=0, z=0, color=(1,1,1,1),
                 width=None, halign=LEFT, valign=BASELINE):
        '''Create displayable text.

        :Parameters:
            `font` : `Font`
                Font to render the text in.
            `text` : str
                Initial string to render.
            `x` : float
                X coordinate of the left edge of the text.
            `y` : float
                Y coordinate of the baseline of the text.  If the text is
                word-wrapped, this refers to the first line of text.
            `z` : float
                Z coordinate of the text plane.
            `color` : 4-tuple of float
                Color to render the text in.  Alpha values can be specified
                in the fourth component.
            `width` : float
                Width to limit the rendering to.  Text will be word-wrapped
                if necessary.
            `halign` : str
                Alignment of the text.  See `Text.halign` for details.
            `valign` : str
                Controls positioning of the text based off the y coordinate.
                One of BASELINE, BOTTOM, CENTER or TOP. Defaults to BASELINE.
        '''
        multiline = False
        if width is not None:
            self._width = width
            self._wrap = 'width'
            multiline = True
        elif '\n' in text:
            self._wrap = 'multiline'
            multiline = True

        self._group = _TextZGroup()
        self._document = pyglet.text.decode_text(text)
        self._layout = pyglet.text.layout.TextLayout(self._document,
                                                     width=width,
                                                     multiline=multiline,
                                                     wrap_lines=width is not None,
                                                     dpi=font.dpi,
                                                     group=self._group)

        # Batch the style/layout updates until all attributes are set.
        self._layout.begin_update()
        if self._wrap == 'multiline':
            self._document.set_style(0, len(text), dict(wrap=False))
        self.font = font
        self.color = color
        # Assign _x directly; the halign fixup is applied explicitly below.
        self._x = x
        self.y = y
        self.z = z
        self.width = width
        self.halign = halign
        self.valign = valign
        self._update_layout_halign()
        self._layout.end_update()

    def _get_font(self):
        return self._font

    def _set_font(self, font):
        self._font = font
        self._layout.begin_update()
        self._document.set_style(0, len(self._document.text), {
            'font_name': font.name,
            'font_size': font.size,
            'bold': font.bold,
            'italic': font.italic,
        })
        self._layout._dpi = font.dpi
        self._layout.end_update()

    font = property(_get_font, _set_font)

    def _get_color(self):
        color = self._document.get_style('color')
        if color is None:
            return (1., 1., 1., 1.)
        # Document colors are 0-255 ints; this class exposes 0-1 floats.
        return tuple([c/255. for c in color])

    def _set_color(self, color):
        color = [int(c * 255) for c in color]
        self._document.set_style(0, len(self._document.text), {
            'color': color,
        })

    color = property(_get_color, _set_color)

    def _update_layout_halign(self):
        if self._layout.multiline:
            # TextLayout has a different interpretation of halign that doesn't
            # consider the width to be a special factor; here we emulate the
            # old behaviour by fudging the layout x value.
            if self._layout.anchor_x == 'left':
                self._layout.x = self.x
            elif self._layout.anchor_x == 'center':
                # NOTE(review): '-' binds looser than '//', so this computes
                # x + width - (content_width // 2) -- confirm this matches
                # the intended centering behaviour.
                self._layout.x = self.x + self._layout.width - \
                    self._layout.content_width // 2
            elif self._layout.anchor_x == 'right':
                self._layout.x = self.x + 2 * self._layout.width - \
                    self._layout.content_width
        else:
            self._layout.x = self.x

    def _get_x(self):
        return self._x

    def _set_x(self, x):
        self._x = x
        self._update_layout_halign()

    x = property(_get_x, _set_x)

    def _get_y(self):
        return self._layout.y

    def _set_y(self, y):
        self._layout.y = y

    y = property(_get_y, _set_y)

    def _get_z(self):
        return self._group.z

    def _set_z(self, z):
        self._group.z = z

    z = property(_get_z, _set_z)

    def _update_wrap(self):
        # Re-derive the wrap mode from the current width/text, then push the
        # corresponding settings into the layout and document.
        if self._width is not None:
            self._wrap = 'width'
        elif '\n' in self.text:
            self._wrap = 'multiline'

        self._layout.begin_update()
        if self._wrap == None:
            self._layout.multiline = False
        elif self._wrap == 'width':
            self._layout.width = self._width
            self._layout.multiline = True
            self._document.set_style(0, len(self.text), dict(wrap=True))
        elif self._wrap == 'multiline':
            self._layout.multiline = True
            self._document.set_style(0, len(self.text), dict(wrap=False))
        self._update_layout_halign()
        self._layout.end_update()

    def _get_width(self):
        if self._wrap == 'width':
            return self._layout.width
        else:
            return self._layout.content_width

    def _set_width(self, width):
        self._width = width
        self._layout._wrap_lines_flag = width is not None
        self._update_wrap()

    width = property(_get_width, _set_width,
                     doc='''Width of the text.

        When set, this enables word-wrapping to the specified width.
        Otherwise, the width of the text as it will be rendered can be
        determined.

        :type: float
        ''')

    def _get_height(self):
        return self._layout.content_height

    height = property(_get_height,
                      doc='''Height of the text.

        This property is the ascent minus the descent of the font, unless
        there is more than one line of word-wrapped text, in which case
        the height takes into account the line leading.  Read-only.

        :type: float
        ''')

    def _get_text(self):
        return self._document.text

    def _set_text(self, text):
        self._document.text = text
        self._update_wrap()

    text = property(_get_text, _set_text,
                    doc='''Text to render.

        The glyph vertices are only recalculated as needed, so multiple
        changes to the text can be performed with no performance penalty.

        :type: str
        ''')

    def _get_halign(self):
        return self._layout.anchor_x

    def _set_halign(self, halign):
        self._layout.anchor_x = halign
        self._update_layout_halign()

    halign = property(_get_halign, _set_halign,
                      doc='''Horizontal alignment of the text.

        The text is positioned relative to `x` and `width` according to this
        property, which must be one of the alignment constants `LEFT`,
        `CENTER` or `RIGHT`.

        :type: str
        ''')

    def _get_valign(self):
        return self._layout.anchor_y

    def _set_valign(self, valign):
        self._layout.anchor_y = valign

    valign = property(_get_valign, _set_valign,
                      doc='''Vertical alignment of the text.

        The text is positioned relative to `y` according to this property,
        which must be one of the alignment constants `BOTTOM`, `BASELINE`,
        `CENTER` or `TOP`.

        :type: str
        ''')

    def _get_leading(self):
        return self._document.get_style('leading') or 0

    def _set_leading(self, leading):
        self._document.set_style(0, len(self._document.text), {
            'leading': leading,
        })

    leading = property(_get_leading, _set_leading,
                       doc='''Vertical space between adjacent lines, in pixels.

        :type: int
        ''')

    def _get_line_height(self):
        return self._font.ascent - self._font.descent + self.leading

    def _set_line_height(self, line_height):
        self.leading = line_height - (self._font.ascent - self._font.descent)

    line_height = property(_get_line_height, _set_line_height,
                           doc='''Vertical distance between adjacent baselines,
        in pixels.

        :type: int
        ''')

    def draw(self):
        self._layout.draw()
# Select the platform font implementation at import time.  The chosen class
# is bound to _font_class, which load() and add_file() use below.  The
# 'is_epydoc' guard skips the platform imports while building documentation.
if not getattr(sys, 'is_epydoc', False):
    if sys.platform == 'darwin':
        if pyglet.options['darwin_cocoa']:
            from pyglet.font.quartz import QuartzFont
            _font_class = QuartzFont
        else:
            from pyglet.font.carbon import CarbonFont
            _font_class = CarbonFont
    elif sys.platform in ('win32', 'cygwin'):
        # Two Windows back-ends, chosen via pyglet.options['font'].
        if pyglet.options['font'][0] == 'win32':
            from pyglet.font.win32 import Win32Font
            _font_class = Win32Font
        elif pyglet.options['font'][0] == 'gdiplus':
            from pyglet.font.win32 import GDIPlusFont
            _font_class = GDIPlusFont
        else:
            assert False, 'Unknown font driver'
    else:
        # All other platforms (Linux, BSD, ...) use FreeType.
        from pyglet.font.freetype import FreeTypeFont
        _font_class = FreeTypeFont
def load(name=None, size=None, bold=False, italic=False, dpi=None):
    '''Load a font for rendering.

    :Parameters:
        `name` : str, or list of str
            Font family, for example, "Times New Roman".  If a list of names
            is provided, the first one matching a known font is used.  If no
            font can be matched to the name(s), a default font is used.  In
            pyglet 1.1, the name may be omitted.
        `size` : float
            Size of the font, in points.  The returned font may be an exact
            match or the closest available.  In pyglet 1.1, the size may be
            omitted, and defaults to 12pt.
        `bold` : bool
            If True, a bold variant is returned, if one exists for the given
            family and size.
        `italic` : bool
            If True, an italic variant is returned, if one exists for the
            given family and size.
        `dpi` : float
            The assumed resolution of the display device, for the purposes of
            determining the pixel size of the font.  Defaults to 96.

    :rtype: `Font`
    '''
    # Arbitrary defaults when size/dpi are omitted.
    if size is None:
        size = 12
    if dpi is None:
        dpi = 96

    # When a list of candidate families is given, keep the first one the
    # platform driver recognises (None if none match -> default font).
    if type(name) in (tuple, list):
        name = next((candidate for candidate in name
                     if _font_class.have_font(candidate)), None)

    # Locate (or lazily create) the per-GL-context font cache.
    shared_space = gl.current_context.object_space
    if not hasattr(shared_space, 'pyglet_font_font_cache'):
        shared_space.pyglet_font_font_cache = weakref.WeakValueDictionary()
        shared_space.pyglet_font_font_hold = []
    cache = shared_space.pyglet_font_font_cache
    hold = shared_space.pyglet_font_font_hold

    descriptor = (name, size, bold, italic, dpi)
    try:
        # Cached fonts are weakly referenced; reuse while still alive.
        return cache[descriptor]
    except KeyError:
        pass

    # Not cached: create from scratch.
    new_font = _font_class(name, size, bold=bold, italic=italic, dpi=dpi)

    # Save parameters for new-style layout classes to recover.
    new_font.name = name
    new_font.size = size
    new_font.bold = bold
    new_font.italic = italic
    new_font.dpi = dpi

    # Cache in the weak-ref dictionary to avoid reloading while still in use.
    cache[descriptor] = new_font

    # Keep strong refs to the three most recently loaded fonts so they are
    # not collected if momentarily dropped.
    del hold[3:]
    hold.insert(0, new_font)

    return new_font
def add_file(font):
    '''Add a font to pyglet's search path.

    In order to load a font that is not installed on the system, you must
    call this method to tell pyglet that it exists.  You can supply
    either a filename or any file-like object.

    The font format is platform-dependent, but is typically a TrueType font
    file containing a single font face.  Note that to load this file after
    adding it you must specify the face name to `load`, not the filename.

    :Parameters:
        `font` : str or file
            Filename or file-like object to load fonts from.

    '''
    opened_here = False
    if isinstance(font, (str, unicode)):
        font = open(font, 'rb')
        opened_here = True
    if hasattr(font, 'read'):
        data = font.read()
        # Close the handle only if we opened it; a caller-supplied file
        # object remains the caller's responsibility.
        if opened_here:
            font.close()
        font = data
    _font_class.add_font_data(font)
def add_directory(dir):
    '''Add a directory of fonts to pyglet's search path.

    This function simply calls `add_file` for each file with a ``.ttf``
    extension in the given directory.  Subdirectories are not searched.

    :Parameters:
        `dir` : str
            Directory that contains font files.

    '''
    for filename in os.listdir(dir):
        # Compare case-insensitively so '.TTF' files are also picked up.
        if filename.lower().endswith('.ttf'):
            add_file(os.path.join(dir, filename))
| |
# coding: utf-8
""" General utilities """
__author__ = "Andy Casey <arc@ast.cam.ac.uk>"
__all__ = ["atomic_number", "element", "reflect_about", "latexify"]
import logging
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
import numpy as np
from scipy.special import wofz
logger = logging.getLogger("oracle")
def invert_mask(mask, data=None, limits=(0, 10e5), padding=0):
    """
    Invert a mask to return regions that should be considered. If ``data`` are
    provided then they will instruct on the wavelength limits to be considered.
    Otherwise, ``limits`` will be used.

    :param mask:
        The regions to mask.

    :type mask:
        :class:`numpy.ndarray`

    :param data: [optional]
        The observed spectra.

    :type data:
        list of :class:`oracle.specutils.Spectrum1D` objects

    :param limits: [optional]
        The wavelength limits to consider.

    :type limits:
        tuple
    """

    if data is not None:
        # Derive the wavelength limits from the dispersion arrays of the
        # observed spectra.
        endpoints = np.sort(
            np.array([[s.disp[0], s.disp[-1]] for s in data]).flatten())
        limits = [np.min(endpoints), np.max(endpoints)]

    # Merge the outer limits with every mask edge and sort so consecutive
    # pairs delimit alternating masked/unmasked intervals.
    boundary_points = np.sort(list(limits) + list(mask.flatten()))

    # Find where the limits fall, then take the interior edges pairwise to
    # obtain the regions *between* the masked intervals.
    idx = boundary_points.searchsorted(limits)
    start = idx[0] + (idx[0] % 2)
    stop = idx[1] + (idx[1] % 2)
    regions = boundary_points[start:stop].reshape(-1, 2)

    # Widen every kept region by the requested padding.
    regions[:, 0] -= padding
    regions[:, 1] += padding
    return regions
# Statistics tuple returned by the cache_info() method of lru_cache wrappers.
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark = (object(),),
fasttypes = {int, str, frozenset, type(None)},
sorted=sorted, tuple=tuple, type=type, len=len):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=128, typed=False):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize) with
    f.cache_info().  Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
    """
    # Users should only access the lru_cache through its public API:
    #   cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).
    # NOTE: this mirrors the pure-Python functools.lru_cache implementation,
    # with a debug log line added on cache hits (see logger.debug below).

    def decorating_function(user_function):
        cache = dict()
        stats = [0, 0]                  # make statistics updateable non-locally
        HITS, MISSES = 0, 1             # names for the stats fields
        make_key = _make_key
        cache_get = cache.get           # bound method to lookup key or return None
        _len = len                      # localize the global len() function
        lock = RLock()                  # because linkedlist updates aren't threadsafe
        root = []                       # root of the circular doubly linked list
        root[:] = [root, root, None, None]      # initialize by pointing to self
        nonlocal_root = [root]          # make updateable non-locally
        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3    # names for the link fields

        if maxsize == 0:
            def wrapper(*args, **kwds):
                # no caching, just do a statistics update after a successful call
                result = user_function(*args, **kwds)
                stats[MISSES] += 1
                return result
        elif maxsize is None:
            def wrapper(*args, **kwds):
                # simple caching without ordering or size limit
                key = make_key(args, kwds, typed)
                result = cache_get(key, root)   # root used here as a unique not-found sentinel
                if result is not root:
                    stats[HITS] += 1
                    return result
                result = user_function(*args, **kwds)
                cache[key] = result
                stats[MISSES] += 1
                return result
        else:
            def wrapper(*args, **kwds):
                # size limited caching that tracks accesses by recency
                key = make_key(args, kwds, typed) if kwds or typed else args
                with lock:
                    link = cache_get(key)
                    if link is not None:
                        # record recent use of the key by moving it to the
                        # front of the circular list
                        root, = nonlocal_root
                        link_prev, link_next, key, result = link
                        link_prev[NEXT] = link_next
                        link_next[PREV] = link_prev
                        last = root[PREV]
                        last[NEXT] = root[PREV] = link
                        link[PREV] = last
                        link[NEXT] = root
                        stats[HITS] += 1
                        logger.debug("Using cache")
                        return result
                # The user function runs outside the lock so concurrent
                # callers are not serialized on a cache miss.
                result = user_function(*args, **kwds)
                with lock:
                    root, = nonlocal_root
                    if key in cache:
                        # getting here means that this same key was added to the
                        # cache while the lock was released.  since the link
                        # update is already done, we need only return the
                        # computed result and update the count of misses.
                        pass
                    elif _len(cache) >= maxsize:
                        # use the old root to store the new key and result
                        oldroot = root
                        oldroot[KEY] = key
                        oldroot[RESULT] = result
                        # empty the oldest link and make it the new root
                        root = nonlocal_root[0] = oldroot[NEXT]
                        oldkey = root[KEY]
                        oldvalue = root[RESULT]
                        root[KEY] = root[RESULT] = None
                        # now update the cache dictionary for the new links
                        del cache[oldkey]
                        cache[key] = oldroot
                    else:
                        # put result in a new link at the front of the list
                        last = root[PREV]
                        link = [last, root, key, result]
                        last[NEXT] = root[PREV] = cache[key] = link
                    stats[MISSES] += 1
                return result

        def cache_info():
            """Report cache statistics"""
            with lock:
                return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))

        def cache_clear():
            """Clear the cache and cache statistics"""
            with lock:
                cache.clear()
                root = nonlocal_root[0]
                root[:] = [root, root, None, None]
                stats[:] = [0, 0]

        wrapper.__wrapped__ = user_function
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)

    return decorating_function
def rounder(*decimals, **decimal_kwargs):
    """Decorator factory that rounds positional arguments before the call.

    Each entry in *decimals* gives the number of decimal places for the
    positional argument at the same index; ``None`` passes the argument
    through unrounded.  Arguments beyond the listed decimals are forwarded
    unchanged.
    """
    def decorator(function):
        def wrapper(*args, **kwargs):
            processed = []
            for value, ndigits in zip(args, decimals):
                if ndigits is None:
                    # No rounding requested; record rows are unpacked to
                    # plain tuples so they stay hashable/comparable.
                    if isinstance(value, (np.core.records.record, )):
                        processed.append(tuple(value))
                    else:
                        processed.append(value)
                else:
                    if isinstance(value, (float, int)):
                        processed.append(np.round(value, ndigits))
                    else:
                        processed.append(tuple(np.round(value, ndigits)))

            # Forward any trailing arguments with no precision requirement.
            remainder = len(args) - len(processed)
            if remainder > 0:
                processed.extend(args[-remainder:])

            return function(*processed, **kwargs)
        return wrapper
    return decorator
# return ("teff", "xi", "logg", "[M/H]")
# teff, logg, xi, [M/H]
def stellar_jacobian(stellar_parameters, *args):
    """Approximate the Jacobian of the stellar parameters and
    minimisation parameters, based on calculations from the Sun.

    :param stellar_parameters:
        The current stellar parameters; the first four entries are
        (teff, logg, feh, vt).

    :returns:
        The (transposed) approximate Jacobian matrix as a
        :class:`numpy.ndarray`.
    """
    # (The previous implementation left its superseded matrix behind as a
    # dead string literal; it has been removed.)

    logger.info("Updating approximation of Jacobian matrix for stellar parameter determination")

    teff, logg, feh, vt = stellar_parameters[:4]

    # This is total black magic. Like, wizard style.  Coefficients were
    # calibrated against solar calculations.
    return np.array([
        [+5.4393e-08*teff - 4.8623e-04, +1.6258e-02*logg - 8.2654e-02, +1.0897e-02*feh - 2.3837e-02, -7.2560e-02*vt + 1.2853e-01],
        [+4.2613e-08*teff - 4.2039e-04, -5.7948e-02*logg - 1.2402e-01, -1.1533e-01*feh - 9.2341e-02, -4.3985e-01*vt + 8.0592e-02],
        [-3.2710e-07*teff + 2.8178e-03, -1.2006e-01*logg - 3.5816e-02, -2.8592e-04*feh + 1.4257e-02, +3.8185e-02*vt - 1.6601e-01],
        [-1.7822e-07*teff + 1.8250e-03, -1.2114e-01*logg + 4.1779e-01, -1.8847e-01*feh - 1.0949e-00, +3.5564e-01*vt - 1.1024e-00]
    ]).T
def reflect_about(a, limits):
    """
    Similar to :func:`numpy.clip`, except it just reflects about some limiting axes.

    :param a:
        The array of values to reflect.

    :type a:
        :class:`numpy.array`

    :param limits:
        The upper and lower limits to reflect about. Use ``None`` for no limit.

    :type limits:
        A two length tuple or list-type.

    :returns:
        The reflected array (modified in place).

    :rtype:
        :class:`numpy.array`
    """

    lower_bound, upper_bound = limits

    if lower_bound is not None:
        # Mirror values below the lower bound back above it.
        below = a < lower_bound
        a[below] = lower_bound + (lower_bound - a[below])

    if upper_bound is not None:
        # Mirror values above the upper bound back below it.
        above = a > upper_bound
        a[above] = upper_bound - (a[above] - upper_bound)

    return a
def latexify(labels, default_latex_labels=None):
    """
    Return a LaTeX-ified label.

    Args:
        labels (str or list-type of str objects): The label(s) to latexify.
        default_latex_labels (dict): Dictionary of common labels to use.

    Returns:
        LaTeX-ified label (a single string when a string was given, else a
        list in the same order as the input).
    """

    known_labels = {
        "teff": "$T_{\\rm eff}$ (K)",
        "feh": "[Fe/H]",
        "logg": "$\log{g}$",
        "alpha": "[$\\alpha$/Fe]",
        "xi": "$\\xi$ (km s$^{-1}$)"
    }

    if default_latex_labels is not None:
        known_labels.update(default_latex_labels)

    # Remember whether a bare string was supplied so we can unwrap at the end.
    single_input = isinstance(labels, str)
    if single_input:
        labels = [labels]

    converted = []
    for raw_label in labels:
        if raw_label.startswith("doppler_sigma_"):
            # Channel index encodes the spectrograph arm.
            channel = ["blue", "green", "red", "ir"][int(raw_label.split("_")[-1])]
            converted.append(
                "$\sigma_{\\rm doppler," + channel + "}$ ($\\AA{}$)")
        else:
            # Unknown labels pass through untouched.
            converted.append(known_labels.get(raw_label, raw_label))

    return converted[0] if single_input else converted
def atomic_number(element):
    """
    Return the atomic number of a given element.

    :param element:
        The short-hand notation for the element (e.g., Fe).

    :type element:
        str

    :returns:
        The atomic number for a given element.

    :rtype:
        int

    :raises TypeError:
        If ``element`` is not a string.

    :raises ValueError:
        If ``element`` is not a recognised element symbol.
    """

    # Accept both str and unicode on Python 2; on Python 3 all text is str.
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)
    if not isinstance(element, string_types):
        raise TypeError("element must be represented by a string-type")

    periodic_table = """H He
        Li Be B C N O F Ne
        Na Mg Al Si P S Cl Ar
        K Ca Sc Ti V Cr Mn Fe Co Ni Cu Zn Ga Ge As Se Br Kr
        Rb Sr Y Zr Nb Mo Tc Ru Rh Pd Ag Cd In Sn Sb Te I Xe
        Cs Ba Lu Hf Ta W Re Os Ir Pt Au Hg Tl Pb Bi Po At Rn
        Fr Ra Lr Rf Db Sg Bh Hs Mt Ds Rg Cn UUt"""

    lanthanoids = "La Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb"
    actinoids = "Ac Th Pa U Np Pu Am Cm Bk Cf Es Fm Md No"

    # Splice the lanthanoid/actinoid series in after Ba and Ra respectively,
    # then split to obtain symbols in atomic-number order.
    periodic_table = periodic_table.replace(" Ba ", " Ba " + lanthanoids + " ") \
        .replace(" Ra ", " Ra " + actinoids + " ").split()
    del actinoids, lanthanoids

    if element not in periodic_table:
        # BUGFIX: previously this *returned* a ValueError instead of raising
        # it, so unknown elements went undetected by callers.
        raise ValueError("element '{0}' is not known".format(element))

    return periodic_table.index(element) + 1
def element(atomic_number):
    """
    Return the element of a given atomic number.

    :param atomic_number:
        The atomic number for the element in question (e.g., 26).

    :type atomic_number:
        int-like

    :returns:
        The short-hand element for a given atomic number.

    :rtype:
        str
    """

    index = int(atomic_number)

    base_table = """H He
        Li Be B C N O F Ne
        Na Mg Al Si P S Cl Ar
        K Ca Sc Ti V Cr Mn Fe Co Ni Cu Zn Ga Ge As Se Br Kr
        Rb Sr Y Zr Nb Mo Tc Ru Rh Pd Ag Cd In Sn Sb Te I Xe
        Cs Ba Lu Hf Ta W Re Os Ir Pt Au Hg Tl Pb Bi Po At Rn
        Fr Ra Lr Rf Db Sg Bh Hs Mt Ds Rg Cn UUt"""
    lanthanoid_series = "La Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb"
    actinoid_series = "Ac Th Pa U Np Pu Am Cm Bk Cf Es Fm Md No"

    # Insert the lanthanoid/actinoid series after Ba and Ra so the split
    # list is ordered by atomic number.
    symbols = base_table.replace(" Ba ", " Ba " + lanthanoid_series + " ") \
        .replace(" Ra ", " Ra " + actinoid_series + " ").split()

    return symbols[index - 1]
| |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
from sleekxmpp.xmlstream import JID
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.plugins.base import BasePlugin
from sleekxmpp.plugins.xep_0060 import stanza
log = logging.getLogger(__name__)
class XEP_0060(BasePlugin):

    """
    XEP-0060 Publish Subscribe
    """

    # Plugin registry name and human-readable description.
    name = 'xep_0060'
    description = 'XEP-0060: Publish-Subscribe'
    # Plugins that must be loaded first: service discovery (0030), data
    # forms (0004), date/time profiles (0082) and stanza headers (0131).
    dependencies = set(['xep_0030', 'xep_0004', 'xep_0082', 'xep_0131'])
    # Stanza definitions used by this plugin.
    stanza = stanza
def plugin_init(self):
    """Register pubsub event stanza handlers and advertise SubID support."""
    self.node_event_map = {}

    # (handler label, pubsub_event child element, callback) for each
    # notification type we dispatch on.
    event_routes = (
        ('Items', 'items', self._handle_event_items),
        ('Purge', 'purge', self._handle_event_purge),
        ('Delete', 'delete', self._handle_event_delete),
        ('Configuration', 'configuration', self._handle_event_configuration),
        ('Subscription', 'subscription', self._handle_event_subscription),
    )
    for label, substanza, callback in event_routes:
        self.xmpp.register_handler(
            Callback('Pubsub Event: %s' % label,
                     StanzaPath('message/pubsub_event/%s' % substanza),
                     callback))

    self.xmpp['xep_0131'].supported_headers.add('SubID')
def plugin_end(self):
    """Remove the stanza handlers registered by plugin_init."""
    for label in ('Items', 'Purge', 'Delete', 'Configuration', 'Subscription'):
        self.xmpp.remove_handler('Pubsub Event: %s' % label)
def _handle_event_items(self, msg):
    """Raise events for publish and retraction notifications."""
    node = msg['pubsub_event']['items']['node']

    # A single notification may carry several items; when it does, each
    # item is re-broadcast inside its own single-item copy of the message.
    multi = len(msg['pubsub_event']['items']) > 1
    values = {}
    if multi:
        # Snapshot the message's values minus the pubsub payload so the
        # per-item copies keep the original envelope (from, to, etc.).
        values = msg.values
        del values['pubsub_event']

    for item in msg['pubsub_event']['items']:
        event_name = self.node_event_map.get(node, None)
        # Retract elements signal removal; everything else is a publish.
        event_type = 'publish'
        if item.name == 'retract':
            event_type = 'retract'

        if multi:
            # Build a single-item stanza so handlers see one item at a time.
            condensed = self.xmpp.Message()
            condensed.values = values
            condensed['pubsub_event']['items']['node'] = node
            condensed['pubsub_event']['items'].append(item)
            # NOTE(review): the generic 'pubsub_*' event receives the
            # original multi-item msg while the node-mapped event gets the
            # condensed single-item copy — confirm this asymmetry is
            # intentional.
            self.xmpp.event('pubsub_%s' % event_type, msg)
            if event_name:
                self.xmpp.event('%s_%s' % (event_name, event_type),
                                condensed)
        else:
            self.xmpp.event('pubsub_%s' % event_type, msg)
            if event_name:
                self.xmpp.event('%s_%s' % (event_name, event_type), msg)
def _handle_event_purge(self, msg):
    """Raise events for node purge notifications."""
    purged_node = msg['pubsub_event']['purge']['node']
    mapped_event = self.node_event_map.get(purged_node)

    # Always fire the generic event; fire the node-specific one when mapped.
    self.xmpp.event('pubsub_purge', msg)
    if mapped_event:
        self.xmpp.event('{0}_purge'.format(mapped_event), msg)
def _handle_event_delete(self, msg):
    """Raise events for node deletion notifications."""
    deleted_node = msg['pubsub_event']['delete']['node']
    mapped_event = self.node_event_map.get(deleted_node)

    # Always fire the generic event; fire the node-specific one when mapped.
    self.xmpp.event('pubsub_delete', msg)
    if mapped_event:
        self.xmpp.event('{0}_delete'.format(mapped_event), msg)
def _handle_event_configuration(self, msg):
    """Raise events for node configuration notifications."""
    configured_node = msg['pubsub_event']['configuration']['node']
    mapped_event = self.node_event_map.get(configured_node)

    # Always fire the generic event; fire the node-specific one when mapped.
    self.xmpp.event('pubsub_config', msg)
    if mapped_event:
        self.xmpp.event('{0}_config'.format(mapped_event), msg)
def _handle_event_subscription(self, msg):
    """Raise events for node subscription notifications."""
    subscribed_node = msg['pubsub_event']['subscription']['node']
    mapped_event = self.node_event_map.get(subscribed_node)

    # Always fire the generic event; fire the node-specific one when mapped.
    self.xmpp.event('pubsub_subscription', msg)
    if mapped_event:
        self.xmpp.event('{0}_subscription'.format(mapped_event), msg)
def map_node_event(self, node, event_name):
    """
    Map node names to events.

    When a pubsub event is received for the given node,
    raise the provided event.

    For example::

        map_node_event('http://jabber.org/protocol/tune',
                       'user_tune')

    will produce the events 'user_tune_publish' and 'user_tune_retract'
    when the respective notifications are received from the node
    'http://jabber.org/protocol/tune', among other events.

    Arguments:
        node       -- The node name to map to an event.
        event_name -- The name of the event to raise when a
                      notification from the given node is received.
    """
    # The _handle_event_* callbacks consult this mapping to decide which
    # '<event_name>_<type>' events to fire in addition to the generic ones.
    self.node_event_map[node] = event_name
def create_node(self, jid, node, config=None, ntype=None, ifrom=None,
                block=True, callback=None, timeout=None):
    """
    Create and configure a new pubsub node.

    A server MAY use a different name for the node than the one provided,
    so be sure to check the result stanza for a server assigned name.

    If no configuration form is provided, the node will be created using
    the server's default configuration. To get the default configuration
    use get_node_config().

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- Optional name of the node to create. If no name is
                    provided, the server MAY generate a node ID for you.
                    The server can also assign a different name than the
                    one you provide; check the result stanza to see if
                    the server assigned a name.
        config   -- Optional XEP-0004 data form of configuration settings.
        ntype    -- The type of node to create. Servers typically default
                    to using 'leaf' if no type is provided.  Note: only
                    applied when a config form is also supplied.
        ifrom    -- Specify the sender's JID.
        block    -- Specify if the send call will block until a response
                    is received, or a timeout occurs. Defaults to True.
        timeout  -- The length of time (in seconds) to wait for a response
                    before exiting the send call if blocking is used.
                    Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
        callback -- Optional reference to a stream handler function. Will
                    be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
    iq['pubsub']['create']['node'] = node

    if config is not None:
        # Stamp (or overwrite) the form's FORM_TYPE so the server treats
        # it as a node configuration form.
        form_type = 'http://jabber.org/protocol/pubsub#node_config'
        if 'FORM_TYPE' in config['fields']:
            config.field['FORM_TYPE']['value'] = form_type
        else:
            config.add_field(var='FORM_TYPE',
                             ftype='hidden',
                             value=form_type)
        if ntype:
            # Record the requested node type in the configuration form.
            if 'pubsub#node_type' in config['fields']:
                config.field['pubsub#node_type']['value'] = ntype
            else:
                config.add_field(var='pubsub#node_type', value=ntype)
        iq['pubsub']['configure'].append(config)

    return iq.send(block=block, callback=callback, timeout=timeout)
def subscribe(self, jid, node, bare=True, subscribee=None, options=None,
              ifrom=None, block=True, callback=None, timeout=None):
    """
    Subscribe to updates from a pubsub node.

    The JID that subscribes is resolved as follows:
        1. An explicit subscribee, if one was given.
        2. Otherwise ifrom, in bare or full form depending on bare.
        3. Otherwise self.xmpp.boundjid, bare or full depending on bare.

    Arguments:
        jid        -- The pubsub service JID.
        node       -- The node to subscribe to.
        bare       -- Indicates if the subscribee is a bare or full JID.
                      Defaults to True for a bare JID.
        subscribee -- The JID that is subscribing to the node.
        options    -- Optional form of subscription options, appended
                      to the request.
        ifrom      -- Specify the sender's JID.
        block      -- Specify if the send call will block until a
                      response is received, or a timeout occurs.
                      Defaults to True.
        timeout    -- The length of time (in seconds) to wait for a
                      response before exiting the send call if blocking
                      is used.
                      Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
        callback   -- Optional reference to a stream handler function.
                      Will be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
    iq['pubsub']['subscribe']['node'] = node
    if subscribee is None:
        if ifrom:
            subscribee = JID(ifrom).bare if bare else ifrom
        else:
            subscribee = self.xmpp.boundjid.bare if bare else self.xmpp.boundjid
    iq['pubsub']['subscribe']['jid'] = subscribee
    if options is not None:
        iq['pubsub']['options'].append(options)
    return iq.send(block=block, callback=callback, timeout=timeout)
def unsubscribe(self, jid, node, subid=None, bare=True, subscribee=None,
                ifrom=None, block=True, callback=None, timeout=None):
    """
    Unsubscribe from updates from a pubsub node.

    The JID that unsubscribes is resolved as follows:
        1. An explicit subscribee, if one was given.
        2. Otherwise ifrom, in bare or full form depending on bare.
        3. Otherwise self.xmpp.boundjid, bare or full depending on bare.

    Arguments:
        jid        -- The pubsub service JID.
        node       -- The node to unsubscribe from.
        subid      -- The specific subscription, if multiple
                      subscriptions exist for this JID/node combination.
        bare       -- Indicates if the subscribee is a bare or full JID.
                      Defaults to True for a bare JID.
        subscribee -- The JID that is unsubscribing from the node.
        ifrom      -- Specify the sender's JID.
        block      -- Specify if the send call will block until a
                      response is received, or a timeout occurs.
                      Defaults to True.
        timeout    -- The length of time (in seconds) to wait for a
                      response before exiting the send call if blocking
                      is used.
                      Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
        callback   -- Optional reference to a stream handler function.
                      Will be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
    iq['pubsub']['unsubscribe']['node'] = node
    if subscribee is None:
        if ifrom:
            subscribee = JID(ifrom).bare if bare else ifrom
        else:
            subscribee = self.xmpp.boundjid.bare if bare else self.xmpp.boundjid
    iq['pubsub']['unsubscribe']['jid'] = subscribee
    iq['pubsub']['unsubscribe']['subid'] = subid
    return iq.send(block=block, callback=callback, timeout=timeout)
def get_subscriptions(self, jid, node=None, ifrom=None, block=True,
                      callback=None, timeout=None):
    """
    Retrieve the sender's subscriptions with a pubsub service.

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- Optional node to limit the request to.
        ifrom    -- Specify the sender's JID.
        block    -- Specify if the send call will block until a response
                    is received, or a timeout occurs. Defaults to True.
        timeout  -- The length of time (in seconds) to wait for a response
                    before exiting the send call if blocking is used.
        callback -- Optional reference to a stream handler function. Will
                    be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
    iq['pubsub']['subscriptions']['node'] = node
    return iq.send(block=block, callback=callback, timeout=timeout)
def get_affiliations(self, jid, node=None, ifrom=None, block=True,
                     callback=None, timeout=None):
    """
    Retrieve the sender's affiliations with a pubsub service.

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- Optional node to limit the request to.
        ifrom    -- Specify the sender's JID.
        block    -- Specify if the send call will block until a response
                    is received, or a timeout occurs. Defaults to True.
        timeout  -- The length of time (in seconds) to wait for a response
                    before exiting the send call if blocking is used.
        callback -- Optional reference to a stream handler function. Will
                    be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
    iq['pubsub']['affiliations']['node'] = node
    return iq.send(block=block, callback=callback, timeout=timeout)
def get_subscription_options(self, jid, node=None, user_jid=None,
                             ifrom=None, block=True, callback=None,
                             timeout=None):
    """
    Retrieve subscription options for a node.

    If no user_jid is given, the service's default subscription
    options are requested instead.

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- The node whose options are requested.
        user_jid -- The subscriber's JID. If None, the default
                    options are requested.
        ifrom    -- Specify the sender's JID.
        block    -- Specify if the send call will block until a response
                    is received, or a timeout occurs. Defaults to True.
        timeout  -- The length of time (in seconds) to wait for a response
                    before exiting the send call if blocking is used.
        callback -- Optional reference to a stream handler function. Will
                    be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
    if user_jid is None:
        iq['pubsub']['default']['node'] = node
    else:
        iq['pubsub']['options']['node'] = node
        iq['pubsub']['options']['jid'] = user_jid
    return iq.send(block=block, callback=callback, timeout=timeout)
def set_subscription_options(self, jid, node, user_jid, options,
                             ifrom=None, block=True, callback=None,
                             timeout=None):
    """
    Set subscription options for a node.

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- The node whose subscription options are updated.
        user_jid -- The subscriber's JID.
        options  -- A form of subscription options to submit.
        ifrom    -- Specify the sender's JID.
        block    -- Specify if the send call will block until a response
                    is received, or a timeout occurs. Defaults to True.
        timeout  -- The length of time (in seconds) to wait for a response
                    before exiting the send call if blocking is used.
        callback -- Optional reference to a stream handler function. Will
                    be executed when a reply stanza is received.
    """
    # BUG FIX: submitting subscription options modifies state on the
    # service, so it must be an IQ-set (XEP-0060 sec. 6.3.5); this
    # previously sent stype='get'.
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
    iq['pubsub']['options']['node'] = node
    iq['pubsub']['options']['jid'] = user_jid
    iq['pubsub']['options'].append(options)
    return iq.send(block=block, callback=callback, timeout=timeout)
def get_node_config(self, jid, node=None, ifrom=None, block=True,
                    callback=None, timeout=None):
    """
    Retrieve the configuration for a node, or the pubsub service's
    default configuration for new nodes.

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- The node to retrieve the configuration for. If None,
                    the default configuration for new nodes will be
                    requested. Defaults to None.
        ifrom    -- Specify the sender's JID.
        block    -- Specify if the send call will block until a response
                    is received, or a timeout occurs. Defaults to True.
        timeout  -- The length of time (in seconds) to wait for a response
                    before exiting the send call if blocking is used.
                    Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
        callback -- Optional reference to a stream handler function. Will
                    be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
    if node is None:
        # NOTE(review): the bare interface access appears intentional --
        # presumably touching the interface materializes an empty
        # <default/> element in the stanza; confirm against the stanza
        # plugin behavior.
        iq['pubsub_owner']['default']
    else:
        iq['pubsub_owner']['configure']['node'] = node
    return iq.send(block=block, callback=callback, timeout=timeout)
def get_node_subscriptions(self, jid, node, ifrom=None, block=True,
                           callback=None, timeout=None):
    """
    Retrieve the list of subscriptions registered on a given node.

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- The node whose subscriptions are requested.
        ifrom    -- Specify the sender's JID.
        block    -- When True (the default), wait for the reply or a
                    timeout before returning.
        timeout  -- Seconds to wait for a reply when blocking.
                    Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
        callback -- Optional stream handler invoked with the reply
                    stanza.
    """
    request = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
    request['pubsub_owner']['subscriptions']['node'] = node
    return request.send(block=block, callback=callback, timeout=timeout)
def get_node_affiliations(self, jid, node, ifrom=None, block=True,
                          callback=None, timeout=None):
    """
    Retrieve the list of affiliations registered on a given node.

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- The node whose affiliations are requested.
        ifrom    -- Specify the sender's JID.
        block    -- When True (the default), wait for the reply or a
                    timeout before returning.
        timeout  -- Seconds to wait for a reply when blocking.
                    Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
        callback -- Optional stream handler invoked with the reply
                    stanza.
    """
    request = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
    request['pubsub_owner']['affiliations']['node'] = node
    return request.send(block=block, callback=callback, timeout=timeout)
def delete_node(self, jid, node, ifrom=None, block=True,
                callback=None, timeout=None):
    """
    Delete a pubsub node.

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- The node to delete.
        ifrom    -- Specify the sender's JID.
        block    -- Specify if the send call will block until a response
                    is received, or a timeout occurs. Defaults to True.
        timeout  -- The length of time (in seconds) to wait for a response
                    before exiting the send call if blocking is used.
                    Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
        callback -- Optional reference to a stream handler function. Will
                    be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
    iq['pubsub_owner']['delete']['node'] = node
    return iq.send(block=block, callback=callback, timeout=timeout)
def set_node_config(self, jid, node, config, ifrom=None, block=True,
                    callback=None, timeout=None):
    """
    Submit a new configuration for a node.

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- The node to configure.
        config   -- A form of configuration settings to submit.
        ifrom    -- Specify the sender's JID.
        block    -- Specify if the send call will block until a response
                    is received, or a timeout occurs. Defaults to True.
        timeout  -- The length of time (in seconds) to wait for a response
                    before exiting the send call if blocking is used.
        callback -- Optional reference to a stream handler function. Will
                    be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
    iq['pubsub_owner']['configure']['node'] = node
    iq['pubsub_owner']['configure'].append(config)
    return iq.send(block=block, callback=callback, timeout=timeout)
def publish(self, jid, node, id=None, payload=None, options=None,
            ifrom=None, block=True, callback=None, timeout=None):
    """
    Add a new item to a node, or edit an existing item.

    For services that support it, you can use the publish command
    as an event signal by not including an ID or payload.

    When including a payload and you do not provide an ID then
    the service will generally create an ID for you.

    Publish options may be specified, and how those options
    are processed is left to the service, such as treating
    the options as preconditions that the node's settings
    must match.

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- The node to publish the item to.
        id       -- Optionally specify the ID of the item.
        payload  -- The item content to publish.
        options  -- A form of publish options.
        ifrom    -- Specify the sender's JID.
        block    -- Specify if the send call will block until a response
                    is received, or a timeout occurs. Defaults to True.
        timeout  -- The length of time (in seconds) to wait for a response
                    before exiting the send call if blocking is used.
                    Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
        callback -- Optional reference to a stream handler function. Will
                    be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
    iq['pubsub']['publish']['node'] = node
    if id is not None:
        iq['pubsub']['publish']['item']['id'] = id
    if payload is not None:
        iq['pubsub']['publish']['item']['payload'] = payload
    # Assigned unconditionally, even when options is None -- presumably
    # assigning None leaves the stanza unchanged; confirm against the
    # stanza interface.
    iq['pubsub']['publish_options'] = options
    return iq.send(block=block, callback=callback, timeout=timeout)
def retract(self, jid, node, id, notify=None, ifrom=None, block=True,
            callback=None, timeout=None):
    """
    Delete a single item from a node.

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- The node to delete the item from.
        id       -- The ID of the item to delete.
        notify   -- Passed through to the retract request's 'notify'
                    field.
        ifrom    -- Specify the sender's JID.
        block    -- Specify if the send call will block until a response
                    is received, or a timeout occurs. Defaults to True.
        timeout  -- The length of time (in seconds) to wait for a response
                    before exiting the send call if blocking is used.
        callback -- Optional reference to a stream handler function. Will
                    be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
    iq['pubsub']['retract']['node'] = node
    iq['pubsub']['retract']['notify'] = notify
    iq['pubsub']['retract']['item']['id'] = id
    return iq.send(block=block, callback=callback, timeout=timeout)
def purge(self, jid, node, ifrom=None, block=True, callback=None,
          timeout=None):
    """
    Remove all items from a node.

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- The node to purge.
        ifrom    -- Specify the sender's JID.
        block    -- Specify if the send call will block until a response
                    is received, or a timeout occurs. Defaults to True.
        timeout  -- The length of time (in seconds) to wait for a response
                    before exiting the send call if blocking is used.
        callback -- Optional reference to a stream handler function. Will
                    be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
    iq['pubsub_owner']['purge']['node'] = node
    return iq.send(block=block, callback=callback, timeout=timeout)
def get_nodes(self, *args, **kwargs):
    """
    Discover the nodes provided by a Pubsub service, using disco.

    All arguments are forwarded unchanged to the XEP-0030 plugin's
    get_items() method.
    """
    return self.xmpp['xep_0030'].get_items(*args, **kwargs)
def get_item(self, jid, node, item_id, ifrom=None, block=True,
             callback=None, timeout=None):
    """
    Retrieve the content of an individual item.

    Arguments:
        jid      -- The JID of the pubsub service.
        node     -- The node the item is published on.
        item_id  -- The ID of the item to fetch.
        ifrom    -- Specify the sender's JID.
        block    -- When True (the default), wait for the reply or a
                    timeout before returning.
        timeout  -- Seconds to wait for a reply when blocking.
        callback -- Optional stream handler invoked with the reply
                    stanza.
    """
    request = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
    request['pubsub']['items']['node'] = node
    wanted = stanza.Item()
    wanted['id'] = item_id
    request['pubsub']['items'].append(wanted)
    return request.send(block=block, callback=callback, timeout=timeout)
def get_items(self, jid, node, item_ids=None, max_items=None,
              iterator=False, ifrom=None, block=False,
              callback=None, timeout=None):
    """
    Request the contents of a node's items.

    The desired items can be specified, or a query for the last
    few published items can be used.

    Pubsub services may use result set management for nodes with
    many items, so an iterator can be returned if needed.

    Arguments:
        jid       -- The JID of the pubsub service.
        node      -- The node to fetch items from.
        item_ids  -- Optional list of specific item IDs to request.
        max_items -- Limit on the number of items returned.
        iterator  -- If True, return a XEP-0059 result iterator
                     instead of sending the request directly.
        ifrom     -- Specify the sender's JID.
        block     -- NOTE: unlike the other methods in this plugin,
                     this defaults to False.
        timeout   -- The length of time (in seconds) to wait for a
                     response before exiting the send call if blocking
                     is used.
        callback  -- Optional reference to a stream handler function.
                     Will be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
    iq['pubsub']['items']['node'] = node
    iq['pubsub']['items']['max_items'] = max_items
    if item_ids is not None:
        for item_id in item_ids:
            item = stanza.Item()
            item['id'] = item_id
            iq['pubsub']['items'].append(item)
    if iterator:
        return self.xmpp['xep_0059'].iterate(iq, 'pubsub')
    else:
        return iq.send(block=block, callback=callback, timeout=timeout)
def get_item_ids(self, jid, node, ifrom=None, block=True,
                 callback=None, timeout=None, iterator=False):
    """
    Retrieve the ItemIDs hosted by a given node, using disco.

    Delegates to the XEP-0030 plugin's get_items() with the node
    as the disco node to query.
    """
    return self.xmpp['xep_0030'].get_items(jid, node,
                                           ifrom=ifrom,
                                           block=block,
                                           callback=callback,
                                           timeout=timeout,
                                           iterator=iterator)
def modify_affiliations(self, jid, node, affiliations=None, ifrom=None,
                        block=True, callback=None, timeout=None):
    """
    Modify the affiliations of JIDs with a node.

    Arguments:
        jid          -- The JID of the pubsub service.
        node         -- The node whose affiliations are modified.
        affiliations -- Iterable of (jid, affiliation) pairs to apply.
        ifrom        -- Specify the sender's JID.
        block        -- Specify if the send call will block until a
                        response is received, or a timeout occurs.
                        Defaults to True.
        timeout      -- The length of time (in seconds) to wait for a
                        response before exiting the send call if
                        blocking is used.
        callback     -- Optional reference to a stream handler function.
                        Will be executed when a reply stanza is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
    iq['pubsub_owner']['affiliations']['node'] = node
    if affiliations is None:
        affiliations = []
    # Use a distinct loop variable; the loop previously rebound the
    # ``jid`` parameter (the service JID), which is confusing even
    # though the parameter is no longer read afterwards.
    for aff_jid, affiliation in affiliations:
        aff = stanza.OwnerAffiliation()
        aff['jid'] = aff_jid
        aff['affiliation'] = affiliation
        iq['pubsub_owner']['affiliations'].append(aff)
    return iq.send(block=block, callback=callback, timeout=timeout)
def modify_subscriptions(self, jid, node, subscriptions=None, ifrom=None,
                         block=True, callback=None, timeout=None):
    """
    Modify the subscriptions of JIDs with a node.

    Arguments:
        jid           -- The JID of the pubsub service.
        node          -- The node whose subscriptions are modified.
        subscriptions -- Iterable of (jid, subscription) pairs to apply.
        ifrom         -- Specify the sender's JID.
        block         -- Specify if the send call will block until a
                         response is received, or a timeout occurs.
                         Defaults to True.
        timeout       -- The length of time (in seconds) to wait for a
                         response before exiting the send call if
                         blocking is used.
        callback      -- Optional reference to a stream handler
                         function. Will be executed when a reply stanza
                         is received.
    """
    iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
    iq['pubsub_owner']['subscriptions']['node'] = node
    if subscriptions is None:
        subscriptions = []
    # Use a distinct loop variable; the loop previously rebound the
    # ``jid`` parameter (the service JID).
    for sub_jid, subscription in subscriptions:
        sub = stanza.OwnerSubscription()
        sub['jid'] = sub_jid
        sub['subscription'] = subscription
        iq['pubsub_owner']['subscriptions'].append(sub)
    return iq.send(block=block, callback=callback, timeout=timeout)
| |
# -*- coding: utf-8 -*-
"""
Django settings for {{cookiecutter.project_name}} project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # ({{ cookiecutter.project_slug }}/config/settings/common.py - 3 = {{ cookiecutter.project_slug }}/)
APPS_DIR = ROOT_DIR.path('{{ cookiecutter.project_slug }}')
# django-environ reader: settings below pull values from environment
# variables (12-factor style).
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
    # Default Django apps:
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Useful template tags:
    # 'django.contrib.humanize',
    # Admin
    'django.contrib.admin',
)
THIRD_PARTY_APPS = (
    'crispy_forms', # Form layouts
    'allauth', # registration
    'allauth.account', # registration
    'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
    # custom users app
    '{{ cookiecutter.project_slug }}.users.apps.UsersConfig',
    # Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
# Override the sites framework's migrations with project-local ones.
MIGRATION_MODULES = {
    'sites': '{{ cookiecutter.project_slug }}.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
    str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
    ("""{{cookiecutter.author_name}}""", '{{cookiecutter.email}}'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': env.db('DATABASE_URL', default='postgres://{% if cookiecutter.windows == 'y' %}localhost{% endif %}/{{cookiecutter.project_slug}}'),
}
# Wrap every HTTP request in a database transaction.
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = '{{ cookiecutter.timezone }}'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
    {
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # Your stuff: custom template context processors go here
            ],
        },
    },
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
    str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = '{{cookiecutter.project_slug}}.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = '{{cookiecutter.project_slug}}.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
{% if cookiecutter.use_celery == 'y' %}
########## CELERY
INSTALLED_APPS += ('{{cookiecutter.project_slug}}.taskapp.celery.CeleryConfig',)
# if you are not using the django database broker (e.g. rabbitmq, redis, memcached), you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = env('CELERY_BROKER_URL', default='django://')
if BROKER_URL == 'django://':
    CELERY_RESULT_BACKEND = 'redis://'
else:
    CELERY_RESULT_BACKEND = BROKER_URL
########## END CELERY
{% endif %}
{%- if cookiecutter.use_compressor == 'y'-%}
# django-compressor
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("compressor", )
STATICFILES_FINDERS += ("compressor.finders.CompressorFinder", )
{%- endif %}
# Location of root django.contrib.admin URL, use {% raw %}{% url 'admin:index' %}{% endraw %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from webnotes.utils.minify import JavascriptMinify
"""
Build the `public` folders and setup languages
"""
import os, sys, webnotes
from cssmin import cssmin
def bundle(no_compress, cms_make=True):
	"""Concat / minify the js and css files listed in build.json.

	Arguments:
		no_compress -- when True, skip minification of the bundles
		cms_make    -- when True, run the app's optional on_build hook
	"""
	# build js files
	webnotes.validate_versions()
	check_public()
	check_lang()
	# Renamed from ``bundle`` -- the local previously shadowed this
	# function's own name.
	builder = Bundle()
	builder.no_compress = no_compress
	builder.make()
	if cms_make:
		# The on_build hook is optional for an app; the unused exception
		# binding (``except ImportError, e``) was dropped.
		try:
			from startup.event_handlers import on_build
			on_build()
		except ImportError:
			pass
	clear_pyc_files()
def watch(no_compress):
	"""watch and rebuild if necessary"""
	import time
	bundle = Bundle()
	bundle.no_compress = no_compress
	# Poll forever: rebuild whenever any input file's mtime differs from
	# the timestamp recorded during the previous build.
	while True:
		if bundle.dirty():
			bundle.make()
		time.sleep(3)
def check_public():
	"""Ensure the `public` folder structure exists before building."""
	from webnotes.install_lib.setup_public_folder import make
	make()
def check_lang():
	"""Refresh translation files before building."""
	from webnotes.translate import update_translations
	update_translations()
def clear_pyc_files():
	"""Delete all .pyc files under the base path (except under locale/)."""
	from webnotes.utils import get_base_path
	for path, folders, files in os.walk(get_base_path()):
		# Pruning 'locale' from folders stops os.walk from descending
		# into it.
		if 'locale' in folders: folders.remove('locale')
		for f in files:
			# Python 2: filenames are byte strings; decode before the
			# suffix check to be safe with non-ascii names.
			if f.decode("utf-8").endswith(".pyc"):
				os.remove(os.path.join(path, f))
class Bundle:
	"""
	Concatenate, compress and mix (if required) js+css files from build.json
	"""
	# Class-level defaults; note ``timestamps`` is shared class state,
	# updated by concat() and read by dirty().
	no_compress = False
	timestamps = {}
	path = '.'
	def concat(self, filelist, outfile=None):
		"""
		Concat css and js files into a bundle
		"""
		from cStringIO import StringIO
		# Infer output type from the outfile extension; default to js.
		out_type = outfile and outfile.split('.')[-1] or 'js'
		outtxt = ''
		for f in filelist:
			suffix = None
			# A "path:suffix" entry carries a directive (e.g. ':concat').
			if ':' in f:
				f, suffix = f.split(':')
			if not os.path.exists(f) or os.path.isdir(f):
				continue
			# Record mtime so dirty() can detect later changes.
			self.timestamps[f] = os.path.getmtime(f)
			# get datas
			try:
				with open(f, 'r') as infile:
					# get file type
					ftype = f.split('.')[-1]
					data = unicode(infile.read(), 'utf-8', errors='ignore')
				# Banner comment naming the source file in the bundle.
				outtxt += ('\n/*\n *\t%s\n */' % f)
				# append
				# Skip minification for explicit ':concat' entries,
				# non-js output, no_compress mode, or pre-minified files.
				if suffix=='concat' or out_type != 'js' or self.no_compress or ('.min.' in f):
					outtxt += '\n' + data + '\n'
				else:
					jsm = JavascriptMinify()
					tmpin = StringIO(data.encode('utf-8'))
					tmpout = StringIO()
					jsm.minify(tmpin, tmpout)
					tmpmin = unicode(tmpout.getvalue() or '', 'utf-8')
					# NOTE(review): strip() returns a new string and the
					# result is discarded -- likely intended
					# ``tmpmin = tmpmin.strip('\n')``.
					tmpmin.strip('\n')
					outtxt += tmpmin
			except Exception, e:
				print "--Error in:" + f + "--"
				print webnotes.getTraceback()
		if not self.no_compress and out_type == 'css':
			outtxt = cssmin(outtxt)
		with open(outfile, 'w') as f:
			f.write(outtxt.encode("utf-8"))
		print "Wrote %s - %sk" % (outfile, str(int(os.path.getsize(outfile)/1024)))
	def dirty(self):
		"""check if build files are dirty"""
		self.make_build_data()
		for builddict in self.bdata:
			for f in self.get_infiles(builddict):
				if ':' in f:
					f, suffix = f.split(':')
				if not os.path.exists(f) or os.path.isdir(f):
					continue
				# Any mtime change (or unseen file) marks the build dirty.
				if os.path.getmtime(f) != self.timestamps.get(f):
					print f + ' dirty'
					return True
		# for-else: runs only when the loop finished without returning,
		# i.e. nothing was dirty.
		else:
			return False
	def make(self):
		"""Build (stitch + compress) the file defined in build.json"""
		print "Building js and css files..."
		self.make_build_data()
		for builddict in self.bdata:
			# Each build entry is a one-key dict: {outfile: [infiles]}.
			outfile = builddict.keys()[0]
			infiles = self.get_infiles(builddict)
			self.concat(infiles, os.path.relpath(os.path.join(self.path, outfile), os.curdir))
		self.reset_app_html()
	def reset_app_html(self):
		"""Regenerate public/app.html from the template, embedding the
		splash image and a fresh cache-busting version hash."""
		import webnotes
		if os.path.exists("public/app.html"):
			os.remove("public/app.html")
		splash = ""
		if os.path.exists("public/app/images/splash.svg"):
			with open("public/app/images/splash.svg") as splash_file:
				splash = splash_file.read()
		with open('lib/public/html/app.html', 'r') as app_html:
			data = app_html.read()
			data = data % {
				"_version_number": webnotes.generate_hash(),
				"splash": splash
			}
			with open('public/app.html', 'w') as new_app_html:
				new_app_html.write(data)
	def get_infiles(self, builddict):
		"""make list of files to merge"""
		outfile = builddict.keys()[0]
		infiles = builddict[outfile]
		# add app js and css to the list
		if outfile in self.appfiles:
			for f in self.appfiles[outfile]:
				if f not in infiles:
					infiles.append(f)
		fl = []
		for f in infiles:
			## load files from directory
			if f.endswith('/'):
				# add init js first
				fl += [os.path.relpath(os.path.join(f, 'init.js'), os.curdir)]
				# files other than init.js and beginning with "_"
				fl += [os.path.relpath(os.path.join(f, tmp), os.curdir) \
					for tmp in os.listdir(f) if (tmp != 'init.js' and not tmp.startswith('_'))]
			else:
				fl.append(os.path.relpath(os.path.join(self.path, f), os.curdir))
		return fl
	def make_build_data(self):
		"""merge build.json and lib/build.json"""
		# framework js and css files
		# NOTE(review): build.json is parsed with eval(); assumes the
		# file contents are trusted.
		with open('lib/public/build.json', 'r') as bfile:
			bdata = eval(bfile.read())
		# app js and css files
		if os.path.exists('app/public/build.json'):
			with open('app/public/build.json', 'r') as bfile:
				appfiles = eval(bfile.read())
		else:
			appfiles = {}
		# add additional app files in bdata
		buildfile_list = [builddict.keys()[0] for builddict in bdata]
		for f in appfiles:
			if f not in buildfile_list:
				bdata.append({f: appfiles[f]})
		self.appfiles = appfiles
		self.bdata = bdata
| |
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import os
import re
import pep8
import six
"""
Guidelines for writing new hacking checks
- Use only for Nova specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to nova/tests/unit/test_hacking.py
"""
# NOTE(review): appears to accumulate filenames that perform underscore
# imports; populated by checks not visible here -- confirm.
UNDERSCORE_IMPORT_FILES = []
# Pre-compiled patterns used by the check functions below.
session_check = re.compile(r"\w*def [a-zA-Z0-9].*[(].*session.*[)]")
cfg_re = re.compile(r".*\scfg\.")
# Excludes oslo.config OptGroup objects
cfg_opt_re = re.compile(r".*[\s\[]cfg\.[a-zA-Z]*Opt\(")
rule_default_re = re.compile(r".*RuleDefault\(")
policy_enforce_re = re.compile(r".*_ENFORCER\.enforce\(")
virt_file_re = re.compile(r"\./nova/(?:tests/)?virt/(\w+)/")
virt_import_re = re.compile(
    r"^\s*(?:import|from) nova\.(?:tests\.)?virt\.(\w+)")
virt_config_re = re.compile(
    r"CONF\.import_opt\('.*?', 'nova\.virt\.(\w+)('|.)")
asse_trueinst_re = re.compile(
    r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
    "(\w|\.|\'|\"|\[|\])+\)\)")
asse_equal_type_re = re.compile(
    r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
    "(\w|\.|\'|\"|\[|\])+\)")
asse_equal_in_end_with_true_or_false_re = re.compile(r"assertEqual\("
    r"(\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)")
asse_equal_in_start_with_true_or_false_re = re.compile(r"assertEqual\("
    r"(True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)")
# NOTE(snikitin): Next two regexes weren't united to one for more readability.
#                 asse_true_false_with_in_or_not_in regex checks
#                 assertTrue/False(A in B) cases where B argument has no spaces
#                 asse_true_false_with_in_or_not_in_spaces regex checks cases
#                 where B argument has spaces and starts/ends with [, ', ".
#                 For example: [1, 2, 3], "some string", 'another string'.
#                 We have to separate these regexes to escape a false positives
#                 results. B argument should have spaces only if it starts
#                 with [, ", '. Otherwise checking of string
#                 "assertFalse(A in B and C in D)" will be false positives.
#                 In this case B argument is "B and C in D".
asse_true_false_with_in_or_not_in = re.compile(r"assert(True|False)\("
    r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)")
asse_true_false_with_in_or_not_in_spaces = re.compile(r"assert(True|False)"
    r"\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+"
    r"[\[|'|\"](, .*)?\)")
asse_raises_regexp = re.compile(r"assertRaisesRegexp\(")
conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w")
translated_log = re.compile(
    r"(.)*LOG\.(audit|error|info|critical|exception)"
    "\(\s*_\(\s*('|\")")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
import_translation_for_log_or_exception = re.compile(
    r"(.)*(from\snova.i18n\simport)\s_")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
api_version_re = re.compile(r"@.*api_version")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
decorator_re = re.compile(r"@.*")
http_not_implemented_re = re.compile(r"raise .*HTTPNotImplemented\(")
spawn_re = re.compile(
    r".*(eventlet|greenthread)\.(?P<spawn_part>spawn(_n)?)\(.*\)")
contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")
doubled_words_re = re.compile(
    r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b")
log_remove_context = re.compile(
    r"(.)*LOG\.(.*)\(.*(context=[_a-zA-Z0-9].*)+.*\)")
return_not_followed_by_space = re.compile(r"^\s*return(?:\(|{|\"|'|#).*$")
class BaseASTChecker(ast.NodeVisitor):
    """Provides a simple framework for writing AST-based checks.

    Subclasses implement ``visit_*`` methods just like any other AST
    visitor and report a problem by calling
    ``self.add_error(offending_node)``; the position information is
    pulled from the node itself.  Each subclass must also provide a
    ``CHECK_DESC`` class attribute holding the human readable error
    message.
    """

    def __init__(self, tree, filename):
        """This object is created automatically by pep8.

        :param tree: an AST tree
        :param filename: name of the file being analyzed
                         (ignored by our checks)
        """
        self._tree = tree
        self._errors = []

    def run(self):
        """Entry point invoked by pep8; returns the collected errors."""
        self.visit(self._tree)
        return self._errors

    def add_error(self, node, message=None):
        """Record an error for pep8, positioned at *node*."""
        # Fall back to the subclass's CHECK_DESC when no explicit
        # message is given.
        self._errors.append((node.lineno, node.col_offset,
                             message or self.CHECK_DESC, self.__class__))

    def _check_call_names(self, call_node, names):
        # True only when *call_node* is a call of a bare name listed in
        # *names*; anything else (attributes, non-calls) is False.
        if isinstance(call_node, ast.Call):
            func = call_node.func
            if isinstance(func, ast.Name) and func.id in names:
                return True
        return False
def import_no_db_in_virt(logical_line, filename):
    """Check for db calls from nova/virt

    As of grizzly-2 all the database calls have been removed from
    nova/virt, and we want to keep it that way.

    N307
    """
    in_virt_tree = ("nova/virt" in filename and
                    not filename.endswith("fake.py"))
    if in_virt_tree and logical_line.startswith("from nova import db"):
        yield (0, "N307: nova.db import not allowed in nova/virt/*")
def no_db_session_in_public_api(logical_line, filename):
    # N309: public db api methods must not take a session argument.
    if "db/api.py" not in filename:
        return
    if session_check.match(logical_line):
        yield (0, "N309: public db api methods may not accept session")
def use_timeutils_utcnow(logical_line, filename):
    """N310: require timeutils.utcnow() instead of datetime.(utc)now()."""
    # tools are OK to use the standard datetime module
    if "/tools/" in filename:
        return
    msg = "N310: timeutils.utcnow() must be used instead of datetime.%s()"
    for func_name in ('now', 'utcnow'):
        offset = logical_line.find('datetime.%s' % func_name)
        if offset >= 0:
            yield (offset, msg % func_name)
def _get_virt_name(regex, data):
    # Extract the virt driver name captured by *regex* from *data*.
    # Returns None when there is no match or when the captured token is
    # one of the known false positives below.
    match = regex.match(data)
    if match is None:
        return None
    # Ignore things we mis-detect as virt drivers in the regex
    false_positives = ("test_virt_drivers", "driver", "firewall",
                       "disk", "api", "imagecache", "cpu", "hardware",
                       "image")
    name = match.group(1)
    return None if name in false_positives else name
def import_no_virt_driver_import_deps(physical_line, filename):
    """Check virt drivers' modules aren't imported by other drivers

    Modules under each virt driver's directory are
    considered private to that virt driver. Other drivers
    in Nova must not access those drivers. Any code that
    is to be shared should be refactored into a common
    module

    N311
    """
    this_driver = _get_virt_name(virt_file_re, filename)
    that_driver = _get_virt_name(virt_import_re, physical_line)
    if (this_driver is not None and that_driver is not None
            and this_driver != that_driver):
        return (0, "N311: importing code from other virt drivers forbidden")
def import_no_virt_driver_config_deps(physical_line, filename):
    """Check virt drivers' config vars aren't used by other drivers

    Modules under each virt driver's directory are
    considered private to that virt driver. Other drivers
    in Nova must not use their config vars. Any config vars
    that are to be shared should be moved into a common module

    N312
    """
    this_driver = _get_virt_name(virt_file_re, filename)
    that_driver = _get_virt_name(virt_config_re, physical_line)
    if (this_driver is not None and that_driver is not None
            and this_driver != that_driver):
        return (0, "N312: using config vars from other virt drivers forbidden")
def capital_cfg_help(logical_line, tokens):
    # N313: the help string of a config option must start uppercase.
    if not cfg_re.match(logical_line):
        return
    for idx, token in enumerate(tokens):
        if token[1] == "help":
            # Two tokens past "help" sits the string literal (after '=').
            help_text = tokens[idx + 2][1]
            # help_text includes its quote, so index 1 is the first
            # character of the actual message.
            if len(help_text) > 1 and help_text[1].islower():
                yield (0, "N313: capitalize help string")
def assert_true_instance(logical_line):
    """Check for assertTrue(isinstance(a, b)) sentences

    N316
    """
    found = asse_trueinst_re.match(logical_line)
    if found:
        yield (0, "N316: assertTrue(isinstance(a, b)) sentences not allowed")
def assert_equal_type(logical_line):
    """Check for assertEqual(type(A), B) sentences

    N317
    """
    found = asse_equal_type_re.match(logical_line)
    if found:
        yield (0, "N317: assertEqual(type(A), B) sentences not allowed")
def check_python3_xrange(logical_line):
    # N327: xrange() does not exist on Python 3.
    found = re.search(r"\bxrange\s*\(", logical_line)
    if found:
        yield (0, "N327: Do not use xrange(). 'xrange()' is not compatible "
                  "with Python 3. Use range() or six.moves.range() instead.")
def no_translate_debug_logs(logical_line, filename):
    """Check for 'LOG.debug(_('

    As per our translation policy,
    https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
    we shouldn't translate debug level logs.

    * This check assumes that 'LOG' is a logger.
    * Use filename so we can start enforcing this in specific folders
      instead of needing to do so all at once.

    N319
    """
    is_translated_debug = logical_line.startswith("LOG.debug(_(")
    if is_translated_debug:
        yield (0, "N319 Don't translate debug level logs")
def no_import_translation_in_tests(logical_line, filename):
    """Check for 'from nova.i18n import _'

    N337
    """
    if 'nova/tests/' not in filename:
        return
    if import_translation_for_log_or_exception.match(logical_line):
        yield (0, "N337 Don't import translation in tests")
def no_setting_conf_directly_in_tests(logical_line, filename):
    """Check for setting CONF.* attributes directly in tests

    The value can leak out of tests affecting how subsequent tests run.
    Using self.flags(option=value) is the preferred method to temporarily
    set config options in tests.

    N320
    """
    if 'nova/tests/' not in filename:
        return
    if conf_attribute_set_re.match(logical_line):
        yield (0, "N320: Setting CONF.* attributes directly in tests is "
                  "forbidden. Use self.flags(option=value) instead")
def no_mutable_default_args(logical_line):
    # N322: {} / [] literals must not be default argument values.
    if mutable_default_args.match(logical_line):
        yield (0, "N322: Method's default argument shouldn't be mutable!")
def check_explicit_underscore_import(logical_line, filename):
    """Check for explicit import of the _ function

    We need to ensure that any files that are using the _() function
    to translate logs are explicitly importing the _ function.  We
    can't trust unit test to catch whether the import has been
    added so we need to check for it here.
    """
    # Remember files where _ is already imported; once recorded,
    # nothing further needs checking for that file.
    if filename in UNDERSCORE_IMPORT_FILES:
        return
    has_import = (underscore_import_check.match(logical_line) or
                  custom_underscore_check.match(logical_line))
    if has_import:
        UNDERSCORE_IMPORT_FILES.append(filename)
    elif (translated_log.match(logical_line) or
          string_translation.match(logical_line)):
        yield (0, "N323: Found use of _() without explicit import of _ !")
def use_jsonutils(logical_line, filename):
    """N324: use jsonutils instead of the stdlib json module."""
    # the code below that path is not meant to be executed from neutron
    # tree where jsonutils module is present, so don't enforce its usage
    # for this subdirectory
    if "plugins/xenserver" in filename:
        return
    # tools are OK to use the standard json module
    if "/tools/" in filename:
        return
    if "json." not in logical_line:
        return
    msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s"
    for func in ('dumps(', 'dump(', 'loads(', 'load('):
        offset = logical_line.find('json.%s' % func)
        if offset != -1:
            yield (offset, msg % {'fun': func[:-1]})
def check_api_version_decorator(logical_line, previous_logical, blank_before,
                                filename):
    # N332: @api_version must be the outermost (first) decorator on a
    # method; flag it when it directly follows another decorator.
    is_stacked = (blank_before == 0 and
                  re.match(api_version_re, logical_line) and
                  re.match(decorator_re, previous_logical))
    if is_stacked:
        yield (0, ("N332: the api_version decorator must be the first decorator"
                   " on a method."))
class CheckForStrUnicodeExc(BaseASTChecker):
    """Checks for the use of str() or unicode() on an exception.

    This currently only handles the case where str() or unicode()
    is used in the scope of an exception handler. If the exception
    is passed into a function, returned from an assertRaises, or
    used on an exception created in the same scope, this does not
    catch it.
    """
    CHECK_DESC = ('N325 str() and unicode() cannot be used on an '
                  'exception. Remove or use six.text_type()')

    def __init__(self, tree, filename):
        super(CheckForStrUnicodeExc, self).__init__(tree, filename)
        # Stack of exception-variable names currently bound by an
        # enclosing except handler.
        self.name = []
        # Call nodes already reported, so each is flagged at most once.
        self.already_checked = []

    # Python 2 produces ast.TryExcept and ast.TryFinally nodes, but Python 3
    # only produces ast.Try nodes.
    if six.PY2:
        def visit_TryExcept(self, node):
            # Push each handler's exception-variable name while the
            # subtree is visited, and pop it again afterwards.
            for handler in node.handlers:
                if handler.name:
                    # On Python 2 handler.name is an ast.Name node.
                    self.name.append(handler.name.id)
                    super(CheckForStrUnicodeExc, self).generic_visit(node)
                    self.name = self.name[:-1]
                else:
                    super(CheckForStrUnicodeExc, self).generic_visit(node)
    else:
        def visit_Try(self, node):
            for handler in node.handlers:
                if handler.name:
                    # On Python 3 handler.name is already a plain string.
                    self.name.append(handler.name)
                    super(CheckForStrUnicodeExc, self).generic_visit(node)
                    self.name = self.name[:-1]
                else:
                    super(CheckForStrUnicodeExc, self).generic_visit(node)

    def visit_Call(self, node):
        # Flag str(e)/unicode(e) where e is a live exception variable.
        if self._check_call_names(node, ['str', 'unicode']):
            if node not in self.already_checked:
                self.already_checked.append(node)
                if isinstance(node.args[0], ast.Name):
                    if node.args[0].id in self.name:
                        self.add_error(node.args[0])
        super(CheckForStrUnicodeExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
    """Checks for the use of concatenation on a translated string.

    Translations should not be concatenated with other strings, but
    should instead include the string being added to the translated
    string to give the translators the most information.
    """

    CHECK_DESC = ('N326 Translated messages cannot be concatenated. '
                  'String should be included in translated message.')

    TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']

    def visit_BinOp(self, node):
        # Flag the first operand of '+' that is a translation-helper call.
        if isinstance(node.op, ast.Add):
            for operand in (node.left, node.right):
                if self._check_call_names(operand, self.TRANS_FUNC):
                    self.add_error(operand)
                    break
        super(CheckForTransAdd, self).generic_visit(node)
class _FindVariableReferences(ast.NodeVisitor):
    """Collect the names of every variable whose value is read."""

    def __init__(self):
        super(_FindVariableReferences, self).__init__()
        self._references = []

    def visit_Name(self, node):
        # An ast.Load context means the variable's value was used, e.g.:
        #   mocked_thing.bar = foo
        #   foo()
        #   self.assertRaises(exception, foo)
        if isinstance(node.ctx, ast.Load):
            self._references.append(node.id)
        super(_FindVariableReferences, self).generic_visit(node)
class CheckForUncalledTestClosure(BaseASTChecker):
    """Look for closures that are never called in tests.

    A recurring pattern when using multiple mocks is to create a closure
    decorated with mocks like:

        def test_thing(self):
            @mock.patch.object(self.compute, 'foo')
            @mock.patch.object(self.compute, 'bar')
            def _do_test(mock_bar, mock_foo):
                # Test things
            _do_test()

    However it is easy to leave off the _do_test() and have the test pass
    because nothing runs. This check looks for methods defined within a test
    method and ensures that there is a reference to them. Only methods defined
    one level deep are checked. Something like:

        def test_thing(self):
            class FakeThing:
                def foo(self):

    would not ensure that foo is referenced.

    N349
    """

    def __init__(self, tree, filename):
        super(CheckForUncalledTestClosure, self).__init__(tree, filename)
        self._filename = filename

    def visit_FunctionDef(self, node):
        # self._filename is 'stdin' in the unit test for this check.
        # FIX: the guard previously read "and not 'stdin'", which is a
        # constant False, so the early return never fired and the check
        # ran on every file.  Compare against the filename instead.
        if (not os.path.basename(self._filename).startswith('test_') and
                self._filename != 'stdin'):
            return

        closures = []
        # Walk just the direct nodes of the test method
        for child_node in ast.iter_child_nodes(node):
            if isinstance(child_node, ast.FunctionDef):
                closures.append(child_node.name)

        # Walk all nodes to find references
        find_references = _FindVariableReferences()
        find_references.generic_visit(node)
        references = find_references._references

        missed = set(closures) - set(references)
        if missed:
            self.add_error(node, 'N349: Test closures not called: %s'
                           % ','.join(missed))
def assert_true_or_false_with_in(logical_line):
    """Check for assertTrue/False(A in B), assertTrue/False(A not in B),
    assertTrue/False(A in B, message) or assertTrue/False(A not in B, message)
    sentences.

    N334
    """
    found = (asse_true_false_with_in_or_not_in.search(logical_line) or
             asse_true_false_with_in_or_not_in_spaces.search(logical_line))
    if found:
        yield (0, "N334: Use assertIn/NotIn(A, B) rather than "
                  "assertTrue/False(A in/not in B) when checking collection "
                  "contents.")
def assert_raises_regexp(logical_line):
    """Check for usage of deprecated assertRaisesRegexp

    N335
    """
    if asse_raises_regexp.search(logical_line):
        yield (0, "N335: assertRaisesRegex must be used instead "
                  "of assertRaisesRegexp")
def dict_constructor_with_list_copy(logical_line):
    # N336: dict() over a sequence of pairs should be a comprehension.
    if dict_constructor_with_list_copy_re.match(logical_line):
        yield (0, ("N336: Must use a dict comprehension instead of a dict constructor"
                   " with a sequence of key-value pairs."))
def assert_equal_in(logical_line):
    """Check for assertEqual(A in B, True), assertEqual(True, A in B),
    assertEqual(A in B, False) or assertEqual(False, A in B) sentences

    N338
    """
    found = (asse_equal_in_start_with_true_or_false_re.search(logical_line) or
             asse_equal_in_end_with_true_or_false_re.search(logical_line))
    if found:
        yield (0, "N338: Use assertIn/NotIn(A, B) rather than "
                  "assertEqual(A in B, True/False) when checking collection "
                  "contents.")
def check_http_not_implemented(logical_line, physical_line, filename):
    # N339: use the shared raise_feature_not_supported() helper rather
    # than raising HTTPNotImplemented directly in the compute API.
    if pep8.noqa(physical_line):
        return
    if "nova/api/openstack/compute" not in filename:
        return
    if re.match(http_not_implemented_re, logical_line):
        yield (0, ("N339: HTTPNotImplemented response must be implemented with"
                   " common raise_feature_not_supported()."))
def check_greenthread_spawns(logical_line, physical_line, filename):
    """Check for use of greenthread.spawn(), greenthread.spawn_n(),
    eventlet.spawn(), and eventlet.spawn_n()

    N340
    """
    if "nova/utils.py" in filename or "nova/tests/" in filename:
        return
    found = re.match(spawn_re, logical_line)
    if found:
        yield (0, ("N340: Use nova.utils.%(spawn)s() rather than "
                   "greenthread.%(spawn)s() and eventlet.%(spawn)s()")
               % {'spawn': found.group('spawn_part')})
def check_no_contextlib_nested(logical_line, filename):
    # N341: contextlib.nested is deprecated since Python 2.7.
    if contextlib_nested.match(logical_line):
        yield (0, "N341: contextlib.nested is deprecated. With Python 2.7 and later "
                  "the with-statement supports multiple nested objects. See https://"
                  "docs.python.org/2/library/contextlib.html#contextlib.nested for "
                  "more information. nova.test.nested() is an alternative as well.")
def check_config_option_in_central_place(logical_line, filename):
    """N342: new config options belong under nova/conf/."""
    # That's the correct location
    if "nova/conf/" in filename:
        return
    # (macsz) All config options (with exceptions that are clarified
    # in the list below) were moved to the central place. List below is for
    # all options that were impossible to move without doing a major impact
    # on code. Add full path to a module or folder.
    conf_exceptions = (
        # CLI opts are allowed to be outside of nova/conf directory
        'nova/cmd/manage.py',
        'nova/cmd/policy_check.py',
        'nova/cmd/status.py',
        # config options should not be declared in tests, but there is
        # another checker for it (N320)
        'nova/tests',
    )
    if any(exception in filename for exception in conf_exceptions):
        return
    if cfg_opt_re.match(logical_line):
        yield (0, ("N342: Config options should be in the central location "
                   "'/nova/conf/*'. Do not declare new config options outside "
                   "of that folder."))
def check_policy_registration_in_central_place(logical_line, filename):
    # N350: policies must be registered under nova/policies/.
    # This is where registration should happen
    if "nova/policies/" in filename:
        return
    # A couple of policy tests register rules
    if "nova/tests/unit/test_policy.py" in filename:
        return
    if rule_default_re.match(logical_line):
        yield (0, ('N350: Policy registration should be in the central location '
                   '"/nova/policies/*".'))
def check_policy_enforce(logical_line, filename):
    """Look for uses of nova.policy._ENFORCER.enforce()

    Now that policy defaults are registered in code the _ENFORCER.authorize
    method should be used. That ensures that only registered policies are used.
    Uses of _ENFORCER.enforce could allow unregistered policies to be used, so
    this check looks for uses of that method.

    N351
    """
    if policy_enforce_re.match(logical_line):
        yield (0, ('N351: nova.policy._ENFORCER.enforce() should not be used. '
                   'Use the authorize() method instead.'))
def check_doubled_words(physical_line, filename):
    """Check for the common doubled-word typos

    N343
    """
    found = re.search(doubled_words_re, physical_line)
    if found:
        return (0, "N343: Doubled word '%(word)s' typo found"
                % {'word': found.group(1)})
def check_python3_no_iteritems(logical_line):
    # N344: dict.iteritems() does not exist on Python 3.
    if re.search(r".*\.iteritems\(\)", logical_line):
        yield (0, "N344: Use items() instead of dict.iteritems().")
def check_python3_no_iterkeys(logical_line):
    # N345: dict.iterkeys() does not exist on Python 3.
    if re.search(r".*\.iterkeys\(\)", logical_line):
        yield (0, "N345: Use six.iterkeys() instead of dict.iterkeys().")
def check_python3_no_itervalues(logical_line):
    # N346: dict.itervalues() does not exist on Python 3.
    if re.search(r".*\.itervalues\(\)", logical_line):
        yield (0, "N346: Use six.itervalues() instead of dict.itervalues().")
def no_os_popen(logical_line):
    """Disallow 'os.popen('

    Deprecated library function os.popen() Replace it using subprocess
    https://bugs.launchpad.net/tempest/+bug/1529836

    N348
    """
    uses_popen = 'os.popen(' in logical_line
    if uses_popen:
        yield (0, 'N348 Deprecated library function os.popen(). '
                  'Replace it using subprocess module. ')
def no_log_warn(logical_line):
    """Disallow 'LOG.warn('

    Deprecated LOG.warn(), instead use LOG.warning
    https://bugs.launchpad.net/senlin/+bug/1508442

    N352
    """
    uses_warn = "LOG.warn(" in logical_line
    if uses_warn:
        yield (0, "N352: LOG.warn is deprecated, please use LOG.warning!")
def check_context_log(logical_line, physical_line, filename):
    """check whether context is being passed to the logs

    Not correct: LOG.info(_LI("Rebooting instance"), context=context)
    Correct:     LOG.info(_LI("Rebooting instance"))

    https://bugs.launchpad.net/nova/+bug/1500896

    N353
    """
    if "nova/tests" in filename:
        return

    if pep8.noqa(physical_line):
        return

    if log_remove_context.match(logical_line):
        # FIX: the implicit string concatenation previously read
        # "...pass it as" + "kwarg.", rendering as "askwarg."; a space
        # was missing between the fragments.
        yield (0,
               "N353: Nova is using oslo.context's RequestContext "
               "which means the context object is in scope when "
               "doing logging using oslo.log, so no need to pass it as "
               "kwarg.")
def no_assert_equal_true_false(logical_line):
    """Enforce use of assertTrue/assertFalse.

    Prevent use of assertEqual(A, True|False), assertEqual(True|False, A),
    assertNotEqual(A, True|False), and assertNotEqual(True|False, A).

    N355
    """
    starts_with_bool = re.compile(r'assert(Not)?Equal\((True|False),')
    ends_with_bool = re.compile(r'assert(Not)?Equal\(.*,\s+(True|False)\)$')
    if (starts_with_bool.search(logical_line) or
            ends_with_bool.search(logical_line)):
        yield (0, "N355: assertEqual(A, True|False), "
                  "assertEqual(True|False, A), assertNotEqual(A, True|False), "
                  "or assertEqual(True|False, A) sentences must not be used. "
                  "Use assertTrue(A) or assertFalse(A) instead")
def no_assert_true_false_is_not(logical_line):
    """Enforce use of assertIs/assertIsNot.

    Prevent use of assertTrue(A is|is not B) and assertFalse(A is|is not B).

    N356
    """
    identity_assert = re.compile(
        r'assert(True|False)\(.+\s+is\s+(not\s+)?.+\)$')
    if identity_assert.search(logical_line):
        yield (0, "N356: assertTrue(A is|is not B) or "
                  "assertFalse(A is|is not B) sentences must not be used. "
                  "Use assertIs(A, B) or assertIsNot(A, B) instead")
def check_uuid4(logical_line):
    """Generating UUID

    Use oslo_utils.uuidutils or uuidsentinel(in case of test cases) to generate
    UUID instead of uuid4().

    N357
    """
    # Attribute access on the result (e.g. uuid4().hex) is tolerated.
    if "uuid4()." in logical_line:
        return
    if "uuid4()" in logical_line:
        yield (0, ("N357: Use oslo_utils.uuidutils or uuidsentinel(in case of test "
                   "cases) to generate UUID instead of uuid4()."))
def return_followed_by_space(logical_line):
    """Return should be followed by a space.

    Return should be followed by a space to clarify that return is
    not a function. Adding a space may force the developer to rethink
    if there are unnecessary parentheses in the written code.

    Not correct: return(42), return(a, b)
    Correct: return 42, return (a, b), return a, b

    N358
    """
    if return_not_followed_by_space.match(logical_line):
        # FIX: the message previously said "N357", which is check_uuid4's
        # code; this check is N358 per the docstring.
        yield (0,
               "N358: Return keyword should be followed by a space.")
def factory(register):
    # Register every hacking check in this module, preserving the
    # original registration order.
    checks = (
        import_no_db_in_virt,
        no_db_session_in_public_api,
        use_timeutils_utcnow,
        import_no_virt_driver_import_deps,
        import_no_virt_driver_config_deps,
        capital_cfg_help,
        no_import_translation_in_tests,
        assert_true_instance,
        assert_equal_type,
        assert_raises_regexp,
        no_translate_debug_logs,
        no_setting_conf_directly_in_tests,
        no_mutable_default_args,
        check_explicit_underscore_import,
        use_jsonutils,
        check_api_version_decorator,
        CheckForStrUnicodeExc,
        CheckForTransAdd,
        assert_true_or_false_with_in,
        dict_constructor_with_list_copy,
        assert_equal_in,
        check_http_not_implemented,
        check_no_contextlib_nested,
        check_greenthread_spawns,
        check_config_option_in_central_place,
        check_policy_registration_in_central_place,
        check_policy_enforce,
        check_doubled_words,
        check_python3_no_iteritems,
        check_python3_no_iterkeys,
        check_python3_no_itervalues,
        check_python3_xrange,
        no_os_popen,
        no_log_warn,
        CheckForUncalledTestClosure,
        check_context_log,
        no_assert_equal_true_false,
        no_assert_true_false_is_not,
        check_uuid4,
        return_followed_by_space,
    )
    for check in checks:
        register(check)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import itertools
import numpy as np
import itertools
import mxnet as mx
import mxnet.lr_scheduler as lr_scheduler
from mxnet import gluon
import unittest
import pytest
import math
from mxnet.test_utils import *
from common import with_seed, retry, xfail_when_nonstandard_decimal_separator
@with_seed()
def test_learning_rate():
    """learning_rate reflects set_learning_rate and any attached scheduler."""
    # Plain optimizer: set_learning_rate overrides the constructor value.
    opt_plain = mx.optimizer.Optimizer(learning_rate=0.01)
    opt_plain.set_learning_rate(0.2)
    assert opt_plain.learning_rate == 0.2

    # With a scheduler, mutating the scheduler's base_lr is reflected.
    sched = lr_scheduler.FactorScheduler(step=1)
    opt_sched = mx.optimizer.Optimizer(lr_scheduler=sched, learning_rate=0.3)
    assert opt_sched.learning_rate == 0.3
    opt_sched.lr_scheduler.base_lr = 0.4
    assert opt_sched.learning_rate == 0.4

    # A scheduler constructed with an explicit base_lr supplies the rate.
    sched_base = lr_scheduler.FactorScheduler(step=1, base_lr=1024)
    opt_base = mx.optimizer.Optimizer(lr_scheduler=sched_base)
    assert opt_base.learning_rate == 1024
@with_seed()
def test_learning_rate_expect_user_warning():
    """set_learning_rate must raise UserWarning when a scheduler is attached."""
    scheduler = lr_scheduler.FactorScheduler(step=1)
    optim = mx.optimizer.Optimizer(lr_scheduler=scheduler, learning_rate=0.3)
    with pytest.raises(UserWarning):
        optim.set_learning_rate(0.5)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_sgd():
    """SGD: the non-fused step must match the fused kernel.

    Sweeps every combination of momentum / clipping / rescale / weight
    decay / multi-precision / aggregation options for fp16 and fp32,
    and additionally exercises the CSR fallback path for fp32.
    """
    opt1 = mx.optimizer.SGD
    opt2 = mx.optimizer.SGD
    shapes = [(3, 4, 5), (10, 4), (7,)]
    mom_options = [{}, {'momentum': 0.9}]
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
    mp_options = [{'multi_precision': False}, {'multi_precision': True}]
    agg_options = [{'aggregate_num': 0}, {'aggregate_num': 1},
                   {'aggregate_num': 4}, {'aggregate_num': np.inf}]
    for dtype in [np.float16, np.float32]:
        for params in itertools.product(mom_options, cg_options, rg_options,
                                        wd_options, mp_options, agg_options):
            # Merge one option dict from each axis into a single kwargs dict.
            kwarg = {k: v for param in params for k, v in param.items()}
            # fp16 is only exercised with multi_precision enabled.
            if (dtype == np.float16 and ('multi_precision' not in kwarg or
                                         not kwarg['multi_precision'])):
                continue
            if dtype == np.float16:
                # fp16 comparisons need looser tolerances.
                compare_optimizer(opt1(use_fused_step=False, **kwarg),
                                  opt2(use_fused_step=True, **kwarg),
                                  shapes, dtype, rtol=1e-3, atol=1e-4)
            else:
                compare_optimizer(opt1(use_fused_step=False, **kwarg),
                                  opt2(use_fused_step=True, **kwarg),
                                  shapes, dtype)
            # test operator fallback on cpu
            if dtype != np.float16:
                compare_optimizer(opt1(use_fused_step=False, **kwarg),
                                  opt2(use_fused_step=True, **kwarg),
                                  [shapes[0][:2], shapes[1]],
                                  dtype, w_stype='csr', g_stype='csr')
class PySparseSGD(mx.optimizer.Optimizer):
    """Python reference implementation of SGD for sparse gradients.

    Applies the update row by row, skipping any row whose gradient is
    entirely zero.
    """

    def __init__(self, learning_rate=0.1, momentum=0.0, **kwargs):
        super(PySparseSGD, self).__init__(learning_rate=learning_rate, **kwargs)
        self.momentum = momentum

    def create_state(self, index, weight):
        """Create additional optimizer state: momentum

        Parameters
        ----------
        weight : NDArray
            The weight data
        """
        if self.momentum == 0.0:
            # Plain SGD needs no per-parameter state.
            return None
        else:
            return mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)

    def step(self, indices, weights, grads, states):
        """Perform an optimization step using gradients and states.

        Parameters
        ----------
        indices : list of int
            List of unique indices of the parameters into the individual learning rates
            and weight decays. Learning rates and weight decay may be set via `set_lr_mult()`
            and `set_wd_mult()`, respectively.
        weights : list of NDArray
            List of parameters to be updated.
        grads : list of NDArray
            List of gradients of the objective with respect to this parameter.
        states : List of any obj
            List of state returned by `create_state()`.
        """
        for index, weight, grad, state in zip(indices, weights, grads, states):
            lr = self._get_lr(index)
            wd = self._get_wd(index)
            self._update_count(index)
            num_rows = weight.shape[0]
            if self.momentum == 0.0:
                # Update on a per row basis, skip all-zero rows
                for row in range(num_rows):
                    grad_row = grad[row].asnumpy()
                    all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))
                    if all_zeros:
                        continue
                    # grad = rescale_grad * grad (optionally clipped) + wd * weight
                    grad[row] *= self.rescale_grad
                    if self.clip_gradient is not None:
                        grad[row] = mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient)
                    grad[row] += wd * weight[row]
                    weight[row] -= lr * grad[row]
            else:
                mom = state
                for row in range(num_rows):
                    grad_row = grad[row].asnumpy()
                    all_zeros = mx.test_utils.almost_equal(grad_row, np.zeros_like(grad_row))
                    if all_zeros:
                        continue
                    grad[row] *= self.rescale_grad
                    if self.clip_gradient is not None:
                        grad[row] = mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient)
                    grad[row] += wd * weight[row]
                    # mom = momentum * mom - lr * grad; weight += mom
                    mom[row] *= self.momentum
                    mom[row] -= lr * grad[row]
                    weight[row] += mom[row]
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_sparse_sgd():
    """Compare the PySparseSGD reference against fused lazy-update SGD."""
    py_opt = PySparseSGD
    fused_opt = mx.optimizer.SGD
    shapes = [(3, 4, 5), (10, 4), (7,)]
    option_axes = (
        [{}, {'momentum': 0.9}],                               # momentum
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],  # clipping
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],   # rescale
        [{}, {'wd': 0.03}, {'wd': 0.05}],                      # weight decay
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],     # aggregation
    )
    for dtype in [np.float32]:
        for combo in itertools.product(*option_axes):
            # Merge one option dict from each axis.
            kwarg = {}
            for opts in combo:
                kwarg.update(opts)
            # Exercise both sparse and dense weight storage.
            for w_stype in ('row_sparse', 'default'):
                compare_optimizer(py_opt(**kwarg),
                                  fused_opt(use_fused_step=True, lazy_update=True, **kwarg),
                                  shapes, dtype,
                                  w_stype=w_stype, g_stype='row_sparse')
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_std_sparse_sgd():
    """SGD with lazy_update=False on row_sparse gradients.

    The fused standard-update path must match the non-fused step for
    every option combination, with both sparse and dense weights.
    """
    opt1 = mx.optimizer.SGD
    opt2 = mx.optimizer.SGD
    shapes = [(3, 4, 5), (10, 4), (7,)]
    mom_options = [{}, {'momentum': 0.9}]
    cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
    rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
    agg_options = [{'aggregate_num': 0}, {'aggregate_num': 1},
                   {'aggregate_num': 4}, {'aggregate_num': np.inf}]
    for dtype in [np.float32]:
        for params in itertools.product(mom_options, cg_options, rg_options,
                                        wd_options, agg_options):
            # Merge one option dict from each axis into a single kwargs dict.
            kwarg = {k: v for param in params for k, v in param.items()}
            compare_optimizer(opt1(use_fused_step=False, **kwarg),
                              opt2(use_fused_step=True, lazy_update=False, **kwarg), shapes, dtype,
                              w_stype='row_sparse', g_stype='row_sparse')
            compare_optimizer(opt1(use_fused_step=False, **kwarg),
                              opt2(use_fused_step=True, lazy_update=False, **kwarg), shapes, dtype,
                              w_stype='default', g_stype='row_sparse')
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_nag():
    """NAG: the non-fused step must match the fused implementation."""
    opt_cls = mx.optimizer.NAG
    shapes = [(3, 4, 5), (10, 4), (7,)]
    option_axes = (
        [{}, {'momentum': 0.9}],                               # momentum
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],  # clipping
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],   # rescale
        [{}, {'wd': 0.03}, {'wd': 0.05}],                      # weight decay
        [{'multi_precision': False}, {'multi_precision': True}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],     # aggregation
    )
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_axes):
            kwarg = {}
            for opts in combo:
                kwarg.update(opts)
            # fp16 is only exercised with multi_precision enabled.
            if dtype == np.float16 and not kwarg.get('multi_precision'):
                continue
            compare_optimizer(opt_cls(use_fused_step=False, **kwarg),
                              opt_cls(use_fused_step=True, **kwarg),
                              shapes, dtype, rtol=1e-3, atol=1e-4)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_lars():
    """LARS: the non-fused step must match the fused implementation."""
    opt1 = mx.optimizer.LARS
    opt2 = mx.optimizer.LARS
    shapes = [(3, 4, 5), (10, 4), (7,)]
    eta_options = [{}, {'eta': 0.002}, {'eta': 0.01}]
    mom_options = [{'momentum': 0.0}, {'momentum': 0.9}]
    cg_options = [{}, {'clip_gradient': 0.4}]
    rg_options = [{}, {'rescale_grad': 0.14}]
    wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}]
    mp_options = [{'multi_precision': False}, {'multi_precision': True}]
    agg_options = [{'aggregate_num': 0}, {'aggregate_num': 1},
                   {'aggregate_num': 4}, {'aggregate_num': np.inf}]
    for dtype in [np.float16, np.float32]:
        for params in itertools.product(eta_options, mom_options, cg_options, rg_options,
                                        wd_options, mp_options, agg_options):
            # Merge one option dict from each axis into a single kwargs dict.
            kwarg = {k: v for param in params for k, v in param.items()}
            # fp16 is only exercised with multi_precision enabled.
            if (dtype == np.float16 and ('multi_precision' not in kwarg or
                                         not kwarg['multi_precision'])):
                continue
            compare_optimizer(opt1(use_fused_step=False, **kwarg),
                              opt2(use_fused_step=True, **kwarg),
                              shapes, dtype, rtol=1e-3, atol=1e-3)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_lamb():
    """LAMB: the non-fused step must match the fused implementation."""
    opt1 = mx.optimizer.LAMB
    opt2 = mx.optimizer.LAMB
    shapes = [(3, 4, 5), (10, 4), (7,)]
    beta1_options = [{}, {'beta1': 0.5}]
    beta2_options = [{}, {'beta2': 0.8}]
    cg_options = [{}, {'clip_gradient': 0.4}]
    rg_options = [{}, {'rescale_grad': 0.14}]
    wd_options = [{}, {'wd': 0.03}]
    bc_options = [{'bias_correction': False}, {'bias_correction': True}]
    lb_options = [{'lower_bound': None}, {'lower_bound': 1e-3}]
    ub_options = [{'upper_bound': None}, {'upper_bound': 10}]
    mp_options = [{'multi_precision': False}, {'multi_precision': True}]
    agg_options = [{'aggregate_num': 0}, {'aggregate_num': 1},
                   {'aggregate_num': 4}]
    for dtype in [np.float16, np.float32]:
        for params in itertools.product(beta1_options, beta2_options, cg_options, rg_options,
                                        wd_options, bc_options, lb_options, ub_options,
                                        mp_options, agg_options):
            # Merge one option dict from each axis into a single kwargs dict.
            kwarg = {k: v for param in params for k, v in param.items()}
            # fp16 is only exercised with multi_precision enabled.
            if (dtype == np.float16 and ('multi_precision' not in kwarg or
                                         not kwarg['multi_precision'])):
                continue
            compare_optimizer(opt1(use_fused_step=False, **kwarg),
                              opt2(use_fused_step=True, **kwarg),
                              shapes, dtype, rtol=1e-3, atol=1e-3)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_lans():
    """LANS: the non-fused step must match the fused implementation."""
    opt_cls = mx.optimizer.LANS
    shapes = [(3, 4, 5), (10, 4), (7,)]
    option_axes = (
        [{}, {'beta1': 0.5}],
        [{}, {'beta2': 0.8}],
        [{}, {'clip_gradient': 0.4}],
        [{}, {'rescale_grad': 0.14}],
        [{}, {'wd': 0.03}],
        [{'lower_bound': None}, {'lower_bound': 1e-3}],
        [{'upper_bound': None}, {'upper_bound': 10}],
        [{'multi_precision': False}, {'multi_precision': True}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}],
    )
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_axes):
            kwarg = {}
            for opts in combo:
                kwarg.update(opts)
            # fp16 is only exercised with multi_precision enabled.
            if dtype == np.float16 and not kwarg.get('multi_precision'):
                continue
            compare_optimizer(opt_cls(use_fused_step=False, **kwarg),
                              opt_cls(use_fused_step=True, **kwarg),
                              shapes, dtype, rtol=1e-3, atol=1e-3)
@with_seed()
def test_sgld():
    """Check that two SGLD instances produce identical seeded noise trajectories."""
    opt_cls = mx.optimizer.SGLD
    shapes = [(3, 4, 5), (10, 4), (7,)]
    noise_seeds = [1234, 42]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],
        [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}],
        [{'multi_precision': False}, {'multi_precision': True}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for seed in noise_seeds:
        for dtype in [np.float16, np.float32]:
            for combo in itertools.product(*option_grid):
                kwarg = {}
                for piece in combo:
                    kwarg.update(piece)
                # fp16 is only exercised together with a fp32 master copy.
                if dtype == np.float16 and not kwarg.get('multi_precision', False):
                    continue
                # fp16 needs a looser absolute tolerance.
                atol = 1e-2 if dtype == np.float16 else 1e-3
                rtol = 1e-4 if dtype == np.float16 else 1e-5
                compare_optimizer_noise_seeded(opt_cls(**kwarg),
                                               opt_cls(**kwarg),
                                               shapes, dtype, seed, atol=atol, rtol=rtol)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_ftml():
    """Check that fused and unfused FTML steps agree over a hyper-parameter grid."""
    ref_cls = mx.optimizer.FTML
    fused_cls = mx.optimizer.FTML
    shapes = [(3, 4, 5), (10, 4), (7,)]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{}, {'beta1': 0.5}, {'beta1': 0.7}],
        [{}, {'beta2': 0.8}, {'beta2': 0.9}],
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],
        [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}],
        [{'multi_precision': False}, {'multi_precision': True}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_grid):
            kwarg = {}
            for piece in combo:
                kwarg.update(piece)
            # fp16 is only exercised together with a fp32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision', False):
                continue
            compare_optimizer(ref_cls(use_fused_step=False, **kwarg),
                              fused_cls(use_fused_step=True, **kwarg),
                              shapes, dtype, rtol=1e-3, atol=1e-4)
# Sparse ADAM
class PySparseAdam(mx.optimizer.Optimizer):
    """Python reference implementation of sparse Adam.

    Row-by-row mirror of the fused Adam optimizer; with ``lazy_update`` it
    skips rows whose gradient is entirely zero, matching the sparse
    (row_sparse) update semantics used by the fused kernel.
    """
    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
                 lazy_update=False, **kwargs):
        super(PySparseAdam, self).__init__(learning_rate=learning_rate, **kwargs)
        self.beta1 = beta1      # decay rate of the first moment (mean)
        self.beta2 = beta2      # decay rate of the second moment (variance)
        self.epsilon = epsilon  # small constant to avoid division by zero
        self.lazy_update = lazy_update  # skip rows whose gradient is all zeros
    def create_state(self, index, weight):
        """Create additional optimizer state: mean, variance
        Parameters
        ----------
        weight : NDArray
            The weight data
        """
        return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
                mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance
    def step(self, indices, weights, grads, states):
        """Perform an optimization step using gradients and states.
        Parameters
        ----------
        indices : list of int
            List of unique indices of the parameters into the individual learning rates
            and weight decays. Learning rates and weight decay may be set via `set_lr_mult()`
            and `set_wd_mult()`, respectively.
        weights : list of NDArray
            List of parameters to be updated.
        grads : list of NDArray
            List of gradients of the objective with respect to this parameter.
        states : List of any obj
            List of state returned by `create_state()`.
        """
        for index, weight, grad, state in zip(indices, weights, grads, states):
            self._update_count(index)
            lr = self._get_lr(index)
            wd = self._get_wd(index)
            t = self._index_update_count[index]
            mean, variance = state
            num_rows = weight.shape[0]
            # Fold the bias-correction terms (1 - beta^t) into the learning rate.
            coef1 = 1. - self.beta1 ** t
            coef2 = 1. - self.beta2 ** t
            lr *= math.sqrt(coef2) / coef1
            for row in range(num_rows):
                # check row slices of all zeros
                all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(),
                                                       np.zeros_like(grad[row].asnumpy()))
                # skip zeros during lazy update
                if all_zeros and self.lazy_update:
                    continue
                grad[row] *= self.rescale_grad
                # clip gradients
                if self.clip_gradient is not None:
                    mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])
                # L2 weight decay is folded into the gradient before the moments.
                grad[row] += wd * weight[row]
                # update mean
                mean[row] *= self.beta1
                mean[row] += grad[row] * (1. - self.beta1)
                # update variance
                # NOTE: the square is written in place into grad[row]; the raw
                # gradient is not read again for this row.
                variance[row] *= self.beta2
                variance[row] += (1 - self.beta2) * mx.nd.square(grad[row], out=grad[row])
                # update weight
                weight[row] -= lr * mean[row] / (mx.nd.sqrt(variance[row]) + self.epsilon)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_adam():
    """Check that fused and unfused Adam steps agree over a hyper-parameter grid."""
    ref_cls = mx.optimizer.Adam
    fused_cls = mx.optimizer.Adam
    shapes = [(3, 4, 5), (10, 4), (7,)]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{}, {'beta1': 0.5}, {'beta1': 0.7}],
        [{}, {'beta2': 0.8}, {'beta2': 0.9}],
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],
        [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}],
        [{'multi_precision': False}, {'multi_precision': True}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_grid):
            kwarg = {}
            for piece in combo:
                kwarg.update(piece)
            # fp16 is only exercised together with a fp32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision', False):
                continue
            # atol 2e-5 needed to pass with seed 1248389097
            compare_optimizer(ref_cls(use_fused_step=False, **kwarg),
                              fused_cls(use_fused_step=True, **kwarg), shapes, dtype,
                              rtol=1e-4, atol=2e-5)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_sparse_adam():
    """Compare the Python reference sparse Adam against the fused Adam optimizer
    over dense and row_sparse weight/gradient storage combinations."""
    ref_cls = PySparseAdam
    fused_cls = mx.optimizer.Adam
    shapes = [(3, 4, 5), (10, 4), (7,)]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{}, {'beta1': 0.5}],
        [{}, {'beta2': 0.8}],
        [{}, {'clip_gradient': 0.4}],
        [{}, {'rescale_grad': 0.14}],
        [{}, {'wd': 0.03}],
        [{'multi_precision': False}, {'multi_precision': True}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_grid):
            kwarg = {}
            for piece in combo:
                kwarg.update(piece)
            # fp16 is only exercised together with a fp32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision', False):
                continue
            # atol 2e-5 needed to pass with seed 1248389097
            compare_optimizer(ref_cls(lazy_update=False, **kwarg),
                              fused_cls(use_fused_step=True, lazy_update=False, **kwarg),
                              shapes, dtype, rtol=1e-4, atol=2e-5)
            # atol 2e-5 needed to pass with seed 781809840
            sparse_combos = [
                (True, 'row_sparse', 'row_sparse'),
                (False, 'row_sparse', 'row_sparse'),
                (True, 'default', 'row_sparse'),
                (False, 'default', 'row_sparse'),
            ]
            for lazy, w_stype, g_stype in sparse_combos:
                compare_optimizer(ref_cls(lazy_update=lazy, **kwarg),
                                  fused_cls(use_fused_step=True, lazy_update=lazy, **kwarg),
                                  shapes, dtype, w_stype=w_stype, g_stype=g_stype,
                                  rtol=1e-4, atol=2e-5)
@xfail_when_nonstandard_decimal_separator
@with_seed()
@pytest.mark.skip(reason="Flaky test https://github.com/apache/incubator-mxnet/issues/18400")
def test_adamax():
    """Compare two independently-stepped Adamax instances over a hyper-parameter grid."""
    opt_cls = mx.optimizer.Adamax
    shapes = [(3, 4, 5), (10, 4), (7,)]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{}, {'beta1': 0.5}, {'beta1': 0.7}],
        [{}, {'beta2': 0.8}, {'beta2': 0.9}],
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],
        [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}],
        [{'multi_precision': False}, {'multi_precision': True}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_grid):
            kwarg = {}
            for piece in combo:
                kwarg.update(piece)
            # fp16 is only exercised together with a fp32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision', False):
                continue
            compare_optimizer(opt_cls(**kwarg), opt_cls(**kwarg), shapes, dtype)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_signum():
    """Check that fused and unfused Signum steps agree over a hyper-parameter grid."""
    ref_cls = mx.optimizer.Signum
    fused_cls = mx.optimizer.Signum
    shapes = [(3, 4, 5), (10, 4), (7,)]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],
        [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}],
        [{}, {'wd_lh': 0.015}, {'wd_lh': 0.0}],
        [{}, {'momentum': 0.9}],
        [{'learning_rate': 0.05}, {'learning_rate': 0.01}],
        [{'multi_precision': False}, {'multi_precision': True}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_grid):
            kwarg = {}
            for piece in combo:
                kwarg.update(piece)
            # fp16 is only exercised together with a fp32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision', False):
                continue
            # fp16 needs looser tolerances.
            rtol, atol = (1e-3, 1e-4) if dtype == np.float16 else (1e-4, 1e-5)
            compare_optimizer(ref_cls(use_fused_step=False, **kwarg),
                              fused_cls(use_fused_step=True, **kwarg), shapes, dtype,
                              rtol=rtol, atol=atol)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_rms():
    """Check that fused and unfused RMSProp steps agree over a hyper-parameter grid."""
    ref_cls = mx.optimizer.RMSProp
    fused_cls = mx.optimizer.RMSProp
    shapes = [(3, 4, 5), (10, 4), (7,)]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{}, {'rho': 0.5}],
        [{}, {'clip_gradient': 0.4}],
        [{}, {'clip_weights': 0.01}],
        [{'centered': False}, {'centered': True}],
        [{}, {'rescale_grad': 0.14}],
        [{}, {'wd': 0.03}],
        [{'momentum': 0.0}, {'momentum': 0.9}],
        [{'multi_precision': False}, {'multi_precision': True}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for dtype in [np.float16, np.float32]:
        # Reduce floating point compare tolerance to avoid flaky test failure.
        rtol, atol = (1e-1, 1e-1) if dtype == np.float16 else (1e-2, 1e-2)
        for combo in itertools.product(*option_grid):
            kwarg = {}
            for piece in combo:
                kwarg.update(piece)
            # fp16 is only exercised together with a fp32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision', False):
                continue
            compare_optimizer(ref_cls(use_fused_step=False, **kwarg),
                              fused_cls(use_fused_step=True, **kwarg), shapes, dtype,
                              rtol=rtol, atol=atol)
            # Sparse gradients are only compared on CPU.
            if default_context() == mx.cpu():
                compare_optimizer(ref_cls(use_fused_step=False, **kwarg),
                                  fused_cls(use_fused_step=True, **kwarg),
                                  shapes, dtype, g_stype='row_sparse', rtol=rtol, atol=atol)
class PySparseFtrl(mx.optimizer.Optimizer):
    """Python reference implementation of sparse Ftrl optimizer.
    Referenced from *Ad Click Prediction: a View from the Trenches*, available at
    http://dl.acm.org/citation.cfm?id=2488200.
    Parameters
    ----------
    lamda1 : float, optional
        L1 regularization coefficient.
    learning_rate : float, optional
        The initial learning rate.
    beta : float, optional
        Per-coordinate learning rate correlation parameter.
    eta :
        .. math::
           \\eta_{t,i} = \\frac{learningrate}{\\beta+\\sqrt{\\sum_{s=1}^tg_{s,i}^t}}
    """
    def __init__(self, lamda1=0.01, learning_rate=0.1, beta=1, **kwargs):
        super(PySparseFtrl, self).__init__(**kwargs)
        self.lamda1 = lamda1  # L1 regularization strength
        self.beta = beta      # per-coordinate learning-rate parameter
        self.lr = learning_rate
    def create_state(self, index, weight):
        # FTRL keeps two accumulators per weight:
        #   z -- gradient sum adjusted by the per-coordinate learning rate
        #   n -- sum of squared gradients
        return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype),  # z
                mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype))  # n
    def step(self, indices, weights, grads, states):
        """Perform an optimization step using gradients and states.
        Parameters
        ----------
        indices : list of int
            List of unique indices of the parameters into the individual learning rates
            and weight decays. Learning rates and weight decay may be set via `set_lr_mult()`
            and `set_wd_mult()`, respectively.
        weights : list of NDArray
            List of parameters to be updated.
        grads : list of NDArray
            List of gradients of the objective with respect to this parameter.
        states : List of any obj
            List of state returned by `create_state()`.
        """
        for index, weight, grad, state in zip(indices, weights, grads, states):
            self._update_count(index)
            wd = self._get_wd(index)
            lr = self._get_lr(index)
            num_rows = weight.shape[0]
            z, n = state
            for row in range(num_rows):
                # Rows whose gradient is entirely zero are skipped (sparse update).
                all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))
                if all_zeros:
                    continue
                grad[row] *= self.rescale_grad
                if self.clip_gradient is not None:
                    mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])
                # update z[row], n[row]
                # sigma = (sqrt(n + g^2) - sqrt(n)) / lr
                sigma = - mx.nd.sqrt(n[row])
                n[row] += mx.nd.square(grad[row])
                denom = mx.nd.sqrt(n[row])
                sigma += denom
                sigma /= lr
                z[row] += grad[row] - sigma * weight[row]
                # update weight
                # w = -sign(z) * max(|z| - lamda1, 0) / ((beta + sqrt(n)) / lr + wd)
                denom += self.beta
                denom /= lr
                denom += wd
                d = mx.nd.sign(z[row]) * mx.nd.maximum(mx.nd.abs(z[row]) - self.lamda1, 0)
                weight[row] = - d / denom
@xfail_when_nonstandard_decimal_separator
@with_seed()
@retry(3)
def test_ftrl():
    """Check that fused and unfused Ftrl steps agree over a hyper-parameter grid."""
    ref_cls = mx.optimizer.Ftrl
    fused_cls = mx.optimizer.Ftrl
    shapes = [(3, 4, 5), (10, 4), (7,)]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{'lamda1': 0.}, {'lamda1': 0.1}],
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],
        [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}],
        [{'multi_precision': False}, {'multi_precision': True}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_grid):
            kwarg = {}
            for piece in combo:
                kwarg.update(piece)
            # fp16 is only exercised together with a fp32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision', False):
                continue
            # fp16 needs looser tolerances.
            rtol, atol = (1e-3, 1e-3) if dtype == np.float16 else (1e-4, 1e-4)
            compare_optimizer(ref_cls(use_fused_step=False, **kwarg),
                              fused_cls(use_fused_step=True, **kwarg), shapes, dtype,
                              rtol=rtol, atol=atol)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_sparse_ftrl():
    """Compare the Python reference sparse Ftrl against the fused Ftrl optimizer
    with row_sparse weights and gradients."""
    ref_cls = PySparseFtrl
    fused_cls = mx.optimizer.Ftrl
    shapes = [(3, 4, 5), (10, 4), (7,)]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{'lamda1': 0.}, {'lamda1': 0.1}],
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],
        [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}],
        [{'multi_precision': False}, {'multi_precision': True}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_grid):
            kwarg = {}
            for piece in combo:
                kwarg.update(piece)
            # fp16 is only exercised together with a fp32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision', False):
                continue
            # fp16 needs looser tolerances.
            rtol, atol = (1e-3, 1e-3) if dtype == np.float16 else (1e-4, 1e-4)
            compare_optimizer(ref_cls(**kwarg), fused_cls(**kwarg), shapes,
                              dtype, w_stype='row_sparse', g_stype='row_sparse',
                              rtol=rtol, atol=atol)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_nadam():
    """Compare two independently-stepped Nadam instances over a hyper-parameter grid."""
    opt_cls = mx.optimizer.Nadam
    shapes = [(3, 4, 5), (10, 4), (7,)]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{}, {'beta1': 0.5}],
        [{}, {'beta2': 0.8}],
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],
        [{}, {'schedule_decay': 0.008}],
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],
        [{}, {'wd': 0.03}, {'wd': 0.05}],
        [{'multi_precision': False}, {'multi_precision': True}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_grid):
            kwarg = {}
            for piece in combo:
                kwarg.update(piece)
            # fp16 is only exercised together with a fp32 master copy.
            if dtype == np.float16 and not kwarg.get('multi_precision', False):
                continue
            compare_optimizer(opt_cls(**kwarg), opt_cls(**kwarg), shapes, dtype)
class PySparseAdaGrad(mx.optimizer.Optimizer):
    """Python reference implementation of sparse Adagrad optimizer.
    This class implements the AdaGrad optimizer described in *Adaptive Subgradient
    Methods for Online Learning and Stochastic Optimization*, and available at
    http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.
    Parameters
    ----------
    learning_rate : float, default 0.01
        The initial learning rate. If None, the optimization will use the
        learning rate from ``lr_scheduler``. If not None, it will overwrite
        the learning rate in ``lr_scheduler``. If None and ``lr_scheduler``
        is also None, then it will be set to 0.01 by default.
    epsilon : float, default 1e-6
        Small value to avoid division by 0.
    """
    def __init__(self, learning_rate=0.01, epsilon=1e-6, **kwargs):
        super(PySparseAdaGrad, self).__init__(learning_rate=learning_rate,
                                              **kwargs)
        self.epsilon = epsilon  # small constant to avoid division by zero
    def create_state(self, index, weight):
        # Accumulated sum of squared gradients; created with the same storage
        # type as the weight so row_sparse weights get a row_sparse history.
        return mx.nd.zeros(weight.shape, weight.context, stype=weight.stype)  # history
    def step(self, indices, weights, grads, states):
        """Perform an optimization step using gradients and states.
        Parameters
        ----------
        indices : list of int
            List of unique indices of the parameters into the individual learning rates
            and weight decays. Learning rates and weight decay may be set via `set_lr_mult()`
            and `set_wd_mult()`, respectively.
        weights : list of NDArray
            List of parameters to be updated.
        grads : list of NDArray
            List of gradients of the objective with respect to this parameter.
        states : List of any obj
            List of state returned by `create_state()`.
        """
        for index, weight, grad, state in zip(indices, weights, grads, states):
            self._update_count(index)
            wd = self._get_wd(index)
            lr = self._get_lr(index)
            num_rows = weight.shape[0]
            history = state
            for row in range(num_rows):
                # Rows whose gradient is entirely zero are skipped (sparse update).
                all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))
                if all_zeros:
                    continue
                grad[row] *= self.rescale_grad
                if self.clip_gradient is not None:
                    mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])
                # L2 weight decay is folded into the gradient.
                grad[row] += wd * weight[row]
                # update history[row]
                history[row] += mx.nd.square(grad[row])
                denom = mx.nd.sqrt(history[row])
                denom += self.epsilon
                # update weight
                weight[row] -= lr * grad[row] / denom
@with_seed()
def test_adagrad():
    """Check that fused and unfused AdaGrad steps agree over a hyper-parameter grid."""
    ref_cls = mx.optimizer.AdaGrad
    fused_cls = mx.optimizer.AdaGrad
    shapes = [(3, 4, 5), (10, 4), (7,)]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{}, {'epsilon': 1e-8}],
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],
        [{}, {'wd': 0.0}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_grid):
            kwarg = {}
            for piece in combo:
                kwarg.update(piece)
            # fp16 always runs with a fp32 master copy of the weights.
            if dtype is np.float16:
                kwarg['multi_precision'] = True
            compare_optimizer(ref_cls(use_fused_step=False, **kwarg),
                              fused_cls(use_fused_step=True, **kwarg), shapes, dtype)
@xfail_when_nonstandard_decimal_separator
@with_seed()
def test_sparse_adagrad():
    """Compare the Python reference sparse AdaGrad against the fused AdaGrad optimizer."""
    ref_cls = PySparseAdaGrad
    fused_cls = mx.optimizer.AdaGrad
    shapes = [(3, 4, 5), (10, 4), (7,)]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{}, {'epsilon': 1e-8}],
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],
        [{}, {'wd': 0.0}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_grid):
            kwarg = {}
            for piece in combo:
                kwarg.update(piece)
            # fp16 always runs with a fp32 master copy of the weights.
            if dtype is np.float16:
                kwarg['multi_precision'] = True
            # row_sparse weights are only compared when wd == 0.
            if kwarg.get('wd', 0.0) == 0.0:
                compare_optimizer(ref_cls(**kwarg), fused_cls(use_fused_step=True, **kwarg),
                                  shapes, dtype, w_stype='row_sparse', g_stype='row_sparse')
            compare_optimizer(ref_cls(**kwarg), fused_cls(use_fused_step=True, **kwarg),
                              shapes, dtype, g_stype='row_sparse')
@with_seed()
def test_adadelta():
    """Compare two independently-stepped AdaDelta instances over a hyper-parameter grid."""
    opt_cls = mx.optimizer.AdaDelta
    shapes = [(3, 4, 5), (10, 4), (7,)]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{'rho': 0.9}],
        [{}, {'epsilon': 1e-8}],
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],
        [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_grid):
            kwarg = {}
            for piece in combo:
                kwarg.update(piece)
            # fp16 always runs with a fp32 master copy of the weights.
            if dtype is np.float16:
                kwarg['multi_precision'] = True
            compare_optimizer(opt_cls(**kwarg), opt_cls(**kwarg), shapes, dtype)
@with_seed()
def test_dcasgd():
    """Compare two independently-stepped DCASGD instances over a hyper-parameter grid."""
    opt_cls = mx.optimizer.DCASGD
    shapes = [(3, 4, 5), (10, 4), (7,)]
    # Each inner list holds the alternative settings for one hyper-parameter.
    option_grid = [
        [{}, {'lamda': 0.01}, {'lamda': 0.1}],
        [{}, {'momentum': 0.0}, {'momentum': 0.9}],
        [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}],
        [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}],
        [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}],
        [{'aggregate_num': 0}, {'aggregate_num': 1},
         {'aggregate_num': 4}, {'aggregate_num': np.inf}],
    ]
    for dtype in [np.float16, np.float32]:
        for combo in itertools.product(*option_grid):
            kwarg = {}
            for piece in combo:
                kwarg.update(piece)
            # fp16 always runs with a fp32 master copy of the weights.
            if dtype is np.float16:
                kwarg['multi_precision'] = True
            compare_optimizer(opt_cls(**kwarg), opt_cls(**kwarg), shapes, dtype)
def test_factor_scheduler():
    """FactorScheduler: constant warmup, then decay by `factor` every `step` updates."""
    base_lr, step, factor = 1, 100, 0.1
    sched = mx.lr_scheduler.FactorScheduler(step, factor, stop_factor_lr=1e-4, base_lr=base_lr,
                                            warmup_steps=20, warmup_begin_lr=0.1, warmup_mode='constant')
    # Constant warmup holds the warmup lr for the first 20 updates.
    assert sched(0) == 0.1
    np.testing.assert_almost_equal(sched(10), 0.1)
    # Past warmup the base lr applies, decayed every `step` updates ...
    assert sched(21) == base_lr, sched(21)
    for num_update, expected_lr in [(101, base_lr * factor),
                                    (201, base_lr * factor * factor),
                                    # ... and clipped at stop_factor_lr.
                                    (1000, 1e-4)]:
        np.testing.assert_almost_equal(sched(num_update), expected_lr)
def test_multifactor_scheduler():
    """MultiFactorScheduler: linear warmup, then decay by `factor` at each listed step."""
    base_lr, factor = 0.1, 0.1
    sched = mx.lr_scheduler.MultiFactorScheduler([15, 25], factor, base_lr=base_lr,
                                                 warmup_steps=10, warmup_begin_lr=0.05, warmup_mode='linear')
    # Linear warmup from 0.05 towards base_lr over the first 10 updates,
    # then one decay after update 15 and another after update 25.
    assert sched(0) == 0.05
    for num_update, expected_lr in [(5, 0.05 + (base_lr - 0.05) / 2),
                                    (15, base_lr),
                                    (16, base_lr * factor),
                                    (20, base_lr * factor),
                                    (26, base_lr * factor * factor),
                                    (100, base_lr * factor * factor)]:
        np.testing.assert_almost_equal(sched(num_update), expected_lr)
def test_poly_scheduler():
    """PolyScheduler: linear warmup, then polynomial decay towards final_lr."""
    base_lr, final_lr, steps = 3, 0, 1000
    poly_sched = mx.lr_scheduler.PolyScheduler(steps, base_lr=base_lr, pwr=2, final_lr=final_lr,
                                               warmup_steps=100, warmup_begin_lr=0, warmup_mode='linear')
    # Linear warmup from 0 to base_lr over the first 100 updates.
    np.testing.assert_almost_equal(poly_sched(0), 0)
    np.testing.assert_almost_equal(poly_sched(50), float(base_lr) / 2)
    np.testing.assert_almost_equal(poly_sched(100), base_lr)
    # Decreasing after warmup, reaching final_lr at `steps`.
    assert poly_sched(101) < poly_sched(100)
    assert poly_sched(500) < 1.6
    np.testing.assert_almost_equal(poly_sched(steps), final_lr)
def test_cosine_scheduler():
    """CosineScheduler without warmup: cosine decay from base_lr down to final_lr."""
    base_lr, final_lr, steps = 3, 0.1, 1000
    cosine_sched = mx.lr_scheduler.CosineScheduler(steps, base_lr=base_lr, final_lr=final_lr)
    # Endpoints of the schedule.
    np.testing.assert_almost_equal(cosine_sched(0), base_lr)
    np.testing.assert_almost_equal(cosine_sched(steps), final_lr)
    # Halfway through, the lr sits near the midpoint of base_lr and final_lr.
    assert cosine_sched(500) > 1.5
| |
import sublime
import operator
import os
import re
# Helper module
try:
from .helper import H
except:
from helper import H
# Settings variables
try:
from . import settings as S
except:
import settings as S
# DBGp protocol constants
try:
from . import dbgp
except:
import dbgp
# Config module
from .config import get_value, get_window_value, set_window_value
# Util module
from .util import get_real_path, get_region_icon, save_watch_data
# Identifiers for the kinds of debug data shown in a view
# (presumably stored as view metadata by callers — confirm against users of these keys).
DATA_BREAKPOINT = 'breakpoint'
DATA_CONTEXT = 'context'
DATA_STACK = 'stack'
DATA_WATCH = 'watch'
# Titles of the debug views; used below (e.g. get_debug_index) to identify views.
TITLE_WINDOW_BREAKPOINT = "Xdebug Breakpoint"
TITLE_WINDOW_CONTEXT = "Xdebug Context"
TITLE_WINDOW_STACK = "Xdebug Stack"
TITLE_WINDOW_WATCH = "Xdebug Watch"
def close_debug_windows():
    """
    Close all debugging related views in active window.
    """
    window = sublime.active_window()
    debug_views = [view for view in window.views() if is_debug_view(view)]
    for view in debug_views:
        # A view must be focused before 'close' will act on it.
        window.focus_view(view)
        window.run_command('close')
    # Also hide the Xdebug output panel.
    window.run_command('hide_panel', {"panel": 'output.xdebug'})
def generate_breakpoint_output():
    """
    Generate output with all configured breakpoints.
    """
    output = H.unicode_string('')
    if S.BREAKPOINT is None:
        return output

    def _line_key(item):
        # Numeric line numbers sort numerically; anything else sorts last.
        lineno = item[0]
        is_numeric = isinstance(lineno, int) or H.is_digit(lineno)
        return (int(lineno) if is_numeric else float('inf'), lineno)

    for filename, breakpoint_data in sorted(S.BREAKPOINT.items()):
        if not breakpoint_data:
            continue
        entry = "=> %s\n" % filename
        for lineno, bp in sorted(breakpoint_data.items(), key=_line_key):
            # Do not show temporary breakpoint
            run_bp = S.BREAKPOINT_RUN
            if run_bp is not None and run_bp['filename'] == filename and run_bp['lineno'] == lineno:
                continue
            # Whether breakpoint is enabled or disabled
            entry += '\t'
            entry += '|+|' if bp['enabled'] else '|-|'
            # Line number
            entry += ' %s' % lineno
            # Conditional expression
            if bp['expression'] is not None:
                entry += ' -- "%s"' % bp['expression']
            entry += "\n"
        output += H.unicode_string(entry)
    return output
def generate_context_output(context, indent=0):
    """
    Generate readable context from dictionary with context data.
    Keyword arguments:
    context -- Dictionary with context data. Each value is expected to be a
               dict with the keys 'name', 'type', 'value', 'children' and
               'numchildren' (accessed below).
    indent -- Indent level.
    """
    # Generate output text for values
    values = H.unicode_string('')
    if not isinstance(context, dict):
        return values
    for variable in context.values():
        has_children = False
        property_text = ''
        # Set indentation
        for i in range(indent): property_text += '\t'
        # Property with value: "name = (type) value"
        if variable['value'] is not None:
            if variable['name']:
                property_text += '{name} = '
            property_text += '({type}) {value}\n'
        # Property with children: "name = type[numchildren]"
        elif isinstance(variable['children'], dict) and variable['numchildren'] is not None:
            has_children = True
            if variable['name']:
                property_text += '{name} = '
            property_text += '{type}[{numchildren}]\n'
        # Unknown property: "name = <type>"
        else:
            if variable['name']:
                property_text += '{name} = '
            property_text += '<{type}>\n'
        # Remove newlines in value to prevent incorrect indentation
        value = ''
        if variable['value'] and len(variable['value']) > 0:
            value = variable['value'].replace("\r\n", "\n").replace("\n", " ")
        # Format string and append to output
        values += H.unicode_string(property_text \
                        .format(value=value, type=variable['type'], name=variable['name'], numchildren=variable['numchildren']))
        # Append property children to output
        if has_children:
            # Get children for property (no need to convert, already unicode)
            values += generate_context_output(variable['children'], indent+1)
            # Use ellipsis to indicate that results have been truncated
            # (fewer children present than 'numchildren' claims).
            limited = False
            if isinstance(variable['numchildren'], int) or H.is_digit(variable['numchildren']):
                if int(variable['numchildren']) != len(variable['children']):
                    limited = True
            elif len(variable['children']) > 0 and not variable['numchildren']:
                limited = True
            if limited:
                for i in range(indent+1): values += H.unicode_string('\t')
                values += H.unicode_string('...\n')
    return values
def generate_stack_output(response):
    """
    Generate readable stack trace output from a DBGp stack response.
    Keyword arguments:
    response -- Iterable of XML elements with stack information from the
                debugger engine (tags matched against dbgp.ELEMENT_STACK /
                dbgp.ELEMENT_PATH_STACK).
    """
    values = H.unicode_string('')
    # Display exception name and message
    if S.BREAKPOINT_EXCEPTION:
        values += H.unicode_string('[{name}] {message}\n' \
                                  .format(name=S.BREAKPOINT_EXCEPTION['name'], message=S.BREAKPOINT_EXCEPTION['message']))
    # Walk through elements in response
    has_output = False
    try:
        for child in response:
            # Get stack attribute values
            if child.tag == dbgp.ELEMENT_STACK or child.tag == dbgp.ELEMENT_PATH_STACK:
                stack_level = child.get(dbgp.STACK_LEVEL, 0)
                stack_type = child.get(dbgp.STACK_TYPE)
                stack_file = H.url_decode(child.get(dbgp.STACK_FILENAME))
                stack_line = child.get(dbgp.STACK_LINENO, 0)
                stack_where = child.get(dbgp.STACK_WHERE, '{unknown}')
                # Append values
                values += H.unicode_string('[{level}] (unknown).{where}:{lineno}\n' \
                                          .format(level=stack_level, type=stack_type, where=stack_where, lineno=stack_line, filename=stack_file))
                has_output = True
    except Exception:
        # Best effort: a malformed response simply yields no stack lines.
        # Narrowed from a bare `except` so KeyboardInterrupt/SystemExit
        # still propagate instead of being silently swallowed.
        pass
    # When no stack use values from exception
    if not has_output and S.BREAKPOINT_EXCEPTION:
        values += H.unicode_string('[{level}] (unknown).{where}:{lineno}\n' \
                                  .format(level=0, where='{unknown}', lineno=S.BREAKPOINT_EXCEPTION['lineno'], filename=S.BREAKPOINT_EXCEPTION['filename']))
    return values
def generate_watch_output():
    """
    Generate output with all watch expressions.
    """
    values = H.unicode_string('')
    if S.WATCH is None:
        return values
    for watch_data in S.WATCH:
        watch_entry = ''
        if watch_data and isinstance(watch_data, dict):
            # Whether watch expression is enabled or disabled
            if 'enabled' in watch_data.keys():
                if watch_data['enabled']:
                    watch_entry += '|+|'
                else:
                    watch_entry += '|-|'
            # Watch expression
            if 'expression' in watch_data.keys():
                watch_entry += ' "%s"' % watch_data['expression']
            # Evaluated value. Use .get() so an entry without a 'value' key
            # (e.g. not yet evaluated) is treated like an unevaluated one
            # instead of raising KeyError — consistent with the membership
            # checks used for the other optional keys above.
            if watch_data.get('value') is not None:
                watch_entry += ' = ' + generate_context_output(watch_data['value'])
            else:
                watch_entry += "\n"
        values += H.unicode_string(watch_entry)
    return values
def get_context_variable(context, variable_name):
    """
    Find a variable in the context data.
    Keyword arguments:
    context -- Dictionary with context data to search.
    variable_name -- Name of variable to find.
    """
    if not isinstance(context, dict):
        return None
    # Direct hit at this level wins over any nested match.
    if variable_name in context:
        return context[variable_name]
    # Otherwise search each variable's children depth-first.
    for entry in context.values():
        nested = entry['children']
        if isinstance(nested, dict):
            found = get_context_variable(nested, variable_name)
            if found:
                return found
    return None
def get_debug_index(name=None):
    """
    Retrieve configured group/index position of debug view(s) within active window.

    Returns a list with tuple entries for all debug views, or the list index
    (an int) of the debug view when a name is specified (None when not found).
    Structure of a tuple entry for a debug view is as follows:
    (group position in window, index position in group, name/title of debug view)

    Keyword arguments:
    name -- Name of debug view to get the list position of.
    """
    # Set group and index for each debug view
    breakpoint_group = get_value(S.KEY_BREAKPOINT_GROUP, -1)
    breakpoint_index = get_value(S.KEY_BREAKPOINT_INDEX, 0)
    context_group = get_value(S.KEY_CONTEXT_GROUP, -1)
    context_index = get_value(S.KEY_CONTEXT_INDEX, 0)
    stack_group = get_value(S.KEY_STACK_GROUP, -1)
    stack_index = get_value(S.KEY_STACK_INDEX, 0)
    watch_group = get_value(S.KEY_WATCH_GROUP, -1)
    watch_index = get_value(S.KEY_WATCH_INDEX, 0)
    # Create list with all debug views and sort by group/index
    debug_list = []
    debug_list.append((breakpoint_group, breakpoint_index, TITLE_WINDOW_BREAKPOINT))
    debug_list.append((context_group, context_index, TITLE_WINDOW_CONTEXT))
    debug_list.append((stack_group, stack_index, TITLE_WINDOW_STACK))
    debug_list.append((watch_group, watch_index, TITLE_WINDOW_WATCH))
    debug_list.sort(key=operator.itemgetter(0,1))
    # Recalculate group/index position within boundaries of active window
    window = sublime.active_window()
    group_limit = window.num_groups()-1
    sorted_list = []
    last_group = None
    last_index = 0
    for debug in debug_list:
        group, index, title = debug
        # Clamp group position to the number of groups in the window
        if group > group_limit:
            group = group_limit
        # Set index position
        if group == last_group:
            # Same group as the previous debug view: place it right after
            last_index += 1
        else:
            # First view in this group: clamp index to the views in the group
            index_limit = len(window.views_in_group(group))
            if index > index_limit:
                index = index_limit
            last_group = group
            last_index = index
        # Add debug view with new group/index
        sorted_list.append((group, last_index, title))
    # Sort recalculated list by group/index
    sorted_list.sort(key=operator.itemgetter(0,1))
    # Find specified view by name/title of debug view
    if name is not None:
        try:
            return [view[2] for view in sorted_list].index(name)
        except ValueError:
            return None
    # List with all debug views
    return sorted_list
def get_response_properties(response, default_key=None):
    """
    Return a dictionary with available properties from response.

    Keyword arguments:
    response -- Response from debugger engine.
    default_key -- Index key to use when property has no name.
    """
    properties = H.new_dictionary()
    # Walk through elements in response
    for child in response:
        # Read property elements
        if child.tag == dbgp.ELEMENT_PROPERTY or child.tag == dbgp.ELEMENT_PATH_PROPERTY:
            # Get property attribute values
            property_name_short = child.get(dbgp.PROPERTY_NAME)
            property_name = child.get(dbgp.PROPERTY_FULLNAME, property_name_short)
            property_type = child.get(dbgp.PROPERTY_TYPE)
            property_children = child.get(dbgp.PROPERTY_CHILDREN)
            property_numchildren = child.get(dbgp.PROPERTY_NUMCHILDREN)
            property_classname = child.get(dbgp.PROPERTY_CLASSNAME)
            property_encoding = child.get(dbgp.PROPERTY_ENCODING)
            property_value = None
            # Set property value
            if child.text:
                property_value = child.text
                # Try to decode property value when encoded with base64
                if property_encoding == 'base64':
                    try:
                        property_value = H.base64_decode(child.text)
                    except:
                        pass
            if property_name is not None and len(property_name) > 0:
                property_key = property_name
                # Ignore following properties
                if property_name == "::":
                    continue
                # Avoid nasty static functions/variables from turning in an infinite loop
                if property_name.count("::") > 1:
                    continue
                # Filter password values
                if get_value(S.KEY_HIDE_PASSWORD, True) and property_name.lower().find('password') != -1 and property_value is not None:
                    property_value = '******'
            else:
                property_key = default_key
            # Store property
            if property_key:
                properties[property_key] = { 'name': property_name, 'type': property_type, 'value': property_value, 'numchildren': property_numchildren, 'children' : None }
                # Get values for children
                if property_children:
                    properties[property_key]['children'] = get_response_properties(child, default_key)
                # Set classname, if available, as type for object
                if property_classname and property_type == 'object':
                    properties[property_key]['type'] = property_classname
        # Handle error elements
        elif child.tag == dbgp.ELEMENT_ERROR or child.tag == dbgp.ELEMENT_PATH_ERROR:
            message = 'error'
            for step_child in child:
                # BUGFIX: parenthesize the tag checks — 'and' binds tighter than
                # 'or', so the original condition matched any ELEMENT_MESSAGE
                # even when it had no text, setting message to None.
                if (step_child.tag == dbgp.ELEMENT_MESSAGE or step_child.tag == dbgp.ELEMENT_PATH_MESSAGE) and step_child.text:
                    message = step_child.text
                    break
            if default_key:
                properties[default_key] = { 'name': None, 'type': message, 'value': None, 'numchildren': None, 'children': None }
    return properties
def has_debug_view(name=None):
    """
    Determine if the active window contains any debug view, or a specific one.

    Keyword arguments:
    name -- Name of debug view to search for in active window.
    """
    debug_views = [window_view for window_view in sublime.active_window().views()
                   if is_debug_view(window_view)]
    if name is None:
        return len(debug_views) > 0
    return any(debug_view.name() == name for debug_view in debug_views)
def is_debug_view(view):
    """
    Check if view name matches one of the debug window names/titles.

    Keyword arguments:
    view -- View reference which to check if name matches debug name/title.
    """
    debug_titles = (TITLE_WINDOW_BREAKPOINT, TITLE_WINDOW_CONTEXT,
                    TITLE_WINDOW_STACK, TITLE_WINDOW_WATCH)
    return view.name() in debug_titles
def set_layout(layout):
    """
    Toggle between debug and default window layouts.

    Keyword arguments:
    layout -- 'debug' switches to the configured debug layout; any other
              value restores the previously saved layout and view positions.
    """
    # Get active window and set reference to active view
    window = sublime.active_window()
    previous_active = window.active_view()
    # Do not set layout when disabled
    if get_value(S.KEY_DISABLE_LAYOUT):
        # Still record the current layout/indexes so a later restore is a no-op
        S.RESTORE_LAYOUT = window.get_layout()
        set_window_value('restore_layout', S.RESTORE_LAYOUT)
        S.RESTORE_INDEX = H.new_dictionary()
        set_window_value('restore_index', S.RESTORE_INDEX)
        return
    # Show debug layout
    if layout == 'debug':
        debug_layout = get_value(S.KEY_DEBUG_LAYOUT, S.LAYOUT_DEBUG)
        if window.get_layout() != debug_layout:
            # Save current layout
            S.RESTORE_LAYOUT = window.get_layout()
            set_window_value('restore_layout', S.RESTORE_LAYOUT)
            # Remember view indexes (group + position) so they can be restored
            S.RESTORE_INDEX = H.new_dictionary()
            for view in window.views():
                view_id = "%d" % view.id()
                group, index = window.get_view_index(view)
                S.RESTORE_INDEX[view_id] = { "group": group, "index": index }
            set_window_value('restore_index', S.RESTORE_INDEX)
            # Set debug layout (reset to normal layout first)
            window.set_layout(S.LAYOUT_NORMAL)
            window.set_layout(debug_layout)
    # Show previous (single) layout
    else:
        # Get previous layout configuration, falling back to persisted values
        if S.RESTORE_LAYOUT is None:
            S.RESTORE_LAYOUT = get_window_value('restore_layout', S.LAYOUT_NORMAL)
        if S.RESTORE_INDEX is None:
            S.RESTORE_INDEX = get_window_value('restore_index', {})
        # Restore layout
        window.set_layout(S.LAYOUT_NORMAL)
        window.set_layout(S.RESTORE_LAYOUT)
        for view in window.views():
            view_id = "%d" % view.id()
            # Set view indexes back to their remembered group/position
            if view_id in H.dictionary_keys(S.RESTORE_INDEX):
                v = S.RESTORE_INDEX[view_id]
                window.set_view_index(view, v["group"], v["index"])
    # Restore focus to previous active view
    if not previous_active is None:
        window.focus_view(previous_active)
def show_content(data, content=None):
    """
    Show content for specific data type in assigned window view.

    Note: When view does not exist, it will create one.

    Keyword arguments:
    data -- Data type constant (DATA_BREAKPOINT, DATA_CONTEXT, DATA_STACK, DATA_WATCH).
    content -- Content to display; generated here for breakpoint/watch data,
               expected from the caller for context/stack data.
    """
    # Handle data type
    if data == DATA_BREAKPOINT:
        title = TITLE_WINDOW_BREAKPOINT
        content = generate_breakpoint_output()
    elif data == DATA_CONTEXT:
        title = TITLE_WINDOW_CONTEXT
    elif data == DATA_STACK:
        title = TITLE_WINDOW_STACK
    elif data == DATA_WATCH:
        title = TITLE_WINDOW_WATCH
        content = generate_watch_output()
    else:
        return
    # Get list of group/index for all debug views
    debug_index = get_debug_index()
    # Find group/index of debug view for current data type
    try:
        key = [debug[2] for debug in debug_index].index(title)
    except ValueError:
        return
    # Set group and index position
    group, index, _ = debug_index[key]
    # Get active window and set reference to active view
    window = sublime.active_window()
    previous_active = window.active_view_in_group(window.active_group())
    # Loop through views in active window
    found = False
    view = None
    previous_key = -1
    active_debug = None
    for v in window.views():
        # Search for view assigned to data type
        if v.name() == title:
            found = True
            view = v
            continue
        # Adjust group/index of debug view depending on other debug view(s)
        if is_debug_view(v):
            try:
                current_key = [debug[2] for debug in debug_index].index(v.name())
            except ValueError:
                continue
            # Get current position of view
            view_group, view_index = window.get_view_index(v)
            # Recalculate group/index for debug view relative to siblings
            current_group, current_index, _ = debug_index[current_key]
            if group == current_group:
                if key > previous_key and key < current_key:
                    index = view_index
                if key > current_key:
                    index = view_index + 1
            # Remember debug view for setting focus
            if v == window.active_view_in_group(group):
                active_debug = v
            previous_key = current_key
    # Make sure index position is not out of boundary
    index_limit = len(window.views_in_group(group))
    if index > index_limit:
        index = index_limit
    # Create new view if it does not exist
    if not found:
        view = window.new_file()
        view.set_scratch(True)
        view.set_read_only(True)
        view.set_name(title)
        window.set_view_index(view, group, index)
    # Set focus back to active debug view
    if active_debug is not None:
        window.focus_view(active_debug)
    # Strip .sublime-package of package name for syntax file
    package_extension = ".sublime-package"
    package = S.PACKAGE_FOLDER
    if package.endswith(package_extension):
        package = package[:-len(package_extension)]
    # Configure view settings
    view.settings().set('word_wrap', False)
    view.settings().set('syntax', 'Packages/' + package + '/Xdebug.tmLanguage')
    # Set content for view and fold all indentation blocks
    view.run_command('xdebug_view_update', {'data': content, 'readonly': True})
    if data == DATA_CONTEXT or data == DATA_WATCH:
        view.run_command('fold_all')
    # Restore focus to previous active view/group
    if previous_active is not None:
        window.focus_view(previous_active)
    else:
        window.focus_group(0)
def show_context_output(view):
    """
    Show selected variable in an output panel when clicked in context window.

    Keyword arguments:
    view -- View reference which holds the context window.
    """
    # Check if there is a debug session and context data
    if S.SESSION and S.SESSION.connected and S.CONTEXT_DATA:
        try:
            # Get selected point in view
            point = view.sel()[0]
            # Check if selected point uses variable scope
            if point.size() == 0 and sublime.score_selector(view.scope_name(point.a), 'variable'):
                # Find variable in line which contains the point
                line = view.substr(view.line(point))
                # Match a leading "$name =" to extract the variable name
                pattern = re.compile('^\\s*(\\$.*?)\\s+\\=')
                match = pattern.match(line)
                if match:
                    # Get variable details from context data
                    variable_name = match.group(1)
                    variable = get_context_variable(S.CONTEXT_DATA, variable_name)
                    if variable:
                        # Convert details to text output
                        variables = H.new_dictionary()
                        variables[variable_name] = variable
                        data = generate_context_output(variables)
                        # Show context variables and children in output panel
                        window = sublime.active_window()
                        panel = window.get_output_panel('xdebug')
                        panel.run_command("xdebug_view_update", {'data' : data} )
                        panel.run_command('set_setting', {"setting": 'word_wrap', "value": True})
                        window.run_command('show_panel', {"panel": 'output.xdebug'})
        except:
            # Best-effort: ignore clicks that do not resolve to a variable
            pass
def show_file(filename, row=None):
    """
    Open or focus file in window, which is currently being debugged.

    Keyword arguments:
    filename -- Absolute path of file on local device.
    row -- Optional row (line number) to focus once the file is shown.
    """
    # Nothing to show when the file does not exist on the local file system
    if not os.path.exists(filename):
        return
    window = sublime.active_window()
    window.focus_group(0)
    open_view = window.find_open_file(filename)
    if open_view is not None:
        # File already open: focus it and jump to the requested row
        window.focus_view(open_view)
        show_at_row(open_view, row)
    else:
        # Open the file and remember which row to focus once it has loaded
        open_view = window.open_file(filename)
        window.focus_view(open_view)
        S.SHOW_ROW_ONLOAD[filename] = row
def show_panel_content(content):
    """
    Display the given content in the Xdebug output panel.

    Falls back to printing to the console when the panel cannot be shown.
    """
    try:
        active_window = sublime.active_window()
        output_panel = active_window.get_output_panel('xdebug')
        output_panel.run_command('xdebug_view_update', {'data': content})
        output_panel.run_command('set_setting', {"setting": 'word_wrap', "value": True})
        active_window.run_command('show_panel', {'panel': 'output.xdebug'})
    except:
        print(content)
def show_at_row(view, row=None):
    """
    Scroll the view to center on the given row (line number).

    Keyword arguments:
    - view -- Which view to scroll to center on row.
    - row -- Row where to center the view.
    """
    if row is None:
        return
    try:
        # Convert the row to a region and center the view on its start point
        target_point = rows_to_region(row)[0].a
        view.show_at_center(target_point)
    except:
        # Row may be out of bounds for the current view
        pass
def rows_to_region(rows):
    """
    Convert rows (line numbers) to regions (selection/cursor positions).

    Keyword arguments:
    - rows -- Row number(s) to convert to region(s).
    """
    active_view = sublime.active_window().active_view()
    # Unable to convert rows to regions when no view available
    if active_view is None:
        return
    # Accept a single row as well as a list of rows
    row_list = rows if isinstance(rows, list) else [rows]
    regions = []
    for row in row_list:
        # Skip anything that is not a (numeric) row
        if not (isinstance(row, int) or H.is_digit(row)):
            continue
        # Rows are 1-based while view coordinates are 0-based
        offset_point = active_view.text_point(int(row) - 1, 0)
        regions.append(active_view.line(offset_point))
    return regions
def region_to_rows(region=None, filter_empty=False):
    """
    Convert a region (selection/cursor position) to rows (line numbers).

    Keyword arguments:
    - region -- sublime.Selection/sublime.RegionSet or sublime.Region to convert to row number(s).
    - filter_empty -- Filter empty rows (line numbers).
    """
    active_view = sublime.active_window().active_view()
    # Unable to convert regions to rows when no view available
    if active_view is None:
        return
    # Default to the current selection/cursor position
    if region is None:
        region = active_view.sel()
    # Accept a single region as well as a region set
    if isinstance(region, sublime.Region):
        region = [region]
    # Break every region apart so each piece covers exactly one line
    line_parts = []
    for part in region:
        line_parts.extend(active_view.split_by_newlines(part))
    rows = []
    for area in line_parts:
        line_region = active_view.line(area)
        # Optionally skip lines without content
        if filter_empty and line_region.empty():
            continue
        # View coordinates are 0-based while rows are 1-based
        row, _ = active_view.rowcol(line_region.begin())
        rows.append(str(row + 1))
    return rows
def render_regions(view=None):
    """
    Set breakpoint/current line marker(s) for current active view.

    Note: View rendering conflict when using same icon for different scopes in add_regions().

    Keyword arguments:
    view -- View to render regions in; defaults to the current active view.
    """
    # Get current active view
    if view is None:
        view = sublime.active_window().active_view()
    # Unable to set regions when no view available
    if view is None:
        return
    # Do no set regions if view is empty or still loading
    if view.size() == 0 or view.is_loading():
        return
    # Remove all markers to avoid marker conflict
    view.erase_regions(S.REGION_KEY_BREAKPOINT)
    view.erase_regions(S.REGION_KEY_CURRENT)
    view.erase_regions(S.REGION_KEY_DISABLED)
    # Get filename of current view and check if is a valid filename
    filename = view.file_name()
    if not filename:
        return
    # Determine icon for regions
    icon_current = get_region_icon(S.KEY_CURRENT_LINE)
    icon_disabled = get_region_icon(S.KEY_BREAKPOINT_DISABLED)
    icon_enabled = get_region_icon(S.KEY_BREAKPOINT_ENABLED)
    # Get all (disabled) breakpoint rows (line numbers) for file
    breakpoint_rows = []
    disabled_rows = []
    if filename in S.BREAKPOINT and isinstance(S.BREAKPOINT[filename], dict):
        for lineno, bp in S.BREAKPOINT[filename].items():
            # Do not show temporary breakpoint
            if S.BREAKPOINT_RUN is not None and S.BREAKPOINT_RUN['filename'] == filename and S.BREAKPOINT_RUN['lineno'] == lineno:
                continue
            # Determine if breakpoint is enabled or disabled
            if bp['enabled']:
                breakpoint_rows.append(lineno)
            else:
                disabled_rows.append(lineno)
    # Get current line from breakpoint hit
    if S.BREAKPOINT_ROW is not None:
        # Make sure current breakpoint is in this file
        if filename == S.BREAKPOINT_ROW['filename']:
            # Remove current line number from breakpoint rows to avoid marker conflict
            if S.BREAKPOINT_ROW['lineno'] in breakpoint_rows:
                breakpoint_rows.remove(S.BREAKPOINT_ROW['lineno'])
                # Set icon for current breakpoint
                icon_breakpoint_current = get_region_icon(S.KEY_BREAKPOINT_CURRENT)
                if icon_breakpoint_current:
                    icon_current = icon_breakpoint_current
            if S.BREAKPOINT_ROW['lineno'] in disabled_rows:
                disabled_rows.remove(S.BREAKPOINT_ROW['lineno'])
            # Set current line marker
            if icon_current:
                view.add_regions(S.REGION_KEY_CURRENT, rows_to_region(S.BREAKPOINT_ROW['lineno']), S.REGION_SCOPE_CURRENT, icon_current, sublime.HIDDEN)
    # Set breakpoint marker(s)
    if breakpoint_rows and icon_enabled:
        view.add_regions(S.REGION_KEY_BREAKPOINT, rows_to_region(breakpoint_rows), S.REGION_SCOPE_BREAKPOINT, icon_enabled, sublime.HIDDEN)
    # Set disabled breakpoint marker(s)
    if disabled_rows and icon_disabled:
        view.add_regions(S.REGION_KEY_DISABLED, rows_to_region(disabled_rows), S.REGION_SCOPE_BREAKPOINT, icon_disabled, sublime.HIDDEN)
def toggle_breakpoint(view):
    """
    Toggle the enabled/disabled state of a breakpoint, or open the related
    file, when a line in the breakpoint window is selected.

    Keyword arguments:
    view -- View reference which holds the breakpoint window.
    """
    try:
        # Get selected point in view
        point = view.sel()[0]
        # Check if selected point uses breakpoint line scope
        if point.size() == 3 and sublime.score_selector(view.scope_name(point.a), 'xdebug.output.breakpoint.line'):
            # Find line number of breakpoint
            line = view.substr(view.line(point))
            pattern = re.compile('^\\s*(?:(\\|\\+\\|)|(\\|-\\|))\\s*(?P<line_number>\\d+)\\s*(?:(--)(.*)|.*)')
            match = pattern.match(line)
            # Check if it has found line number
            if match and match.group('line_number'):
                # Get all breakpoint filenames
                breakpoint_file = view.find_by_selector('xdebug.output.breakpoint.file')
                # Locate line with filename related to selected breakpoint
                file_line = None
                for entry in breakpoint_file:
                    # Stop searching if we have passed selected breakpoint
                    if entry > point:
                        break
                    file_line = view.substr(view.line(entry))
                # Do not continue without line containing filename
                if file_line is None:
                    return
                # Remove unnecessary text from line to get filename
                file_pattern = re.compile('^\\s*(=>)\\s*(?P<filename>.*)')
                file_match = file_pattern.match(file_line)
                # Check if it is a valid filename
                if file_match and file_match.group('filename'):
                    filename = file_match.group('filename')
                    line_number = match.group('line_number')
                    enabled = None
                    # Disable breakpoint ('entity' scope marks an enabled entry)
                    if sublime.score_selector(view.scope_name(point.a), 'entity') and S.BREAKPOINT[filename][line_number]['enabled']:
                        enabled = False
                    # Enable breakpoint ('keyword' scope marks a disabled entry)
                    if sublime.score_selector(view.scope_name(point.a), 'keyword') and not S.BREAKPOINT[filename][line_number]['enabled']:
                        enabled = True
                    # Toggle breakpoint only if it has valid value
                    if enabled is None:
                        return
                    sublime.active_window().run_command('xdebug_breakpoint', {"enabled": enabled, "rows": [line_number], "filename": filename})
        # Check if selected point uses breakpoint file scope
        elif point.size() > 3 and sublime.score_selector(view.scope_name(point.a), 'xdebug.output.breakpoint.file'):
            # Get filename from selected line in view
            file_line = view.substr(view.line(point))
            file_pattern = re.compile('^\\s*(=>)\\s*(?P<filename>.*)')
            file_match = file_pattern.match(file_line)
            # Show file when it's a valid filename
            if file_match and file_match.group('filename'):
                filename = file_match.group('filename')
                show_file(filename)
    except:
        # Best-effort: ignore clicks that do not resolve to a breakpoint entry
        pass
def toggle_stack(view):
    """
    Open the source file of a stack entry selected in the stack window.

    Keyword arguments:
    view -- View reference which holds the stack window.
    """
    try:
        # Get selected point in view
        point = view.sel()[0]
        # Check if selected point uses stack entry scope
        if point.size() > 3 and sublime.score_selector(view.scope_name(point.a), 'xdebug.output.stack.entry'):
            # Get fileuri and line number from selected line in view
            line = view.substr(view.line(point))
            pattern = re.compile('^(\[\d+\])\s*(?P<fileuri>.*)(\..*)(\s*:.*?(?P<lineno>\d+))\s*(\((.*?):.*\)|$)')
            match = pattern.match(line)
            # Show file when it's a valid fileuri
            if match and match.group('fileuri'):
                # Translate the remote file URI to a local path
                filename = get_real_path(match.group('fileuri'))
                lineno = 0
                if match.group('lineno'):
                    lineno = match.group('lineno')
                show_file(filename, lineno)
    except:
        # Best-effort: ignore clicks that do not resolve to a stack entry
        pass
def toggle_watch(view):
    """
    Toggle the enabled/disabled state of a watch expression selected in the
    watch window, then refresh and persist the watch data.

    Keyword arguments:
    view -- View reference which holds the watch window.
    """
    # Do not try to toggle when no watch expressions defined
    if not S.WATCH:
        return
    try:
        # Get selected point in view
        point = view.sel()[0]
        # Check if selected point uses watch entry scope
        if point.size() == 3 and sublime.score_selector(view.scope_name(point.a), 'xdebug.output.watch.entry'):
            # Determine if watch entry is enabled or disabled
            line = view.substr(view.line(point))
            pattern = re.compile('^(?:(?P<enabled>\\|\\+\\|)|(?P<disabled>\\|-\\|))\\.*')
            match = pattern.match(line)
            if match and (match.group('enabled') or match.group('disabled')):
                # Get all entries and determine index by line/point match
                watch = view.find_by_selector('xdebug.output.watch.entry')
                watch_index = 0
                for entry in watch:
                    # Stop searching if we have passed selected breakpoint
                    if entry > point:
                        break
                    # Only increment watch index when it contains expression
                    watch_line = view.substr(view.line(entry))
                    watch_match = pattern.match(watch_line)
                    if watch_match and (watch_match.group('enabled') or watch_match.group('disabled')):
                        watch_index += 1
                # Disable watch expression ('entity' scope marks an enabled entry)
                if sublime.score_selector(view.scope_name(point.a), 'entity') and S.WATCH[watch_index]['enabled']:
                    S.WATCH[watch_index]['enabled'] = False
                # Enable watch expression ('keyword' scope marks a disabled entry)
                if sublime.score_selector(view.scope_name(point.a), 'keyword') and not S.WATCH[watch_index]['enabled']:
                    S.WATCH[watch_index]['enabled'] = True
                # Update watch view and save watch data to file
                sublime.active_window().run_command('xdebug_watch', {"update": True})
    except:
        # Best-effort: ignore clicks that do not resolve to a watch entry
        pass
# ---------------------------------------------------------------------------
"""
The `compat` module provides support for backwards compatibility with older
versions of django/python, and compatibility wrappers around optional packages.
"""
# flake8: noqa
from __future__ import unicode_literals
import django
import inspect
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
# Try to import six from Django, fallback to included `six`.
try:
from django.utils import six
except ImportError:
from rest_framework import six
# location of patterns, url, include changes in 1.4 onwards
try:
from django.conf.urls import patterns, url, include
except ImportError:
from django.conf.urls.defaults import patterns, url, include
# Handle django.utils.encoding rename:
# smart_unicode -> smart_text
# force_unicode -> force_text
try:
from django.utils.encoding import smart_text
except ImportError:
from django.utils.encoding import smart_unicode as smart_text
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
# HttpResponseBase only exists from 1.5 onwards
try:
from django.http.response import HttpResponseBase
except ImportError:
from django.http import HttpResponse as HttpResponseBase
# django-filter is optional
try:
import django_filters
except ImportError:
django_filters = None
# guardian is optional
try:
import guardian
except ImportError:
guardian = None
# cStringIO only if it's available, otherwise StringIO
try:
import cStringIO.StringIO as StringIO
except ImportError:
StringIO = six.StringIO
BytesIO = six.BytesIO
# urlparse compat import (Required because it changed in python 3.x)
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
# UserDict moves in Python 3
try:
from UserDict import UserDict
from UserDict import DictMixin
except ImportError:
from collections import UserDict
from collections import MutableMapping as DictMixin
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
Image = None
def get_model_name(model_cls):
    """Return the model's name from its _meta options.

    Django < 1.6 exposed this as ``module_name`` instead of ``model_name``.
    """
    opts = model_cls._meta
    if hasattr(opts, 'model_name'):
        return opts.model_name
    # < 1.6 used module_name instead of model_name
    return opts.module_name
def get_concrete_model(model_cls):
    """Return the concrete model for a model class.

    Django 1.3 does not expose ``_meta.concrete_model``; in that case the
    class itself is returned.
    """
    return getattr(model_cls._meta, 'concrete_model', model_cls)
# Django >= 1.5 ships a View with head() support; older versions get a backport.
if django.VERSION >= (1, 5):
    from django.views.generic import View
else:
    from django.views.generic import View as _View
    from django.utils.decorators import classonlymethod
    from django.utils.functional import update_wrapper
    class View(_View):
        # 1.3 does not include head method in base View class
        # See: https://code.djangoproject.com/ticket/15668
        @classonlymethod
        def as_view(cls, **initkwargs):
            """
            Main entry point for a request-response process.
            """
            # sanitize keyword arguments
            for key in initkwargs:
                if key in cls.http_method_names:
                    raise TypeError("You tried to pass in the %s method name as a "
                                    "keyword argument to %s(). Don't do that."
                                    % (key, cls.__name__))
                if not hasattr(cls, key):
                    raise TypeError("%s() received an invalid keyword %r" % (
                        cls.__name__, key))
            def view(request, *args, **kwargs):
                self = cls(**initkwargs)
                # Mirror the GET handler as HEAD when no explicit head() exists
                if hasattr(self, 'get') and not hasattr(self, 'head'):
                    self.head = self.get
                return self.dispatch(request, *args, **kwargs)
            # take name and docstring from class
            update_wrapper(view, cls, updated=())
            # and possible attributes set by decorators
            # like csrf_exempt from dispatch
            update_wrapper(view, cls.dispatch, assigned=())
            return view
        # _allowed_methods only present from 1.5 onwards
        def _allowed_methods(self):
            return [m.upper() for m in self.http_method_names if hasattr(self, m)]
# PATCH method is not implemented by Django
if 'patch' not in View.http_method_names:
    View.http_method_names = View.http_method_names + ['patch']
# PUT, DELETE do not require CSRF until 1.4. They should. Make it better.
if django.VERSION >= (1, 4):
    from django.middleware.csrf import CsrfViewMiddleware
else:
    # Vendored backport of Django 1.4's CsrfViewMiddleware and its helpers.
    import hashlib
    import re
    import random
    import logging
    from django.conf import settings
    from django.core.urlresolvers import get_callable
    try:
        from logging import NullHandler
    except ImportError:
        # logging.NullHandler only exists from Python 2.7; no-op fallback
        class NullHandler(logging.Handler):
            def emit(self, record):
                pass
    logger = logging.getLogger('django.request')
    if not logger.handlers:
        logger.addHandler(NullHandler())
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
return p1[0:2] == p2[0:2]
    def constant_time_compare(val1, val2):
        """
        Returns True if the two strings are equal, False otherwise.

        The time taken is independent of the number of characters that match.
        """
        # The early return only leaks the lengths, not the contents
        if len(val1) != len(val2):
            return False
        result = 0
        # XOR-accumulate so the loop always processes every character pair
        for x, y in zip(val1, val2):
            result |= ord(x) ^ ord(y)
        return result == 0
    # Use the system (hardware-based) random number generator if it exists.
    if hasattr(random, 'SystemRandom'):
        randrange = random.SystemRandom().randrange
    else:
        randrange = random.randrange
    # Upper bound (exclusive) for random CSRF key generation
    _MAX_CSRF_KEY = 18446744073709551616     # 2 << 63
    # Rejection reasons passed to the failure view and logged
    REASON_NO_REFERER = "Referer checking failed - no Referer."
    REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
    REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
    REASON_BAD_TOKEN = "CSRF token missing or incorrect."
    def _get_failure_view():
        """
        Returns the view to be used for CSRF rejections.
        """
        # Resolved lazily so settings.CSRF_FAILURE_VIEW can change at runtime
        return get_callable(settings.CSRF_FAILURE_VIEW)
    def _get_new_csrf_key():
        # Derive a fresh random CSRF key, mixing in SECRET_KEY
        return hashlib.md5("%s%s" % (randrange(0, _MAX_CSRF_KEY), settings.SECRET_KEY)).hexdigest()
    def get_token(request):
        """
        Returns the CSRF token required for a POST form. The token is an
        alphanumeric value.

        A side effect of calling this function is to make the csrf_protect
        decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
        header to the outgoing response. For this reason, you may need to use this
        function lazily, as is done by the csrf context processor.
        """
        # Flag the cookie as used so the response phase sets/varies on it
        request.META["CSRF_COOKIE_USED"] = True
        return request.META.get("CSRF_COOKIE", None)
    def _sanitize_token(token):
        # Allow only alphanum, and ensure we return a 'str' for the sake of the post
        # processing middleware.
        token = re.sub('[^a-zA-Z0-9]', '', str(token.decode('ascii', 'ignore')))
        if token == "":
            # In case the cookie has been truncated to nothing at some point.
            return _get_new_csrf_key()
        else:
            return token
    class CsrfViewMiddleware(object):
        """
        Middleware that requires a present and correct csrfmiddlewaretoken
        for POST requests that have a CSRF cookie, and sets an outgoing
        CSRF cookie.

        This middleware should be used in conjunction with the csrf_token template
        tag.
        """
        # The _accept and _reject methods currently only exist for the sake of the
        # requires_csrf_token decorator.
        def _accept(self, request):
            # Avoid checking the request twice by adding a custom attribute to
            # request. This will be relevant when both decorator and middleware
            # are used.
            request.csrf_processing_done = True
            return None
        def _reject(self, request, reason):
            # Delegate to the configured CSRF failure view
            return _get_failure_view()(request, reason=reason)
        def process_view(self, request, callback, callback_args, callback_kwargs):
            if getattr(request, 'csrf_processing_done', False):
                return None
            try:
                csrf_token = _sanitize_token(request.COOKIES[settings.CSRF_COOKIE_NAME])
                # Use same token next time
                request.META['CSRF_COOKIE'] = csrf_token
            except KeyError:
                csrf_token = None
                # Generate token and store it in the request, so it's available to the view.
                request.META["CSRF_COOKIE"] = _get_new_csrf_key()
            # Wait until request.META["CSRF_COOKIE"] has been manipulated before
            # bailing out, so that get_token still works
            if getattr(callback, 'csrf_exempt', False):
                return None
            # Assume that anything not defined as 'safe' by RFC 2616 needs protection.
            if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
                if getattr(request, '_dont_enforce_csrf_checks', False):
                    # Mechanism to turn off CSRF checks for test suite. It comes after
                    # the creation of CSRF cookies, so that everything else continues to
                    # work exactly the same (e.g. cookies are sent etc), but before any
                    # branches that call reject()
                    return self._accept(request)
                if request.is_secure():
                    # Suppose user visits http://example.com/
                    # An active network attacker (man-in-the-middle, MITM) sends a
                    # POST form which targets https://example.com/detonate-bomb/ and
                    # submits it via javascript.
                    #
                    # The attacker will need to provide a CSRF cookie and token, but
                    # that is no problem for a MITM and the session independent
                    # nonce we are using. So the MITM can circumvent the CSRF
                    # protection. This is true for any HTTP connection, but anyone
                    # using HTTPS expects better! For this reason, for
                    # https://example.com/ we need additional protection that treats
                    # http://example.com/ as completely untrusted. Under HTTPS,
                    # Barth et al. found that the Referer header is missing for
                    # same-domain requests in only about 0.2% of cases or less, so
                    # we can use strict Referer checking.
                    referer = request.META.get('HTTP_REFERER')
                    if referer is None:
                        logger.warning('Forbidden (%s): %s' % (REASON_NO_REFERER, request.path),
                            extra={
                                'status_code': 403,
                                'request': request,
                            }
                        )
                        return self._reject(request, REASON_NO_REFERER)
                    # Note that request.get_host() includes the port
                    good_referer = 'https://%s/' % request.get_host()
                    if not same_origin(referer, good_referer):
                        reason = REASON_BAD_REFERER % (referer, good_referer)
                        logger.warning('Forbidden (%s): %s' % (reason, request.path),
                            extra={
                                'status_code': 403,
                                'request': request,
                            }
                        )
                        return self._reject(request, reason)
                if csrf_token is None:
                    # No CSRF cookie. For POST requests, we insist on a CSRF cookie,
                    # and in this way we can avoid all CSRF attacks, including login
                    # CSRF.
                    logger.warning('Forbidden (%s): %s' % (REASON_NO_CSRF_COOKIE, request.path),
                        extra={
                            'status_code': 403,
                            'request': request,
                        }
                    )
                    return self._reject(request, REASON_NO_CSRF_COOKIE)
                # check non-cookie token for match
                request_csrf_token = ""
                if request.method == "POST":
                    request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
                if request_csrf_token == "":
                    # Fall back to X-CSRFToken, to make things easier for AJAX,
                    # and possible for PUT/DELETE
                    request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '')
                # Compare in constant time to avoid timing side channels
                if not constant_time_compare(request_csrf_token, csrf_token):
                    logger.warning('Forbidden (%s): %s' % (REASON_BAD_TOKEN, request.path),
                        extra={
                            'status_code': 403,
                            'request': request,
                        }
                    )
                    return self._reject(request, REASON_BAD_TOKEN)
            return self._accept(request)
# timezone support is new in Django 1.4
try:
    from django.utils import timezone
except ImportError:
    # Pre-1.4 Django: no timezone utilities; callers must check for None.
    timezone = None
# dateparse is ALSO new in Django 1.4
try:
    from django.utils.dateparse import parse_date, parse_datetime, parse_time
except ImportError:
    # Fallback for Django <= 1.3: minimal re-implementations of the 1.4
    # parsers. Each returns None when the string doesn't match.
    import datetime
    import re

    date_re = re.compile(
        r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$'
    )

    datetime_re = re.compile(
        r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
        r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
        r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
        r'(?P<tzinfo>Z|[+-]\d{1,2}:\d{1,2})?$'
    )

    time_re = re.compile(
        r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
        r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
    )

    def parse_date(value):
        """Parse an ISO 'YYYY-MM-DD' string into a datetime.date, or None."""
        match = date_re.match(value)
        if match:
            # .items() (not .iteritems()) behaves identically on Python 2
            # and also works on Python 3.
            kw = dict((k, int(v)) for k, v in match.groupdict().items())
            return datetime.date(**kw)

    def parse_time(value):
        """Parse 'HH:MM[:SS[.ffffff]]' into a datetime.time, or None."""
        match = time_re.match(value)
        if match:
            kw = match.groupdict()
            if kw['microsecond']:
                # Pad to six digits so int() yields real microseconds.
                kw['microsecond'] = kw['microsecond'].ljust(6, '0')
            kw = dict((k, int(v)) for k, v in kw.items() if v is not None)
            return datetime.time(**kw)

    def parse_datetime(value):
        """Parse datetime, but w/o the timezone awareness in 1.4"""
        match = datetime_re.match(value)
        if match:
            kw = match.groupdict()
            if kw['microsecond']:
                kw['microsecond'] = kw['microsecond'].ljust(6, '0')
            # The 'tzinfo' group captures 'Z' or '+HH:MM'; previously it
            # flowed into int(v) and raised ValueError for any tz-suffixed
            # input. Drop it: this fallback is naive by design.
            kw.pop('tzinfo', None)
            kw = dict((k, int(v)) for k, v in kw.items() if v is not None)
            return datetime.datetime(**kw)
# smart_urlquote is new on Django 1.4
try:
    from django.utils.html import smart_urlquote
except ImportError:
    import re
    from django.utils.encoding import smart_str
    try:
        from urllib.parse import quote, urlsplit, urlunsplit
    except ImportError:  # Python 2
        from urllib import quote
        from urlparse import urlsplit, urlunsplit

    # Matches a '%' that is NOT the start of a valid two-hex-digit escape.
    unquoted_percents_re = re.compile(r'%(?![0-9A-Fa-f]{2})')

    def smart_urlquote(url):
        "Quotes a URL if it isn't already quoted."
        # Handle IDN before quoting.
        scheme, netloc, path, query, fragment = urlsplit(url)
        try:
            netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
        except UnicodeError:  # invalid domain part
            pass
        else:
            url = urlunsplit((scheme, netloc, path, query, fragment))

        # An URL is considered unquoted if it contains no % characters or
        # contains a % not followed by two hexadecimal digits. See #9655.
        if '%' not in url or unquoted_percents_re.search(url):
            # See http://bugs.python.org/issue2637
            url = quote(smart_str(url), safe=b'!*\'();:@&=+$,/?#[]~')

        # NOTE(review): relies on a module-level force_text imported
        # elsewhere in this file.
        return force_text(url)
# RequestFactory only provide `generic` from 1.5 onwards
from django.test.client import RequestFactory as DjangoRequestFactory
from django.test.client import FakePayload
try:
    # In 1.5 the test client uses force_bytes
    from django.utils.encoding import force_bytes as force_bytes_or_smart_bytes
except ImportError:
    # In 1.3 and 1.4 the test client just uses smart_str
    from django.utils.encoding import smart_str as force_bytes_or_smart_bytes
class RequestFactory(DjangoRequestFactory):
    """Backport of the Django >= 1.5 RequestFactory `generic` method."""

    def generic(self, method, path,
                data='', content_type='application/octet-stream', **extra):
        """Build a WSGI environ/request for an arbitrary HTTP method.

        NOTE(review): relies on module-level `urlparse`, `settings`,
        `django` and `force_text` imported elsewhere in this file.
        """
        parsed = urlparse.urlparse(path)
        data = force_bytes_or_smart_bytes(data, settings.DEFAULT_CHARSET)
        r = {
            'PATH_INFO': self._get_path(parsed),
            'QUERY_STRING': force_text(parsed[4]),
            'REQUEST_METHOD': str(method),
        }
        if data:
            r.update({
                'CONTENT_LENGTH': len(data),
                'CONTENT_TYPE': str(content_type),
                'wsgi.input': FakePayload(data),
            })
        elif django.VERSION <= (1, 4):
            # For 1.3 we need an empty WSGI payload
            r.update({
                'wsgi.input': FakePayload('')
            })
        r.update(extra)
        return self.request(**r)
# Markdown is optional
try:
    import markdown

    def apply_markdown(text):
        """
        Simple wrapper around :func:`markdown.markdown` to set the base level
        of '#' style headers to <h2>.
        """
        extensions = ['headerid(level=2)']
        safe_mode = False
        md = markdown.Markdown(extensions=extensions, safe_mode=safe_mode)
        return md.convert(text)
except ImportError:
    # Callers must check for None before rendering markdown.
    apply_markdown = None
# Yaml is optional
try:
    import yaml
except ImportError:
    yaml = None

# XML is optional
try:
    # Used in place of the stdlib ElementTree when available.
    import defusedxml.ElementTree as etree
except ImportError:
    etree = None

# OAuth is optional
try:
    # Note: The `oauth2` package actually provides oauth1.0a support. Urg.
    import oauth2 as oauth
except ImportError:
    oauth = None
# OAuth is optional
try:
    import oauth_provider
    from oauth_provider.store import store as oauth_provider_store

    # check_nonce's calling signature in django-oauth-plus changes sometime
    # between versions 2.0 and 2.2.1
    def check_nonce(request, oauth_request, oauth_nonce, oauth_timestamp):
        """Call the store's check_nonce with whichever signature it exposes.

        NOTE(review): relies on a module-level `inspect` imported elsewhere
        in this file.
        """
        check_nonce_args = inspect.getargspec(oauth_provider_store.check_nonce).args
        if 'timestamp' in check_nonce_args:
            # Newer django-oauth-plus also takes the timestamp.
            return oauth_provider_store.check_nonce(
                request, oauth_request, oauth_nonce, oauth_timestamp
            )
        return oauth_provider_store.check_nonce(
            request, oauth_request, oauth_nonce
        )
except (ImportError, ImproperlyConfigured):
    oauth_provider = None
    oauth_provider_store = None
    check_nonce = None
# OAuth 2 support is optional
try:
    import provider.oauth2 as oauth2_provider
    from provider.oauth2 import models as oauth2_provider_models
    from provider.oauth2 import forms as oauth2_provider_forms
    from provider import scope as oauth2_provider_scope
    from provider import constants as oauth2_constants
    from provider import __version__ as provider_version
    if provider_version in ('0.2.3', '0.2.4'):
        # 0.2.3 and 0.2.4 are supported versions that do not support
        # timezone aware datetimes
        import datetime
        provider_now = datetime.datetime.now
    else:
        # Any other supported version does use timezone aware datetimes
        from django.utils.timezone import now as provider_now
except ImportError:
    # django-oauth2-provider absent: OAuth2 authentication is disabled.
    oauth2_provider = None
    oauth2_provider_models = None
    oauth2_provider_forms = None
    oauth2_provider_scope = None
    oauth2_constants = None
    provider_now = None
# Handle lazy strings
from django.utils.functional import Promise

if six.PY3:
    def is_non_str_iterable(obj):
        """True for iterables that are not text (str or a lazy text Promise)."""
        if (isinstance(obj, str) or
            (isinstance(obj, Promise) and obj._delegate_text)):
            return False
        return hasattr(obj, '__iter__')
else:
    def is_non_str_iterable(obj):
        # On Python 2 a plain str has no __iter__, so the attribute check
        # alone excludes byte strings.
        return hasattr(obj, '__iter__')
try:
    from django.utils.encoding import python_2_unicode_compatible
except ImportError:
    def python_2_unicode_compatible(klass):
        """
        Class decorator backport (Django >= 1.5 ships its own).

        Copies the class's __str__ into __unicode__ and installs a UTF-8
        encoding shim as __str__, so Python 2 classes only need a single
        text-returning __str__. The class must define __str__ itself;
        otherwise a ValueError is raised.
        """
        defines_str = '__str__' in klass.__dict__
        if not defines_str:
            raise ValueError("@python_2_unicode_compatible cannot be applied "
                             "to %s because it doesn't define __str__()." %
                             klass.__name__)
        klass.__unicode__ = klass.__str__

        def _encoded_str(self):
            return self.__unicode__().encode('utf-8')

        klass.__str__ = _encoded_str
        return klass
| |
import random
import uuid
from Crypto.PublicKey import RSA
from google.appengine.ext import ndb
from google.appengine.api import users
from base64 import b64encode, b64decode
from jinja2 import Template
from utilities import firebase, keys
from augment_exceptions import NonUniqueException
from constants import *
ALPHA_NUMERIC = "abcdefghijklmnopqrstuvwxyz0123456789"

# OS-entropy-backed generator. API keys must not come from the module-level
# Mersenne Twister, which the old code re-seeded from the clock on every
# call and which is predictable by construction.
_SYSTEM_RANDOM = random.SystemRandom()


def generateNewRandomAlphaNumeric(length):
    """Return a random lowercase-alphanumeric string of `length` characters.

    Backed by random.SystemRandom (os.urandom), so the result is suitable
    for secrets such as API keys. Returns "" when length is 0.
    """
    return "".join(_SYSTEM_RANDOM.choice(ALPHA_NUMERIC) for _ in range(length))
class Name(ndb.Model):
    # Uniqueness marker for Person names: the key id IS the value, so a
    # datastore get on the key doubles as an existence check. No payload.
    pass
class Email(ndb.Model):
    # Uniqueness marker for Person emails: the key id IS the value. No payload.
    pass
class GoogleId(ndb.Model):
    # Uniqueness marker for Google account ids: the key id IS the value.
    pass
class Person(ndb.Model):
    """A registered user, keyed by a UUID string id.

    Name, email and google id are kept unique across all Persons via
    `_new_unique_key`: each value is stored as the key id of a marker
    entity (Name / Email / GoogleId), so claiming a value is an existence
    check + insert on that key.
    """
    # Keys of the uniqueness-marker entities; the actual values are the
    # marker keys' string ids.
    name_key = ndb.KeyProperty(kind="Name", required=True)
    email_key = ndb.KeyProperty(kind="Email", required=True)
    google_id_key = ndb.KeyProperty(kind="GoogleId")
    # Shared secret used to authenticate API calls; see reset_api_key().
    api_key = ndb.StringProperty()

    @classmethod
    def create(cls, name, email, google_id):
        """Create and persist a Person, claiming all three unique values.

        Raises NonUniqueException when name, email or google_id is taken.
        """
        name_key = cls._new_unique_key(Name, name)
        email_key = cls._new_unique_key(Email, email)
        google_id_key = cls._new_unique_key(GoogleId, google_id)
        person_uuid = str(uuid.uuid4())
        api_key = generateNewRandomAlphaNumeric(30)
        person = cls(name_key=name_key,
                     email_key=email_key,
                     google_id_key=google_id_key,
                     id=person_uuid,
                     api_key=api_key)
        person.put()
        return person

    def get_name(self):
        """Return the unique name (the Name marker key's id)."""
        return self.name_key.id()

    def set_name(self, new_name):
        """Atomically move this Person to a new unique name."""
        self._set_unique_attribute(Name, "name_key", new_name)

    def get_email(self):
        """Return the unique email (the Email marker key's id)."""
        return self.email_key.id()

    def set_email(self, new_email):
        """Atomically move this Person to a new unique email."""
        self._set_unique_attribute(Email, "email_key", new_email)

    def get_google_id(self):
        """Return the Google account id (read-only; no setter exposed)."""
        return self.google_id_key.id()

    @classmethod
    def with_email(cls, email):
        """Return the Person owning `email`, or None."""
        key = ndb.Key(Email, email)
        return cls.query(cls.email_key == key).get()

    @classmethod
    def with_name(cls, name):
        """Return the Person owning `name`, or None."""
        key = ndb.Key(Name, name)
        return cls.query(cls.name_key == key).get()

    @classmethod
    def with_google_id(cls, google_id):
        """Return the Person owning `google_id`, or None."""
        key = ndb.Key(GoogleId, google_id)
        return cls.query(cls.google_id_key == key).get()

    @staticmethod
    def _new_unique_key(attribute_class, new_value):
        """Claim `new_value` by creating its marker entity.

        Raises NonUniqueException when the marker already exists, i.e. the
        value is in use by some Person.
        """
        new_attribute_key = ndb.Key(attribute_class, new_value)
        existing_attribute_obj = new_attribute_key.get()
        if existing_attribute_obj is not None:
            raise NonUniqueException("The value %s for %s is already in use" % (new_value, attribute_class))
        else:
            new_attribute_obj = attribute_class(key=new_attribute_key)
            new_attribute_obj.put()
            return new_attribute_key

    @ndb.transactional(xg=True)
    def _set_unique_attribute(self, attribute_class, attribute_key_name, new_value):
        """Atomically swap a unique attribute to `new_value`.

        Runs as a cross-group transaction: claims the new marker, deletes
        the old one and updates this Person, so no two Persons can end up
        holding the same value. No-op when the value is unchanged.
        """
        current_attribute_key = getattr(self, attribute_key_name)
        current_value = current_attribute_key.id()
        if current_value == new_value:
            return
        new_attribute_key = self._new_unique_key(attribute_class, new_value)
        current_attribute_key.delete()
        setattr(self, attribute_key_name, new_attribute_key)
        self.put()

    def add_new_entity(self, **kwargs):
        """Create an Entity owned by this Person (generates its key pair)."""
        return Entity.create(self.key, **kwargs)

    @property
    def entities(self):
        """All Entity objects owned by this Person."""
        return [e for e in Entity.query(Entity.person_key == self.key).iter()]

    @property
    def configs(self):
        """All ConfigFile objects stored under this Person (ancestor query)."""
        return [c for c in ConfigFile.query(ancestor=self.key).iter()]

    def remove(self):
        """Delete this Person and everything it owns, freeing its unique values."""
        ndb.delete_multi(ConfigFile.query(ancestor=self.key).iter(keys_only=True))
        ndb.delete_multi(Entity.query(Entity.person_key == self.key).iter(keys_only=True))
        self.name_key.delete()
        self.email_key.delete()
        self.google_id_key.delete()
        self.key.delete()

    def reset_api_key(self):
        """Replace the API key with a fresh random token and persist."""
        self.api_key = generateNewRandomAlphaNumeric(30)
        self.put()

    def add_config_file(self, name, text, path):
        """Create and return a ConfigFile stored under this Person."""
        config_uuid = str(uuid.uuid4())
        config_file = ConfigFile(id=config_uuid,
                                 parent=self.key,
                                 name=name,
                                 text=text,
                                 path=path)
        config_file.put()
        return config_file

    # Attribute-style access delegating to the unique-marker helpers.
    name = property(get_name, set_name)
    email = property(get_email, set_email)
    google_id = property(get_google_id)
class ConfigFile(ndb.Model):
    # A templated configuration file; its ndb parent is the owning Person.
    name = ndb.StringProperty()
    # Jinja2 template source, rendered per-entity in as_json().
    text = ndb.TextProperty()
    # Destination path for the rendered file.
    path = ndb.StringProperty()

    def as_json(self, entity):
        """Render this file's template for `entity`.

        The template sees `uuid` (the entity's key id) plus all of the
        entity's template_values. Returns {"text": ..., "path": ...}.
        """
        entity_uuid = entity.key.id()
        template_values = entity.template_values
        template = Template(self.text)
        return {
            "text": template.render(uuid=entity_uuid, **template_values),
            "path": self.path
        }
class Entity(ndb.Model):
    """A device/agent owned by a Person, holding its own RSA-2048 key pair."""
    name = ndb.StringProperty()
    description = ndb.TextProperty()
    created = ndb.DateTimeProperty(auto_now_add=True)
    person_key = ndb.KeyProperty(kind="Person", required=True)
    # PEM-encoded RSA key pair generated via keys.newkeys(2048).
    public_key = ndb.TextProperty()
    private_key = ndb.TextProperty()
    serial = ndb.StringProperty()
    # Keys of ConfigFile entities attached to this Entity.
    config = ndb.KeyProperty(ConfigFile, repeated=True)
    # NOTE(review): {} defaults are shared mutable objects at the class
    # level — assign new dicts rather than mutating the default in place.
    template_values = ndb.JsonProperty(default={})
    schema = ndb.JsonProperty(default={})

    def as_json(self):
        """Serialize for API output; deliberately omits private_key."""
        # (An unused `entity_uuid = self.key.id()` local was removed here.)
        return {
            "name": self.name,
            "description": self.description,
            "created": str(self.created),
            "person_key": self.person_key.id(),
            "public_key": self.public_key,
            "config": [c.get().as_json(self) for c in self.config]
        }

    @property
    def config_files(self):
        """The attached ConfigFile objects, fetched from their keys."""
        configs = [c.get() for c in self.config]
        return configs

    def add_config_file(self, config_file):
        """Attach `config_file` to this Entity (idempotent)."""
        key = config_file.key
        if key not in self.config:
            self.config.append(key)
            self.put()

    def remove_config_file(self, config_file):
        """Detach `config_file` from this Entity (idempotent)."""
        key = config_file.key
        if key in self.config:
            self.config.remove(key)
            self.put()

    def regenerate_keys(self):
        """Generate and store a fresh RSA-2048 pair; return the private PEM."""
        public, private = keys.newkeys(2048)
        self.private_key = private.exportKey('PEM')
        self.public_key = public.exportKey('PEM')
        self.put()
        return self.private_key

    @classmethod
    def create(cls, person_key, **kwargs):
        """Create and persist an Entity for `person_key` with a new key pair."""
        public, private = keys.newkeys(2048)
        private_key = private.exportKey('PEM')
        public_key = public.exportKey('PEM')
        entity_uuid = str(uuid.uuid4())
        entity = cls(id=entity_uuid,
                     person_key=person_key,
                     public_key=public_key,
                     private_key=private_key,
                     **kwargs)
        entity.put()
        return entity
| |
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
targets: list of targets to search for. The target names are unqualified.
The following is output:
error: only supplied if there is an error.
targets: the set of targets passed in via targets that either directly or
indirectly depend upon the set of paths supplied in files.
build_targets: minimal set of targets that directly depend on the changed
files and need to be built. The expectation is this set of targets is passed
into a build step.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case targets and build_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets that were not found.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
# Set to True for verbose tracing via the print statements sprinkled below.
debug = False

# Output status strings; see the module docstring's 'status' key.
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'

# MatchStatus is used to indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4

generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()

generator_wants_static_library_dependencies_adjusted = False

# This generator emits no build files, so the default variables only need
# placeholder values.
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
    # '!!!' is the sentinel _AddSources() uses to drop such paths.
    generator_default_variables[dirname] = '!!!'

for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
    generator_default_variables[unused] = ''
def _ToGypPath(path):
    """Converts a path to the format used by gyp (forward slashes only)."""
    on_windows_separators = os.sep == '\\' and os.altsep == '/'
    if on_windows_separators:
        return path.replace('\\', '/')
    return path
def _ResolveParent(path, base_path_components):
    """Resolves |path|, which starts with at least one '../'. Returns an empty
    string if the path shouldn't be considered. See _AddSources() for a
    description of |base_path_components|."""
    up_count = 0
    while path.startswith('../'):
        path = path[3:]
        up_count += 1
    # Relative includes may go outside the source tree (e.g. an action with
    # inputs in /usr/include); such paths are dropped.
    remaining = len(base_path_components) - up_count
    if remaining < 0:
        return ''
    if remaining == 0:
        return path
    return '/'.join(base_path_components[:remaining]) + '/' + path
def _AddSources(sources, base_path, base_path_components, result):
    """Extracts valid sources from |sources| and adds them to |result|. Each
    source file is relative to |base_path|, but may contain '..'. To make
    resolving '..' easier |base_path_components| contains each of the
    directories in |base_path|. Additionally each source may contain variables.
    Such sources are ignored as it is assumed dependencies on them are expressed
    and tracked in some other means."""
    # NOTE: gyp paths are always posix style.
    for source in sources:
        # Skip empty names, generator placeholder paths ('!!!' — see
        # generator_default_variables) and unexpanded gyp variables ('$...').
        if not len(source) or source.startswith('!!!') or source.startswith('$'):
            continue
        # variable expansion may lead to //.
        org_source = source
        source = source[0] + source[1:].replace('//', '/')
        if source.startswith('../'):
            # May resolve to '' when the path escapes the source tree.
            source = _ResolveParent(source, base_path_components)
            if len(source):
                result.append(source)
            continue
        result.append(base_path + source)
        if debug:
            print 'AddSource', org_source, result[len(result) - 1]
def _ExtractSourcesFromAction(action, base_path, base_path_components,
                              results):
    """Adds the input files of |action| (if it declares any) to |results|."""
    try:
        inputs = action['inputs']
    except KeyError:
        return
    _AddSources(inputs, base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
    """Converts |path| to a path relative to |toplevel_dir|."""
    if path == toplevel_dir:
        return ''
    prefix = toplevel_dir + '/'
    if path.startswith(prefix):
        return path[len(prefix):]
    return path
def _ExtractSources(target, target_dict, toplevel_dir):
    """Returns the list of source paths for |target|, including the inputs
    of its actions and rules, as posix paths relative to |toplevel_dir|."""
    # |target| is either absolute or relative and in the format of the OS. Gyp
    # source paths are always posix. Convert |target| to a posix path relative to
    # |toplevel_dir_|. This is done to make it easy to build source paths.
    base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
    base_path_components = base_path.split('/')

    # Add a trailing '/' so that _AddSources() can easily build paths.
    if len(base_path):
        base_path += '/'

    if debug:
        print 'ExtractSources', target, base_path

    results = []
    if 'sources' in target_dict:
        _AddSources(target_dict['sources'], base_path, base_path_components,
                    results)
    # Include the inputs from any actions. Any changes to these affect the
    # resulting output.
    if 'actions' in target_dict:
        for action in target_dict['actions']:
            _ExtractSourcesFromAction(action, base_path, base_path_components,
                                      results)
    if 'rules' in target_dict:
        for rule in target_dict['rules']:
            _ExtractSourcesFromAction(rule, base_path, base_path_components, results)

    return results
class Target(object):
    """Holds information about a particular target:
    name: fully qualified name of the target.
    deps: set of Targets this Target directly depends upon (not recursive).
    back_deps: set of Targets that have a direct dependency on this Target.
    match_status: one of the MatchStatus values.
    visited: used during iteration to indicate whether we've visited this
    target. This is used for two iterations, once in building the set of
    Targets and again in _GetBuildTargets().
    requires_build: True if the target type is such that it needs to be
    built. See _DoesTargetTypeRequireBuild for details.
    added_to_compile_targets: used when determining if the target was added
    to the set of targets that needs to be built.
    in_roots: true if this target is a descendant of one of the root nodes.
    is_executable: true if the type of target is executable.
    is_static_library: true if the type of target is static_library.
    is_or_has_linked_ancestor: true if the target does a link (eg
    executable), or if there is a target in back_deps that does a link."""

    def __init__(self, name):
        self.name = name
        self.match_status = MATCH_STATUS_TBD
        # Dependency-graph edges: forward and reverse.
        self.deps = set()
        self.back_deps = set()
        # TODO(sky): I don't like hanging this off Target. This state is
        # specific to certain functions and should be isolated there.
        self.visited = False
        # Flags filled in by _GenerateTargets()/_AddBuildTargets().
        self.requires_build = False
        self.added_to_compile_targets = False
        self.in_roots = False
        self.is_executable = False
        self.is_static_library = False
        self.is_or_has_linked_ancestor = False
class Config(object):
    """Details what we're looking for.
    files: list of files to search for.
    targets: set of (unqualified) target names to search for; see the file
    docstring for details."""

    def __init__(self):
        self.files = []
        self.targets = set()

    def Init(self, params):
        """Initializes Config from the config_path generator flag.

        This is a separate method (rather than __init__) as it raises an
        exception if there is a parse error. A no-op when config_path is
        not supplied.
        """
        generator_flags = params.get('generator_flags', {})
        config_path = generator_flags.get('config_path', None)
        if not config_path:
            return
        try:
            # 'with' guarantees the handle is closed even when json.load
            # raises; the previous open()/close() pair leaked the file
            # object on a parse error.
            with open(config_path, 'r') as f:
                config = json.load(f)
        except IOError:
            raise Exception('Unable to open file ' + config_path)
        except ValueError as e:
            raise Exception('Unable to parse config file ' + config_path + str(e))
        if not isinstance(config, dict):
            raise Exception('config_path must be a JSON file containing a dictionary')
        self.files = config.get('files', [])
        self.targets = set(config.get('targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
    """Returns true if the build file |build_file| is either in |files| or
    one of the files included by |build_file| is in |files|. |toplevel_dir| is
    the root of the source tree."""
    if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
        if debug:
            print 'gyp file modified', build_file
        return True

    # First element of included_files is the file itself.
    if len(data[build_file]['included_files']) <= 1:
        return False

    for include_file in data[build_file]['included_files'][1:]:
        # |included_files| are relative to the directory of the |build_file|.
        rel_include_file = \
            _ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
        if _ToLocalPath(toplevel_dir, rel_include_file) in files:
            if debug:
                print 'included gyp file modified, gyp_file=', build_file, \
                    'included file=', rel_include_file
            return True
    return False
def _GetOrCreateTargetByName(targets, target_name):
    """Returns targets[target_name], creating it when absent. The result is a
    (created, Target) tuple where |created| says whether a new Target was
    made."""
    existing = targets.get(target_name)
    if existing is not None:
        return False, existing
    fresh = Target(target_name)
    targets[target_name] = fresh
    return True, fresh
def _DoesTargetTypeRequireBuild(target_dict):
    """Returns true if the target type is such that it needs to be built."""
    # Anything other than 'none' always needs a build; a 'none' target needs
    # one only when it carries rules or actions.
    if target_dict['type'] != 'none':
        return True
    return bool(target_dict.get('actions') or target_dict.get('rules'))
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
                     build_files):
    """Returns a tuple of the following:
    . A dictionary mapping from fully qualified name to Target.
    . A list of the targets that have a source file in |files|.
    . Set of root Targets reachable from the files |build_files|.
    This sets the |match_status| of the targets that contain any of the source
    files in |files| to MATCH_STATUS_MATCHES.
    |toplevel_dir| is the root of the source tree."""
    # Maps from target name to Target.
    targets = {}

    # Targets that matched.
    matching_targets = []

    # Queue of targets to visit.
    targets_to_visit = target_list[:]

    # Maps from build file to a boolean indicating whether the build file is in
    # |files|.
    build_file_in_files = {}

    # Root targets across all files.
    roots = set()

    # Set of Targets in |build_files|.
    build_file_targets = set()

    while len(targets_to_visit) > 0:
        target_name = targets_to_visit.pop()
        created_target, target = _GetOrCreateTargetByName(targets, target_name)
        if created_target:
            # Assume a root until discovered as someone's dependency below.
            roots.add(target)
        elif target.visited:
            continue

        target.visited = True
        target.requires_build = _DoesTargetTypeRequireBuild(
            target_dicts[target_name])
        target_type = target_dicts[target_name]['type']
        target.is_executable = target_type == 'executable'
        target.is_static_library = target_type == 'static_library'
        target.is_or_has_linked_ancestor = (target_type == 'executable' or
                                            target_type == 'shared_library')

        build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
        if not build_file in build_file_in_files:
            # Cache the (possibly expensive) modified check per build file.
            build_file_in_files[build_file] = \
                _WasBuildFileModified(build_file, data, files, toplevel_dir)

        if build_file in build_files:
            build_file_targets.add(target)

        # If a build file (or any of its included files) is modified we assume all
        # targets in the file are modified.
        if build_file_in_files[build_file]:
            print 'matching target from modified build file', target_name
            target.match_status = MATCH_STATUS_MATCHES
            matching_targets.append(target)
        else:
            sources = _ExtractSources(target_name, target_dicts[target_name],
                                      toplevel_dir)
            for source in sources:
                if os.path.normpath(source) in files:
                    print 'target', target_name, 'matches', source
                    target.match_status = MATCH_STATUS_MATCHES
                    matching_targets.append(target)
                    break

        # Add dependencies to visit as well as updating back pointers for deps.
        for dep in target_dicts[target_name].get('dependencies', []):
            targets_to_visit.append(dep)

            created_dep_target, dep_target = _GetOrCreateTargetByName(targets, dep)
            if not created_dep_target:
                roots.discard(dep_target)

            target.deps.add(dep_target)
            dep_target.back_deps.add(target)

    # Only roots that also live in |build_files| are returned.
    return targets, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
    """Returns a mapping (dictionary) from unqualified name to Target for all
    the Targets in |to_find|."""
    result = {}
    if not to_find:
        return result
    remaining = set(to_find)
    for qualified_name, target in all_targets.items():
        parts = gyp.common.ParseQualifiedTarget(qualified_name)
        if len(parts) > 1 and parts[1] in remaining:
            remaining.discard(parts[1])
            result[parts[1]] = target
            if not remaining:
                # Everything requested has been resolved; stop early.
                break
    return result
def _DoesTargetDependOn(target):
    """Returns true if |target| or any of its dependencies matches the supplied
    set of paths. This updates |match_status| of the Targets as it recurses.
    target: the Target to look for."""
    if target.match_status == MATCH_STATUS_DOESNT_MATCH:
        # Already resolved negatively on an earlier visit.
        return False
    if target.match_status == MATCH_STATUS_MATCHES or \
       target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY:
        return True
    for dep in target.deps:
        if _DoesTargetDependOn(dep):
            # Cache the result so shared dependency subtrees aren't re-walked.
            target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
            print '\t', target.name, 'matches by dep', dep.name
            return True
    target.match_status = MATCH_STATUS_DOESNT_MATCH
    return False
def _GetTargetsDependingOn(possible_targets):
    """Returns the list of Targets in |possible_targets| that depend (either
    directly or indirectly) on the matched targets.
    possible_targets: targets to search from."""
    found = []
    print 'Targets that matched by dependency:'
    for target in possible_targets:
        if _DoesTargetDependOn(target):
            found.append(target)
    return found
def _AddBuildTargets(target, roots, add_if_no_ancestor, result):
    """Recurses through all targets that depend on |target|, adding all targets
    that need to be built (and are in |roots|) to |result|.
    roots: set of root targets.
    add_if_no_ancestor: If true and there are no ancestors of |target| then add
    |target| to |result|. |target| must still be in |roots|.
    result: targets that need to be built are added here."""
    if target.visited:
        return

    target.visited = True
    target.in_roots = not target.back_deps and target in roots

    for back_dep_target in target.back_deps:
        _AddBuildTargets(back_dep_target, roots, False, result)
        # Propagate ancestor state back down as the recursion unwinds.
        target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
        target.in_roots |= back_dep_target.in_roots
        target.is_or_has_linked_ancestor |= (
            back_dep_target.is_or_has_linked_ancestor)

    # Always add 'executable' targets. Even though they may be built by other
    # targets that depend upon them it makes detection of what is going to be
    # built easier.
    # And always add static_libraries that have no dependencies on them from
    # linkables. This is necessary as the other dependencies on them may be
    # static libraries themselves, which are not compile time dependencies.
    if target.in_roots and \
       (target.is_executable or
        (not target.added_to_compile_targets and
         (add_if_no_ancestor or target.requires_build)) or
        (target.is_static_library and add_if_no_ancestor and
         not target.is_or_has_linked_ancestor)):
        print '\t\tadding to build targets', target.name, 'executable', \
            target.is_executable, 'added_to_compile_targets', \
            target.added_to_compile_targets, 'add_if_no_ancestor', \
            add_if_no_ancestor, 'requires_build', target.requires_build, \
            'is_static_library', target.is_static_library, \
            'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor
        result.add(target)
        target.added_to_compile_targets = True
def _GetBuildTargets(matching_targets, roots):
    """Returns the set of Targets that require a build.
    matching_targets: targets that changed and need to be built.
    roots: set of root targets in the build files to search from."""
    result = set()
    for target in matching_targets:
        print '\tfinding build targets for match', target.name
        _AddBuildTargets(target, roots, True, result)
    return result
def _WriteOutput(params, **values):
"""Writes the output, either to stdout or a file is specified."""
if 'error' in values:
print 'Error:', values['error']
if 'status' in values:
print values['status']
if 'targets' in values:
values['targets'].sort()
print 'Supplied targets that depend on changed files:'
for target in values['targets']:
print '\t', target
if 'invalid_targets' in values:
values['invalid_targets'].sort()
print 'The following targets were not found:'
for target in values['invalid_targets']:
print '\t', target
if 'build_targets' in values:
values['build_targets'].sort()
print 'Targets that require a build:'
for target in values['build_targets']:
print '\t', target
output_path = params.get('generator_flags', {}).get(
'analyzer_output_path', None)
if not output_path:
print json.dumps(values)
return
try:
f = open(output_path, 'w')
f.write(json.dumps(values) + '\n')
f.close()
except IOError as e:
print 'Error writing to output file', output_path, str(e)
def _WasGypIncludeFileModified(params, files):
    """Returns true if one of the files in |files| is in the set of included
    files (a change to an include invalidates everything)."""
    if params['options'].includes:
        for include in params['options'].includes:
            if _ToGypPath(include) in files:
                print 'Include file modified, assuming all changed', include
                return True
    return False
def _NamesNotIn(names, mapping):
    """Returns the values in |names| absent from |mapping|, preserving order."""
    known = frozenset(mapping)
    return [unknown for unknown in names if unknown not in known]
def _LookupTargets(names, mapping):
    """Returns mapping[name] for each value in |names| present in |mapping|."""
    found = []
    for name in names:
        target = mapping.get(name)
        if target is not None:
            found.append(target)
    return found
def CalculateVariables(default_variables, params):
    """Calculate additional variables for use in the build (called by gyp)."""
    flavor = gyp.common.GetFlavor(params)
    if flavor == 'mac':
        default_variables.setdefault('OS', 'mac')
    elif flavor == 'win':
        default_variables.setdefault('OS', 'win')
        # Copy additional generator configuration data from VS, which is shared
        # by the Windows Ninja generator.
        import gyp.generator.msvs as msvs_generator
        # NOTE(review): these two locals appear unused in this function —
        # presumably boilerplate copied from other generators; confirm.
        generator_additional_non_configuration_keys = getattr(msvs_generator,
            'generator_additional_non_configuration_keys', [])
        generator_additional_path_sections = getattr(msvs_generator,
            'generator_additional_path_sections', [])
        gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
    else:
        operating_system = flavor
        if flavor == 'android':
            operating_system = 'linux'  # Keep this legacy behavior for now.
        default_variables.setdefault('OS', operating_system)
def GenerateOutput(target_list, target_dicts, data, params):
  """Called by gyp as the final stage. Outputs results.

  Computes which supplied targets are affected by the modified files and
  which targets need a build, then emits the result via _WriteOutput.
  Any exception is reported through _WriteOutput rather than propagated,
  so callers always receive a result dictionary.
  """
  config = Config()
  try:
    config.Init(params)
    if not config.files:
      raise Exception('Must specify files to analyze via config_path generator '
                      'flag')
    toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
    if debug:
      print 'toplevel_dir', toplevel_dir
    # If a gyp include file changed we cannot reason about the delta:
    # report everything as changed.
    if _WasGypIncludeFileModified(params, config.files):
      result_dict = { 'status': all_changed_string,
                      'targets': list(config.targets) }
      _WriteOutput(params, **result_dict)
      return
    all_targets, matching_targets, roots = _GenerateTargets(
        data, target_list, target_dicts, toplevel_dir, frozenset(config.files),
        params['build_files'])
    print 'roots:'
    for root in roots:
      print '\t', root.name
    # Map short target names supplied in the config to qualified targets.
    unqualified_mapping = _GetUnqualifiedToTargetMapping(all_targets,
                                                         config.targets)
    invalid_targets = None
    if len(unqualified_mapping) != len(config.targets):
      invalid_targets = _NamesNotIn(config.targets, unqualified_mapping)
    if matching_targets:
      search_targets = _LookupTargets(config.targets, unqualified_mapping)
      print 'supplied targets'
      for target in config.targets:
        print '\t', target
      print 'expanded supplied targets'
      for target in search_targets:
        print '\t', target.name
      matched_search_targets = _GetTargetsDependingOn(search_targets)
      print 'raw matched search targets:'
      for target in matched_search_targets:
        print '\t', target.name
      # Reset the visited status for _GetBuildTargets.
      for target in all_targets.itervalues():
        target.visited = False
      print 'Finding build targets'
      build_targets = _GetBuildTargets(matching_targets, roots)
      # Strip qualification: callers only want the bare target names.
      matched_search_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
                                for target in matched_search_targets]
      build_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
                       for target in build_targets]
    else:
      matched_search_targets = []
      build_targets = []
    result_dict = { 'targets': matched_search_targets,
                    'status': found_dependency_string if matching_targets else
                              no_dependency_string,
                    'build_targets': build_targets}
    if invalid_targets:
      result_dict['invalid_targets'] = invalid_targets
    _WriteOutput(params, **result_dict)
  except Exception as e:
    # Deliberately broad: analysis failures are reported in-band as an
    # error result instead of crashing gyp.
    _WriteOutput(params, error=str(e))
| |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Autogenerated by Thrift Compiler (0.13.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:utf8strings
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
all_structs = []
class Iface(object):
    """Generated service interface. Concrete handlers implement these
    methods and are wrapped in Processor to serve requests; Client
    implements them as remote calls."""

    def result(self, id, result):
        """
        Parameters:
         - id
         - result
        """
        pass

    def fetchRequest(self, functionName):
        """
        Parameters:
         - functionName
        """
        pass

    def failRequest(self, id):
        """
        Parameters:
         - id
        """
        pass

    def failRequestV2(self, id, e):
        """
        Parameters:
         - id
         - e
        """
        pass
class Client(Iface):
    """Synchronous generated client stub. Each public method writes an
    args struct as a CALL message on the output protocol, flushes, then
    blocks reading the result message on the input protocol."""

    def __init__(self, iprot, oprot=None):
        # A single protocol may serve both directions; oprot overrides output.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        self._seqid = 0

    def result(self, id, result):
        """
        Parameters:
         - id
         - result
        """
        self.send_result(id, result)
        self.recv_result()

    def send_result(self, id, result):
        self._oprot.writeMessageBegin('result', TMessageType.CALL, self._seqid)
        args = result_args()
        args.id = id
        args.result = result
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_result(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = result_result()
        result.read(iprot)
        iprot.readMessageEnd()
        # Declared service exception is re-raised locally.
        if result.aze is not None:
            raise result.aze
        return

    def fetchRequest(self, functionName):
        """
        Parameters:
         - functionName
        """
        self.send_fetchRequest(functionName)
        return self.recv_fetchRequest()

    def send_fetchRequest(self, functionName):
        self._oprot.writeMessageBegin('fetchRequest', TMessageType.CALL, self._seqid)
        args = fetchRequest_args()
        args.functionName = functionName
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_fetchRequest(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = fetchRequest_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.aze is not None:
            raise result.aze
        # Non-void method with neither a result nor a declared exception.
        raise TApplicationException(TApplicationException.MISSING_RESULT, "fetchRequest failed: unknown result")

    def failRequest(self, id):
        """
        Parameters:
         - id
        """
        self.send_failRequest(id)
        self.recv_failRequest()

    def send_failRequest(self, id):
        self._oprot.writeMessageBegin('failRequest', TMessageType.CALL, self._seqid)
        args = failRequest_args()
        args.id = id
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_failRequest(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = failRequest_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.aze is not None:
            raise result.aze
        return

    def failRequestV2(self, id, e):
        """
        Parameters:
         - id
         - e
        """
        self.send_failRequestV2(id, e)
        self.recv_failRequestV2()

    def send_failRequestV2(self, id, e):
        self._oprot.writeMessageBegin('failRequestV2', TMessageType.CALL, self._seqid)
        args = failRequestV2_args()
        args.id = id
        args.e = e
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_failRequestV2(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = failRequestV2_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.aze is not None:
            raise result.aze
        return
class Processor(Iface, TProcessor):
    """Generated server-side dispatcher. Maps incoming message names to
    the process_* methods, which decode the args struct, invoke the
    wrapped handler, and encode a reply or exception."""

    def __init__(self, handler):
        self._handler = handler
        self._processMap = {}
        self._processMap["result"] = Processor.process_result
        self._processMap["fetchRequest"] = Processor.process_fetchRequest
        self._processMap["failRequest"] = Processor.process_failRequest
        self._processMap["failRequestV2"] = Processor.process_failRequestV2
        self._on_message_begin = None

    def on_message_begin(self, func):
        # Optional hook invoked with (name, type, seqid) for every message.
        self._on_message_begin = func

    def process(self, iprot, oprot):
        # Known methods return True; an unknown method writes an
        # UNKNOWN_METHOD exception back to the caller and returns None.
        (name, type, seqid) = iprot.readMessageBegin()
        if self._on_message_begin:
            self._on_message_begin(name, type, seqid)
        if name not in self._processMap:
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
        return True

    def process_result(self, seqid, iprot, oprot):
        args = result_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = result_result()
        try:
            self._handler.result(args.id, args.result)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            # Transport failures cannot be reported over the same transport.
            raise
        except AuthorizationException as aze:
            # Declared service exception: shipped back as a normal REPLY field.
            msg_type = TMessageType.REPLY
            result.aze = aze
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            # Undeclared exceptions are masked as a generic internal error.
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("result", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_fetchRequest(self, seqid, iprot, oprot):
        args = fetchRequest_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = fetchRequest_result()
        try:
            result.success = self._handler.fetchRequest(args.functionName)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except AuthorizationException as aze:
            msg_type = TMessageType.REPLY
            result.aze = aze
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("fetchRequest", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_failRequest(self, seqid, iprot, oprot):
        args = failRequest_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = failRequest_result()
        try:
            self._handler.failRequest(args.id)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except AuthorizationException as aze:
            msg_type = TMessageType.REPLY
            result.aze = aze
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("failRequest", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_failRequestV2(self, seqid, iprot, oprot):
        args = failRequestV2_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = failRequestV2_result()
        try:
            self._handler.failRequestV2(args.id, args.e)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except AuthorizationException as aze:
            msg_type = TMessageType.REPLY
            result.aze = aze
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("failRequestV2", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class result_args(object):
    """
    Generated argument struct for result().

    Attributes:
     - id
     - result
    """

    def __init__(self, id=None, result=None,):
        self.id = id
        self.result = result

    def read(self, iprot):
        # Fast path: C-accelerated decoder when available.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.id = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.result = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                # Unknown fields are skipped for forward compatibility.
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('result_args')
        if self.id is not None:
            oprot.writeFieldBegin('id', TType.STRING, 1)
            oprot.writeString(self.id.encode('utf-8') if sys.version_info[0] == 2 else self.id)
            oprot.writeFieldEnd()
        if self.result is not None:
            oprot.writeFieldBegin('result', TType.STRING, 2)
            oprot.writeString(self.result.encode('utf-8') if sys.version_info[0] == 2 else self.result)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(result_args)
result_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'id', 'UTF8', None, ),  # 1
    (2, TType.STRING, 'result', 'UTF8', None, ),  # 2
)
class result_result(object):
    """
    Generated result struct for result(); only carries the declared exception.

    Attributes:
     - aze
    """

    def __init__(self, aze=None,):
        self.aze = aze

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('result_result')
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(result_result)
result_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'aze', [AuthorizationException, None], None, ),  # 1
)
class fetchRequest_args(object):
    """
    Generated argument struct for fetchRequest().

    Attributes:
     - functionName
    """

    def __init__(self, functionName=None,):
        self.functionName = functionName

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.functionName = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('fetchRequest_args')
        if self.functionName is not None:
            oprot.writeFieldBegin('functionName', TType.STRING, 1)
            oprot.writeString(self.functionName.encode('utf-8') if sys.version_info[0] == 2 else self.functionName)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(fetchRequest_args)
fetchRequest_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'functionName', 'UTF8', None, ),  # 1
)
class fetchRequest_result(object):
    """
    Generated result struct for fetchRequest(): field 0 is the return
    value, field 1 the declared exception.

    Attributes:
     - success
     - aze
    """

    def __init__(self, success=None, aze=None,):
        self.success = success
        self.aze = aze

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = DRPCRequest()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('fetchRequest_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(fetchRequest_result)
fetchRequest_result.thrift_spec = (
    (0, TType.STRUCT, 'success', [DRPCRequest, None], None, ),  # 0
    (1, TType.STRUCT, 'aze', [AuthorizationException, None], None, ),  # 1
)
class failRequest_args(object):
    """
    Generated argument struct for failRequest().

    Attributes:
     - id
    """

    def __init__(self, id=None,):
        self.id = id

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.id = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('failRequest_args')
        if self.id is not None:
            oprot.writeFieldBegin('id', TType.STRING, 1)
            oprot.writeString(self.id.encode('utf-8') if sys.version_info[0] == 2 else self.id)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(failRequest_args)
failRequest_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'id', 'UTF8', None, ),  # 1
)
class failRequest_result(object):
    """
    Generated result struct for failRequest(); only carries the declared
    exception.

    Attributes:
     - aze
    """

    def __init__(self, aze=None,):
        self.aze = aze

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('failRequest_result')
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(failRequest_result)
failRequest_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'aze', [AuthorizationException, None], None, ),  # 1
)
class failRequestV2_args(object):
    """
    Generated argument struct for failRequestV2().

    Attributes:
     - id
     - e
    """

    def __init__(self, id=None, e=None,):
        self.id = id
        self.e = e

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.id = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.e = DRPCExecutionException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('failRequestV2_args')
        if self.id is not None:
            oprot.writeFieldBegin('id', TType.STRING, 1)
            oprot.writeString(self.id.encode('utf-8') if sys.version_info[0] == 2 else self.id)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 2)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(failRequestV2_args)
failRequestV2_args.thrift_spec = (
    None,  # 0
    (1, TType.STRING, 'id', 'UTF8', None, ),  # 1
    (2, TType.STRUCT, 'e', [DRPCExecutionException, None], None, ),  # 2
)
class failRequestV2_result(object):
    """
    Generated result struct for failRequestV2(); only carries the declared
    exception.

    Attributes:
     - aze
    """

    def __init__(self, aze=None,):
        self.aze = aze

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.aze = AuthorizationException()
                    self.aze.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('failRequestV2_result')
        if self.aze is not None:
            oprot.writeFieldBegin('aze', TType.STRUCT, 1)
            self.aze.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
all_structs.append(failRequestV2_result)
failRequestV2_result.thrift_spec = (
    None,  # 0
    (1, TType.STRUCT, 'aze', [AuthorizationException, None], None, ),  # 1
)
# Resolve recursive references in the collected specs, then drop the
# module-level registry.
fix_spec(all_structs)
del all_structs
| |
import os
import stat
import sys
import django
from django.core.management import call_command, CommandError
import click
import requests
from .utils import format_help, order_manually
class Config(object):
    """Mutable CLI configuration shared between commands via pass_config.

    `main` may override both URLs from the --url / --apps_url options.
    """

    def __init__(self):
        # Manifest consumed by `pinax projects` / `pinax start`.
        self.url = "https://raw.githubusercontent.com/pinax/pinax/master/projects.json"
        # Manifest consumed by `pinax apps` / `demos` / `themes` / `tools`.
        self.apps_url = "https://raw.githubusercontent.com/pinax/pinax/master/distributions.json"
# Accept both -h and --help on every command.
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
# Decorator that injects a (lazily created) shared Config into commands.
pass_config = click.make_pass_decorator(Config, ensure=True)
class PinaxGroup(click.Group):
    """Custom Group class with specially formatted help"""

    def list_commands(self, ctx):
        """Override for showing commands in particular order"""
        commands = super(PinaxGroup, self).list_commands(ctx)
        return [cmd for cmd in order_manually(commands)]

    def get_help_option(self, ctx):
        """Override for showing formatted main help via --help and -h options"""
        help_options = self.get_help_option_names(ctx)
        if not help_options or not self.add_help_option:
            return

        def show_help(ctx, param, value):
            # Eager click callback: runs as soon as -h/--help is parsed,
            # then exits before any command body executes.
            if value and not ctx.resilient_parsing:
                if not ctx.invoked_subcommand:
                    # pinax main help
                    click.echo(format_help(ctx.get_help()))
                else:
                    # pinax sub-command help
                    click.echo(ctx.get_help(), color=ctx.color)
                ctx.exit()
        return click.Option(
            help_options,
            is_flag=True,
            is_eager=True,
            expose_value=False,
            callback=show_help,
            help='Show this message and exit.')
# Entry point of the `pinax` CLI. No docstring on purpose: click would
# display it as command help.
@click.group(cls=PinaxGroup, invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.option("--url", type=str, required=False, help="url to project data source")
@click.option("--apps_url", type=str, required=False, help="url to application data source")
@click.version_option()
@pass_config
@click.pass_context
def main(ctx, config, url, apps_url):
    # Allow overriding the default manifest URLs for every sub-command.
    if url:
        config.url = url
    if apps_url:
        config.apps_url = apps_url
    if ctx.invoked_subcommand is None:
        # Display help to user if no commands were passed.
        click.echo(format_help(ctx.get_help()))
@main.command(short_help="Display Pinax starter projects")
@pass_config
def projects(config):
payload = requests.get(config.url).json()
if payload.get("version") == 1:
projects = payload.get("projects")
click.echo("{} {}".format("Release".rjust(7), "Project"))
click.echo("------- ---------------")
for project in projects:
if projects[project]["releases"]:
release = max([
x.split("/")[-1].replace(".tar.gz", "")
for x in projects[project]["releases"]
]).split("-")[-1]
else:
release = ""
click.echo("{} {}".format(release.rjust(7), project))
else:
click.echo("The projects manifest you are trying to consume will not work: \n{}".format(config.url))
def show_distribution_section(config, title, section_name):
    """
    Obtain distribution data and display latest distribution section,
    i.e. "demos" or "apps" or "themes".
    """
    payload = requests.get(config.apps_url).json()
    # Keys sort descending, so the maximum key is the newest distribution.
    latest_distribution = payload[max(payload.keys())]
    section = latest_distribution[section_name]
    click.echo("{} {}".format("Release".rjust(7), title))
    click.echo("------- ---------------")
    for name in sorted(section.keys()):
        click.echo("{} {}".format(section[name].rjust(7), name))
@main.command(short_help="Display Pinax apps")
@pass_config
def apps(config):
show_distribution_section(config, "Application", "apps")
@main.command(short_help="Display Pinax demo projects")
@pass_config
def demos(config):
show_distribution_section(config, "Demo", "demos")
@main.command(short_help="Display Pinax themes")
@pass_config
def themes(config):
show_distribution_section(config, "Theme", "themes")
@main.command(short_help="Display Pinax tools")
@pass_config
def tools(config):
show_distribution_section(config, "Tool", "tools")
@main.command(short_help="Create a new project based on a Pinax starter project")
@click.option("--dev", is_flag=True, help="use latest development branch instead of release")
@click.option("--location", type=str, default="", help="specify where project is created")
@click.argument("project", type=str, required=True)
@click.argument("name", type=str, required=True)
@pass_config
def start(config, dev, location, project, name):
payload = requests.get(config.url).json()
if payload.get("version") == 1:
projects = payload.get("projects")
try:
if dev or len(projects[project]["releases"]) > 0:
validate_django_compatible_with_python()
start_project(projects[project], name, dev, location)
click.echo("Finished")
output_instructions(projects[project])
cleanup(name, location)
else:
click.echo("There are no releases for {}. You need to specify the --dev flag to use.".format(project))
except KeyError:
click.echo("Project {} is not found.".format(project))
else:
click.echo("The projects manifest you are trying to consume will not work: \n{}".format(config.url))
def validate_django_compatible_with_python():
    """
    Verify Django 1.11 is present if Python 2.7 is active

    Installation of pinax-cli requires the correct version of Django for
    the active Python version. If the developer subsequently changes
    the Python version the installed Django may no longer be compatible.

    Raises:
        click.BadArgumentUsage: when running Python 2.7 with Django >= 2.
    """
    python_version = sys.version[:5]  # e.g. "2.7.1" -- only used in the message
    django_version = django.get_version()
    # BUG FIX 1: sys.version_info is a 5-tuple and could never equal the
    # 2-tuple (2, 7); compare only (major, minor).
    if sys.version_info[:2] == (2, 7) and django_version >= "2":
        # BUG FIX 2: the exception was previously instantiated but never
        # raised, so the incompatibility check silently did nothing.
        raise click.BadArgumentUsage("Please install Django v1.11 for Python {}, or switch to Python >= v3.4".format(python_version))
def start_project(project, name, dev, location):
    """Scaffold a new project from the starter project's template archive.

    Args:
        project: manifest entry dict with "url", "releases", "process-files".
        name: new project name passed to Django's startproject.
        dev: when true, use the repository URL instead of a release tarball.
        location: optional target directory for the generated project.

    Exits the process with status 1 if startproject fails.
    """
    click.echo("Starting project from Pinax")
    # NOTE(review): max() picks the "latest" release by lexicographic string
    # comparison of the tarball URLs -- confirm the version naming sorts
    # correctly.
    template = project["url"] if dev else max(project["releases"])
    kwargs = dict(
        template=template,
        files=project["process-files"]
    )
    args = [name]
    if location:
        args.append(location)
    try:
        call_command("startproject", *args, **kwargs)
    except CommandError as e:
        click.echo(click.style("Error: ", fg="red") + str(e))
        sys.exit(1)
def output_instructions(project):
    """Echo any post-creation instructions bundled with the project entry."""
    try:
        click.echo(project["instructions"])
    except KeyError:
        # No instructions for this starter project -- nothing to show.
        pass
def cleanup(name, location):
    """Strip pinax repo files from the generated tree and make manage.py
    executable.

    Args:
        name: project name (used as the directory when no location given).
        location: directory the project was created in, or "".
    """
    # if location was not specified, start_project used `name` for new subdir
    target = location or name
    for leftover in ("LICENSE", "CONTRIBUTING.md", "update.sh"):
        os.remove(os.path.join(target, leftover))
    managepy = os.path.join(target, "manage.py")
    mode = os.stat(managepy).st_mode
    os.chmod(managepy, mode | stat.S_IEXEC)
| |
# -*- coding: utf8 -*-
"""
.. module:: lesscpy.lessc.parser
:synopsis: Lesscss parser.
http://www.dabeaz.com/ply/ply.html
http://www.w3.org/TR/CSS21/grammar.html#scanner
http://lesscss.org/#docs
Copyright (c)
See LICENSE for details.
.. moduleauthor:: Johann T. Mariusson <jtm@robot.is>
"""
from __future__ import print_function
import os
import tempfile
import sys
import ply.yacc
import six
from . import lexer
from . import utility
from .scope import Scope
from .color import Color
from lesscpy.exceptions import CompilationError
from lesscpy.plib import Block, Call, Deferred, Expression, Identifier, Mixin, NegatedExpression, Property, Statement, Variable, Import, KeyframeSelector
class ErrorRegister(object):
    """
    Raises CompilationError when an error occurs.

    Errors are accumulated via register() so that all of them can be
    reported together when the register is closed.
    """

    def __init__(self):
        self.errors = []

    def register(self, error):
        # Defer raising until close() so every error is collected first.
        self.errors.append(error)

    def __close__(self):
        if not self.errors:
            return
        raise CompilationError("\n".join(self.errors))

    close = __close__
class PrintErrorRegister(object):
    """
    Colored error output to stderr.

    Each error is printed immediately; close() is a no-op.
    """

    def __init__(self):
        self.has_errored = False

    def register(self, error):
        self.has_errored = True
        # Errors ("E...") render red, everything else (warnings) yellow.
        if error[0] == 'E':
            color = '\x1b[31m'
        else:
            color = '\x1b[33m'
        print("%s%s\x1b[0m" % (color, error), end='\x1b[0m', file=sys.stderr)

    def __close__(self):
        pass

    close = __close__
class LessParser(object):
    """PLY (yacc) based parser for Less source.

    NOTE: the docstrings of the ``p_*`` methods below are consumed by
    ``ply.yacc`` as the grammar specification. They are productions, not
    documentation, and must not be reworded.
    """
    # Operator precedence for the expression rules (lowest priority first).
    precedence = (
        ('left', '+', '-'),
        ('left', '*', '/'),
    )
    def __init__(self,
                 lex_optimize=True,
                 yacc_optimize=True,
                 tabfile='yacctab',
                 yacc_debug=False,
                 scope=None,
                 outputdir=tempfile.gettempdir(),
                 importlvl=0,
                 verbose=False,
                 fail_with_exc=False
                 ):
        """ Parser object
        Kwargs:
            lex_optimize (bool): Optimize lexer
            yacc_optimize (bool): Optimize parser
            tabfile (str): Yacc tab filename
            yacc_debug (bool): yacc debug mode
            scope (Scope): Inherited scope
            outputdir (str): Output (debugging)
            importlvl (int): Import depth
            verbose (bool): Verbose mode
            fail_with_exc (bool): Throw exception on syntax error instead
                                  of printing to stderr
        """
        self.verbose = verbose
        self.importlvl = importlvl
        self.lex = lexer.LessLexer()
        if not tabfile:
            tabfile = 'yacctab'
        # Comment/vendor-hack tokens are dropped before they reach the grammar.
        self.ignored = ('css_comment', 'less_comment',
                        'css_vendor_hack')
        self.tokens = [t for t in self.lex.tokens
                       if t not in self.ignored]
        self.parser = ply.yacc.yacc(
            module=self,
            start='tunit',
            debug=yacc_debug,
            optimize=yacc_optimize,
            tabmodule=tabfile,
            outputdir=outputdir
        )
        self.scope = scope if scope else Scope()
        self.stash = {}
        self.result = None
        self.target = None
        self.fail_with_exc = fail_with_exc
        # Errors either accumulate (and raise on close) or print immediately.
        if fail_with_exc:
            self.register = ErrorRegister()
        else:
            self.register = PrintErrorRegister()
    def parse(self, filename=None, file=None, debuglevel=0):
        """ Parse file.
        kwargs:
            filename (str): File to parse
            debuglevel (int): Parser debuglevel
        """
        self.scope.push()
        if not file:
            # We use a path.
            file = filename
        else:
            # We use a stream and try to extract the name from the stream.
            if hasattr(file, 'name'):
                if filename is not None:
                    raise AssertionError(
                        'names of file and filename are in conflict')
                filename = file.name
            else:
                filename = '(stream)'
        self.target = filename
        if self.verbose and not self.fail_with_exc:
            print('Compiling target: %s' % filename, file=sys.stderr)
        self.result = self.parser.parse(
            file, lexer=self.lex, debug=debuglevel)
        self.post_parse()
        # With ErrorRegister (fail_with_exc=True) this raises CompilationError
        # if any error was registered during the parse.
        self.register.close()
    def post_parse(self):
        """ Post parse cycle. nodejs version allows calls to mixins
        not yet defined or known to the parser. We defer all calls
        to mixins until after first cycle when all names are known.
        """
        if self.result:
            out = []
            for pu in self.result:
                try:
                    out.append(pu.parse(self.scope))
                except SyntaxError as e:
                    self.handle_error(e, 0)
            self.result = list(utility.flatten(out))
    def scopemap(self):
        """ Output scopemap.
        """
        utility.debug_print(self.result)
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    # Grammar rules follow. Every docstring below is a yacc production.
    def p_tunit(self, p):
        """ tunit : unit_list
        """
        p[0] = [u for u in p[1] if u]
    def p_unit_list(self, p):
        """ unit_list : unit_list unit
                      | unit
        """
        if isinstance(p[1], list):
            if len(p) >= 3:
                if isinstance(p[2], list):
                    p[1].extend(p[2])
                else:
                    p[1].append(p[2])
        else:
            p[1] = [p[1]]
        p[0] = p[1]
    def p_unit(self, p):
        """ unit : statement
                 | variable_decl
                 | block_decl
                 | mixin_decl
                 | call_mixin
                 | import_statement
        """
        p[0] = p[1]
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_statement_aux(self, p):
        """ statement : css_charset t_ws css_string t_semicolon
                      | css_namespace t_ws css_string t_semicolon
        """
        p[0] = Statement(list(p)[1:], p.lineno(1))
        p[0].parse(None)
    def p_statement_namespace(self, p):
        """ statement : css_namespace t_ws word css_string t_semicolon
        """
        p[0] = Statement(list(p)[1:], p.lineno(1))
        p[0].parse(None)
    def p_statement_import(self, p):
        """ import_statement : css_import t_ws string t_semicolon
                             | css_import t_ws css_string t_semicolon
                             | css_import t_ws css_string media_query_list t_semicolon
                             | css_import t_ws fcall t_semicolon
                             | css_import t_ws fcall media_query_list t_semicolon
        """
        #import pdb; pdb.set_trace()
        # Guard against circular @import chains.
        # NOTE(review): 'Recrusive' typo below is user-visible; left as-is in
        # this documentation-only pass.
        if self.importlvl > 8:
            raise ImportError(
                'Recrusive import level too deep > 8 (circular import ?)')
        if isinstance(p[3], six.string_types):
            ipath = utility.destring(p[3])
        elif isinstance(p[3], list):
            p[3] = Import(p[3], p.lineno(4)).parse(self.scope)
            ipath = utility.destring(p[3])
        elif isinstance(p[3], Call):
            # NOTE(saschpe): Always in the form of 'url("...");', so parse it
            # and retrieve the inner css_string. This whole func is messy.
            p[3] = p[3].parse(self.scope)  # Store it as string, Statement.fmt expects it.
            ipath = utility.destring(p[3][4:-1])
        fn, fe = os.path.splitext(ipath)
        if not fe or fe.lower() == '.less':
            # Less imports are parsed recursively and inlined into the result.
            try:
                cpath = os.path.dirname(os.path.abspath(self.target))
                if not fe:
                    ipath += '.less'
                filename = "%s%s%s" % (cpath, os.sep, ipath)
                if os.path.exists(filename):
                    recurse = LessParser(importlvl=self.importlvl + 1,
                                         verbose=self.verbose, scope=self.scope)
                    recurse.parse(filename=filename, debuglevel=0)
                    p[0] = recurse.result
                else:
                    err = "Cannot import '%s', file not found" % filename
                    self.handle_error(err, p.lineno(1), 'W')
                    p[0] = None
            except ImportError as e:
                self.handle_error(e, p)
        else:
            # Non-less imports (plain css etc.) are passed through verbatim.
            p[0] = Statement(list(p)[1:], p.lineno(1))
            p[0].parse(None)
        sys.stdout.flush()
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_block(self, p):
        """ block_decl : block_open declaration_list brace_close
        """
        p[0] = Block(list(p)[1:-1], p.lineno(3))
        self.scope.pop()
        self.scope.add_block(p[0])
    def p_block_replace(self, p):
        """ block_decl : identifier t_semicolon
        """
        m = p[1].parse(None)
        block = self.scope.blocks(m.raw())
        if block:
            p[0] = block.copy_inner(self.scope)
        else:
            # fallback to mixin. Allow calls to mixins without parens
            p[0] = Deferred(p[1], None, p.lineno(2))
    def p_block_open(self, p):
        """ block_open : identifier brace_open
        """
        try:
            p[1].parse(self.scope)
        except SyntaxError:
            pass
        p[0] = p[1]
        self.scope.current = p[1]
    def p_block_open_media_query(self, p):
        """ block_open : media_query_decl brace_open
        """
        p[0] = Identifier(p[1]).parse(self.scope)
    def p_font_face_open(self, p):
        """ block_open : css_font_face t_ws brace_open
        """
        p[0] = Identifier([p[1], p[2]]).parse(self.scope)
    def p_keyframe_open(self, p):
        """block_open : css_keyframe_selector brace_open
                      | number brace_open
        """
        p[0] = KeyframeSelector([p[1]]).parse(self.scope)
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_mixin(self, p):
        """ mixin_decl : open_mixin declaration_list brace_close
        """
        self.scope.add_mixin(Mixin(list(p)[1:], p.lineno(3)).parse(self.scope))
        self.scope.pop()
        p[0] = None
    def p_open_mixin(self, p):
        """ open_mixin : identifier t_popen mixin_args_list t_pclose brace_open
                       | identifier t_popen mixin_args_list t_pclose mixin_guard brace_open
        """
        p[1].parse(self.scope)
        self.scope.current = p[1]
        p[0] = [p[1], p[3]]
        # Third slot carries the optional guard (None when absent).
        if len(p) > 6:
            p[0].append(p[5])
        else:
            p[0].append(None)
    def p_mixin_guard(self, p):
        """ mixin_guard : less_when mixin_guard_cond_list
        """
        p[0] = p[2]
    def p_mixin_guard_cond_list_aux(self, p):
        """ mixin_guard_cond_list : mixin_guard_cond_list t_comma mixin_guard_cond
                                  | mixin_guard_cond_list less_and mixin_guard_cond
        """
        p[1].append(p[2])
        p[1].append(p[3])
        p[0] = p[1]
    def p_mixin_guard_cond_list(self, p):
        """ mixin_guard_cond_list : mixin_guard_cond
        """
        p[0] = [p[1]]
    def p_mixin_guard_cond_rev(self, p):
        """ mixin_guard_cond : less_not t_popen argument mixin_guard_cmp argument t_pclose
                             | less_not t_popen argument t_pclose
        """
        p[0] = utility.reverse_guard(list(p)[3:-1])
    def p_mixin_guard_cond(self, p):
        """ mixin_guard_cond : t_popen argument mixin_guard_cmp argument t_pclose
                             | t_popen argument t_pclose
        """
        p[0] = list(p)[2:-1]
    def p_mixin_guard_cmp(self, p):
        """ mixin_guard_cmp : '>'
                            | '<'
                            | '='
                            | '>' '='
                            | '=' '<'
        """
        p[0] = ''.join(list(p)[1:])
    def p_call_mixin(self, p):
        """ call_mixin : identifier t_popen mixin_args_list t_pclose t_semicolon
        """
        p[1].parse(None)
        # Deferred: resolved in post_parse once all mixin names are known.
        p[0] = Deferred(p[1], p[3], p.lineno(4))
    def p_mixin_args_arguments(self, p):
        """ mixin_args_list : less_arguments
        """
        p[0] = [p[1]]
    def p_mixin_args_list_aux(self, p):
        """ mixin_args_list : mixin_args_list t_comma mixin_args
                            | mixin_args_list t_semicolon mixin_args
        """
        p[1].extend([p[3]])
        p[0] = p[1]
    def p_mixin_args_list(self, p):
        """ mixin_args_list : mixin_args
        """
        p[0] = [p[1]]
    def p_mixin_args_aux(self, p):
        """ mixin_args : mixin_args argument
        """
        p[1].extend(list(p)[2:])
        p[0] = p[1]
    def p_mixin_args(self, p):
        """ mixin_args : argument
                       | mixin_kwarg
        """
        p[0] = [p[1]]
    def p_mixin_args_empty(self, p):
        """ mixin_args : empty
        """
        p[0] = None
    def p_mixin_kwarg(self, p):
        """ mixin_kwarg : variable t_colon mixin_kwarg_arg_list
        """
        p[0] = Variable(list(p)[1:], p.lineno(2))
    def p_margument_list_aux(self, p):
        """ mixin_kwarg_arg_list : mixin_kwarg_arg_list argument
        """
        p[1].extend(list(p)[2:])
        p[0] = p[1]
    def p_margument_list(self, p):
        """ mixin_kwarg_arg_list : argument
        """
        p[0] = [p[1]]
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_declaration_list(self, p):
        """ declaration_list : declaration_list declaration
                             | declaration
                             | empty
        """
        if len(p) > 2:
            p[1].extend(p[2])
        p[0] = p[1]
    def p_declaration(self, p):
        """ declaration : variable_decl
                        | property_decl
                        | block_decl
                        | mixin_decl
                        | call_mixin
                        | import_statement
        """
        p[0] = p[1] if isinstance(p[1], list) else [p[1]]
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_variable_decl(self, p):
        """ variable_decl : variable t_colon style_list t_semicolon
        """
        p[0] = Variable(list(p)[1:-1], p.lineno(4))
        p[0].parse(self.scope)
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_property_decl(self, p):
        """ property_decl : prop_open style_list t_semicolon
                          | prop_open style_list css_important t_semicolon
                          | prop_open empty t_semicolon
        """
        l = len(p)
        p[0] = Property(list(p)[1:-1], p.lineno(l - 1))
    def p_property_decl_arguments(self, p):
        """ property_decl : prop_open less_arguments t_semicolon
        """
        p[0] = Property([p[1], [p[2]]], p.lineno(3))
    def p_prop_open_ie_hack(self, p):
        """ prop_open : '*' prop_open
        """
        p[0] = (p[1][0], p[2][0])
    def p_prop_open(self, p):
        """ prop_open : property t_colon
                      | vendor_property t_colon
                      | word t_colon
        """
        p[0] = (p[1][0], '')
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_style_list_aux(self, p):
        """ style_list : style_list style
                       | style_list t_comma style
                       | style_list t_ws style
        """
        p[1].extend(list(p)[2:])
        p[0] = p[1]
    def p_style_list(self, p):
        """ style_list : style
        """
        p[0] = [p[1]]
    def p_style(self, p):
        """ style : expression
                  | string
                  | word
                  | property
                  | vendor_property
                  | estring
        """
        p[0] = p[1]
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_identifier(self, p):
        """ identifier : identifier_list
                       | page
                       | page filter
        """
        p[0] = Identifier(p[1], 0)
    def p_identifier_istr(self, p):
        """ identifier : t_popen estring t_pclose
        """
        p[0] = Identifier(Call([p[2], p[3]]), 0)
    def p_identifier_list_aux(self, p):
        """ identifier_list : identifier_list t_comma identifier_group
        """
        p[1].extend([p[2]])
        p[1].extend(p[3])
        p[0] = p[1]
    def p_identifier_list(self, p):
        """ identifier_list : identifier_group
        """
        p[0] = p[1]
    def p_identifier_list_keyframe(self, p):
        """ identifier_list : css_keyframes t_ws css_ident
                            | css_keyframes t_ws css_ident t_ws
        """
        p[0] = list(p)[1:]
    def p_identifier_list_viewport(self, p):
        """ identifier_list : css_viewport
                            | css_viewport t_ws
        """
        p[0] = list(p)[1:]
    def p_identifier_group_op(self, p):
        """ identifier_group : identifier_group child_selector ident_parts
                             | identifier_group '+' ident_parts
                             | identifier_group general_sibling_selector ident_parts
                             | identifier_group '*'
        """
        p[1].extend([p[2]])
        if len(p) > 3:
            p[1].extend(p[3])
        p[0] = p[1]
    def p_identifier_group(self, p):
        """ identifier_group : ident_parts
        """
        p[0] = p[1]
    def p_ident_parts_aux(self, p):
        """ ident_parts : ident_parts ident_part
                        | ident_parts filter_group
        """
        if isinstance(p[2], list):
            p[1].extend(p[2])
        else:
            p[1].append(p[2])
        p[0] = p[1]
    def p_ident_parts(self, p):
        """ ident_parts : ident_part
                        | selector
                        | filter_group
        """
        if not isinstance(p[1], list):
            p[1] = [p[1]]
        p[0] = p[1]
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_media_query_decl(self, p):
        """ media_query_decl : css_media t_ws
                             | css_media t_ws media_query_list
        """
        p[0] = list(p)[1:]
    def p_media_query_list_aux(self, p):
        """ media_query_list : media_query_list t_comma media_query
        """
        p[0] = list(p)[1:]
    def p_media_query_list(self, p):
        """ media_query_list : media_query
        """
        p[0] = [p[1]]
    def p_media_query_a(self, p):
        """ media_query : media_type
                        | media_type media_query_expression_list
                        | not media_type
                        | not media_type media_query_expression_list
                        | only media_type
                        | only media_type media_query_expression_list
        """
        p[0] = list(p)[1:]
    def p_media_query_b(self, p):
        """ media_query : media_query_expression media_query_expression_list
                        | media_query_expression
        """
        p[0] = list(p)[1:]
    def p_media_query_expression_list_aux(self, p):
        """ media_query_expression_list : media_query_expression_list and media_query_expression
                                        | and media_query_expression
        """
        p[0] = list(p)[1:]
    def p_media_query_expression(self, p):
        """ media_query_expression : t_popen css_media_feature t_pclose
                                   | t_popen css_media_feature t_colon media_query_value t_pclose
        """
        p[0] = list(p)[1:]
    def p_media_query_value(self, p):
        """ media_query_value : number
                              | variable
                              | word
                              | color
                              | expression
        """
        # Media query values may reference Less variables; resolve them here.
        if utility.is_variable(p[1]):
            var = self.scope.variables(''.join(p[1]))
            if var:
                value = var.value[0]
                if hasattr(value, 'parse'):
                    p[1] = value.parse(self.scope)
                else:
                    p[1] = value
        if isinstance(p[1], Expression):
            p[0] = p[1].parse(self.scope)
        else:
            p[0] = p[1]
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_selector(self, p):
        """ selector : '*'
                     | '+'
                     | child_selector
                     | general_sibling_selector
        """
        p[0] = p[1]
    def p_ident_part(self, p):
        """ ident_part : iclass
                       | id
                       | dom
                       | combinator
                       | color
        """
        p[0] = p[1]
    def p_ident_part_aux(self, p):
        """ ident_part : combinator vendor_property
        """
        p[0] = [p[1], p[2]]
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_filter_group_aux(self, p):
        """ filter_group : filter_group filter
        """
        p[1].extend(p[2])
        p[0] = p[1]
    def p_filter_group(self, p):
        """ filter_group : filter
        """
        p[0] = p[1]
    def p_filter(self, p):
        """ filter : css_filter
                   | css_filter t_ws
                   | t_colon word
                   | t_colon vendor_property
                   | t_colon vendor_property t_ws
                   | t_colon css_property
                   | t_colon css_property t_ws
                   | t_colon css_filter
                   | t_colon css_filter t_ws
                   | t_colon t_colon word
                   | t_colon t_colon vendor_property
        """
        p[0] = list(p)[1:]
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_ms_filter(self, p):
        """ ms_filter : css_ms_filter
                      | css_ms_filter t_ws
        """
        p[0] = tuple(list(p)[1:])
    def p_fcall(self, p):
        """ fcall : word t_popen argument_list t_pclose
                  | property t_popen argument_list t_pclose
                  | vendor_property t_popen argument_list t_pclose
                  | less_open_format argument_list t_pclose
                  | ms_filter t_popen argument_list t_pclose
        """
        p[0] = Call(list(p)[1:], 0)
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_argument_list_empty(self, p):
        """ argument_list : empty
        """
        p[0] = ''
    def p_argument_list_aux(self, p):
        """ argument_list : argument_list argument
                          | argument_list t_comma argument
        """
        p[1].extend(list(p)[2:])
        p[0] = p[1]
    def p_argument_list(self, p):
        """ argument_list : argument
        """
        p[0] = [p[1]]
    def p_argument(self, p):
        """ argument : expression
                     | string
                     | estring
                     | word
                     | id
                     | css_uri
                     | '='
                     | fcall
        """
        p[0] = p[1]
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_expression_aux(self, p):
        """ expression : expression '+' expression
                       | expression '-' expression
                       | expression '/' expression
                       | expression '*' expression
                       | word '/' expression
        """
        p[0] = Expression(list(p)[1:], 0)
    def p_expression_p_neg(self, p):
        """ expression : '-' t_popen expression t_pclose
        """
        p[0] = NegatedExpression([p[3]], 0)
    def p_expression_p(self, p):
        """ expression : t_popen expression t_pclose
        """
        p[0] = p[2]
    def p_expression(self, p):
        """ expression : factor
        """
        p[0] = p[1]
    def p_factor(self, p):
        """ factor : color
                   | number
                   | variable
                   | css_dom
                   | fcall
        """
        p[0] = p[1]
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_escaped_string(self, p):
        """ estring : t_eopen style_list t_eclose
                    | t_eopen identifier_list t_eclose
        """
        p[0] = p[2]
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_string_part(self, p):
        """ string_part : variable
                        | css_string
        """
        p[0] = p[1]
    def p_string_part_list_aux(self, p):
        """ string_part_list : string_part_list string_part
        """
        p[1].extend([p[2]])
        p[0] = p[1]
    def p_string_part_list(self, p):
        """ string_part_list : string_part
        """
        p[0] = [p[1]]
    def p_string_aux(self, p):
        """ string : t_isopen string_part_list t_isclose
        """
        p[0] = ['"', p[2], '"']
    def p_string(self, p):
        """ string : css_string
        """
        p[0] = p[1]
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_variable_neg(self, p):
        """ variable : '-' variable
        """
        p[0] = ['-', p[2]]
    def p_variable_strange(self, p):
        """ variable : t_popen variable t_pclose
        """
        p[0] = p[2]
    def p_variable(self, p):
        """ variable : less_variable
                     | less_variable t_ws
        """
        # p[0] = p[1]
        p[0] = tuple(list(p)[1:])
    def p_color(self, p):
        """ color : css_color
                  | css_color t_ws
        """
        try:
            p[0] = Color().fmt(p[1])
            if len(p) > 2:
                p[0] = [p[0], p[2]]
        except ValueError:
            # Unparseable color: warn and pass the raw token through.
            self.handle_error(
                'Illegal color value `%s`' % p[1], p.lineno(1), 'W')
            p[0] = p[1]
    def p_number(self, p):
        """ number : css_number
                   | css_number t_ws
        """
        p[0] = tuple(list(p)[1:])
    def p_dom(self, p):
        """ dom : css_dom
                | css_dom t_ws
        """
        p[0] = tuple(list(p)[1:])
    def p_word(self, p):
        """ word : css_ident
                 | css_ident t_ws
        """
        p[0] = tuple(list(p)[1:])
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_class(self, p):
        """ class : css_class
                  | css_class t_ws
        """
        p[0] = tuple(list(p)[1:])
    def p_interpolated_class_part(self, p):
        """ iclass_part : less_variable
                        | less_variable t_ws
                        | class
        """
        p[0] = list(p)[1:]
    def p_interpolated_class_part_list_aux(self, p):
        """ iclass_part_list : iclass_part_list iclass_part
        """
        p[1].extend([p[2]])
        p[0] = p[1]
    def p_interpolated_class_part_list(self, p):
        """ iclass_part_list : iclass_part
        """
        p[0] = [p[1]]
    def p_interpolated_class(self, p):
        """ iclass : iclass_part_list
        """
        p[0] = p[1]
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_id(self, p):
        """ id : css_id
               | css_id t_ws
        """
        p[0] = tuple(list(p)[1:])
    def p_property(self, p):
        """ property : css_property
                     | css_property t_ws
        """
        p[0] = tuple(list(p)[1:])
    def p_page(self, p):
        """ page : css_page
                 | css_page t_ws
        """
        p[0] = tuple(list(p)[1:])
    def p_vendor_property(self, p):
        """ vendor_property : css_vendor_property
                            | css_vendor_property t_ws
        """
        p[0] = tuple(list(p)[1:])
    def p_media_type(self, p):
        """ media_type : css_media_type
                       | css_media_type t_ws
        """
        p[0] = tuple(list(p)[1:])
    def p_combinator(self, p):
        """ combinator : '&' t_ws
                       | '&'
        """
        p[0] = tuple(list(p)[1:])
    def p_child_selector(self, p):
        """ child_selector : '>' t_ws
                           | '>'
        """
        p[0] = tuple(list(p)[1:])
    def p_general_sibling_selector(self, p):
        """ general_sibling_selector : t_tilde t_ws
                                     | t_tilde
        """
        p[0] = tuple(list(p)[1:])
    def p_scope_open(self, p):
        """ brace_open : t_bopen
        """
        # Every '{' opens a new scope level.
        self.scope.push()
        p[0] = p[1]
    def p_scope_close(self, p):
        """ brace_close : t_bclose
        """
        p[0] = p[1]
    def p_and(self, p):
        """ and : t_and t_ws
                | t_and
        """
        p[0] = tuple(list(p)[1:])
    def p_not(self, p):
        """ not : t_not t_ws
                | t_not
        """
        p[0] = tuple(list(p)[1:])
    def p_only(self, p):
        """ only : t_only t_ws
                 | t_only
        """
        p[0] = tuple(list(p)[1:])
    def p_empty(self, p):
        'empty :'
        pass
    #
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    #
    def p_error(self, t):
        """ Internal error handler
        args:
            t (Lex token): Error token
        """
        if t:
            error_msg = "E: %s line: %d, Syntax Error, token: `%s`, `%s`" % \
                (self.target, t.lineno, t.type, t.value)
            self.register.register(error_msg)
        # Panic-mode recovery: discard tokens until the current block's '}'
        # (or end of input), then restart the parser.
        while True:
            t = self.lex.token()
            if not t or t.value == '}':
                if len(self.scope) > 1:
                    self.scope.pop()
                break
        self.parser.restart()
        return t
    def handle_error(self, e, line, t='E'):
        """ Custom error handler
        args:
            e (Mixed): Exception or str
            line (int): line number
            t(str): Error type
        """
        self.register.register("%s: line: %d: %s\n" % (t, line, e))
| |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.jit import declarative
from paddle.fluid.layers.utils import map_structure
# Seed NumPy so the randomly generated test inputs are reproducible.
SEED = 2020
np.random.seed(SEED)
# Situation 1: Test list append
def test_list_append_without_control_flow(x):
    """Append inside a constant-true `if`; the list stays a plain Python list."""
    # Python list will not be transformed.
    x = fluid.dygraph.to_variable(x)
    a = []
    # It's a plain python control flow which won't be transformed
    if 2 > 1:
        a.append(x)
    return a
def test_list_append_in_if(x):
    """Append inside a tensor-dependent `if/else`; the list must be converted."""
    x = fluid.dygraph.to_variable(x)
    a = []
    if x.numpy()[0] > 0:
        a.append(x)
    else:
        a.append(
            fluid.layers.fill_constant(
                shape=[1, 2], value=9, dtype="int64"))
    # TODO(Aurelius84): Currently, run_program_op doesn't support output LoDTensorArray.
    return a[0]
def test_list_append_in_for_loop(x, iter_num):
    """Append inside a `for` loop whose trip count is a tensor."""
    x = fluid.dygraph.to_variable(x)
    # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor
    iter_num = fluid.layers.fill_constant(
        shape=[1], value=iter_num, dtype="int32"
    )  # TODO(liym27): Delete it if the type of parameter iter_num can be resolved
    a = []
    for i in range(iter_num):
        a.append(x)
    return a[0]
def test_list_append_in_for_subscript(x):
    """Append in a `for` loop driven by `paddle.shape`, then concat and index."""
    x = fluid.dygraph.to_variable(x)
    iter_num = paddle.shape(x)[0]
    a = []
    for i in range(iter_num):
        x = x + 1
        a.append(x)
    out = paddle.concat(a)
    return out[0]
def test_list_append_in_while_loop_subscript(x):
    """Append in a `while` loop driven by `paddle.shape`, then concat and index."""
    x = fluid.dygraph.to_variable(x)
    iter_num = paddle.shape(x)[0]
    a = []
    i = 0
    while i < iter_num:
        x = x + 1
        a.append(x)
        i += 1
    out = paddle.concat(a)
    return out[0]
def test_list_append_in_for_loop_with_concat(x, iter_num):
    """Append in a tensor-driven `for` loop, then concat the list into one tensor."""
    x = fluid.dygraph.to_variable(x)
    a = []
    # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor
    iter_num = fluid.layers.fill_constant(
        shape=[1], value=iter_num, dtype="int32"
    )  # TODO(liym27): Delete it if the type of parameter iter_num can be resolved
    for i in range(iter_num):
        a.append(x)
    a = fluid.layers.concat(a, axis=0)
    return a
def test_list_append_in_while_loop(x, iter_num):
    """Append inside a `while` loop whose bound is a tensor."""
    x = fluid.dygraph.to_variable(x)
    iter_num = fluid.layers.fill_constant(
        shape=[1], value=iter_num, dtype="int32")
    a = []
    i = 0
    while i < iter_num:
        a.append(x)
        i += 1
    return a[0]
def test_list_append_in_while_loop_with_stack(x, iter_num):
    """Append in a numpy-bounded `while` loop, then stack the list into a tensor."""
    x = fluid.dygraph.to_variable(x)
    iter_num = fluid.layers.fill_constant(
        shape=[1], value=iter_num, dtype="int32")
    a = []
    i = 0
    while i < iter_num.numpy()[0]:
        a.append(x)
        i += 1
    out = fluid.layers.stack(a, axis=1)
    return out
# Situation 2: Test list pop
def test_list_pop_without_control_flow_1(x):
    """Pop the last element after appending in a constant-true `if`."""
    x = fluid.dygraph.to_variable(x)
    a = []
    if 2 > 1:
        a.append(x)
    a.pop()
    return a
def test_list_pop_without_control_flow_2(x):
    """Pop by index after appending in a constant-true `if`."""
    x = fluid.dygraph.to_variable(x)
    a = []
    if 2 > 1:
        a.append(x)
        a.append(x + 1)
    last_item = a.pop(1)
    return last_item
def test_list_pop_in_if(x):
    """Append/pop on two lists inside a tensor-dependent `if/else`."""
    x = fluid.dygraph.to_variable(x)
    a = []
    b = [x * 2 + (x + 1)]
    if x.numpy()[0] > 0:
        a.append(x)
        b.append(x + 1)
        a.append(fluid.layers.fill_constant(shape=[1], value=1, dtype="int64"))
    else:
        a.append(x + 1)
        b.append(x - 1)
        a.append(fluid.layers.fill_constant(shape=[2], value=2, dtype="int64"))
    item1 = a.pop(1)
    return item1, b[-1]
def test_list_pop_in_for_loop(x, iter_num):
    """Append in one tensor-driven `for` loop, pop in a second one."""
    x = fluid.dygraph.to_variable(x)
    # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor
    iter_num = fluid.layers.fill_constant(
        shape=[1], value=iter_num, dtype="int32"
    )  # TODO(liym27): Delete it if the type of parameter iter_num can be resolved
    a = []
    b = [x - 1, x + 1]
    for i in range(iter_num):
        a.append(x + i)
        b.append(x * 2)
    one = fluid.layers.ones(shape=[1], dtype="int32")
    for i in range(one.numpy()[0]):
        item = a.pop()
    return a[0], item, b[1]
def test_list_pop_in_while_loop(x, iter_num):
    """Append and conditionally pop inside a tensor-bounded `while` loop."""
    x = fluid.dygraph.to_variable(x)
    iter_num = fluid.layers.fill_constant(
        shape=[1], value=iter_num, dtype="int32")
    a = []
    b = [x]
    b.append(x)
    b.pop()
    i = 0
    while i < iter_num:
        a.append(x + i)
        b.append(x - i)
        i += 1
        if i % 2 == 1:
            a.pop()
    return a[0], b[2]
class TestListWithoutControlFlow(unittest.TestCase):
def setUp(self):
self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
) else fluid.CPUPlace()
self.init_data()
self.init_dygraph_func()
def init_data(self):
self.input = np.random.random((3)).astype('int32')
def init_dygraph_func(self):
self.all_dygraph_funcs = [
test_list_append_without_control_flow,
test_list_pop_without_control_flow_1,
test_list_pop_without_control_flow_2,
]
def varbase_to_numpy(self, res):
if isinstance(res, (list, tuple)):
res = map_structure(lambda x: x.numpy(), res)
else:
res = [res.numpy()]
return res
def run_static_mode(self):
return self.train(to_static=True)
def run_dygraph_mode(self):
return self.train(to_static=False)
def train(self, to_static=False):
with fluid.dygraph.guard():
if to_static:
res = declarative(self.dygraph_func)(self.input)
else:
res = self.dygraph_func(self.input)
return self.varbase_to_numpy(res)
def test_transformed_static_result(self):
for dyfunc in self.all_dygraph_funcs:
self.dygraph_func = dyfunc
static_res_list = self.run_static_mode()
dygraph_res_list = self.run_dygraph_mode()
self.assertEqual(len(static_res_list), len(dygraph_res_list))
for stat_res, dy_res in zip(static_res_list, dygraph_res_list):
self.assertTrue(
np.allclose(stat_res, dy_res),
msg='dygraph_res is {}\nstatic_res is {}'.format(stat_res,
dy_res))
class TestListInIf(TestListWithoutControlFlow):
    """Same checks with list ops nested in tensor-dependent `if` branches."""
    def init_dygraph_func(self):
        self.all_dygraph_funcs = [test_list_append_in_if, test_list_pop_in_if]
class TestListInWhileLoop(TestListWithoutControlFlow):
    """List ops inside `while` loops; the functions take an extra iteration count."""
    def init_data(self):
        self.input = np.random.random((3)).astype('int32')
        self.iter_num = 3
    def init_dygraph_func(self):
        self.all_dygraph_funcs = [
            test_list_append_in_while_loop, test_list_pop_in_while_loop
        ]
    def train(self, to_static=False):
        # Override: functions under test take (input, iter_num).
        with fluid.dygraph.guard():
            func = declarative(self.dygraph_func) if to_static else self.dygraph_func
            return self.varbase_to_numpy(func(self.input, self.iter_num))
class TestListInWhileLoopWithStack(TestListInWhileLoop):
    """Variant that stacks the accumulated list into one tensor after the loop."""
    def init_dygraph_func(self):
        self.all_dygraph_funcs = [test_list_append_in_while_loop_with_stack]
class TestListInForLoop(TestListInWhileLoop):
    """List ops inside tensor-driven `for` loops."""
    def init_dygraph_func(self):
        self.all_dygraph_funcs = [
            test_list_append_in_for_loop, test_list_pop_in_for_loop
        ]
class TestListInForLoopWithConcat(TestListInWhileLoopWithStack):
    """Variant that concatenates the list after a tensor-driven `for` loop."""
    def init_dygraph_func(self):
        self.all_dygraph_funcs = [test_list_append_in_for_loop_with_concat]
class TestListInForLoopWithSubscript(TestListWithoutControlFlow):
    """Loops driven by `paddle.shape(x)[0]`, with subscripting of the result."""
    def init_data(self):
        # Float 2-D input: these functions do arithmetic on x before appending.
        self.input = np.random.random((3, 4)).astype('float32')
    def init_dygraph_func(self):
        self.all_dygraph_funcs = [
            test_list_append_in_for_subscript,
            test_list_append_in_while_loop_subscript
        ]
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
import unittest
from flask_testing import TestCase
from datetime import datetime as dt
from app.models import *
from app import create_app, db
from flask_login import current_user
import json
from base64 import urlsafe_b64encode
from werkzeug.http import parse_authorization_header, parse_accept_header
from werkzeug.datastructures import Headers
# NOTE: this coding declaration is inert here -- Python only honors it on the
# first or second line of a file; Python 3 source defaults to UTF-8 anyway.
# coding: utf-8
class AppTestCase(TestCase):
    """Base test case wiring up the Flask test app/database, with fixture helpers.

    All helpers are classmethods so concrete test cases can build fixtures
    without any extra setup beyond the Flask-Testing plumbing.
    """
    @classmethod
    def add_to_db(cls, query):
        """Add a model instance to the session and commit immediately."""
        db.session.add(query)
        db.session.commit()
    @classmethod
    def create_role(cls, name, description):
        """Persist a Role row with the given name and description."""
        cls.add_to_db(Role(name, description))
    @classmethod
    def create_user(cls, email, name, password, is_admin=False):
        """Persist a User attached to the 'Admin' or 'User' role.

        Raises:
            ValueError: if email, name, password or is_admin is None.
            AssertionError: if the required role has not been created yet.
        """
        if email is None or name is None or password is None or is_admin is None:
            raise ValueError("One of these values is None - email:({0}), name:({1}), password:({2}) is_admin: ({3})".format(email, name, password, is_admin))
        # Fix: test truthiness instead of comparing against True (PEP 8 E712).
        rolename = 'Admin' if is_admin else 'User'
        role = Role.query.filter_by(name=rolename).first()
        assert role is not None, "No roles defined, role is {0}".format(role)
        cls.add_to_db(User(email, name, password, role.id))
    @classmethod
    def create_store(cls, name):
        """Persist a Store row."""
        cls.add_to_db(Store(name))
    @classmethod
    def create_brand(cls, name):
        """Persist a Brand row."""
        cls.add_to_db(Brand(name=name))
    @classmethod
    def create_category(cls, name):
        """Persist a Category row."""
        cls.add_to_db(Category(name=name))
    @classmethod
    def create_product(cls, name, upc, quantity, units, brand_id, category_id):
        """Persist a Product row linked to an existing brand and category."""
        cls.add_to_db(Product(name=name, upc=upc, quantity=quantity, units=units, brand_id=brand_id, category_id=category_id))
    @classmethod
    def create_product_price(cls, date, price, store, product):
        """Persist a ProductPrice row; `store` and `product` must be model instances.

        Raises:
            ValueError: if store/product are not Store/Product instances.
        """
        if not isinstance(store, Store):
            raise ValueError("store ({0}) is not a valid store".format(store))
        if not isinstance(product, Product):
            raise ValueError("product ({0}) is not a valid product".format(product))
        cls.add_to_db(ProductPrice(date=date, price=price, product_id=product.id, store_id=store.id))
    def create_app(self):
        """Flask-Testing hook: build the application with the 'test' config."""
        return create_app('test')
    @classmethod
    def setUp(cls):
        """Recreate a clean schema before every test."""
        print("TestCase.setUp()")
        db.drop_all()  # Always make sure we start fresh
        db.create_all()
    @classmethod
    def tearDown(cls):
        """Drop the schema and release the session after every test."""
        print("TestCase.tearDown()")
        db.session.remove()
        db.drop_all()
class DBTestCase(AppTestCase):
roles = [ ('User', 'A normal user'),
('Admin', 'An admin account'),
('', 'Test description'),
('Test', '')]
users = [
('test@test.com', 'Test', 'testpass'),
('test1@test.com', 'Test1', 'testpass'),
('test2@test.com', 'Test2', 'testpass'),
('test3@test.com', 'Test3', 'testpass'),
('test4@test.com', 'Test4', 'testpass')
]
admins = [ ('admin1@test.com', 'Admin1', 'testpass'),
('admin2@test.com', 'Admin2', 'testpass')]
stores = [('storea'), ('storeb'), ('storec'), ('stored'), ('storee')]
products = [ ('producta', '12345678', 100, 'mg', 'Test Brand, 1', 'Test Category 1'),
('productb', '23456787', 100, 'mg', 'Test Brand, 2', 'Test Category 2'),
('productc', '234568709', 100, 'mg', 'Test Brand, 3', 'Test Category 3')]
product_logs = [ (dt(2007, 1, 1), 100, 'producta', 'storea'),
(dt(2008, 2, 2), 150, 'productb', 'storeb'),
(dt(2009, 3, 3), 200, 'productc', 'storec'),
(dt(2010, 4, 4), 100, 'productc', 'storea'),
(dt(2011, 5, 5), 150, 'producta', 'storeb'),
(dt(2012, 6, 6), 200, 'productb', 'storec')]
def test_user_db(self):
print("################################Test User DB - Start##########################################")
for role in self.roles:
name, description = role
super().create_role(name, description)
r = Role.query.filter_by(name=name).first()
assert r != None, "Role is not defined"
assert r.name == name, "Incorrect Name: {0} should be {1}".format(r.name, name)
assert r.description == description, "Incorrect Description: {0} should be {1}".format(r.description, description)
q = Role.query.all()
assert Role.query.filter_by(name='User').first() != None, "'User' is not defined in roles"
assert Role.query.filter_by(name='Admin').first() != None, "'User' is not defined in roles"
assert Role.query.filter_by(name='').first() != None, "'' is defined in roles"
assert Role.query.filter_by(name='Test').first() != None, "'Test' is not defined in roles"
# Assure that all the roles got added.
assert len(q) == len(self.roles), "Number of defined roles is {0}, it should be {1}".format(len(q), len(self.roles))
for user in self.users:
email, name, password = user
super().create_user(email, name, password)
u = User.query.filter_by(email=email).first()
assert u != None, "User was not successfully inserted into database"
assert u.email == email, "Incorrect Email: {0} should be {1}".format(u.email, email)
assert u.display_name == name, "Incorrect Name: {0} should be {1}".format(u.display_name, email)
assert u.role.name == 'User', "Incorrect Role: {0} not correct Role User".format(u.role.name)
for admin in self.admins:
email, name, password = admin
super().create_user(email, name, password, is_admin=True)
u = User.query.filter_by(email=email).first()
assert u != None, "User was not successfully inserted into database"
assert u.email == email, "Incorrect email: {0} should be {1}".format(u.email, email)
assert u.display_name == name, "Incorrect Name: {0} should be {1}".format(u.display_name, name)
assert u.role.name == 'Admin', "Incorrect Role: {0} not correct Role User".format(u.role.name)
print("################################ Test User DB - End ##########################################")
def test_product_listing_db(self):
    """Exercise the store/product/product-price creation helpers.

    Creates every fixture store, every fixture product (creating the
    referenced brand/category rows on demand), and every fixture product
    price, verifying after each insert that the persisted row matches the
    fixture data.
    """
    print("############################ Test Product Listing DB - Start ######################################")
    # Stores: create each one and verify it round-trips by name.
    for store in self.stores:
        name = store
        super().create_store(name)
        s = Store.query.filter_by(name=name).first()
        assert s is not None, "Store was not successfully inserted into database"
        assert s.name == name, "Incorrect name: {0} should be {1}".format(s.name, name)
    # Products: brand and category rows are created lazily the first time
    # a fixture references them, then the product row is checked field by field.
    for product in self.products:
        name, upc, quantity, units, brand_name, category_name = product
        brand = Brand.query.filter(Brand.name == brand_name).first()
        if brand is None:
            super().create_brand(brand_name)
            brand = Brand.query.filter(Brand.name == brand_name).first()
        category = Category.query.filter(Category.name == category_name).first()
        if category is None:
            super().create_category(category_name)
            category = Category.query.filter(Category.name == category_name).first()
        brand_id = brand.id
        category_id = category.id
        super().create_product(name, upc, quantity, units, brand_id, category_id)
        p = Product.query.filter_by(name=name).first()
        assert p is not None, "Product was not successfully inserted into database"
        assert p.name == name, "Incorrect Name: {0} should be {1}".format(p.name, name)
        assert p.upc == upc, "Incorrect Upc: {0} should be {1}".format(p.upc, upc)
        # BUG FIX: the two messages below previously said "Incorrect Upc".
        assert p.brand_id == brand_id, "Incorrect Brand Id: {0} should be {1}".format(p.brand_id, brand_id)
        assert p.category_id == category_id, "Incorrect Category Id: {0} should be {1}".format(p.category_id, category_id)
    # Product prices: ids are assigned sequentially starting at 1, so we can
    # track the expected primary key as we insert.
    expected_id = 1
    for product_log in self.product_logs:
        date, price, product_name, store_name = product_log
        product = Product.query.filter_by(name=product_name).first()
        store = Store.query.filter_by(name=store_name).first()
        assert product.name == product_name, "{0} != {1}".format(product.name, product_name)
        assert store.name == store_name, "{0} != {1}".format(store.name, store_name)
        super().create_product_price(date, price, store, product)
        assert expected_id == len(ProductPrice.query.all())
        pl = ProductPrice.query.get(expected_id)
        expected_id += 1
        assert pl is not None, "Product Price ({0}, {1}) was not successfully inserted into database".format(date.date(), price)
        # The ProductPrice column stores a date, while the fixture carries a
        # datetime, hence the .date() conversion on the expected side.
        assert pl.date == date.date(), "Incorrect date: '{0}' should be '{1}'".format(pl.date, date.date())
        assert pl.price == price, "Incorrect price: '{0}' should be '{1}'`".format(pl.price, price)
        assert pl.store == store, "Incorrect store: '{0}' should be '{1}'`".format(pl.store, store)
        assert pl.product == product, "Incorrect product: '{0}' should be '{1}'`".format(pl.product, product)
    print("############################ Test Product Listing DB - End ########################################")
class ViewTestCase(AppTestCase):
    """Base test case that pre-populates the database with fixture data.

    Subclasses inherit a database seeded with the roles, users, admins,
    stores, brands, categories, products and product prices defined below,
    plus login/logout helpers for the auth views.
    """

    # Roles available to the application: (name, description).
    roles = [
        ('User', 'A normal user of the application'),
        ('Admin', 'A website admin that has control of the dashboard')
    ]
    # Sample users for each test: (email, display name, password).
    users = [ ('example@example.com', 'Example', 'examplepass'),
        ('example2@example.com', 'Example2', 'examplepass'),
        ('example3@example.com', 'Example3', 'examplepass')]
    # Sample admin users for each test: (email, display name, password).
    admins = [ ('admin@example.com', 'Admin', 'adminpass'),
        ('admin2@example.com', 'Admin2', 'adminpass'),
        ('admin3@example.com', 'Admin3', 'adminpass')]
    # NOTE: the parentheses below are redundant — these are lists of plain
    # strings, not tuples, and the setUp loops treat each item as a name.
    brands = [('Brand A'), ('Brand B'), ('Brand C')]
    categories = [('Category A'), ('Category B'), ('Category C')]
    # Sample stores
    stores = [('Walmart'), ('Test Mart'), ('Super Test Mart')]
    # Sample products: (name, upc, quantity, units, brand_id, category_id).
    products = [('Coca-cola', '0987654321356', 100, 'mL', 1, 1),
        ('Calpis Soda', '0213123456744', 200, 'mL', 1, 1),
        ('Test Product 1', '1230212354687', 100, 'mg', 2, 2),
        ('Test Product 2', '3320512354687', 150, 'mg', 3, 3),
        ('Test Product 3', '3411612354687', 200, 'mg', 3, 3)]
    # Sample price observations: (date, price, store name, product name).
    product_prices = [ (dt(2007, 1, 1), 100, 'Walmart', 'Coca-cola'),
        (dt(2008, 2, 2), 150, 'Test Mart', 'Calpis Soda'),
        (dt(2009, 3, 3), 200, 'Super Test Mart', 'Test Product 1'),
        (dt(2010, 4, 4), 100, 'Walmart', 'Test Product 1'),
        (dt(2011, 5, 5), 150, 'Test Mart', 'Coca-cola'),
        (dt(2012, 6, 6), 200, 'Super Test Mart', 'Calpis Soda')]

    # General user login/logout helpers.
    def login(self, email, password):
        """POST the login form and follow redirects to the landing page."""
        print("login({0}, {1})".format(email, password))
        return self.client.post('/auth/login', data=dict(
            email=email,
            password=password
        ), follow_redirects=True)

    def logout(self):
        """GET the logout view, following redirects."""
        print("logout")
        return self.client.get('/auth/logout', follow_redirects=True)

    # NOTE(review): setUp/tearDown are declared as classmethods here even
    # though unittest normally invokes them per-instance — confirm this is
    # intentional in AppTestCase before changing.
    @classmethod
    def setUp(cls):
        """Seed the database with every fixture defined on this class."""
        print("AppTestCase setUp()")
        super(ViewTestCase, cls).setUp()
        for role in cls.roles:
            name, description = role
            cls.create_role(name, description)
        for user in cls.users:
            email, displayname, password = user
            cls.create_user(email, displayname, password)
        for admin in cls.admins:
            email, displayname, password = admin
            cls.create_user(email, displayname, password, is_admin=True)
        for store in cls.stores:
            name = store
            cls.create_store(name)
        for brand in cls.brands:
            name = brand
            cls.create_brand(name=name)
        for category in cls.categories:
            name = category
            cls.create_category(name=name)
        for product in cls.products:
            name, upc, quantity, units, brand_id, category_id = product
            cls.create_product(name, upc, quantity, units, brand_id, category_id)
        # Product prices reference stores/products by name, so resolve the
        # foreign rows before inserting each observation.
        for entry in cls.product_prices:
            date, price, storename, productname = entry
            store = Store.query.filter(Store.name==storename).first()
            product = Product.query.filter(Product.name==productname).first()
            cls.create_product_price(date, price, store, product)

    @classmethod
    def tearDown(cls):
        """Delegate database cleanup to AppTestCase."""
        print("AppTestCase tearDown()")
        super().tearDown()
class AuthTestCase(ViewTestCase):
    """Tests for user registration and login through the /auth views.

    NOTE(review): the assertions below expect 404 from /auth/register and an
    anonymous session even after a 200 login. That mirrors the application
    state this suite was written against (registration route absent or
    disabled?) — confirm against the current auth blueprint.
    """

    # Registration payloads that must be rejected (malformed email, blank
    # fields, or an address that already exists in the fixtures).
    register_users_fail = [
        # Should fail
        ('test.com', 'Test', 'testpass',),
        ('@test.com', 'Test', 'testpass',),
        ('test@', 'Test', 'testpass',),
        ('com', 'Test', 'testpass',),
        ('test@test.com', '', 'testpass'),
        ('test@test.com', 'Test', ''),
        ('example@example.com', 'Example', 'examplepass') # Should fail because it's already in the database
    ]
    # Well-formed registration payloads.
    register_users_pass = [
        # Should pass
        ('testX1@test.com', 'TestX1', 'testpass'),
        ('testX2@test.com', 'TestX2', 'testpass'),
        ('testX3@test.com', 'TestX3', 'testpass')
    ]
    # Currently unused; kept for interface compatibility.
    login_users = []

    def register(self, email, display_name, password):
        """POST the registration form (confirm mirrors password)."""
        print("register({0}, {1}, {2})".format(email, display_name, password))
        return self.client.post('/auth/register', data=dict(
            email=email,
            display_name=display_name,
            password=password,
            confirm=password
        ), follow_redirects=True)

    def test_registration(self):
        # BUG FIX: this opening banner previously said "complete".
        print("##################################### test_registration - start #####################################")
        with self.client:
            print("Test User Registration Failures")
            assert current_user.is_anonymous == True, "User session is currently logged in"
            for user in self.register_users_fail:
                assert current_user.is_anonymous == True, "User session is currently logged in"
                # Test Registration request
                email, displayname, password = user
                resp = self.register(email, displayname, password)
                self.assertEqual(404, resp.status_code)
                assert current_user.is_anonymous == True, "User session is not currently logged in"
            print("Test User Registration Passes")
            for user in self.register_users_pass:
                email, displayname, password = user
                assert current_user.is_anonymous == True, "User session is currently logged in"
                # Test Registration request
                resp = self.register(email, displayname, password)
                self.assertEqual(404, resp.status_code)
                resp = self.login(email, password)
                self.assertEqual(200, resp.status_code)
                assert current_user.is_anonymous == True, "User session is not currently logged in"
                assert current_user.is_anonymous == True, "User session is currently logged in"
            print("Login tests")
            for user in self.register_users_pass:
                email, displayname, password = user
                resp = self.login(email, password)
                self.assertEqual(200, resp.status_code)
                assert current_user.is_anonymous == True, "User is currently logged in"
        print("##################################### test_registration - complete #####################################")
class ProductListAPITestCase(ViewTestCase):
    """CRUD tests for the JSON REST API: stores, products, product prices.

    Every request authenticates as the fixture user via HTTP Basic auth and
    sends/receives JSON bodies.
    """

    # Headers to be used for testing the API during this test.
    request_headers = {
        'authorization': 'Basic ' + urlsafe_b64encode('example@example.com:examplepass'.encode('utf-8')).decode('utf-8'),
        'accept': 'application/json',
        'accept-charset': 'utf-8',
        'content-type' : 'application/json'
    }

    @classmethod
    def setUp(cls):
        print('ProductListAPITestCase.setUp()')
        super(ProductListAPITestCase, cls).setUp()

    @classmethod
    def tearDown(cls):
        print('ProductListAPITestCase.tearDown()')
        super(ProductListAPITestCase, cls).tearDown()

    # ---- thin HTTP helpers (each returns the raw Flask test response) ----

    def add_store(self, store_name):
        """POST a new store."""
        return self.client.post('/api/store/',
            data=json.dumps(dict(name=store_name)),
            headers=self.request_headers)

    def edit_store(self, id, name):
        """PUT a new name for store *id*."""
        return self.client.put('/api/store/{0}'.format(id),
            data=json.dumps(dict(name=name)),
            headers=self.request_headers)

    def delete_store(self, id):
        """DELETE store *id*; asserts the row exists first."""
        q = Store.query.get(id)
        assert q is not None, "Store_name does not exist"
        return self.client.delete('/api/store/{0}'.format(id),
            headers=self.request_headers)

    def read_stores(self):
        """GET the full store collection."""
        return self.client.get('/api/stores/',
            headers=self.request_headers)

    def read_store(self, id):
        """GET a single store by id."""
        return self.client.get('/api/store/{0}'.format(id),
            headers=self.request_headers)

    def add_product(self, name, upc, quantity, units, brand_id, category_id):
        """POST a new product."""
        return self.client.post('/api/product/',
            data=json.dumps(dict(name=name, upc=upc, quantity=quantity, units=units, brand_id=brand_id, category_id=category_id)),
            headers=self.request_headers)

    def edit_product(self, id, name, upc, quantity, units, brand_id, category_id):
        """PUT replacement fields for product *id*."""
        return self.client.put('/api/product/{0}'.format(id),
            data=json.dumps(dict(name=name, upc=upc, quantity=quantity, units=units, brand_id=brand_id, category_id=category_id)),
            headers=self.request_headers)

    def delete_product(self, id):
        """DELETE product *id*."""
        return self.client.delete('/api/product/{0}'.format(id),
            headers=self.request_headers)

    def read_products(self):
        """GET the full product collection."""
        return self.client.get('/api/products/',
            headers=self.request_headers
            )

    def read_product(self, id):
        """GET a single product by id."""
        return self.client.get('/api/product/{0}'.format(id),
            headers=self.request_headers)

    def add_product_price(self, date, price, store_id, product_id):
        """POST a new product price observation (date sent as ISO 8601)."""
        return self.client.post('/api/product_price/',
            data=json.dumps(dict(store_id=store_id, product_id=product_id, date=date.isoformat(), price=price)),
            headers=self.request_headers)

    def delete_product_price(self, id):
        """DELETE product price *id*."""
        print("delete_product_prices({0})".format(id))
        return self.client.delete('/api/product_price/{0}'.format(id),
            headers=self.request_headers)

    def read_product_prices(self):
        """GET the full product-price collection."""
        return self.client.get('/api/product_prices/',
            headers=self.request_headers)

    def read_product_price(self, id):
        """GET a single product price by id."""
        return self.client.get('/api/product_price/{0}'.format(id),
            headers=self.request_headers)

    # ---- tests -----------------------------------------------------------

    def test_store_crud(self):
        print("##################################### test_store_crud - start #####################################")
        with self.client:
            # Test Get
            resp = self.read_stores()
            self.assertEqual(200, resp.status_code)
            print(resp)
            resp = self.read_store(0)
            self.assertEqual(404, resp.status_code)
            print(resp)
            print(resp.data)
            for i in range(0, len(self.stores)):
                resp = self.read_store(i+1) # Database starting index is 1 rather than 0 so convert
                self.assertEqual(200, resp.status_code)
                data = json.loads(resp.data.decode('utf-8'))
                print(data)
                self.assertIn(self.stores[i], data['store']['name'])
                print(resp)
            resp = self.read_store(12348756)
            self.assertEqual(404, resp.status_code)
            print(resp)
            # Test Create
            test_stores_to_create = ['New Mart', 'Newer Mart', 'Newest Mart', 'Mega new martttt!!123341233487656']
            for store in test_stores_to_create:
                resp = self.add_store(store)
                self.assertEqual(201, resp.status_code)
                data_id = int(json.loads(resp.data.decode('utf-8'))['store']['uri'].split('/')[-1])
                resp = self.read_store(data_id)
                self.assertEqual(200, resp.status_code)
                s = json.loads(resp.data.decode('utf-8'))['store']
                print(s)
                self.assertEqual(store, s['name'])
            # NOTE(review): duplicates currently surface as a 500 from the
            # API; a 4xx would be more appropriate — confirm server behavior.
            for store in test_stores_to_create:
                resp = self.add_store(store)
                self.assertEqual(500, resp.status_code)
            # Test Edit
            edits = [(4, 'New Mart (edited)'),
                (5, 'Newer Mart (edited)'),
                (6, 'Newest Mart (edited)')]
            for edit in edits:
                id, name = edit
                resp = self.edit_store(id, name)
                self.assertEqual(200, resp.status_code)
                data = json.loads(resp.data.decode('utf-8'))
                self.assertEqual(name, data['store']['name'])
                resp = self.read_store(id)
                self.assertEqual(200, resp.status_code)
                data = json.loads(resp.data.decode('utf-8'))
                self.assertEqual(name, data['store']['name'])
            # Test Delete
            deletion_indexes = [1, 3, 4, 5]
            for i in deletion_indexes:
                resp = self.delete_store(i)
                self.assertEqual(200, resp.status_code)
            resp = self.read_stores()
            self.assertEqual(200, resp.status_code)
            data = json.loads(resp.data.decode('utf-8'))['stores']
            self.assertEqual(3, len(data))
        print("##################################### test_store_crud - complete #####################################")

    def test_product_crud(self):
        print("##################################### test_product_crud - start #####################################")
        with self.client:
            # Test Get
            resp = self.read_products()
            self.assertEqual(200, resp.status_code)
            print(resp)
            resp = self.read_product(0)
            self.assertEqual(404, resp.status_code)
            print(resp)
            print(resp.data)
            for i in range(0, len(self.products)):
                resp = self.read_product(i+1) # Database starting index is 1 rather than 0 so convert
                self.assertEqual(200, resp.status_code)
                data = json.loads(resp.data.decode('utf-8'))
                self.assertEqual(self.products[i][0], data['product']['name'])
                self.assertEqual(self.products[i][1], data['product']['upc'])
                self.assertEqual(self.products[i][2], data['product']['quantity'])
                self.assertEqual(self.products[i][3], data['product']['units'])
                print(resp)
            resp = self.read_product(12348756)
            self.assertEqual(404, resp.status_code)
            print(resp)
            # Test Create
            test_products_to_create = [
                ('New Product', '1203981322287', 100, 'mg', 1, 1),
                ('Newer Product', '3548532112312', 100, 'mg', 1, 2),
                ('Newest Product', '4890465784213', 100, 'mg', 2, 2),
                ('Mega new Product!!123341233487656', '8643189156785', 100, 'mg', 3, 3)]
            for product in test_products_to_create:
                name, upc, quantity, units, brand_id, category_id = product
                resp = self.add_product(name, upc, quantity, units, brand_id, category_id)
                self.assertEqual(201, resp.status_code)
                data_id = int(json.loads(resp.data.decode('utf-8'))['product']['uri'].split('/')[-1])
                resp = self.read_product(data_id)
                self.assertEqual(200, resp.status_code)
                s = json.loads(resp.data.decode('utf-8'))['product']
                print(s)
                self.assertEqual(name, s['name'])
                self.assertEqual(upc, s['upc'])
                self.assertEqual(quantity, s['quantity'])
                self.assertEqual(units, s['units'])
                self.assertEqual(brand_id, s['brand_id'])
                self.assertEqual(category_id, s['category_id'])
            # Test Edit
            edits = [ (4, 'New Product (edit)', '1203981322287', 100, 'mg', 1, 2),
                (5, 'Newer Product (edit)', '3548532112312', 100, 'mg', 1, 2),
                (6, 'Newest Product (edit)', '4890465784213', 100, 'mg', 1, 2)]
            for edit in edits:
                id, name, upc, quantity, units, brand_id, category_id = edit
                resp = self.edit_product(id, name, upc, quantity, units, brand_id, category_id)
                self.assertEqual(200, resp.status_code)
                data = json.loads(resp.data.decode('utf-8'))['product']
                self.assertEqual(name, data['name'])
                self.assertEqual(upc, data['upc'])
                self.assertEqual(quantity, data['quantity'])
                self.assertEqual(units, data['units'])
                resp = self.read_product(id)
                self.assertEqual(200, resp.status_code)
                data = json.loads(resp.data.decode('utf-8'))['product']
                self.assertEqual(name, data['name'])
                self.assertEqual(upc, data['upc'])
                self.assertEqual(quantity, data['quantity'])
                self.assertEqual(units, data['units'])
            # Test Delete
            deletion_indexes = [1, 3, 4, 5]
            for id in deletion_indexes:
                print("delete id: {0}".format(id))
                resp = self.delete_product(id)
                self.assertEqual(200, resp.status_code)
            resp = self.read_products()
            self.assertEqual(200, resp.status_code)
            data = json.loads(resp.data.decode('utf-8'))['products']
            self.assertEqual(5, len(data))
        print("##################################### test_product_crud - complete #####################################")

    def test_product_listing_crud(self):
        print("##################################### test_product_listing_crud start #####################################")
        with self.client:
            # Test Get
            resp = self.read_product_prices()
            self.assertEqual(200, resp.status_code)
            pldata = json.loads(resp.data.decode('utf-8'))['product_prices']
            print(pldata)
            resp = self.read_product_price(0)
            self.assertEqual(404, resp.status_code)
            for i in range(0, len(self.product_prices)):
                # Grab the value indicated by each URI and verify that it matches.
                resp = self.read_product_price(int(pldata[i]['uri'].split('/')[-1]))
                self.assertEqual(200, resp.status_code)
                data = json.loads(resp.data.decode('utf-8'))['product_price']
                self.assertEqual(pldata[i]['uri'], data['uri'])
                self.assertEqual(pldata[i]['date'], data['date'])
                self.assertEqual(pldata[i]['price'], data['price'])
                self.assertEqual(pldata[i]['product_id'], data['product_id'])
                self.assertEqual(pldata[i]['store_id'], data['store_id'])
                print(resp)
            # BUG FIX: previously queried read_product() here; this section
            # tests product prices, so probe the product_price endpoint.
            resp = self.read_product_price(12348756)
            self.assertEqual(404, resp.status_code)
            print(resp)
            # Test Create
            test_product_prices_to_create = [
                (dt(2014, 1, 1), 100, 'Walmart', 'Coca-cola'),
                (dt(2014, 2, 2), 150, 'Test Mart', 'Calpis Soda'),
                (dt(2015, 3, 3), 200, 'Super Test Mart', 'Test Product 1'),
                (dt(2015, 4, 4), 100, 'Walmart', 'Test Product 1'),
                (dt(2015, 5, 5), 150, 'Test Mart', 'Coca-cola'),
                (dt(2015, 6, 6), 200, 'Super Test Mart', 'Calpis Soda')]
            for product_price in test_product_prices_to_create:
                date, price, store_name, product_name = product_price
                store = Store.query.filter_by(name=store_name).first()
                product = Product.query.filter_by(name=product_name).first()
                resp = self.add_product_price(date, price, store.id, product.id)
                self.assertEqual(201, resp.status_code)
                data_id = int(json.loads(resp.data.decode('utf-8'))['product_price']['uri'].split('/')[-1])
                resp = self.read_product_price(data_id)
                self.assertEqual(200, resp.status_code)
                p = json.loads(resp.data.decode('utf-8'))['product_price']
                print(p)
                self.assertEqual(price, p['price'])
                self.assertEqual(store.id, p['store_id'])
                self.assertEqual(product.id, p['product_id'])
            # Test Delete
            deletion_dates = [ (dt(2014, 1, 1), 100, 'Walmart', 'Coca-cola'),
                (dt(2014, 2, 2), 150, 'Test Mart', 'Calpis Soda'),
                (dt(2015, 3, 3), 200, 'Super Test Mart', 'Test Product 1')]
            for deletion_date in deletion_dates:
                date, price, store_name, product_name = deletion_date
                print(deletion_date)
                product = Product.query.filter_by(name=product_name).first()
                print(product)
                store = Store.query.filter_by(name=store_name).first()
                print(store)
                # BUG FIX: the conditions were previously joined with Python
                # `and`, which does not build a compound SQL condition —
                # filter() takes multiple criteria joined by SQL AND. The
                # date is also converted with .date() to match the stored
                # Date column (fixtures carry datetimes).
                q = ProductPrice.query.filter(
                    ProductPrice.date == date.date(),
                    ProductPrice.price == price,
                    ProductPrice.product_id == product.id,
                    ProductPrice.store_id == store.id).first()
                print(q)
                resp = self.delete_product_price(q.id)
                self.assertEqual(200, resp.status_code)
            resp = self.read_product_prices()
            self.assertEqual(200, resp.status_code)
            data = json.loads(resp.data.decode('utf-8'))['product_prices']
            self.assertEqual(9, len(data))
        print("##################################### test_product_listing_crud - complete #####################################")
# class ProductListViewTestCase(ViewTestCase):
# def add_store(self, store_name):
# print("add_store({0})".format(store_name))
# return self.client.post('/app/store/add', data=dict(
# store_name
# ), follow_redirects=True)
# def edit_store(self, store_name, new_store_name):
# print("edit({0}, {1})".format(store_name, new_store_name))
# return self.client.post('/app/store/edit', data=dict(
# store_id,
# store_name,
# new_store_name))
# def delete_store(self, store_name):
# print("delete_store({0}, {1}, {2})".format(email, display_name, password))
# q = Store.query.filter(Store.name==store_name).first()
# assert q != None, "Store_name does not exist"
# return self.client.post('/app/store/delete', data=dict(
# store_id=q.id
# ), follow_redirects=True)
# def read_stores(self):
# print("read_stores()")
# return self.client.post('/app/stores/', data=dict(), follow_redirects=True)
# def add_product(self, product_name):
# print("add_product({0})".format(product_name))
# return self.client.post('/app/product/add', data=dict(), follow_redirects=True)
# def edit_product(self):
# print("edit_product()")
# return self.client.post('/app/product/edit', data=dict(), follow_redirects=True)
# def delete_product(self):
# print("delete_product()")
# return self.client.post('/app/product/delete', data=dict(), follow_redirects=True)
# def read_products(self):
# print("read_products()")
# return self.client.post('/app/products/', data=dict(), follow_redirects=True)
# # def add_listing(self, store_id, product_id, date, price):
# # return self.client.post('/productlist/add', data=dict(
# # store_id=store_id,
# # product_id=product_id,
# # date=date,
# # price=price
# # ), follow_redirects=True)
# def test_store_crud(self):
# with self.client:
# assert current_user.is_anonymous == True, "User is currently logged in"
# email, displayname, password = self.users[0]
# resp = self.login(email, password)
# self.assertEqual(200, resp.status_code)
# assert current_user.is_anonymous == False, "User did not correctly log in"
# # Test View
# resp = self.read_stores()
# self.assertEqual(200, resp.status_code)
# for store in self.stores:
# self.assertIn(bytearray(store), resp.data)
# # Test Add
# resp = self.add_store("NewTestStore")
# self.assertEqual(200, resp.status_code)
# resp = self.read_stores()
# self.assertEqual(200, resp.status_code)
# self.assertIn("NewTestStore", resp.data)
# # Test Edit
# resp = self.edit_product("NewTestProduct", "EditedTestProduct")
# self.assert(200, resp.status_code)
# # Test Delete
# resp = self.delete_product("EditedTestProduct")
# resp = self.logout()
# self.assert(200, resp.status_code)
# assert current_user.is_anonymous == True, "User <{0}> is did not log out".format(current_user)
# def test_product_crud(self):
# with self.client:
# assert current_user.is_anonymous == True, "User is currently logged in"
# email, displayname, password = self.users[0]
# resp = self.login(email, password)
# self.assertEqual(200, resp.status_code)
# assert current_user.is_anonymous == False, "User did not correctly log in"
# # Test View
# resp = self.read_products()
# self.assertEqual(200, resp.status_code)
# for product in self.products:
# self.assertIn(bytearray(product), resp.data)
# # Test Create
# resp = self.add_product("NewTestProduct")
# self.assertEqual(200, resp.status_code)
# resp = self.read_product()
# self.assertEqual(200, resp.status_code)
# self.assertIn("NewTestProduct", resp.data)
# # Test Edit
# resp = self.edit_product("NewTestProduct", "EditedTestProduct")
# self.assert(200, resp.status_code)
# # Test Delete
# resp = self.delete_product("EditedTestProduct")
# self.logout()
# self.assert(200, resp.status_code)
# assert current_user.is_anonymous == True, "User <{0}> is did not log out".format(current_user)
# def test_product_listing_crud(self):
# with self.client:
# return None
# class AdminTestCase
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.common import service_client
from tempest import config
from tempest import exceptions
CONF = config.CONF
class IdentityClientJSON(service_client.ServiceClient):
def __init__(self, auth_provider):
super(IdentityClientJSON, self).__init__(
auth_provider,
CONF.identity.catalog_type,
CONF.identity.region,
endpoint_type='adminURL')
def has_admin_extensions(self):
"""
Returns True if the KSADM Admin Extensions are supported
False otherwise
"""
if hasattr(self, '_has_admin_extensions'):
return self._has_admin_extensions
# Try something that requires admin
try:
self.list_roles()
self._has_admin_extensions = True
except Exception:
self._has_admin_extensions = False
return self._has_admin_extensions
def create_role(self, name):
"""Create a role."""
post_body = {
'name': name,
}
post_body = json.dumps({'role': post_body})
resp, body = self.post('OS-KSADM/roles', post_body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def get_role(self, role_id):
"""Get a role by its id."""
resp, body = self.get('OS-KSADM/roles/%s' % role_id)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['role'])
def create_tenant(self, name, **kwargs):
"""
Create a tenant
name (required): New tenant name
description: Description of new tenant (default is none)
enabled <true|false>: Initial tenant status (default is true)
"""
post_body = {
'name': name,
'description': kwargs.get('description', ''),
'enabled': kwargs.get('enabled', True),
}
post_body = json.dumps({'tenant': post_body})
resp, body = self.post('tenants', post_body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def delete_role(self, role_id):
"""Delete a role."""
resp, body = self.delete('OS-KSADM/roles/%s' % str(role_id))
self.expected_success(204, resp.status)
return resp, body
def list_user_roles(self, tenant_id, user_id):
"""Returns a list of roles assigned to a user for a tenant."""
url = '/tenants/%s/users/%s/roles' % (tenant_id, user_id)
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBodyList(resp, self._parse_resp(body))
def assign_user_role(self, tenant_id, user_id, role_id):
"""Add roles to a user on a tenant."""
resp, body = self.put('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
(tenant_id, user_id, role_id), "")
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def remove_user_role(self, tenant_id, user_id, role_id):
"""Removes a role assignment for a user on a tenant."""
resp, body = self.delete('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
(tenant_id, user_id, role_id))
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def delete_tenant(self, tenant_id):
"""Delete a tenant."""
resp, body = self.delete('tenants/%s' % str(tenant_id))
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def get_tenant(self, tenant_id):
"""Get tenant details."""
resp, body = self.get('tenants/%s' % str(tenant_id))
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def list_roles(self):
"""Returns roles."""
resp, body = self.get('OS-KSADM/roles')
self.expected_success(200, resp.status)
return service_client.ResponseBodyList(resp, self._parse_resp(body))
def list_tenants(self):
"""Returns tenants."""
resp, body = self.get('tenants')
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['tenants'])
def get_tenant_by_name(self, tenant_name):
tenants = self.list_tenants()
for tenant in tenants:
if tenant['name'] == tenant_name:
return tenant
raise exceptions.NotFound('No such tenant')
def update_tenant(self, tenant_id, **kwargs):
"""Updates a tenant."""
body = self.get_tenant(tenant_id)
name = kwargs.get('name', body['name'])
desc = kwargs.get('description', body['description'])
en = kwargs.get('enabled', body['enabled'])
post_body = {
'id': tenant_id,
'name': name,
'description': desc,
'enabled': en,
}
post_body = json.dumps({'tenant': post_body})
resp, body = self.post('tenants/%s' % tenant_id, post_body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def create_user(self, name, password, tenant_id, email, **kwargs):
"""Create a user."""
post_body = {
'name': name,
'password': password,
'email': email
}
if tenant_id is not None:
post_body['tenantId'] = tenant_id
if kwargs.get('enabled') is not None:
post_body['enabled'] = kwargs.get('enabled')
post_body = json.dumps({'user': post_body})
resp, body = self.post('users', post_body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def update_user(self, user_id, **kwargs):
"""Updates a user."""
put_body = json.dumps({'user': kwargs})
resp, body = self.put('users/%s' % user_id, put_body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def get_user(self, user_id):
"""GET a user."""
resp, body = self.get("users/%s" % user_id)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def delete_user(self, user_id):
"""Delete a user."""
resp, body = self.delete("users/%s" % user_id)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def get_users(self):
"""Get the list of users."""
resp, body = self.get("users")
self.expected_success(200, resp.status)
return service_client.ResponseBodyList(resp, self._parse_resp(body))
def enable_disable_user(self, user_id, enabled):
"""Enables or disables a user."""
put_body = {
'enabled': enabled
}
put_body = json.dumps({'user': put_body})
resp, body = self.put('users/%s/enabled' % user_id, put_body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def get_token(self, token_id):
"""Get token details."""
resp, body = self.get("tokens/%s" % token_id)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def delete_token(self, token_id):
"""Delete a token."""
resp, body = self.delete("tokens/%s" % token_id)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
def list_users_for_tenant(self, tenant_id):
"""List users for a Tenant."""
resp, body = self.get('/tenants/%s/users' % tenant_id)
self.expected_success(200, resp.status)
return service_client.ResponseBodyList(resp, self._parse_resp(body))
def get_user_by_username(self, tenant_id, username):
    """Return the first user in *tenant_id* whose name equals *username*.

    Raises ``exceptions.NotFound`` when no user in the tenant matches.
    """
    matches = (u for u in self.list_users_for_tenant(tenant_id)
               if u['name'] == username)
    try:
        return next(matches)
    except StopIteration:
        raise exceptions.NotFound('No such user')
def create_service(self, name, type, **kwargs):
    """Register a new service in the catalog.

    NOTE: the ``type`` parameter shadows the builtin; the name is kept
    for backward compatibility with existing callers.
    """
    service = {
        'name': name,
        'type': type,
        'description': kwargs.get('description'),
    }
    payload = json.dumps({'OS-KSADM:service': service})
    resp, body = self.post('/OS-KSADM/services', payload)
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, self._parse_resp(body))
def get_service(self, service_id):
    """Fetch a single service from the catalog by id."""
    resp, body = self.get('/OS-KSADM/services/%s' % service_id)
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, self._parse_resp(body))
def list_services(self):
    """List every service registered in the catalog."""
    resp, body = self.get('/OS-KSADM/services')
    self.expected_success(200, resp.status)
    services = self._parse_resp(body)
    return service_client.ResponseBodyList(resp, services)
def delete_service(self, service_id):
    """Remove a service from the catalog; expects a 204 on success."""
    resp, body = self.delete('/OS-KSADM/services/%s' % service_id)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, body)
def update_user_password(self, user_id, new_pass):
    """Set a new password for *user_id* via the OS-KSADM extension."""
    payload = json.dumps({'user': {'password': new_pass, 'id': user_id}})
    resp, body = self.put('users/%s/OS-KSADM/password' % user_id, payload)
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, self._parse_resp(body))
def list_extensions(self):
    """List the extensions supported by the identity API."""
    resp, raw = self.get('/extensions')
    self.expected_success(200, resp.status)
    extensions = json.loads(raw)['extensions']['values']
    return service_client.ResponseBodyList(resp, extensions)
| |
import pytest
import aiohttpretty
from waterbutler.core import exceptions
from waterbutler.providers.bitbucket import BitbucketProvider
from waterbutler.providers.bitbucket.provider import BitbucketPath
from waterbutler.providers.bitbucket import settings as bitbucket_settings
from waterbutler.providers.bitbucket.metadata import BitbucketFileMetadata
from waterbutler.providers.bitbucket.metadata import BitbucketFolderMetadata
from waterbutler.providers.bitbucket.metadata import BitbucketRevisionMetadata
from tests.providers.bitbucket import fixtures
@pytest.fixture
def auth():
    """Auth payload for the test user."""
    return dict(name='cat', email='cat@cat.com')
@pytest.fixture
def credentials():
    """OAuth token credentials used by the provider."""
    creds = {'token': 'naps'}
    return creds
@pytest.fixture
def settings():
    """Provider settings pointing at the test repository."""
    return dict(owner='fitz_cos', repo='wb-testing')
@pytest.fixture
def provider(auth, credentials, settings):
    """A BitbucketProvider wired with the fixture auth/credentials/settings."""
    return BitbucketProvider(auth, credentials, settings)
class TestValidatePath:
    """Tests for BitbucketProvider.validate_v1_path / validate_path."""

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_path_root(self, provider):
        test_fixtures = fixtures.validate_path

        default_branch_body = test_fixtures['default_branch']
        default_branch_url = provider._build_v1_repo_url('main-branch')
        aiohttpretty.register_json_uri('GET', default_branch_url,
                                       body=default_branch_body)

        try:
            wb_path_v1 = await provider.validate_v1_path('/')
        except Exception as exc:
            pytest.fail(str(exc))

        wb_path_v0 = await provider.validate_path('/')
        assert wb_path_v1 == wb_path_v0
        assert wb_path_v1.branch_name == default_branch_body['name']
        # FIX: identity comparison with None (PEP 8) instead of `== None`.
        assert wb_path_v1.commit_sha is None

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    @pytest.mark.parametrize('path,kind', [
        ('/foo-file.txt', 'file'),
        ('/foo-dir/', 'folder'),
    ])
    async def test_validate_v1_path(self, provider, path, kind):
        test_fixtures = fixtures.validate_path

        default_branch_body = test_fixtures['default_branch']
        default_branch = default_branch_body['name']
        default_branch_url = provider._build_v1_repo_url('main-branch')
        aiohttpretty.register_json_uri('GET', default_branch_url,
                                       body=default_branch_body)

        dir_listing_body = test_fixtures['root_dir_listing']
        dir_listing_url = provider._build_v1_repo_url('src', default_branch) + '/'
        aiohttpretty.register_json_uri('GET', dir_listing_url, body=dir_listing_body)

        try:
            wb_path_v1 = await provider.validate_v1_path(path)
        except Exception as exc:
            pytest.fail(str(exc))

        wb_path_v0 = await provider.validate_path(path)
        assert wb_path_v1 == wb_path_v0
        assert wb_path_v1.branch_name == default_branch
        # TODO: assert commitSha

        # A path of the wrong kind (file given as folder or vice versa)
        # must 404 under v1 semantics.  (FIX: dropped the unused `as exc`.)
        bad_path = path.rstrip('/') if kind == 'folder' else path + '/'
        with pytest.raises(exceptions.NotFoundError):
            await provider.validate_v1_path(bad_path)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    @pytest.mark.parametrize('arg_name,arg_val,attr_name', [
        ('commitSha', 'a1b2c3d4', 'commit_sha', ),
        ('branch', 'other-branch', 'branch_name'),
        ('revision', 'bleep-blorp', 'branch_name'),
        ('revision', '345def023ab29', 'commit_sha'),
    ])
    async def test_validate_v1_path_commit_sha(self, provider, arg_name, arg_val, attr_name):
        test_fixtures = fixtures.validate_path

        dir_listing_body = test_fixtures['root_dir_listing']
        base_commit = dir_listing_body['node']
        dir_listing_url = provider._build_v1_repo_url('src', arg_val) + '/'
        aiohttpretty.register_json_uri('GET', dir_listing_url, body=dir_listing_body)

        path = '/foo-file.txt'
        kwargs = {arg_name: arg_val}
        try:
            wb_path_v1 = await provider.validate_v1_path(path, **kwargs)
        except Exception as exc:
            pytest.fail(str(exc))

        # Short commit args expand to the listing's full base commit;
        # branch-name args resolve their ref to the base commit as well.
        ref_val = arg_val
        if attr_name == 'commit_sha' and len(arg_val) < len(base_commit):
            arg_val = base_commit
            ref_val = base_commit
        if attr_name != 'commit_sha':
            ref_val = base_commit
        commit_sha = ref_val
        branch_name = None if attr_name == 'commit_sha' else arg_val

        assert getattr(wb_path_v1, attr_name) == arg_val
        assert wb_path_v1.ref == ref_val
        assert wb_path_v1.extra == {
            'commitSha': commit_sha,
            'branchName': branch_name,
        }

        wb_path_v0 = await provider.validate_path(path, **kwargs)
        assert wb_path_v1 == wb_path_v0

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_path_subfolder(self, provider):
        test_fixtures = fixtures.validate_path

        # FIX: removed the unused `base_commit` local.
        dir_listing_body = test_fixtures['subfolder_dir_listing']
        dir_listing_url = provider._build_v1_repo_url('src', 'main-branch', 'subfolder') + '/'
        aiohttpretty.register_json_uri('GET', dir_listing_url, body=dir_listing_body)

        path = '/subfolder/.gitkeep'
        try:
            wb_path_v1 = await provider.validate_v1_path(path, branch='main-branch')
        except Exception as exc:
            pytest.fail(str(exc))

        wb_path_v0 = await provider.validate_path(path, branch='main-branch')
        assert wb_path_v1 == wb_path_v0
class TestRevisions:
    """Tests for BitbucketProvider.revisions()."""

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_get_revisions(self, provider):
        ids = [('a1b2c3d4', 'master'), ('a1b2c3d4', 'master')]
        path = BitbucketPath('/aaa-01.txt', _ids=ids)

        history_body = fixtures.revisions['filehistory_complex']
        history_url = provider._build_v1_repo_url('filehistory', 'a1b2c3d4', 'aaa-01.txt')
        aiohttpretty.register_json_uri('GET', history_url, body=history_body)

        revisions = await provider.revisions(path)
        assert len(revisions) == 4
class TestMetadata:
    """Tests for BitbucketProvider.metadata() on files and folders."""

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_get_metadata_for_file(self, provider):
        ref = 'a1b2c3d4'
        file_path = BitbucketPath('/foo-file.txt',
                                  _ids=[(ref, 'develop'), (ref, 'develop')])

        listing_body = fixtures.validate_path['root_dir_listing']
        listing_url = provider._build_v1_repo_url('src', ref) + '/'
        aiohttpretty.register_json_uri('GET', listing_url, body=listing_body)

        metadata = await provider.metadata(file_path)
        assert metadata is not None

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_get_metadata_for_file_cached(self, provider):
        ref = 'a1b2c3d4'
        file_path = BitbucketPath('/foo-file.txt',
                                  _ids=[(ref, 'develop'), (ref, 'develop')])

        # Pre-populate the provider's cached parent listing so no HTTP
        # request is issued.
        provider._parent_dir = fixtures.validate_path['root_dir_listing']

        metadata = await provider.metadata(file_path)
        assert metadata is not None

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_get_metadata_for_folder(self, provider):
        folder_path = BitbucketPath('/', _ids=[(None, 'develop')], folder=True)

        listing_body = fixtures.validate_path['root_dir_listing']
        listing_url = provider._build_v1_repo_url('src', 'develop') + '/'
        aiohttpretty.register_json_uri('GET', listing_url, body=listing_body)

        listing = await provider.metadata(folder_path)
        assert len(listing) == 4
class TestDownload:
    """Tests for BitbucketProvider.download()."""

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_download_file(self, provider):
        # FIX: this test was named ``test_get_metadata_for_file`` — a
        # copy-paste from TestMetadata — although it exercises download().
        base_ref = 'a1b2c3d4'
        path = BitbucketPath('/foo-file.txt',
                             _ids=[(base_ref, 'develop'), (base_ref, 'develop')])

        dir_listing_body = fixtures.validate_path['root_dir_listing']
        dir_listing_url = provider._build_v1_repo_url('src', base_ref) + '/'
        aiohttpretty.register_json_uri('GET', dir_listing_url, body=dir_listing_body)

        download_url = provider._build_v1_repo_url('raw', path.commit_sha, *path.path_tuple())
        aiohttpretty.register_uri('GET', download_url, body=b'better')

        result = await provider.download(path)
        content = await result.response.read()
        assert content == b'better'
class TestReadOnlyProvider:
    """Bitbucket is read-only: every mutating operation must raise 501."""

    @pytest.mark.asyncio
    async def test_upload(self, provider):
        with pytest.raises(exceptions.ReadOnlyProviderError) as e:
            await provider.upload('/foo-file.txt')
        assert e.value.code == 501

    @pytest.mark.asyncio
    async def test_delete(self, provider):
        with pytest.raises(exceptions.ReadOnlyProviderError) as e:
            await provider.delete()
        assert e.value.code == 501

    @pytest.mark.asyncio
    async def test_move(self, provider):
        with pytest.raises(exceptions.ReadOnlyProviderError) as e:
            await provider.move()
        assert e.value.code == 501

    @pytest.mark.asyncio
    async def test_copy_to(self, provider):
        with pytest.raises(exceptions.ReadOnlyProviderError) as e:
            await provider.copy(provider)
        assert e.value.code == 501

    def test_can_intra_move(self, provider):
        # FIX (idiom): compare booleans with `is`, not `==` (PEP 8).
        assert provider.can_intra_move(provider) is False

    def test_can_intra_copy(self, provider):
        assert provider.can_intra_copy(provider) is False
# Miscellaneous provider-behavior tests that do not fit the groups above.
class TestMisc:
    """Miscellaneous provider behavior."""

    def test_can_duplicate_name(self, provider):
        # FIX (idiom): compare booleans with `is`, not `==` (PEP 8).
        assert provider.can_duplicate_names() is False

    def test_path_from_metadata(self, provider):
        name = 'aaa-01-2.txt'
        subdir = 'plaster'
        full_path = '/{}/{}'.format(subdir, name)
        branch = 'master'
        commit_sha = '123abc456def'

        path = BitbucketPath(full_path, _ids=[
            (commit_sha, branch), (commit_sha, branch), (commit_sha, branch)
        ])
        metadata = BitbucketFileMetadata(fixtures.file_metadata, path,
                                         owner=fixtures.owner, repo=fixtures.repo)

        # Rebuilding the child path from its parent + metadata must
        # round-trip to an equal path.
        child_path = provider.path_from_metadata(path.parent, metadata)
        assert child_path.full_path == path.full_path
        assert child_path == path
| |
from django.http import Http404
from django.contrib.auth.models import User
from rest_framework import status, generics
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from mockatron_core.models import Agent, Operation, Filter, RequestCondition, ResponseCondition
from mockatron_core.models import Response as ResponseModel
from .serializers import UserReadOnlySerializer, UserCreateSerializer, AgentSerializer, OperationSerializer, ResponseSerializer, FilterSerializer, RequestConditionSerializer, ResponseConditionSerializer
from .classes import EntityResume
import json
class Resume(APIView):
    """Return a JSON summary with the row count of each core entity."""

    def get(self, request, format=None):
        # FIX: removed the commented-out er5/er6 RequestCondition /
        # ResponseCondition entries (dead code).
        resumes = [
            EntityResume("agent", "Agents", Agent.objects.count()),
            EntityResume("operation", "Operations", Operation.objects.count()),
            EntityResume("filter", "Filters", Filter.objects.count()),
            EntityResume("response", "Responses", ResponseModel.objects.count()),
        ]
        response = Response()
        response['Content-Type'] = 'application/json'
        # EntityResume is a plain object; serialize via its __dict__.
        response.content = json.dumps(resumes, default=lambda o: o.__dict__, indent=4)
        return response
class SignUp(APIView):
    """Open (unauthenticated) endpoint that registers a new user."""

    permission_classes = [AllowAny]

    def post(self, request, format=None):
        serializer = UserCreateSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class UserList(generics.ListAPIView):
    """GET: list all auth users (read-only serializer)."""
    queryset = User.objects.all()
    serializer_class = UserReadOnlySerializer
class UserDetail(generics.RetrieveAPIView):
    """GET: retrieve one auth user by pk (read-only serializer)."""
    queryset = User.objects.all()
    serializer_class = UserReadOnlySerializer
class AgentList(generics.ListCreateAPIView):
    """GET: list agents; POST: create an agent."""
    queryset = Agent.objects.all()
    serializer_class = AgentSerializer
class AgentDetail(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE one agent by pk."""
    queryset = Agent.objects.all()
    serializer_class = AgentSerializer
class OperationList(generics.ListCreateAPIView):
    """GET: list operations; POST: create an operation."""
    queryset = Operation.objects.all()
    serializer_class = OperationSerializer
class OperationDetail(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE one operation by pk."""
    queryset = Operation.objects.all()
    serializer_class = OperationSerializer
class ResponseList(generics.ListCreateAPIView):
    """GET: list mock responses; POST: create one.

    NOTE: the model is imported as ResponseModel to avoid clashing with
    rest_framework's Response.
    """
    queryset = ResponseModel.objects.all()
    serializer_class = ResponseSerializer
class ResponseDetail(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE one mock response by pk."""
    queryset = ResponseModel.objects.all()
    serializer_class = ResponseSerializer
class FilterList(generics.ListCreateAPIView):
    """GET: list filters; POST: create a filter."""
    queryset = Filter.objects.all()
    serializer_class = FilterSerializer
class FilterDetail(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE one filter by pk."""
    queryset = Filter.objects.all()
    serializer_class = FilterSerializer
class RequestConditionList(generics.ListCreateAPIView):
    """GET: list request conditions; POST: create one."""
    queryset = RequestCondition.objects.all()
    serializer_class = RequestConditionSerializer
class RequestConditionDetail(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE one request condition by pk."""
    queryset = RequestCondition.objects.all()
    serializer_class = RequestConditionSerializer
class ResponseConditionList(generics.ListCreateAPIView):
    """GET: list response conditions; POST: create one."""
    queryset = ResponseCondition.objects.all()
    serializer_class = ResponseConditionSerializer
class ResponseConditionDetail(generics.RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE one response condition by pk."""
    queryset = ResponseCondition.objects.all()
    serializer_class = ResponseConditionSerializer
class OperationListByAgent(APIView):
    """List or create the operations attached to one agent."""

    def get_object(self, pk_agent):
        # 404 when the agent does not exist.
        try:
            return Agent.objects.get(pk=pk_agent)
        except Agent.DoesNotExist:
            raise Http404

    def get(self, request, pk_agent, format=None):
        agent = self.get_object(pk_agent)
        serializer = OperationSerializer(agent.operations.all(), many=True)
        return Response(serializer.data)

    def post(self, request, pk_agent, format=None):
        # NOTE(review): the agent is looked up (validating it exists) but is
        # not passed to save(); presumably the payload carries the agent
        # link — confirm against the serializer.
        agent = self.get_object(pk_agent)
        serializer = OperationSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class OperationDetailByAgent(APIView):
    """Retrieve, update or delete a single operation scoped to an agent."""

    def get_object(self, pk_agent, pk):
        # 404 when either the agent or the operation is missing.
        try:
            return Agent.objects.get(pk=pk_agent).operations.get(pk=pk)
        except (Agent.DoesNotExist, Operation.DoesNotExist):
            raise Http404

    def get(self, request, pk_agent, pk, format=None):
        serializer = OperationSerializer(self.get_object(pk_agent, pk))
        return Response(serializer.data)

    def put(self, request, pk_agent, pk, format=None):
        operation = self.get_object(pk_agent, pk)
        serializer = OperationSerializer(operation, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, pk_agent, pk, format=None):
        self.get_object(pk_agent, pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ResponseListByAgent(APIView):
    """List or create the responses attached to one agent."""

    def get_object(self, pk_agent):
        try:
            return Agent.objects.get(pk=pk_agent)
        except Agent.DoesNotExist:
            raise Http404

    def get(self, request, pk_agent, format=None):
        agent = self.get_object(pk_agent)
        serializer = ResponseSerializer(agent.responses.all(), many=True)
        return Response(serializer.data)

    def post(self, request, pk_agent, format=None):
        # NOTE(review): agent is looked up but not passed to save() —
        # presumably the payload carries the link; confirm.
        agent = self.get_object(pk_agent)
        serializer = ResponseSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class ResponseDetailByAgent(APIView):
    """Retrieve, update or delete a single response scoped to an agent."""

    def get_object(self, pk_agent, pk):
        try:
            agent = Agent.objects.get(pk=pk_agent)
            return agent.responses.get(pk=pk)
        except Agent.DoesNotExist:
            raise Http404
        # BUG FIX: previously caught ``Response.DoesNotExist`` — but
        # ``Response`` in this module is rest_framework's Response, which
        # has no ``DoesNotExist`` attribute, so a missing response raised
        # AttributeError instead of returning 404.  The model class is
        # imported as ``ResponseModel``.
        except ResponseModel.DoesNotExist:
            raise Http404

    def get(self, request, pk_agent, pk, format=None):
        response = self.get_object(pk_agent, pk)
        serializer = ResponseSerializer(response)
        return Response(serializer.data)

    def put(self, request, pk_agent, pk, format=None):
        response = self.get_object(pk_agent, pk)
        serializer = ResponseSerializer(response, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk_agent, pk, format=None):
        response = self.get_object(pk_agent, pk)
        response.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ResponseListByOperation(APIView):
    """List or create the responses attached to one operation."""

    def get_object(self, pk_operation):
        try:
            return Operation.objects.get(pk=pk_operation)
        except Operation.DoesNotExist:
            raise Http404

    def get(self, request, pk_operation, format=None):
        operation = self.get_object(pk_operation)
        serializer = ResponseSerializer(operation.responses.all(), many=True)
        return Response(serializer.data)

    def post(self, request, pk_operation, format=None):
        # NOTE(review): operation is looked up but not passed to save() —
        # presumably the payload carries the link; confirm.
        operation = self.get_object(pk_operation)
        serializer = ResponseSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class ResponseDetailByOperation(APIView):
    """Retrieve, update or delete a single response scoped to an operation."""

    def get_object(self, pk_operation, pk):
        try:
            operation = Operation.objects.get(pk=pk_operation)
            return operation.responses.get(pk=pk)
        except Operation.DoesNotExist:
            raise Http404
        # BUG FIX: previously caught ``Response.DoesNotExist`` — but
        # ``Response`` here is rest_framework's Response (no ``DoesNotExist``
        # attribute), so a missing response raised AttributeError instead of
        # 404.  The model class is imported as ``ResponseModel``.
        except ResponseModel.DoesNotExist:
            raise Http404

    def get(self, request, pk_operation, pk, format=None):
        response = self.get_object(pk_operation, pk)
        serializer = ResponseSerializer(response)
        return Response(serializer.data)

    def put(self, request, pk_operation, pk, format=None):
        response = self.get_object(pk_operation, pk)
        serializer = ResponseSerializer(response, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk_operation, pk, format=None):
        response = self.get_object(pk_operation, pk)
        response.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class FilterListByAgent(APIView):
    """List or create the filters attached to one agent."""

    def get_object(self, pk_agent):
        try:
            return Agent.objects.get(pk=pk_agent)
        except Agent.DoesNotExist:
            raise Http404

    def get(self, request, pk_agent, format=None):
        agent = self.get_object(pk_agent)
        serializer = FilterSerializer(agent.filters.all(), many=True)
        return Response(serializer.data)

    def post(self, request, pk_agent, format=None):
        # NOTE(review): agent is looked up but not passed to save() —
        # presumably the payload carries the link; confirm.
        agent = self.get_object(pk_agent)
        serializer = FilterSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class FilterDetailByAgent(APIView):
    """Retrieve, update or delete a single filter scoped to an agent."""

    def get_object(self, pk_agent, pk):
        # 404 when either the agent or the filter is missing.
        try:
            return Agent.objects.get(pk=pk_agent).filters.get(pk=pk)
        except (Agent.DoesNotExist, Filter.DoesNotExist):
            raise Http404

    def get(self, request, pk_agent, pk, format=None):
        # Local renamed from `filter` to avoid shadowing the builtin.
        flt = self.get_object(pk_agent, pk)
        serializer = FilterSerializer(flt)
        return Response(serializer.data)

    def put(self, request, pk_agent, pk, format=None):
        flt = self.get_object(pk_agent, pk)
        serializer = FilterSerializer(flt, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, pk_agent, pk, format=None):
        self.get_object(pk_agent, pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class FilterListByOperation(APIView):
    """List or create the filters attached to one operation."""

    def get_object(self, pk_operation):
        try:
            return Operation.objects.get(pk=pk_operation)
        except Operation.DoesNotExist:
            raise Http404

    def get(self, request, pk_operation, format=None):
        operation = self.get_object(pk_operation)
        serializer = FilterSerializer(operation.filters.all(), many=True)
        return Response(serializer.data)

    def post(self, request, pk_operation, format=None):
        # NOTE(review): operation is looked up but not passed to save() —
        # presumably the payload carries the link; confirm.
        operation = self.get_object(pk_operation)
        serializer = FilterSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class FilterDetailByOperation(APIView):
    """Retrieve, update or delete a single filter scoped to an operation."""

    def get_object(self, pk_operation, pk):
        # 404 when either the operation or the filter is missing.
        try:
            return Operation.objects.get(pk=pk_operation).filters.get(pk=pk)
        except (Operation.DoesNotExist, Filter.DoesNotExist):
            raise Http404

    def get(self, request, pk_operation, pk, format=None):
        # Local renamed from `filter` to avoid shadowing the builtin.
        flt = self.get_object(pk_operation, pk)
        serializer = FilterSerializer(flt)
        return Response(serializer.data)

    def put(self, request, pk_operation, pk, format=None):
        flt = self.get_object(pk_operation, pk)
        serializer = FilterSerializer(flt, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, pk_operation, pk, format=None):
        self.get_object(pk_operation, pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class RequestConditionListByFilter(APIView):
    """List or create the request-conditions attached to one filter."""

    def get_object(self, pk_filter):
        try:
            return Filter.objects.get(pk=pk_filter)
        except Filter.DoesNotExist:
            raise Http404

    def get(self, request, pk_filter, format=None):
        # Local renamed from `filter` to avoid shadowing the builtin.
        flt = self.get_object(pk_filter)
        serializer = RequestConditionSerializer(flt.request_conditions.all(),
                                                many=True)
        return Response(serializer.data)

    def post(self, request, pk_filter, format=None):
        # NOTE(review): the filter is looked up but not passed to save() —
        # presumably the payload carries the link; confirm.
        flt = self.get_object(pk_filter)
        serializer = RequestConditionSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class RequestConditionDetailByFilter(APIView):
    """Retrieve, update or delete a request-condition scoped to a filter."""

    def get_object(self, pk_filter, pk):
        # 404 when either the filter or the condition is missing.
        try:
            return Filter.objects.get(pk=pk_filter).request_conditions.get(pk=pk)
        except (Filter.DoesNotExist, RequestCondition.DoesNotExist):
            raise Http404

    def get(self, request, pk_filter, pk, format=None):
        condition = self.get_object(pk_filter, pk)
        serializer = RequestConditionSerializer(condition)
        return Response(serializer.data)

    def put(self, request, pk_filter, pk, format=None):
        condition = self.get_object(pk_filter, pk)
        serializer = RequestConditionSerializer(condition, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, pk_filter, pk, format=None):
        self.get_object(pk_filter, pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ResponseConditionListByFilter(APIView):
    """List or create the response-conditions attached to one filter."""

    def get_object(self, pk_filter):
        try:
            return Filter.objects.get(pk=pk_filter)
        except Filter.DoesNotExist:
            raise Http404

    # FIX (consistency): the handler parameter was named ``response`` even
    # though it receives the incoming request (every sibling view names it
    # ``request``).  DRF passes it positionally, so the rename is safe.
    def get(self, request, pk_filter, format=None):
        filter = self.get_object(pk_filter)
        response_conditions = filter.response_conditions.all()
        serializer = ResponseConditionSerializer(response_conditions, many=True)
        return Response(serializer.data)

    def post(self, request, pk_filter, format=None):
        filter = self.get_object(pk_filter)
        serializer = ResponseConditionSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ResponseConditionDetailByFilter(APIView):
    """Retrieve, update or delete a response-condition scoped to a filter."""

    def get_object(self, pk_filter, pk):
        try:
            filter = Filter.objects.get(pk=pk_filter)
            return filter.response_conditions.get(pk=pk)
        except Filter.DoesNotExist:
            raise Http404
        except ResponseCondition.DoesNotExist:
            raise Http404

    # FIX (consistency): the handler parameter was named ``response`` even
    # though it receives the incoming request (every sibling view names it
    # ``request``).  DRF passes it positionally, so the rename is safe.
    def get(self, request, pk_filter, pk, format=None):
        response_condition = self.get_object(pk_filter, pk)
        serializer = ResponseConditionSerializer(response_condition)
        return Response(serializer.data)

    def put(self, request, pk_filter, pk, format=None):
        response_condition = self.get_object(pk_filter, pk)
        serializer = ResponseConditionSerializer(response_condition, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk_filter, pk, format=None):
        response_condition = self.get_object(pk_filter, pk)
        response_condition.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
| |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of14']
class port_desc_prop(loxi.OFObject):
    """Base class for OpenFlow 1.4 port-description properties (TLVs).

    NOTE: auto-generated LOXI code — concrete property classes register
    themselves in ``subtypes`` keyed by their 16-bit type code, and
    ``unpack`` dispatches on that code.
    """
    subtypes = {}

    def __init__(self, type=None):
        if type != None:
            self.type = type
        else:
            self.type = 0
        return

    def pack(self):
        # Wire layout: type (u16), length (u16).  The length field is
        # written as 0 first and back-patched once the total is known.
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        # Peek the type code without consuming it and dispatch to a
        # registered subclass when one matches.
        subtype, = reader.peek('!H', 0)
        subclass = port_desc_prop.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)

        obj = port_desc_prop()
        obj.type = reader.read("!H")[0]
        _length = reader.read("!H")[0]
        # Constrain subsequent reads to this property's declared length
        # (the length value includes the 4-byte type/length header).
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.type != other.type: return False
        return True

    def pretty_print(self, q):
        q.text("port_desc_prop {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')
class experimenter(port_desc_prop):
    """Experimenter port-desc property (type 0xffff).

    NOTE: auto-generated LOXI code — further dispatch happens on the
    32-bit experimenter id via ``subtypes``.
    """
    subtypes = {}

    type = 65535

    def __init__(self, experimenter=None):
        if experimenter != None:
            self.experimenter = experimenter
        else:
            self.experimenter = 0
        return

    def pack(self):
        # Wire layout: type (u16), length (u16, back-patched),
        # experimenter id (u32).
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append(struct.pack("!L", self.experimenter))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        # Peek the experimenter id (4 bytes past the header) and dispatch.
        subtype, = reader.peek('!L', 4)
        subclass = experimenter.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)

        obj = experimenter()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.experimenter = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.experimenter != other.experimenter: return False
        return True

    def pretty_print(self, q):
        q.text("experimenter {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

port_desc_prop.subtypes[65535] = experimenter
class bsn(experimenter):
    """Big Switch Networks experimenter property (experimenter id 6035143).

    NOTE: auto-generated LOXI code — further dispatch happens on the
    32-bit ``exp_type`` via ``subtypes``.
    """
    subtypes = {}

    type = 65535
    experimenter = 6035143

    def __init__(self, exp_type=None):
        if exp_type != None:
            self.exp_type = exp_type
        else:
            self.exp_type = 0
        return

    def pack(self):
        # Wire layout: type (u16), length (u16, back-patched),
        # experimenter id (u32), exp_type (u32).
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.exp_type))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        # Peek exp_type (8 bytes past the start) and dispatch.
        subtype, = reader.peek('!L', 8)
        subclass = bsn.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)

        obj = bsn()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        obj.exp_type = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.exp_type != other.exp_type: return False
        return True

    def pretty_print(self, q):
        q.text("bsn {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

experimenter.subtypes[6035143] = bsn
class bsn_generation_id(bsn):
    """BSN generation-id property (exp_type 1) carrying a 64-bit id.

    NOTE: auto-generated LOXI code.
    """
    type = 65535
    experimenter = 6035143
    exp_type = 1

    def __init__(self, generation_id=None):
        if generation_id != None:
            self.generation_id = generation_id
        else:
            self.generation_id = 0
        return

    def pack(self):
        # Wire layout: type (u16), length (u16, back-patched),
        # experimenter id (u32), exp_type (u32), generation_id (u64).
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.exp_type))
        packed.append(struct.pack("!Q", self.generation_id))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        obj = bsn_generation_id()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _exp_type = reader.read("!L")[0]
        assert(_exp_type == 1)
        obj.generation_id = reader.read("!Q")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.generation_id != other.generation_id: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_generation_id {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("generation_id = ");
                q.text("%#x" % self.generation_id)
            q.breakable()
        q.text('}')

bsn.subtypes[1] = bsn_generation_id
class bsn_uplink(bsn):
    """BSN uplink marker property (exp_type 0); carries no payload.

    NOTE: auto-generated LOXI code.
    """
    type = 65535
    experimenter = 6035143
    exp_type = 0

    def __init__(self):
        return

    def pack(self):
        # Wire layout: type (u16), length (u16, back-patched),
        # experimenter id (u32), exp_type (u32).
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.exp_type))
        length = sum([len(x) for x in packed])
        packed[1] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        obj = bsn_uplink()
        _type = reader.read("!H")[0]
        assert(_type == 65535)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _exp_type = reader.read("!L")[0]
        assert(_exp_type == 0)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        return True

    def pretty_print(self, q):
        q.text("bsn_uplink {")
        with q.group():
            with q.indent(2):
                q.breakable()
            q.breakable()
        q.text('}')

bsn.subtypes[0] = bsn_uplink
class ethernet(port_desc_prop):
    """Ethernet port-description property (port_desc_prop subtype 0).

    Carries curr/advertised/supported/peer feature bitmaps and the
    curr_speed/max_speed values for a port; all fields are u32 on the wire.
    """
    type = 0
    def __init__(self, curr=None, advertised=None, supported=None, peer=None, curr_speed=None, max_speed=None):
        # Every field defaults to 0 when not supplied.
        if curr != None:
            self.curr = curr
        else:
            self.curr = 0
        if advertised != None:
            self.advertised = advertised
        else:
            self.advertised = 0
        if supported != None:
            self.supported = supported
        else:
            self.supported = 0
        if peer != None:
            self.peer = peer
        else:
            self.peer = 0
        if curr_speed != None:
            self.curr_speed = curr_speed
        else:
            self.curr_speed = 0
        if max_speed != None:
            self.max_speed = max_speed
        else:
            self.max_speed = 0
        return
    def pack(self):
        """Serialize to the on-wire format: type (u16), length (u16,
        back-patched), 4 pad bytes, then the six u32 fields, big-endian."""
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append('\x00' * 4)
        packed.append(struct.pack("!L", self.curr))
        packed.append(struct.pack("!L", self.advertised))
        packed.append(struct.pack("!L", self.supported))
        packed.append(struct.pack("!L", self.peer))
        packed.append(struct.pack("!L", self.curr_speed))
        packed.append(struct.pack("!L", self.max_speed))
        length = sum([len(x) for x in packed])
        # Back-patch the real total length over the placeholder.
        packed[1] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize an ethernet property from *reader*; asserts type == 0."""
        obj = ethernet()
        _type = reader.read("!H")[0]
        assert(_type == 0)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Constrain reads to this property (_length covers the 4 header bytes).
        reader = orig_reader.slice(_length, 4)
        reader.skip(4)
        obj.curr = reader.read("!L")[0]
        obj.advertised = reader.read("!L")[0]
        obj.supported = reader.read("!L")[0]
        obj.peer = reader.read("!L")[0]
        obj.curr_speed = reader.read("!L")[0]
        obj.max_speed = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        # Structural equality over every serialized field.
        if type(self) != type(other): return False
        if self.curr != other.curr: return False
        if self.advertised != other.advertised: return False
        if self.supported != other.supported: return False
        if self.peer != other.peer: return False
        if self.curr_speed != other.curr_speed: return False
        if self.max_speed != other.max_speed: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form to the pretty-printer *q*."""
        q.text("ethernet {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("curr = ");
                q.text("%#x" % self.curr)
                q.text(","); q.breakable()
                q.text("advertised = ");
                q.text("%#x" % self.advertised)
                q.text(","); q.breakable()
                q.text("supported = ");
                q.text("%#x" % self.supported)
                q.text(","); q.breakable()
                q.text("peer = ");
                q.text("%#x" % self.peer)
                q.text(","); q.breakable()
                q.text("curr_speed = ");
                q.text("%#x" % self.curr_speed)
                q.text(","); q.breakable()
                q.text("max_speed = ");
                q.text("%#x" % self.max_speed)
        q.breakable()
        q.text('}')
# Register this subclass so port_desc_prop.unpack can dispatch on type == 0.
port_desc_prop.subtypes[0] = ethernet
class optical(port_desc_prop):
    """Optical port-description property (port_desc_prop subtype 1).

    Carries supported features plus tx/rx frequency-or-lambda ranges and tx
    power limits; all payload fields are u32 on the wire.
    """
    type = 1
    def __init__(self, supported=None, tx_min_freq_lmda=None, tx_max_freq_lmda=None, tx_grid_freq_lmda=None, rx_min_freq_lmda=None, rx_max_freq_lmda=None, rx_grid_freq_lmda=None, tx_pwr_min=None, tx_pwr_max=None):
        # Every field defaults to 0 when not supplied.
        if supported != None:
            self.supported = supported
        else:
            self.supported = 0
        if tx_min_freq_lmda != None:
            self.tx_min_freq_lmda = tx_min_freq_lmda
        else:
            self.tx_min_freq_lmda = 0
        if tx_max_freq_lmda != None:
            self.tx_max_freq_lmda = tx_max_freq_lmda
        else:
            self.tx_max_freq_lmda = 0
        if tx_grid_freq_lmda != None:
            self.tx_grid_freq_lmda = tx_grid_freq_lmda
        else:
            self.tx_grid_freq_lmda = 0
        if rx_min_freq_lmda != None:
            self.rx_min_freq_lmda = rx_min_freq_lmda
        else:
            self.rx_min_freq_lmda = 0
        if rx_max_freq_lmda != None:
            self.rx_max_freq_lmda = rx_max_freq_lmda
        else:
            self.rx_max_freq_lmda = 0
        if rx_grid_freq_lmda != None:
            self.rx_grid_freq_lmda = rx_grid_freq_lmda
        else:
            self.rx_grid_freq_lmda = 0
        if tx_pwr_min != None:
            self.tx_pwr_min = tx_pwr_min
        else:
            self.tx_pwr_min = 0
        if tx_pwr_max != None:
            self.tx_pwr_max = tx_pwr_max
        else:
            self.tx_pwr_max = 0
        return
    def pack(self):
        """Serialize to the on-wire format: type (u16), length (u16,
        back-patched), 4 pad bytes, then the nine u32 fields, big-endian."""
        packed = []
        packed.append(struct.pack("!H", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
        packed.append('\x00' * 4)
        packed.append(struct.pack("!L", self.supported))
        packed.append(struct.pack("!L", self.tx_min_freq_lmda))
        packed.append(struct.pack("!L", self.tx_max_freq_lmda))
        packed.append(struct.pack("!L", self.tx_grid_freq_lmda))
        packed.append(struct.pack("!L", self.rx_min_freq_lmda))
        packed.append(struct.pack("!L", self.rx_max_freq_lmda))
        packed.append(struct.pack("!L", self.rx_grid_freq_lmda))
        packed.append(struct.pack("!L", self.tx_pwr_min))
        packed.append(struct.pack("!L", self.tx_pwr_max))
        length = sum([len(x) for x in packed])
        # Back-patch the real total length over the placeholder.
        packed[1] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize an optical property from *reader*; asserts type == 1."""
        obj = optical()
        _type = reader.read("!H")[0]
        assert(_type == 1)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Constrain reads to this property (_length covers the 4 header bytes).
        reader = orig_reader.slice(_length, 4)
        reader.skip(4)
        obj.supported = reader.read("!L")[0]
        obj.tx_min_freq_lmda = reader.read("!L")[0]
        obj.tx_max_freq_lmda = reader.read("!L")[0]
        obj.tx_grid_freq_lmda = reader.read("!L")[0]
        obj.rx_min_freq_lmda = reader.read("!L")[0]
        obj.rx_max_freq_lmda = reader.read("!L")[0]
        obj.rx_grid_freq_lmda = reader.read("!L")[0]
        obj.tx_pwr_min = reader.read("!L")[0]
        obj.tx_pwr_max = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        # Structural equality over every serialized field.
        if type(self) != type(other): return False
        if self.supported != other.supported: return False
        if self.tx_min_freq_lmda != other.tx_min_freq_lmda: return False
        if self.tx_max_freq_lmda != other.tx_max_freq_lmda: return False
        if self.tx_grid_freq_lmda != other.tx_grid_freq_lmda: return False
        if self.rx_min_freq_lmda != other.rx_min_freq_lmda: return False
        if self.rx_max_freq_lmda != other.rx_max_freq_lmda: return False
        if self.rx_grid_freq_lmda != other.rx_grid_freq_lmda: return False
        if self.tx_pwr_min != other.tx_pwr_min: return False
        if self.tx_pwr_max != other.tx_pwr_max: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form to the pretty-printer *q*."""
        q.text("optical {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("supported = ");
                q.text("%#x" % self.supported)
                q.text(","); q.breakable()
                q.text("tx_min_freq_lmda = ");
                q.text("%#x" % self.tx_min_freq_lmda)
                q.text(","); q.breakable()
                q.text("tx_max_freq_lmda = ");
                q.text("%#x" % self.tx_max_freq_lmda)
                q.text(","); q.breakable()
                q.text("tx_grid_freq_lmda = ");
                q.text("%#x" % self.tx_grid_freq_lmda)
                q.text(","); q.breakable()
                q.text("rx_min_freq_lmda = ");
                q.text("%#x" % self.rx_min_freq_lmda)
                q.text(","); q.breakable()
                q.text("rx_max_freq_lmda = ");
                q.text("%#x" % self.rx_max_freq_lmda)
                q.text(","); q.breakable()
                q.text("rx_grid_freq_lmda = ");
                q.text("%#x" % self.rx_grid_freq_lmda)
                q.text(","); q.breakable()
                q.text("tx_pwr_min = ");
                q.text("%#x" % self.tx_pwr_min)
                q.text(","); q.breakable()
                q.text("tx_pwr_max = ");
                q.text("%#x" % self.tx_pwr_max)
        q.breakable()
        q.text('}')
# Register this subclass so port_desc_prop.unpack can dispatch on type == 1.
port_desc_prop.subtypes[1] = optical
| |
"""
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import re
import time
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
from django.conf import settings
from django.core.cache import cache
from django.utils.encoding import smart_str, iri_to_uri
from django.utils.http import http_date
from django.utils.hashcompat import md5_constructor
from django.http import HttpRequest
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
    """
    This function patches the Cache-Control header by adding all
    keyword arguments to it. The transformation is as follows:

    * All keyword parameter names are turned to lowercase, and underscores
      are converted to hyphens.
    * If the value of a parameter is True (exactly True, not just a
      true value), only the parameter name is added to the header.
    * All other parameters are added with their value, after applying
      str() to it.
    """
    def dictitem(s):
        # Parse one "name[=value]" directive; valueless directives map to True.
        t = s.split('=', 1)
        if len(t) > 1:
            return (t[0].lower(), t[1])
        else:
            return (t[0].lower(), True)
    def dictvalue(t):
        # Render a (name, value) pair back into a header directive.
        if t[1] is True:
            return t[0]
        else:
            return t[0] + '=' + smart_str(t[1])
    if response.has_header('Cache-Control'):
        cc = cc_delim_re.split(response['Cache-Control'])
        cc = dict([dictitem(el) for el in cc])
    else:
        cc = {}
    # If there's already a max-age header but we're being asked to set a new
    # max-age, use the minimum of the two ages. In practice this happens when
    # a decorator and a piece of middleware both operate on a given view.
    # BUG FIX: the existing max-age comes out of dictitem() as a *string*;
    # min(str, int) gives arbitrary results on Python 2 and raises TypeError
    # on Python 3, so convert it to int before comparing.
    if 'max-age' in cc and 'max_age' in kwargs:
        try:
            kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])
        except (ValueError, TypeError):
            # Malformed existing max-age: let the new value replace it.
            pass
    for (k, v) in kwargs.items():
        cc[k.replace('_', '-')] = v
    cc = ', '.join([dictvalue(el) for el in cc.items()])
    response['Cache-Control'] = cc
def get_max_age(response):
    """
    Returns the max-age from the response Cache-Control header as an integer
    (or ``None`` if it wasn't found or wasn't an integer).
    """
    if not response.has_header('Cache-Control'):
        return None
    directives = dict(_to_tuple(part)
                      for part in cc_delim_re.split(response['Cache-Control']))
    try:
        return int(directives['max-age'])
    except (KeyError, ValueError, TypeError):
        # Directive absent, non-numeric, or a bare flag (True): no max-age.
        return None
def patch_response_headers(response, cache_timeout=None):
    """
    Adds some useful headers to the given HttpResponse object:
    ETag, Last-Modified, Expires and Cache-Control

    Each header is only added if it isn't already set.

    cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
    by default.
    """
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    # max-age cannot be negative; clamp to zero.
    if cache_timeout < 0:
        cache_timeout = 0
    # Each header is computed lazily and only when not already present.
    defaults = (
        ('ETag', lambda: '"%s"' % md5_constructor(response.content).hexdigest()),
        ('Last-Modified', lambda: http_date()),
        ('Expires', lambda: http_date(time.time() + cache_timeout)),
    )
    for header, make_value in defaults:
        if not response.has_header(header):
            response[header] = make_value()
    patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
    """
    Adds headers to a response to indicate that a page should never be cached.
    """
    # The -1 timeout is clamped to max-age=0 (and an already-past Expires)
    # by patch_response_headers.
    patch_response_headers(response, cache_timeout=-1)
def patch_vary_headers(response, newheaders):
    """
    Adds (or updates) the "Vary" header in the given HttpResponse object.
    newheaders is a list of header names that should be in "Vary". Existing
    headers in "Vary" aren't removed.
    """
    # Note that we need to keep the original order intact, because cache
    # implementations may rely on the order of the Vary contents in, say,
    # computing an MD5 hash.
    vary_headers = []
    if response.has_header('Vary'):
        vary_headers = cc_delim_re.split(response['Vary'])
    # Header names are case-insensitive, so dedupe on the lowercased form.
    already_present = set(header.lower() for header in vary_headers)
    for candidate in newheaders:
        if candidate.lower() not in already_present:
            vary_headers.append(candidate)
    response['Vary'] = ', '.join(vary_headers)
def _generate_cache_key(request, headerlist, key_prefix):
    """Returns a cache key built from the request path and the values of the
    headers named in *headerlist* (missing headers are skipped)."""
    header_hash = md5_constructor()
    for header in headerlist:
        header_value = request.META.get(header)
        if header_value is not None:
            header_hash.update(header_value)
    path_hash = md5_constructor(iri_to_uri(request.path))
    return 'views.decorators.cache.cache_page.%s.%s.%s' % (
        key_prefix, path_hash.hexdigest(), header_hash.hexdigest())
def _generate_cache_header_key(key_prefix, request):
    """Returns the cache key under which the learned Vary header list for
    this request path is stored."""
    hashed_path = md5_constructor(iri_to_uri(request.path)).hexdigest()
    return 'views.decorators.cache.cache_header.%s.%s' % (key_prefix, hashed_path)
def get_cache_key(request, key_prefix=None):
    """
    Returns a cache key based on the request path. It can be used in the
    request phase because it pulls the list of headers to take into account
    from the global path registry and uses those to build a cache key to
    check against.

    If there is no headerlist stored, the page needs to be rebuilt, so this
    function returns None.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    header_key = _generate_cache_header_key(key_prefix, request)
    headerlist = cache.get(header_key, None)
    if headerlist is None:
        # Nothing learned for this path yet.
        return None
    return _generate_cache_key(request, headerlist, key_prefix)
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None):
    """
    Learns what headers to take into account for some request path from the
    response object. It stores those headers in a global path registry so
    that later access to that path will know what headers to take into
    account without building the response object itself. The headers are
    named in the Vary header of the response, but we want to prevent
    response generation.

    The list of headers to use for cache key generation is stored in the
    same cache as the pages themselves. If the cache ages some data out of
    the cache, this just means that we have to build the response once to
    get at the Vary header and so at the list of headers to use for the
    cache key.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    cache_key = _generate_cache_header_key(key_prefix, request)
    # Translate "Header-Name" into the WSGI META key "HTTP_HEADER_NAME".
    headerlist = []
    if response.has_header('Vary'):
        headerlist = ['HTTP_' + header.upper().replace('-', '_')
                      for header in cc_delim_re.split(response['Vary'])]
    # Even with no Vary header we store an (empty) list, so later requests
    # for this path can still compute a cache key.
    cache.set(cache_key, headerlist, cache_timeout)
    return _generate_cache_key(request, headerlist, key_prefix)
def _to_tuple(s):
t = s.split('=',1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
| |
# Copyright 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
from glob import glob
from osrf_pycommon.process_utils import AsyncSubprocessProtocol
from catkin_tools.common import mkdir_p
from catkin_tools.terminal_color import fmt
from .events import ExecutionEvent
MAX_LOGFILE_HISTORY = 10
# Python 2/3 compatibility shim: on Python 3 a u'' literal is a str, so we
# can encode directly; on Python 2 we round-trip through unicode so the
# result is guaranteed to be valid UTF-8 bytes.
if type(u'') == str:
    def _encode(string):
        """Encode a Python 3 str into bytes.

        :type string: str
        """
        return string.encode('utf-8')
else:
    def _encode(string):
        """Encode a Python 2 str into bytes.

        :type string: str
        """
        return string.decode('utf-8').encode('utf-8')
class IOBufferContainer(object):
    """A simple buffer container for use in logging.

    This class will open a logfile for a given job stage and write to it
    continuously while receiving stdout and stderr.
    """
    def __init__(self, label, job_id, stage_label, event_queue, log_path):
        # Identity of the job/stage this buffer belongs to.
        self.label = label
        self.job_id = job_id
        self.stage_label = stage_label
        self.event_queue = event_queue
        self.log_path = log_path
        self.is_open = False
        # Raw byte buffers: one per stream plus a combined, order-preserving one.
        self.stdout_buffer = b""
        self.stderr_buffer = b""
        self.interleaved_buffer = b""
        # Construct the logfile path for this job and stage
        logfile_dir_path = os.path.join(log_path, self.job_id)
        self.logfile_basename = os.path.join(logfile_dir_path, '.'.join([self.label, self.stage_label]))
        self.logfile_name = '{}.log'.format(self.logfile_basename)
        # Create the logfile dir if it doesn't exist
        if not os.path.exists(logfile_dir_path):
            mkdir_p(logfile_dir_path)
        # Get the existing number of logfiles
        # TODO: Make this number global across all build stages
        existing_logfile_indices = sorted([int(lf.split('.')[-2])
                                           for lf in glob('{}.*.log'.format(self.logfile_basename))])
        if len(existing_logfile_indices) == 0:
            self.logfile_index = 0
        else:
            # Continue numbering after the newest existing rotated logfile.
            self.logfile_index = 1 + existing_logfile_indices[-1]
        # Generate the logfile name (zero-padded 3-digit rolling index)
        self.unique_logfile_name = '{}.{:0>{}}.log'.format(self.logfile_basename, self.logfile_index, 3)
        # Remove colliding file if necessary
        if os.path.exists(self.logfile_name):
            os.unlink(self.logfile_name)
        # Open logfile in binary mode; all writes are encoded bytes.
        self.log_file = open(self.logfile_name, 'wb')
        self.is_open = True
    def close(self):
        """Close the logfile, rotate it, and prune old rotated copies."""
        # Close logfile
        self.log_file.close()
        self.is_open = False
        # Copy logfile to unique name
        shutil.copy(self.logfile_name, self.unique_logfile_name)
        # Remove older logfiles
        for logfile_name in glob('{}.*.log'.format(self.logfile_basename)):
            if (self.logfile_index - int(logfile_name.split('.')[-2])) >= MAX_LOGFILE_HISTORY:
                os.unlink(logfile_name)
        # Save output from stderr (these don't get deleted until cleaning the logfile directory)
        if len(self.stderr_buffer) > 0:
            with open(self.unique_logfile_name + '.stderr', 'wb') as logfile:
                logfile.write(self.stderr_buffer)
    def get_interleaved_log(self):
        """get decoded interleaved log."""
        try:
            return self._decode(self.interleaved_buffer)
        except UnicodeDecodeError:
            return "interleaved_log: some output cannot be displayed.\n"
    def get_stdout_log(self):
        """get decoded stdout log."""
        try:
            return self._decode(self.stdout_buffer)
        except UnicodeDecodeError:
            return "stdout_log: some output cannot be displayed.\n"
    def get_stderr_log(self):
        """get decoded stderr log."""
        try:
            return self._decode(self.stderr_buffer)
        except UnicodeDecodeError:
            return "stderr_log: some output cannot be displayed.\n"
    @staticmethod
    def _encode(data):
        """Encode a Python str into bytes.

        :type data: str
        """
        return _encode(data)
    @staticmethod
    def _decode(data):
        """Decode bytes into Python str.

        :type data: bytes
        """
        return data.decode('utf-8', 'replace')
    def __del__(self):
        # Safety net: rotate/close the logfile if the owner never called close().
        if self.is_open:
            self.close()
    @classmethod
    def factory(cls, label, job_id, stage_label, event_queue, log_path):
        """Factory method for constructing with job metadata."""
        def init_proxy(*args, **kwargs):
            # Prepend the captured job metadata to whatever the caller passes.
            return cls(label, job_id, stage_label, event_queue, log_path, *args, **kwargs)
        return init_proxy
class IOBufferLogger(IOBufferContainer):
    """This is a logging class to be used instead of sys.stdout and sys.stderr
    in FunctionStage operations.

    This class also generates `stdout` and `stderr` events.
    """
    def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs):
        IOBufferContainer.__init__(self, label, job_id, stage_label, event_queue, log_path)
    def out(self, data, end='\n'):
        """Record one chunk of stdout output and emit a STDOUT event.

        :type data: str
        :type end: str
        """
        self._log('STDOUT', data, end)
    def err(self, data, end='\n'):
        """Record one chunk of stderr output and emit a STDERR event.

        :type data: str
        :type end: str
        """
        self._log('STDERR', data, end)
    def _log(self, event_type, data, end):
        """Shared implementation for out()/err(): buffer, persist, and emit.

        The two public methods previously duplicated this logic verbatim;
        factoring it out keeps the paths from drifting apart.
        """
        # Buffer the encoded data in the per-stream and interleaved buffers.
        data += end
        encoded_data = self._encode(data)
        if event_type == 'STDOUT':
            self.stdout_buffer += encoded_data
        else:
            self.stderr_buffer += encoded_data
        self.interleaved_buffer += encoded_data
        # Save the encoded data to the stage logfile.
        self.log_file.write(encoded_data)
        # Emit event with the decoded Python str.
        self.event_queue.put(ExecutionEvent(
            event_type,
            job_id=self.job_id,
            stage_label=self.stage_label,
            data=data))
class IOBufferProtocol(IOBufferContainer, AsyncSubprocessProtocol):
    """An asyncio protocol that collects stdout and stderr.

    This class also generates `stdout` and `stderr` events.

    Since the underlying asyncio API constructs the actual protocols, this
    class provides a factory method to inject the job and stage information
    into the created protocol.
    """
    def __init__(self, label, job_id, stage_label, event_queue, log_path, *args, **kwargs):
        IOBufferContainer.__init__(self, label, job_id, stage_label, event_queue, log_path)
        AsyncSubprocessProtocol.__init__(self, *args, **kwargs)
        # Partial lines (no trailing newline yet) wait here until completed.
        self.intermediate_stdout_buffer = b''
        self.intermediate_stderr_buffer = b''
    @staticmethod
    def _split(data):
        """Split *data* into (complete lines incl. final newline, partial tail)."""
        try:
            last_break = data.rindex(b'\n') + 1
            return data[0:last_break], data[last_break:]
        except ValueError:
            # No newline at all: everything is still a partial line.
            return b'', data
    def on_stdout_received(self, data):
        """Buffer, persist, and emit complete stdout lines.

        :type data: encoded bytes
        """
        data, self.intermediate_stdout_buffer = self._split(self.intermediate_stdout_buffer + data)
        self.stdout_buffer += data
        self.interleaved_buffer += data
        self.log_file.write(data)
        # Get the decoded Python str
        decoded_data = self._decode(data)
        # Emit event with decoded Python str
        self.event_queue.put(ExecutionEvent(
            'STDOUT',
            job_id=self.job_id,
            stage_label=self.stage_label,
            data=decoded_data))
    def on_stderr_received(self, data):
        """Buffer, persist, and emit complete stderr lines.

        :type data: encoded bytes
        """
        data, self.intermediate_stderr_buffer = self._split(self.intermediate_stderr_buffer + data)
        self.stderr_buffer += data
        self.interleaved_buffer += data
        self.log_file.write(data)
        decoded_data = self._decode(data)
        self.event_queue.put(ExecutionEvent(
            'STDERR',
            job_id=self.job_id,
            stage_label=self.stage_label,
            data=decoded_data))
    def on_process_exited(self, returncode):
        """
        Dump anything remaining in the intermediate buffers.

        BUG FIX: this hook was named ``on_process_exited2``, a name nothing
        calls, so trailing partial lines were silently dropped at process
        exit. NOTE(review): assumes the base protocol dispatches to
        ``on_process_exited`` — confirm against osrf_pycommon.
        """
        if len(self.intermediate_stdout_buffer) > 0:
            self.on_stdout_received(self.intermediate_stdout_buffer + b'\n')
        if len(self.intermediate_stderr_buffer) > 0:
            self.on_stderr_received(self.intermediate_stderr_buffer + b'\n')
        # Preserve any exit handling the base protocol defines.
        base_hook = getattr(AsyncSubprocessProtocol, 'on_process_exited', None)
        if base_hook is not None:
            base_hook(self, returncode)
    # Backward-compatible alias for the old (unreachable) name.
    on_process_exited2 = on_process_exited
class CatkinTestResultsIOBufferProtocol(IOBufferProtocol):
    """An IOBufferProtocol which parses the output of catkin_test_results"""
    # Matches summary lines like: "pkg: 3 tests, 0 errors, 1 failures, 0 skipped"
    _summary_re = r'(.*): (\d+) tests, (\d+) errors, (\d+) failures, (\d+) skipped'
    def on_stdout_received(self, data):
        """Colorize test-summary lines before forwarding to the base handler."""
        colorized = []
        for raw_line in data.decode().splitlines():
            match = re.match(self._summary_re, raw_line)
            if match is None:
                colorized.append(raw_line)
            else:
                template = fmt('@!{}@|: {} tests, @{rf}{} errors@|, @{rf}{} failures@|, @{kf}{} skipped@|')
                colorized.append(template.format(*match.groups()))
        cdata = '\n'.join(colorized) + '\n'
        super(CatkinTestResultsIOBufferProtocol, self).on_stdout_received(cdata.encode())
| |
from keras.models import Model
from keras.layers import Input, Masking, Dense, LSTM
from keras.layers import Dropout, TimeDistributed, Bidirectional, merge
from keras.layers.embeddings import Embedding
from keras.utils import np_utils
from keras.optimizers import RMSprop
import numpy as np
import pandas as pd
import sys
import math
import os
from datetime import datetime
# add path
sys.path.append('../')
sys.path.append('../tools')
from tools import conf
from tools import load_data
from tools import prepare
from tools import plot
# Fix NumPy's RNG seed so shuffling and initialization are reproducible.
np.random.seed(0)
# train hyperparameters
# Sequence/feature sizes come from the shared `conf` module so training
# scripts stay in sync with preprocessing.
step_length = conf.ner_step_length
pos_length = conf.ner_pos_length
chunk_length = conf.ner_chunk_length
gazetteer_length = conf.gazetteer_length
feature_length = conf.ner_feature_length_2
emb_vocab = conf.senna_vocab
emb_length = conf.senna_length
hash_vocab = conf.ner_hash_vocab
hash_length = conf.ner_hash_length
output_length = conf.ner_BIOES_length
batch_size = conf.batch_size
# NOTE(review): epoch count is hard-coded to 70, overriding conf.nb_epoch.
nb_epoch = 70 #conf.nb_epoch
# Derive the model/run name from this script's filename (minus ".py") and
# create a per-model output directory.
model_name = os.path.basename(__file__)[:-3]
folder_path = 'model/%s'%model_name
if not os.path.isdir(folder_path):
    os.makedirs(folder_path)
# the data, shuffled and split between train and test sets
# CoNLL-style NER data in BIOES tagging form; eng.testa serves as dev set.
train_data = load_data.load_ner(dataset='eng.train', form='BIOES')
dev_data = load_data.load_ner(dataset='eng.testa', form='BIOES')
train_samples = len(train_data)
dev_samples = len(dev_data)
print('train shape:', train_samples)
print('dev shape:', dev_samples)
print()
# Load pretrained SENNA word embeddings; prepend an all-zero row (padding
# index) and append a random row (out-of-vocabulary index) — hence the
# emb_vocab+2 vocabulary size used by the Embedding layer below.
word_embedding = pd.read_csv('../preprocessing/senna/embeddings.txt', delimiter=' ', header=None)
word_embedding = word_embedding.values
word_embedding = np.concatenate([np.zeros((1,emb_length)),word_embedding, np.random.uniform(-1,1,(1,emb_length))])
# hash_embedding = pd.read_csv('../preprocessing/ner-auto-encoder-2/auto-encoder-embeddings.txt', delimiter=' ', header=None)
# hash_embedding = hash_embedding.values
# hash_embedding = np.concatenate([np.zeros((1,hash_length)),hash_embedding, np.random.rand(1,hash_length)])
# Model inputs: word-embedding indices, precomputed hash feature vectors,
# one-hot POS tags, and gazetteer features, each padded to step_length.
embed_index_input = Input(shape=(step_length,))
embedding = Embedding(emb_vocab+2, emb_length, weights=[word_embedding], mask_zero=True, input_length=step_length)(embed_index_input)
hash_representation_input = Input(shape=(step_length,feature_length))
# encoder_embedding = Embedding(hash_vocab+2, hash_length, weights=[hash_embedding], mask_zero=True, input_length=step_length)(hash_index_input)
pos_input = Input(shape=(step_length, pos_length))
#chunk_input = Input(shape=(step_length, chunk_length))
gazetteer_input = Input(shape=(step_length, gazetteer_length))
# Concatenate all per-token features into one sequence tensor.
senna_hash_pos_chunk_gazetteer_merge = merge([embedding, hash_representation_input, pos_input, gazetteer_input], mode='concat')
# Mask zero-padded timesteps so the LSTMs ignore them.
input_mask = Masking(mask_value=0)(senna_hash_pos_chunk_gazetteer_merge)
dp_1 = Dropout(0.5)(input_mask)
# Two stacked bidirectional LSTM layers over the token sequence.
hidden_1 = Bidirectional(LSTM(128, return_sequences=True))(dp_1)
hidden_2 = Bidirectional(LSTM(64, return_sequences=True))(hidden_1)
dp_2 = Dropout(0.5)(hidden_2)
# Per-timestep softmax over the BIOES tag set.
output = TimeDistributed(Dense(output_length, activation='softmax'))(dp_2)
# NOTE(review): `merge` and the input=/output= keywords are Keras 1 APIs.
model = Model(input=[embed_index_input,hash_representation_input,pos_input, gazetteer_input], output=output)
rmsprop = RMSprop(lr=0.0005)
model.compile(loss='categorical_crossentropy',
              optimizer=rmsprop,
              metrics=['accuracy'])
print(model.summary())
# Number of batches per epoch (last batch may be smaller).
number_of_train_batches = int(math.ceil(float(train_samples)/batch_size))
number_of_dev_batches = int(math.ceil(float(dev_samples)/batch_size))
print('start train %s ...\n'%model_name)
# Track the best dev accuracy plus per-epoch curves for plotting at the end.
best_accuracy = 0
best_epoch = 0
all_train_loss = []
all_dev_loss = []
all_dev_accuracy = []
# Plain-text training log written alongside the saved models.
log = open('%s/model_log.txt'%folder_path, 'w')
start_time = datetime.now()
print('train start at %s\n'%str(start_time))
log.write('train start at %s\n\n'%str(start_time))
# Main training loop: manual batching with train_on_batch/test_on_batch so
# variable-length features can be densified and padded per batch.
for epoch in range(nb_epoch):
    start = datetime.now()
    print('-'*60)
    print('epoch %d start at %s'%(epoch, str(start)))
    log.write('-'*60+'\n')
    log.write('epoch %d start at %s\n'%(epoch, str(start)))
    train_loss = 0
    dev_loss = 0
    # Reshuffle the training sentences every epoch.
    np.random.shuffle(train_data)
    for i in range(number_of_train_batches):
        train_batch = train_data[i*batch_size: (i+1)*batch_size]
        embed_index, hash_repesentation, pos, chunk, label, length, sentence = prepare.prepare_ner_raw(batch=train_batch, form='BIOES', gram='bi')
        # Densify the sparse hash features and pad every sequence with zero
        # rows up to step_length.
        hash_repesentation = [each.toarray() for each in hash_repesentation]
        hash_repesentation = np.array([np.concatenate([h, np.zeros((step_length-length[l], feature_length))]) for l, h in enumerate(hash_repesentation)])
        # One-hot encode POS tags, padded to step_length.
        pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
        # chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
        # Gazetteer features are padded with their own length list.
        gazetteer, length_2 = prepare.prepare_gazetteer(batch=train_batch)
        gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])
        # Targets: one-hot BIOES tags per token.
        y = np.array([np_utils.to_categorical(each, output_length) for each in label])
        train_metrics = model.train_on_batch([embed_index, hash_repesentation, pos, gazetteer], y)
        train_loss += train_metrics[0]
    all_train_loss.append(train_loss)
    correct_predict = 0
    all_predict = 0
    # Evaluate on the dev set every epoch (summed loss + token accuracy).
    for j in range(number_of_dev_batches):
        dev_batch = dev_data[j*batch_size: (j+1)*batch_size]
        embed_index, hash_repesentation, pos, chunk, label, length, sentence = prepare.prepare_ner_raw(batch=dev_batch, form='BIOES', gram='bi')
        hash_repesentation = [each.toarray() for each in hash_repesentation]
        hash_repesentation = np.array([np.concatenate([h, np.zeros((step_length-length[l], feature_length))]) for l, h in enumerate(hash_repesentation)])
        pos = np.array([(np.concatenate([np_utils.to_categorical(p, pos_length), np.zeros((step_length-length[l], pos_length))])) for l,p in enumerate(pos)])
        # chunk = np.array([(np.concatenate([np_utils.to_categorical(c, chunk_length), np.zeros((step_length-length[l], chunk_length))])) for l,c in enumerate(chunk)])
        gazetteer, length_2 = prepare.prepare_gazetteer(batch=dev_batch)
        gazetteer = np.array([(np.concatenate([a, np.zeros((step_length-length_2[l], gazetteer_length))])) for l,a in enumerate(gazetteer)])
        y = np.array([np_utils.to_categorical(each, output_length) for each in label])
        # for loss
        dev_metrics = model.test_on_batch([embed_index, hash_repesentation, pos, gazetteer], y)
        dev_loss += dev_metrics[0]
        # for accuracy
        prob = model.predict_on_batch([embed_index, hash_repesentation, pos, gazetteer])
        # NOTE(review): `i` shadows the train-batch index above; harmless
        # because the train loop has finished, but easy to misread.
        for i, l in enumerate(length):
            predict_label = np_utils.categorical_probas_to_classes(prob[i])
            # Score only the real tokens; ignore padded positions.
            correct_predict += np.sum(predict_label[:l]==label[i][:l])
        all_predict += np.sum(length)
    # NOTE(review): 'epcoh_accuracy' is a typo for 'epoch_accuracy'; kept
    # as-is in this documentation-only pass.
    epcoh_accuracy = float(correct_predict)/all_predict
    all_dev_accuracy.append(epcoh_accuracy)
    all_dev_loss.append(dev_loss)
    if epcoh_accuracy>=best_accuracy:
        best_accuracy = epcoh_accuracy
        best_epoch = epoch
    end = datetime.now()
    # NOTE(review): a snapshot is saved after *every* epoch, not only the
    # best one; best_epoch records which snapshot to keep.
    model.save('%s/model_epoch_%d.h5'%(folder_path, epoch), overwrite=True)
    print('epoch %d end at %s'%(epoch, str(end)))
    print('epoch %d train loss: %f'%(epoch, train_loss))
    print('epoch %d dev loss: %f'%(epoch, dev_loss))
    print('epoch %d dev accuracy: %f'%(epoch, epcoh_accuracy))
    print('best epoch now: %d\n'%best_epoch)
    log.write('epoch %d end at %s\n'%(epoch, str(end)))
    log.write('epoch %d train loss: %f\n'%(epoch, train_loss))
    log.write('epoch %d dev loss: %f\n'%(epoch, dev_loss))
    log.write('epoch %d dev accuracy: %f\n'%(epoch, epcoh_accuracy))
    log.write('best epoch now: %d\n\n'%best_epoch)
# Final bookkeeping: report total training time and best epoch, then plot
# the loss/accuracy curves next to the saved models.
end_time = datetime.now()
print('train end at %s\n'%str(end_time))
log.write('train end at %s\n\n'%str(end_time))
timedelta = end_time - start_time
print('train cost time: %s\n'%str(timedelta))
print('best epoch last: %d\n'%best_epoch)
log.write('train cost time: %s\n\n'%str(timedelta))
log.write('best epoch last: %d\n\n'%best_epoch)
plot.plot_loss(all_train_loss, all_dev_loss, folder_path=folder_path, title='%s'%model_name)
plot.plot_accuracy(all_dev_accuracy, folder_path=folder_path, title='%s'%model_name)
# NOTE(review): `log` is never explicitly closed; flushing relies on
# interpreter exit.
| |
import ply.lex as lex
import ply.yacc as yacc
import pprint
import sys
# Map of reserved words to their token types; identifiers matched by t_ID
# that appear here are re-tagged with the corresponding token type.
# BUG FIX: the original literal listed 'if' twice; the duplicate (harmless
# but a latent paste bug) has been removed.
reserved = {
    'if' : 'IF',
    'for' : 'FOR',
    'define' : 'DEFINE',
    'return' : 'RETURN',
    'function' : 'FUNCTION',
    'true': 'TRUE',
    'false': 'FALSE',
    'var' : 'VAR',
    'else' : 'ELSE'
}
# Complete token list for PLY: punctuation/operator tokens defined below as
# t_<NAME> rules, plus the reserved-word token types.
tokens = [
    'NUMBER',
    'PLUS',
    'MINUS',
    'TIMES',
    'DIVIDE',
    'LPAREN',
    'RPAREN',
    'LBRACE',
    'RBRACE',
    'LBRACKET',
    'RBRACKET',
    'COMMA',
    'SEMICOLON',
    'SQUOTEMARK',
    'DQUOTEMARK',
    'COLON',
    'POINT',
    'STRING',
    'ID',
    'EQUAL',
    'QMARK',
    'SGN'
] + list(reserved.values())
# Simple single-pattern tokens (PLY convention: a t_<NAME> string attribute
# is the token's regular expression).
t_POINT = r'\.'
t_COLON = r':'
t_COMMA = r','
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACE = r'{'
t_RBRACE = r'}'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_SEMICOLON = r';'
t_SQUOTEMARK= r'\''
t_DQUOTEMARK= r'"'
t_QMARK = r'\?'
t_EQUAL = r'='
# Spaces and tabs are skipped entirely (newlines handled by t_newline).
t_ignore = ' \t'
def wrapper(t):
    """Return *t* unchanged if it is already a list, else wrap it in one."""
    if isinstance(t, list):
        return t
    return [t]
def t_COMMENT(t):
    r'''(/\*.*\*/)|(//.*)'''
    # The docstring above is the token regex (PLY convention): a single-line
    # /* ... */ block comment or a // line comment.
    # Returning nothing discards the matched text, so comments never reach
    # the parser.
    pass
def t_STRING(t):
    r'''("([^"\n]*(\\")*[^"\n]*)?")|('([^'\n]*(\\')*[^'\n]*)?')|(/[^/]+/[^/,;]+)'''
    # The docstring above is the token regex (PLY convention): double-quoted
    # strings, single-quoted strings, or a /pattern/flags-style literal.
    # NOTE(review): eval() on token text is dangerous on untrusted input;
    # it is used here only to unescape quoted literals.
    try:
        t.value = eval(t.value)
    except Exception:
        # Not a plain quoted literal (e.g. the /pattern/ form): keep the raw
        # text. BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        pass
    return t
def t_ID(t):
    r'''[a-zA-Z_][a-zA-Z_0-9]*'''
    # The docstring above is the token regex (PLY convention): a standard
    # identifier. Reserved words are re-tagged with their own token type.
    t.type = reserved.get(t.value,'ID') # Check for reserved words
    return t
def t_NUMBER(t):
    r'(\+|-)*\d+(\.?)\d*'
    # The docstring above is the token regex (PLY convention): optionally
    # signed integer or decimal literals.
    try:
        t.value = int(t.value)
    except ValueError:
        # Not a valid int (e.g. contains a decimal point): parse as float.
        # BUG FIX: was a bare `except:`, which would also mask unrelated
        # failures such as KeyboardInterrupt.
        t.value = float(t.value)
    return t
def t_SGN(t):
    r'''(\+|\-|\*|/|\=|\.|\>|\<|&|\|)+'''
    # The docstring above is the token regex (PLY convention): a run of one
    # or more operator characters (e.g. ==, =>, &&).
    # NOTE(review): PLY tries function rules before the single-character
    # string rules above, so SGN can shadow PLUS/MINUS/etc. — confirm this
    # precedence is intended.
    return t
def t_newline(t):
    r'\n+'
    # Track line numbers for error reporting; newlines produce no token.
    t.lexer.lineno += len(t.value)
def t_error(t):
    # Called by PLY when no rule matches: report the offending character and
    # resume scanning at the next one.
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
def p_program(p):
    ''' program : DEFINE LPAREN function RPAREN
                | DEFINE LPAREN function RPAREN SEMICOLON
                | DEFINE LPAREN array COMMA function RPAREN
                | DEFINE LPAREN array COMMA function RPAREN SEMICOLON
    '''
    # Top-level rule: an AMD-style define(...) call, optionally with a
    # leading dependency array and/or trailing semicolon.  The two
    # no-parameter forms (len 5 and 6) build the identical node, so they
    # share one branch.
    if len(p) in (5, 6):
        p[0] = {
            '__type__': 'program',
            'parameter': None,
            'function': p[3]
        }
    else:
        p[0] = {
            '__type__': 'program',
            'parameter': p[3],
            'function': p[5]
        }
def p_array(p):
    ''' array : LBRACKET RBRACKET
              | LBRACKET parameters RBRACKET
    '''
    # An array literal becomes a Python list: empty for `[]`, otherwise
    # the list already built by the parameters rule.
    p[0] = p[2] if len(p) == 4 else []
def p_paremeter(p):
    ''' parameters : expression COMMA parameters
                   | expression
    '''
    # Right-recursive comma list.  The single-expression base case starts
    # a fresh list; the recursive case prepends the new head onto the
    # list built for the tail.
    if len(p) != 2:
        p[3].insert(0, p[1])
        p[0] = p[3]
        return
    p[0] = [p[1]]
def p_expression(p):
    ''' expression : expression SGN expression
                   | expression QMARK expression COLON expression
                   | expression array
                   | LPAREN expression RPAREN
                   | VAR parameters
                   | NUMBER
                   | ID
                   | STRING
                   | TRUE
                   | FALSE
                   | dictionary
                   | array
                   | function
    '''
    # Atoms (single-symbol productions) pass through unchanged; every
    # composite form is flattened into one Python list by concatenating
    # the wrapped parts, e.g. `a + b` -> [a, '+', b].
    #
    # Fixed: the ternary production (5 symbols, len(p) == 6) had no
    # branch, so `a ? b : c` silently produced None; the old
    # `len(p) == 5` branch matched no production at all.  Handling all
    # composite lengths uniformly covers it.
    if len(p) == 2:
        p[0] = p[1]
    else:
        parts = []
        for idx in range(1, len(p)):
            parts.extend(wrapper(p[idx]))
        p[0] = parts
def p_dictionary(p):
    ''' dictionary : LBRACE RBRACE
                   | LBRACE kv_pairs
                   kv_pairs : RBRACE
                   | expression COLON expression RBRACE
                   | expression COLON expression COMMA kv_pairs
    '''
    # This one function handles BOTH nonterminals declared in the
    # docstring (dictionary and kv_pairs); the branches dispatch on
    # production length:
    #   len 2: kv_pairs -> RBRACE                  => empty dict
    #   len 3: dictionary -> LBRACE RBRACE (p[2] is '}', not a dict => {})
    #          or LBRACE kv_pairs (p[2] is the dict built below)
    #   len 5: kv_pairs -> expr COLON expr RBRACE  => single-entry dict
    #   len 6: kv_pairs -> expr COLON expr COMMA kv_pairs
    #          => merge this entry into the tail dict (p[5])
    if len(p) == 2:
        p[0] = {}
    elif len(p) == 3:
        p[0] = p[2] if isinstance(p[2], dict) else {}
    elif len(p) == 5:
        p[0] = {p[1]: p[3]}
    elif len(p) == 6:
        p[5][p[1]] = p[3]
        p[0] = p[5]
def p_if(p):
    ''' if_statement : IF LPAREN expression RPAREN statement else_statement
                     | IF LPAREN expression RPAREN LBRACE statements RBRACE else_statement
    '''
    # No AST node is built for conditionals: p[0] is left as None, so an
    # if_statement contributes None to the enclosing statement list.
def p_else(p):
    ''' else_statement : empty
                       | ELSE statement
                       | ELSE LBRACE statements RBRACE
    '''
    # Like p_if, this rule produces no AST node (p[0] stays None); it
    # exists only so the grammar accepts optional else clauses.
def p_function(p):
    ''' function : ID LPAREN RPAREN
                 | ID LPAREN parameters RPAREN
                 | FUNCTION LPAREN RPAREN LBRACE statements RBRACE
                 | FUNCTION ID LPAREN RPAREN LBRACE statements RBRACE
                 | FUNCTION LPAREN parameters RPAREN LBRACE statements RBRACE
                 | FUNCTION ID LPAREN parameters RPAREN LBRACE statements RBRACE
    '''
    # Builds either a 'function_call' node (first two productions) or a
    # 'function' definition node (the four FUNCTION productions).
    #
    # Fixed two defects in the old length-based dispatch:
    #  * Both 7-symbol rules (named, no params / anonymous, with params)
    #    give len(p) == 8; the old code always took the "named" reading,
    #    so an anonymous function with parameters got name='(' and lost
    #    its parameter list.  We now disambiguate on p[2]: for the
    #    anonymous form it is the literal '(' token value, which can
    #    never be an identifier.
    #  * The len == 9 branch (named, with params) used p[3]/p[6], which
    #    are the '(' and '{' tokens; the correct indices p[4]/p[7] lived
    #    in an unreachable else branch.
    if len(p) == 4:
        p[0] = {
            '__type__': 'function_call',
            'parameters': None,
            'function': p[1]
        }
    elif len(p) == 5:
        p[0] = {
            '__type__': 'function_call',
            'parameters': p[3],
            'function': p[1]
        }
    elif len(p) == 7:
        # FUNCTION ( ) { statements }
        p[0] = {
            '__type__': 'function',
            'parameters': None,
            'statements': p[5]
        }
    elif len(p) == 8:
        if p[2] == '(':
            # FUNCTION ( parameters ) { statements }
            p[0] = {
                '__type__': 'function',
                'parameters': p[3],
                'statements': p[6]
            }
        else:
            # FUNCTION name ( ) { statements }
            p[0] = {
                '__type__': 'function',
                'parameters': None,
                'statements': p[6],
                'name': p[2]
            }
    else:
        # FUNCTION name ( parameters ) { statements }
        p[0] = {
            '__type__': 'function',
            'parameters': p[4],
            'statements': p[7],
            'name': p[2]
        }
def p_statements(p):
    ''' statements : empty
                   | nestatements
                   nestatements : statement
                   | statement nestatements
    '''
    # Handles both `statements` and the non-empty `nestatements` list.
    # An empty production yields p[1] == None -> empty list.  A single
    # statement starts the list (boxing non-list results); the recursive
    # form prepends the head statement onto the tail list.
    head = p[1]
    if not head:
        p[0] = []
    elif len(p) == 2:
        p[0] = head if isinstance(head, list) else [head]
    else:
        p[2].insert(0, head)
        p[0] = p[2]
def p_statement(p):
    ''' statement : if_statement
                  | function
                  | expression SEMICOLON
                  | RETURN expression SEMICOLON
    '''
    # Wrap the payload expression in a 'statement' node.  The inner value
    # is p[1] for `expression ;` and p[2] for `return expression ;`.
    # The single-symbol productions (if_statement / function) are left
    # untouched: p[0] keeps its default None, exactly as before.
    if len(p) == 3:
        inner = p[1]
    elif len(p) == 4:
        inner = p[2]
    else:
        return
    p[0] = {
        '__type__': 'statement',
        'statement': inner
    }
def p_empty(p):
    'empty :'
    # The empty production, used by `statements` and `else_statement` to
    # make them optional.  Leaves p[0] as None.
    pass
def p_error(p):
    """Yacc error callback.

    PLY calls this with the offending token, or with None when the error
    is an unexpected end of input -- the original version crashed with
    AttributeError in that case.
    """
    if p is None:
        print("Syntax error: unexpected end of input")
    else:
        print("Syntax error at '%s' '%d' " % (p.value, p.lineno))
if __name__ == "__main__":
    # CLI entry point: lex/parse the JavaScript-ish file named on the
    # command line and pretty-print the resulting AST.
    # (Fixed: Python-2-only `print` statements -- the rest of the file
    # uses print() -- plus an unclosed file handle and a bare except;
    # the dead commented-out lexer debug loop is removed.)
    if len(sys.argv) < 2:
        print("useage: python parser.py JS_FILENAME")
        exit()
    try:
        # `with` guarantees the handle is closed; read the whole file at
        # once instead of line-by-line concatenation.
        with open(sys.argv[1]) as fd:
            data = fd.read()
    except IOError:
        print("Can not open file")
        exit()
    lexer = lex.lex()
    parser = yacc.yacc()
    result = parser.parse(data)
    pprint.pprint(result, width=40, depth=8)
| |
#
# Histogram.py -- Histogram plugin for Ginga reference viewer
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy
from ginga.gw import Widgets, Plot
from ginga import GingaPlugin
from ginga import AutoCuts
from ginga.util import plots
class Histogram(GingaPlugin.LocalPlugin):
    """Interactive histogram plugin for the Ginga reference viewer.

    The user draws a rectangle on the image; the plugin plots the pixel
    value distribution inside it, with controls for cut levels, log
    scaling, bin count, and a full-image histogram.
    """

    def __init__(self, fv, fitsimage):
        """Set up state, preferences and the drawing canvas (no GUI yet)."""
        # superclass defines some variables for us, like logger
        super(Histogram, self).__init__(fv, fitsimage)

        self.layertag = 'histogram-canvas'
        # canvas tag of the current selection (compound of rect + label);
        # None until the user has drawn a region
        self.histtag = None
        # If True, limits X axis to lo/hi cut levels
        self.xlimbycuts = True

        # get Histogram preferences
        prefs = self.fv.get_preferences()
        self.settings = prefs.createCategory('plugin_Histogram')
        self.settings.addDefaults(draw_then_move=True, num_bins=2048,
                                  hist_color='aquamarine')
        self.settings.load(onError='silent')

        # Set up histogram control parameters
        self.histcolor = self.settings.get('hist_color', 'aquamarine')
        self.numbins = self.settings.get('num_bins', 2048)
        self.autocuts = AutoCuts.Histogram(self.logger)

        # drawing canvas on which the user draws/moves/edits the
        # selection rectangle
        self.dc = self.fv.get_draw_classes()
        canvas = self.dc.DrawingCanvas()
        canvas.enable_draw(True)
        canvas.enable_edit(True)
        canvas.set_drawtype('rectangle', color='cyan', linestyle='dash',
                            drawdims=True)
        canvas.set_callback('draw-event', self.draw_cb)
        canvas.set_callback('edit-event', self.edit_cb)
        canvas.add_draw_mode('move', down=self.drag,
                             move=self.drag, up=self.update)
        canvas.register_for_cursor_drawing(self.fitsimage)
        canvas.set_surface(self.fitsimage)
        canvas.set_draw_mode('draw')
        self.canvas = canvas

        # refresh our cut-level markers whenever the viewer's cut levels
        # change externally
        fitssettings = fitsimage.get_settings()
        for name in ['cuts']:
            fitssettings.getSetting(name).add_callback(
                'set', self.cutset_ext_cb, fitsimage)
        self.gui_up = False

    def build_gui(self, container):
        """Build the plugin GUI: plot area, entry fields, mode buttons."""
        top = Widgets.VBox()
        top.set_border_width(4)

        # Make the cuts plot
        vbox, sw, orientation = Widgets.get_oriented_box(container)
        vbox.set_border_width(4)
        vbox.set_spacing(2)

        msg_font = self.fv.get_font("sansFont", 12)
        tw = Widgets.TextArea(wrap=True, editable=False)
        tw.set_font(msg_font)
        self.tw = tw

        fr = Widgets.Expander("Instructions")
        fr.set_widget(tw)
        vbox.add_widget(fr, stretch=0)

        self.plot = plots.Plot(logger=self.logger,
                               width=400, height=400)
        ax = self.plot.add_axis()
        ax.grid(True)
        w = Plot.PlotWidget(self.plot)
        w.resize(400, 400)
        vbox.add_widget(w, stretch=1)

        captions = (('Cut Low:', 'label', 'Cut Low', 'entry'),
                    ('Cut High:', 'label', 'Cut High', 'entry',
                     'Cut Levels', 'button'),
                    ('Auto Levels', 'button'),
                    ('Log Histogram', 'checkbutton',
                     'Plot By Cuts', 'checkbutton'),
                    ('NumBins:', 'label', 'NumBins', 'entry'),
                    ('Full Image', 'button'),
                    )
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w.update(b)
        b.cut_levels.set_tooltip("Set cut levels manually")
        b.auto_levels.set_tooltip("Set cut levels by algorithm")
        b.cut_low.set_tooltip("Set low cut level (press Enter)")
        b.cut_high.set_tooltip("Set high cut level (press Enter)")
        b.log_histogram.set_tooltip("Use the log of the pixel values for the "
                                    "histogram (empty bins map to 10^-1)")
        b.plot_by_cuts.set_tooltip("Only show the part of the histogram "
                                   "between the cuts")
        b.numbins.set_tooltip("Number of bins for the histogram")
        b.full_image.set_tooltip("Use the full image for calculating the "
                                 "histogram")
        b.numbins.set_text(str(self.numbins))
        b.cut_low.add_callback('activated', lambda w: self.cut_levels())
        b.cut_high.add_callback('activated', lambda w: self.cut_levels())
        b.cut_levels.add_callback('activated', lambda w: self.cut_levels())
        b.auto_levels.add_callback('activated', lambda w: self.auto_levels())
        b.log_histogram.set_state(self.plot.logy)
        b.log_histogram.add_callback('activated', self.log_histogram_cb)
        b.plot_by_cuts.set_state(self.xlimbycuts)
        b.plot_by_cuts.add_callback('activated', self.plot_by_cuts_cb)
        b.numbins.add_callback('activated', lambda w: self.set_numbins_cb())
        b.full_image.add_callback('activated', lambda w: self.full_image_cb())
        vbox.add_widget(w, stretch=0)

        # Move/Draw/Edit mode radio buttons, kept in sync with the canvas
        mode = self.canvas.get_draw_mode()
        hbox = Widgets.HBox()
        btn1 = Widgets.RadioButton("Move")
        btn1.set_state(mode == 'move')
        btn1.add_callback('activated',
                          lambda w, val: self.set_mode_cb('move', val))
        btn1.set_tooltip("Choose this to position box")
        self.w.btn_move = btn1
        hbox.add_widget(btn1)

        btn2 = Widgets.RadioButton("Draw", group=btn1)
        btn2.set_state(mode == 'draw')
        btn2.add_callback('activated',
                          lambda w, val: self.set_mode_cb('draw', val))
        btn2.set_tooltip("Choose this to draw a replacement box")
        self.w.btn_draw = btn2
        hbox.add_widget(btn2)

        btn3 = Widgets.RadioButton("Edit", group=btn1)
        btn3.set_state(mode == 'edit')
        btn3.add_callback('activated',
                          lambda w, val: self.set_mode_cb('edit', val))
        btn3.set_tooltip("Choose this to edit a box")
        self.w.btn_edit = btn3
        hbox.add_widget(btn3)

        # move/edit are meaningless until a region exists
        if self.histtag is None:
            self.w.btn_move.set_enabled(False)
            self.w.btn_edit.set_enabled(False)
        hbox.add_widget(Widgets.Label(''), stretch=1)
        vbox.add_widget(hbox, stretch=0)

        ## spacer = Widgets.Label('')
        ## vbox.add_widget(spacer, stretch=1)

        top.add_widget(sw, stretch=1)

        btns = Widgets.HBox()
        btns.set_border_width(4)
        btns.set_spacing(3)

        btn = Widgets.Button("Close")
        btn.add_callback('activated', lambda w: self.close())
        btns.add_widget(btn, stretch=0)
        btns.add_widget(Widgets.Label(''), stretch=1)
        top.add_widget(btns, stretch=0)

        container.add_widget(top, stretch=1)
        self.gui_up = True

    def instructions(self):
        """Show usage instructions in the text area."""
        self.tw.set_text("""Draw (or redraw) a region with the right mouse button. Click or drag left mouse button to reposition region.""")

    def close(self):
        """Close the plugin."""
        self.fv.stop_local_plugin(self.chname, str(self))
        return True

    def start(self):
        """Plugin start hook: install our canvas layer and resume."""
        self.instructions()
        self.plot.set_titles(rtitle="Histogram")
        # insert canvas, if not already
        p_canvas = self.fitsimage.get_canvas()
        try:
            obj = p_canvas.get_object_by_tag(self.layertag)
        except KeyError:
            # our layer is not yet on the viewer canvas; add it
            p_canvas.add(self.canvas, tag=self.layertag)
        #self.canvas.delete_all_objects()
        self.resume()

    def pause(self):
        """Stop responding to UI events (plugin stays loaded)."""
        self.canvas.ui_setActive(False)

    def resume(self):
        """Resume responding to UI events."""
        # turn off any mode user may be in
        self.modes_off()
        self.canvas.ui_setActive(True)
        self.fv.show_status("Draw a rectangle with the right mouse button")

    def stop(self):
        """Plugin stop hook: remove our canvas layer from the viewer."""
        # remove the rect from the canvas
        ## try:
        ##     self.canvas.delete_object_by_tag(self.histtag)
        ## except:
        ##     pass
        ##self.histtag = None
        # remove the canvas from the image
        p_canvas = self.fitsimage.get_canvas()
        try:
            p_canvas.delete_object_by_tag(self.layertag)
        except:
            # best-effort: the layer may already be gone
            pass
        self.gui_up = False
        self.fv.show_status("")

    def full_image_cb(self):
        """Replace the selection with a rectangle covering the whole image."""
        canvas = self.canvas
        try:
            canvas.delete_object_by_tag(self.histtag)
        except:
            pass

        image = self.fitsimage.get_image()
        width, height = image.get_size()
        x1, y1, x2, y2 = 0, 0, width-1, height-1
        tag = canvas.add(self.dc.Rectangle(x1, y1, x2, y2,
                                           color='cyan',
                                           linestyle='dash'))
        self.draw_cb(canvas, tag)

    def histogram(self, image, x1, y1, x2, y2, z=None, pct=1.0, numbins=2048):
        """Histogram the image region (x1,y1)-(x2,y2).

        If `z` is given, only that plane of a multi-plane image is used.
        Returns the result of AutoCuts.Histogram.calc_histogram().
        """
        if z is not None:
            data = image.get_data()
            data = data[y1:y2, x1:x2, z]
        else:
            tup = image.cutout_adjust(x1, y1, x2, y2)
            data = tup[0]

        return self.autocuts.calc_histogram(data, pct=pct, numbins=numbins)

    def redo(self):
        """Recompute and redraw the histogram for the current region."""
        if self.histtag is None:
            # no region drawn yet; nothing to plot
            return
        obj = self.canvas.get_object_by_tag(self.histtag)
        if obj.kind != 'compound':
            return True
        bbox = obj.objects[0]

        # Do histogram on the points within the rect
        image = self.fitsimage.get_image()
        self.plot.clear()

        numbins = self.numbins
        ## pct = 1.0
        ## i = int(numbins * (1.0 - pct))
        ## j = int(numbins * pct)

        depth = image.get_depth()
        if depth != 3:
            # monochrome image: one histogram
            res = self.histogram(image, int(bbox.x1), int(bbox.y1),
                                 int(bbox.x2), int(bbox.y2),
                                 pct=1.0, numbins=numbins)
            # used with 'steps-post' drawstyle, this x and y assignment
            # gives correct histogram-steps
            x = res.bins
            y = numpy.append(res.dist, res.dist[-1])
            ## y, x = y[i:j+1], x[i:j+1]
            ymax = y.max()
            if self.plot.logy:
                y = numpy.choose(y > 0, (.1, y))
            self.plot.plot(x, y, xtitle="Pixel value", ytitle="Number",
                           title="Pixel Value Distribution",
                           color='blue', alpha=1.0, drawstyle='steps-post')
        else:
            # RGB image: one histogram per color plane
            colors = ('red', 'green', 'blue')
            ymax = 0
            for z in range(depth):
                res = self.histogram(image, int(bbox.x1), int(bbox.y1),
                                     int(bbox.x2), int(bbox.y2),
                                     z=z, pct=1.0, numbins=numbins)
                # used with 'steps-post' drawstyle, this x and y assignment
                # gives correct histogram-steps
                x = res.bins
                y = numpy.append(res.dist, res.dist[-1])
                ## y, x = y[i:j+1], x[i:j+1]
                ymax = max(ymax, y.max())
                if self.plot.logy:
                    y = numpy.choose(y > 0, (.1, y))
                self.plot.plot(x, y, xtitle="Pixel value", ytitle="Number",
                               title="Pixel Value Distribution",
                               color=colors[z], alpha=0.33,
                               drawstyle='steps-post')

        # show cut levels
        loval, hival = self.fitsimage.get_cut_levels()
        self.loline = self.plot.ax.axvline(loval, 0.0, 0.99,
                                           linestyle='-', color='red')
        self.hiline = self.plot.ax.axvline(hival, 0.0, 0.99,
                                           linestyle='-', color='green')
        if self.xlimbycuts:
            self.plot.ax.set_xlim(loval, hival)

        # Make x axis labels a little more readable
        ## lbls = self.plot.ax.xaxis.get_ticklabels()
        ## for lbl in lbls:
        ##     lbl.set(rotation=45, horizontalalignment='right')

        self.w.cut_low.set_text(str(loval))
        self.w.cut_high.set_text(str(hival))
        self.plot.fig.canvas.draw()

        self.fv.show_status("Click or drag left mouse button to move region")
        return True

    def update(self, canvas, event, data_x, data_y, viewer):
        """Mouse-up in 'move' mode: recenter the region on the cursor and
        rebuild it via draw_cb (which triggers a histogram redo)."""
        obj = self.canvas.get_object_by_tag(self.histtag)
        if obj.kind == 'compound':
            bbox = obj.objects[0]
        elif obj.kind == 'rectangle':
            bbox = obj
        else:
            return True

        # calculate center of bbox
        wd = bbox.x2 - bbox.x1
        dw = wd // 2
        ht = bbox.y2 - bbox.y1
        dh = ht // 2
        x, y = bbox.x1 + dw, bbox.y1 + dh

        # calculate offsets of move
        dx = (data_x - x)
        dy = (data_y - y)

        # calculate new coords
        x1, y1, x2, y2 = bbox.x1+dx, bbox.y1+dy, bbox.x2+dx, bbox.y2+dy

        try:
            canvas.delete_object_by_tag(self.histtag)
        except:
            pass

        tag = canvas.add(self.dc.Rectangle(
            x1, y1, x2, y2, color='cyan', linestyle='dash'))

        self.draw_cb(canvas, tag)
        return True

    def drag(self, canvas, event, data_x, data_y, viewer):
        """Mouse drag in 'move' mode: move the rectangle without
        recomputing the histogram (that happens on mouse-up)."""
        obj = self.canvas.get_object_by_tag(self.histtag)
        if obj.kind == 'compound':
            bbox = obj.objects[0]
        elif obj.kind == 'rectangle':
            bbox = obj
        else:
            return True

        # calculate center of bbox
        wd = bbox.x2 - bbox.x1
        dw = wd // 2
        ht = bbox.y2 - bbox.y1
        dh = ht // 2
        x, y = bbox.x1 + dw, bbox.y1 + dh

        # calculate offsets of move
        dx = (data_x - x)
        dy = (data_y - y)

        # calculate new coords
        x1, y1, x2, y2 = bbox.x1+dx, bbox.y1+dy, bbox.x2+dx, bbox.y2+dy

        if obj.kind == 'compound':
            # while dragging, swap the compound for a lightweight plain
            # rectangle; draw_cb rebuilds the compound on mouse-up
            try:
                canvas.delete_object_by_tag(self.histtag)
            except:
                pass

            self.histtag = canvas.add(self.dc.Rectangle(
                x1, y1, x2, y2, color='cyan', linestyle='dash'))
        else:
            bbox.x1, bbox.y1, bbox.x2, bbox.y2 = x1, y1, x2, y2
            canvas.redraw(whence=3)

        return True

    def draw_cb(self, canvas, tag):
        """Draw-event callback: adopt the drawn rectangle as the region,
        replace it with a rect+label compound, and redo the histogram."""
        obj = canvas.get_object_by_tag(tag)
        if obj.kind != 'rectangle':
            return True
        canvas.delete_object_by_tag(tag)

        if self.histtag:
            try:
                canvas.delete_object_by_tag(self.histtag)
            except:
                pass

        x1, y1, x2, y2 = obj.get_llur()

        tag = canvas.add(self.dc.CompoundObject(
            self.dc.Rectangle(x1, y1, x2, y2,
                              color=self.histcolor),
            self.dc.Text(x1, y2+4, "Histogram",
                         color=self.histcolor)))
        self.histtag = tag

        # a region now exists, so move/edit become available
        self.w.btn_move.set_enabled(True)
        self.w.btn_edit.set_enabled(True)

        move_flag = self.settings.get('draw_then_move', True)
        if move_flag:
            self.set_mode('move')

        return self.redo()

    def edit_cb(self, canvas, obj):
        """Edit-event callback: keep the label attached to the edited
        rectangle, then redo the histogram."""
        if obj.kind != 'rectangle':
            return True

        # Get the compound object that sits on the canvas.
        # Make sure edited rectangle was our histogram rectangle.
        c_obj = self.canvas.get_object_by_tag(self.histtag)
        if ((c_obj.kind != 'compound') or (len(c_obj.objects) < 2) or
                (c_obj.objects[0] != obj)):
            return False

        # reposition other elements to match
        x1, y1, x2, y2 = obj.get_llur()
        text = c_obj.objects[1]
        text.x, text.y = x1, y2 + 4
        self.fitsimage.redraw(whence=3)

        return self.redo()

    def cut_levels(self):
        """Apply the cut levels typed into the Cut Low/High entries.

        Returns the viewer's cut_levels() result, or None on error.
        """
        reslvls = None
        try:
            loval = float(self.w.cut_low.get_text())
            hival = float(self.w.cut_high.get_text())
            reslvls = self.fitsimage.cut_levels(loval, hival)
        except Exception as e:
            errmsg = 'Error cutting levels: {0}'.format(str(e))
            self.fv.show_status(errmsg)
            self.logger.error(errmsg)
        else:
            if self.xlimbycuts:
                self.redo()
        return reslvls

    def auto_levels(self):
        """Let the viewer compute cut levels automatically."""
        self.fitsimage.auto_levels()

    def cutset_ext_cb(self, setting, value, fitsimage):
        """Viewer 'cuts' setting callback: move our cut-level markers."""
        if not self.gui_up:
            return
        t_ = fitsimage.get_settings()
        loval, hival = t_['cuts']
        try:
            self.loline.remove()
            self.hiline.remove()
        except:
            # markers may not have been drawn yet
            pass
        # NOTE(review): these markers are drawn in black, while redo()
        # draws them red/green -- confirm the difference is intentional.
        self.loline = self.plot.ax.axvline(loval, 0.0, 0.99,
                                           linestyle='-', color='black')
        self.hiline = self.plot.ax.axvline(hival, 0.0, 0.99,
                                           linestyle='-', color='black')
        self.w.cut_low.set_text(str(loval))
        self.w.cut_high.set_text(str(hival))
        #self.plot.fig.canvas.draw()
        self.redo()

    def set_numbins_cb(self):
        """Apply the bin count typed into the NumBins entry."""
        self.numbins = int(self.w.numbins.get_text())
        self.redo()

    def log_histogram_cb(self, w, val):
        """Checkbox callback: toggle log scaling of the Y axis."""
        self.plot.logy = val
        if (self.histtag is not None) and self.gui_up:
            # self.histtag is None means no data is loaded yet
            self.redo()

    def plot_by_cuts_cb(self, w, val):
        """Checkbox callback: toggle limiting the X axis to cut levels."""
        self.xlimbycuts = val
        if (self.histtag is not None) and self.gui_up:
            # self.histtag is None means no data is loaded yet
            self.redo()

    def edit_select_box(self):
        """Select our rectangle for editing, or clear the selection."""
        if self.histtag is not None:
            obj = self.canvas.get_object_by_tag(self.histtag)
            if obj.kind != 'compound':
                return True
            # drill down to reference shape
            bbox = obj.objects[0]
            self.canvas.edit_select(bbox)
        else:
            self.canvas.clear_selected()
        self.canvas.update_canvas()

    def set_mode_cb(self, mode, tf):
        """Called when one of the Move/Draw/Edit radio buttons is selected."""
        if tf:
            self.canvas.set_draw_mode(mode)
            if mode == 'edit':
                self.edit_select_box()
        return True

    def set_mode(self, mode):
        """Programmatically switch draw mode and sync the radio buttons."""
        self.canvas.set_draw_mode(mode)
        self.w.btn_move.set_state(mode == 'move')
        self.w.btn_draw.set_state(mode == 'draw')
        self.w.btn_edit.set_state(mode == 'edit')

    def __str__(self):
        # plugin name as registered with the reference viewer
        return 'histogram'
# END
| |
#!/usr/bin/env python3
"""
Pegasus Metadata is a tool to query metadata collected by Pegasus workflows.
Usage: pegasus-metadata [-h] [-v] [-c] {task,file,workflow} ... submit_dir
"""
##
# Copyright 2007-2012 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import argparse
import logging
import sys
from pathlib import Path
from Pegasus.db import connection
from Pegasus.db.connection import ConnectionError, DBType
from Pegasus.service.monitoring.queries import (
StampedeDBNotFoundError,
StampedeWorkflowQueries,
)
from Pegasus.tools import utils
def configure_logging(verbosity=0):
    """Set the logging level from a ``-v`` count.

    0 -> ERROR, 1 -> WARN, 2 -> INFO, 3 or more -> DEBUG.  The count is
    clamped into range at both ends: the original only capped the top,
    so a negative value passed programmatically indexed the level table
    from the end (-1 selected DEBUG instead of ERROR).
    """
    log_levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    verbosity = max(0, min(len(log_levels) - 1, verbosity))
    utils.configureLogging(level=log_levels[verbosity])
def get_workflow_uuid(submit_dir):
    """Return ``(root_wf_uuid, wf_uuid)`` read from the submit directory's
    braindump file.

    Raises ValueError if the directory contains neither braindump.yml
    nor braindump.txt (i.e. it is not a workflow submit directory).
    """
    base = Path(submit_dir)
    has_braindump = (base / "braindump.yml").exists() or (base / "braindump.txt").exists()
    if not has_braindump:
        raise ValueError("Not a valid workflow submit directory: %r" % submit_dir)
    braindump = utils.slurp_braindb(submit_dir)
    return braindump["root_wf_uuid"], braindump["wf_uuid"]
def get_workflow_uri(submit_dir):
    # Resolve the workflow-database URL recorded in the submit directory.
    return connection.url_by_submitdir(submit_dir, DBType.WORKFLOW)
def render_metas(metas, indent=""):
    """Pretty-print metadata records as aligned ``key: value`` lines.

    Each record must expose ``.key`` and ``.value``.  Keys are padded to
    the longest key plus one so the colons line up; every line is
    prefixed with *indent*.  Prints a placeholder when *metas* is empty.
    """
    if not metas:
        print("%sNo metadata found" % indent)
        return
    width = max(len(m.key) for m in metas) + 1
    for m in metas:
        print("{}{}: {}".format(indent, m.key.ljust(width), m.value))
def workflow_metadata(recursive=False, submit_dir=".", *args, **kwargs):
    """Print workflow-level metadata for the workflow in *submit_dir*.

    With ``recursive=True``, walks every workflow under the root workflow
    and prints each one's own metadata.  Exits with status 1/2/3 on
    value, connection, or stampede-DB errors respectively.
    """
    logging.debug("workflow_metadata")
    try:
        root_wf_uuid, wf_uuid = get_workflow_uuid(submit_dir)
        logging.debug("Workflow UUID: %s" % wf_uuid)
        db_uri = get_workflow_uri(submit_dir)
        queries = StampedeWorkflowQueries(db_uri)
        if recursive:
            wfs = queries.get_workflows(
                root_wf_uuid, order="w.root_wf_id, w.parent_wf_id"
            )
            for wf in wfs.records:
                print("Workflow %s" % wf.wf_uuid)
                # BUG FIX: previously queried the top-level `wf_uuid` here,
                # so every sub-workflow printed the same metadata.
                workflow_metas = queries.get_workflow_meta(wf.wf_uuid).records
                render_metas(workflow_metas, " ")
        else:
            workflow_metas = queries.get_workflow_meta(wf_uuid).records
            print("Workflow %s" % wf_uuid)
            render_metas(workflow_metas, " ")
    except ValueError as e:
        logging.error(e)
        sys.exit(1)
    except ConnectionError as e:
        logging.error(e)
        sys.exit(2)
    except StampedeDBNotFoundError as e:
        logging.error(e)
        sys.exit(3)
def task_metadata(abs_task_id=None, submit_dir=".", *args, **kwargs):
    """Print metadata for the task identified by *abs_task_id*.

    Requires abs_task_id; exits with status 1/2/3 on value, connection,
    or stampede-DB errors respectively.
    """
    logging.debug("task_metadata")
    if not abs_task_id:
        logging.error("task_id is required")
        sys.exit(1)
    try:
        root_wf_uuid, wf_uuid = get_workflow_uuid(submit_dir)
        logging.debug("Workflow UUID: %s" % wf_uuid)
        queries = StampedeWorkflowQueries(get_workflow_uri(submit_dir))
        logging.debug("Get task metadata for abs_task_id %s" % abs_task_id)
        matches = queries.get_workflow_tasks(
            wf_uuid, query="t.abs_task_id == %r" % abs_task_id
        )
        if matches.total_filtered == 0:
            raise ValueError("Invalid task_name %r" % abs_task_id)
        task_metas = queries.get_task_meta(matches.records[0].task_id).records
        print("Task", abs_task_id)
        render_metas(task_metas, " ")
    except ValueError as e:
        logging.error(e)
        sys.exit(1)
    except ConnectionError as e:
        logging.error(e)
        sys.exit(2)
    except StampedeDBNotFoundError as e:
        logging.error(e)
        sys.exit(3)
def file_metadata(
    file_name=None, list_files=False, trace=False, submit_dir=".", *args, **kwargs
):
    """Print metadata for workflow files.

    With no *file_name*, lists metadata for all files.  With
    ``trace=True``, also prints the root-workflow, workflow, and task
    metadata associated with the given file.  Exits with status 1/2/3 on
    value, connection, or stampede-DB errors respectively.
    """
    # was "task_metadata": copy-paste fix so the debug trace names the
    # right function
    logging.debug("file_metadata")
    if trace and not file_name:
        logging.error("file_name is required when trace is True")
        sys.exit(1)
    if not file_name:
        list_files = True
        logging.info("file_name not provided, will list metadata for all files")
    try:
        root_wf_uuid, wf_uuid = get_workflow_uuid(submit_dir)
        logging.debug("Workflow UUID: %s" % wf_uuid)
        db_uri = get_workflow_uri(submit_dir)
        queries = StampedeWorkflowQueries(db_uri)
        if list_files:
            logging.debug("Get file metadata for all files")
            workflow_files = queries.get_workflow_files(wf_uuid)
            if workflow_files.total_filtered == 0:
                print("No files found")
        else:
            logging.debug("Get file metadata for lfn %r" % file_name)
            workflow_files = queries.get_workflow_files(
                wf_uuid, query="l.lfn == %r" % file_name
            )
            if workflow_files.total_filtered == 0:
                raise ValueError("Invalid file %r" % file_name)
            if trace:
                # BUG FIX: `records` is a sequence (it is iterated below),
                # so index it directly; `.records.values()[0]` raises on
                # Python 3 even for dicts (values views are not
                # subscriptable).
                wf_file = workflow_files.records[0]
                wf_id = wf_file.extras.wf_id
                task_id = wf_file.extras.task_id
                # Get workflow information
                root_wf = None
                wf = queries.get_workflow(wf_id)
                # If workflow is hierarchical workflow, get root workflow information
                if wf_id != wf.root_wf_id:
                    root_wf = queries.get_workflow(wf.root_wf_id)
                if root_wf:
                    root_wf_metas = queries.get_workflow_meta(root_wf.wf_id).records
                    print("Root Workflow %s" % root_wf.wf_uuid)
                    render_metas(root_wf_metas, " ")
                    print()
                wf_metas = queries.get_workflow_meta(wf_id).records
                print("Workflow %s" % wf.wf_uuid)
                render_metas(wf_metas, " ")
                print()
                task = queries.get_task(task_id)
                task_metas = queries.get_task_meta(task_id).records
                print("Task %s" % task.abs_task_id)
                render_metas(task_metas, " ")
                print()
        for wf_file in workflow_files.records:
            print("File %s" % wf_file.lfn)
            render_metas(wf_file.meta, " ")
    except ValueError as e:
        logging.error(e)
        sys.exit(1)
    except ConnectionError as e:
        logging.error(e)
        sys.exit(2)
    except StampedeDBNotFoundError as e:
        logging.error(e)
        sys.exit(3)
def main():
    """Parse CLI arguments and dispatch to the selected metadata query."""
    # shared options every sub-command accepts
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser.add_argument(
        "-v", "--verbose", default=0, action="count", help="Logging verbosity"
    )
    parent_parser.add_argument(
        "submit_dir", nargs="?", default=".", help="Workflow submit directory"
    )
    parser = argparse.ArgumentParser(description="Pegasus Metadata Query Tool")
    sub_parser = parser.add_subparsers(
        title="Metadata types", description="Types of metadata that can be queried"
    )
    # Workflow Metadata Options
    workflow = sub_parser.add_parser("workflow", parents=[parent_parser])
    workflow.add_argument("-r", "--recursive", default=False, action="store_true")
    workflow.set_defaults(func=workflow_metadata)
    # Task Metadata Options
    task = sub_parser.add_parser("task", parents=[parent_parser])
    task.add_argument("-i", "--task-id", dest="abs_task_id", required=True)
    task.set_defaults(func=task_metadata)
    # File Metadata Options
    file = sub_parser.add_parser("file", parents=[parent_parser])
    file.add_argument("-n", "--file-name")
    file.add_argument(
        "-l", "--list", default=False, action="store_true", dest="list_files"
    )
    file.add_argument("-t", "--trace", default=False, action="store_true")
    file.set_defaults(func=file_metadata)
    args = parser.parse_args(sys.argv[1:])
    if not hasattr(args, "func"):
        # BUG FIX: subparsers are optional by default in Python 3, so
        # running with no sub-command left `args` without `func` and
        # crashed with AttributeError; show usage instead.
        parser.print_help()
        sys.exit(1)
    configure_logging(args.verbose)
    args.func(**vars(args))


if __name__ == "__main__":
    main()
| |
import string
from model_mommy import mommy
from datetime import datetime
from django_rq import job
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from django.contrib.auth.models import User
from django.utils import timezone
from dateutil.parser import parse as extract_date
from django.conf import settings
from survey.models import (InterviewerAccess, ODKAccess, USSDAccess, Interview, Interviewer, QuestionSetChannel,
EnumerationArea, Survey, SurveyAllocation, Question, QuestionSet, Batch, BatchQuestion,
QuestionOption)
from survey.forms.question import get_question_form
# import all question types
from survey.models import (Answer, NumericalAnswer, TextAnswer, MultiChoiceAnswer, MultiSelectAnswer, GeopointAnswer,
ImageAnswer, AudioAnswer, VideoAnswer, DateAnswer, AutoResponse)
from survey.utils.decorators import static_var
from survey.tests.base_test import BaseTest
from survey.forms.answer import SurveyAllocationForm, AddMoreLoopForm
from .survey_base_test import SurveyBaseTest
from survey.utils.views_helper import activate_super_powers
class InterviewsTest(SurveyBaseTest):
def test_name(self):
interview = self.interview
self.assertEquals(str(interview), '%s: %s' % (interview.id, interview.question_set.name))
def test_is_closed(self):
self.assertEquals(self.interview.closure_date is not None, self.interview.is_closed())
def test_interview_qset_gives_property_maps_to_correct_type(self):
self.assertEquals(self.qset.id, self.interview.qset.id)
self.assertEquals(self.qset.__class__, self.interview.qset.__class__)
def test_interview_is_considered_stared_when_last_question_is_not_none(self):
self.assertEquals(self.interview.last_question, None)
self.assertFalse(self.interview.has_started)
def test_question_text_is_given_when_no_response_is_supplied(self):
self._create_ussd_non_group_questions(self.qset)
interview = self.interview
first_question = interview.question_set.start_question
# confirm if its the Numerical answer
self.assertEquals(first_question.answer_type, NumericalAnswer.choice_name())
# interview has not started
self.assertEquals(interview.has_started, False)
self.assertEquals(Answer.objects.count(), 0)
response = interview.respond() # first question is numerical
self.assertEquals(response, first_question.text)
def test_last_question_is_updated_after_response(self):
self._create_ussd_non_group_questions(self.qset)
interview = self.interview
first_question = interview.question_set.start_question
# confirm if its the Numerical answer
self.assertEquals(first_question.answer_type, NumericalAnswer.choice_name())
response = interview.respond()
interview.refresh_from_db()
self.assertEquals(interview.has_started, True)
self.assertEquals(interview.last_question.id, first_question.id)
def _validate_response(self, question, answer, interview=None):
if interview is None:
interview = self.interview
answer_count = Answer.objects.count()
questions = self.qset.flow_questions
interview.respond(reply=answer, answers_context={})
interview.refresh_from_db()
self.assertEquals(Answer.objects.count(), answer_count+1)
next_question = question.next_question(answer)
# just confirm text value of this answer was saved
self.assertTrue(interview.get_answer(question), str(answer))
question = Question.get(id=question.id)
# next test is valid
if questions.index(question) < len(questions) - 1:
self.assertEquals(next_question.id, questions[questions.index(question)+1].id)
self.assertEquals(next_question.id, interview.last_question.id)
def test_interview_response_flow(self):
self._create_ussd_non_group_questions(self.qset)
interview = self.interview
self._try_interview(interview)
def _try_interview(self, interview):
first_question = interview.question_set.start_question
response = interview.respond() # first question is numerical
self.assertEquals(response, first_question.text)
self._validate_response(first_question, 1, interview=interview) # numerical question
self._validate_response(self.qset.flow_questions[1], 'Howdy', interview=interview) # text question
self._validate_response(self.qset.flow_questions[2], 'N', interview=interview) # Multichoice
# auto response is internally an integer answer only that its generated by code (but outside models)
self._validate_response(self.qset.flow_questions[3], 1, interview=interview) # Auto response
# now assert that the interview is closed.
self.assertTrue(interview.is_closed())
def test_interviews_belonging_to_a_survey(self):
self._create_ussd_non_group_questions(self.qset)
interview = mommy.make(Interview, interviewer=self.interviewer, survey=self.survey, ea=self.ea,
interview_channel=self.access_channel, question_set=self.qset)
self._try_interview(interview)
self.assertEquals(Interview.interviews(self.survey).exclude(survey=self.survey).count(), 0)
def test_interviews_in_a_location(self):
self._create_ussd_non_group_questions(self.qset)
location1 = self.ea.locations.first()
interview = mommy.make(Interview, interviewer=self.interviewer, survey=self.survey, ea=self.ea,
interview_channel=self.access_channel, question_set=self.qset)
self._try_interview(interview)
interview = mommy.make(Interview, interviewer=self.interviewer, survey=self.survey, ea=self.ea,
interview_channel=self.access_channel, question_set=self.qset)
self._try_interview(interview)
self.assertEquals(Interview.interviews_in(location1, include_self=True).count(), Interview.objects.count())
self.assertEquals(Interview.interviews_in(location1, survey=self.survey, include_self=True).count(),
Interview.objects.count())
# test another location doesnt have any interviews
location2 = EnumerationArea.objects.exclude(locations__in=self.ea.locations.all()).first().locations.first()
self.assertEquals(Interview.interviews_in(location2, include_self=True).count(), 0)
self.assertEquals(Interview.interviews_in(location2, survey=self.survey, include_self=True).count(), 0)
def _load_other_client(self):
self.client = Client()
User.objects.create_user(username='useless', email='demo3@kant.com', password='I_Suck')
user = User.objects.create_user('demo13', 'demo3@kant.com', 'demo13')
self.assign_permission_to(user, 'can_have_super_powers')
self.assign_permission_to(user, 'can_view_users')
self.client.login(username='demo13', password='demo13')
return user
def test_bulk_answer_questions(self):
self._create_ussd_non_group_questions(self.qset)
answers = []
n_quest = Question.objects.get(answer_type=NumericalAnswer.choice_name())
t_quest = Question.objects.get(answer_type=TextAnswer.choice_name())
m_quest = Question.objects.get(answer_type=MultiChoiceAnswer.choice_name())
# first is numeric, then text, then multichioice
answers = [{n_quest.id: 1, t_quest.id: 'Hey Man', m_quest.id: 'Y'},
{n_quest.id: 5, t_quest.id: 'Hey Boy', m_quest.id: 'Y'},
{n_quest.id: 15, t_quest.id: 'Hey Girl!', m_quest.id: 'N'},
{n_quest.id: 15, t_quest.id: 'Hey Part!'}
]
question_map = {n_quest.id: n_quest, t_quest.id: t_quest, m_quest.id: m_quest}
interview = self.interview
Interview.save_answers(self.qset, self.survey, self.ea,
self.access_channel, question_map, answers)
# confirm that 11 answers has been created
self.assertEquals(NumericalAnswer.objects.count(), 4)
self.assertEquals(TextAnswer.objects.count(), 4)
self.assertEquals(MultiChoiceAnswer.objects.count(), 3)
self.assertEquals(TextAnswer.objects.first().to_text().lower(), 'Hey Man'.lower())
self.assertEquals(MultiChoiceAnswer.objects.first().as_text.lower(), 'Y'.lower())
self.assertEquals(MultiChoiceAnswer.objects.first().as_value, str(QuestionOption.objects.get(text='Y').order))
# now test wipe data
request = RequestFactory().get('.')
request.user = self._load_other_client()
activate_super_powers(request)
url = reverse('wipe_survey_data', args=(self.survey.id,))
answer_count = Answer.objects.count()
self.assertTrue(answer_count > 0)
response = self.client.get(url)
self.assertEquals(Answer.objects.count(), 0)
def test_respond_on_closed_interview(self):
    """A closed interview must not produce a next question."""
    self.interview.closure_date = timezone.now()
    self.interview.save()
    self.assertIsNone(self.interview.respond())
def test_respond_start_question_interview(self):
    """respond() on a fresh interview returns the group's first question
    rendered for the ODK channel."""
    self._create_ussd_group_questions()
    expected = self.qset.g_first_question.display_text(channel=ODKAccess.choice_name())
    self.assertEqual(self.interview.respond(), expected)
class InterviewsTestExtra(SurveyBaseTest):
    """Additional interview coverage: respond() idempotence, queryset
    filtering helpers, ODK expression builders and answer comparators."""

    def test_first_question_is_loop_first(self):
        self._create_ussd_group_questions()
        # test first question is group first
        self.assertEquals(self.interview.respond(),
                          self.qset.g_first_question.display_text(channel=ODKAccess.choice_name(), context={}))
        # test running again gives same results (respond() is idempotent before any answer)
        self.assertEquals(self.interview.respond(),
                          self.qset.g_first_question.display_text(channel=ODKAccess.choice_name(), context={}))

    def test_interviews_in_exclude_self(self):
        # Interviews looked up from the parent location include this interview.
        location = self.ea.locations.first()
        interviews = Interview.interviews_in(location.parent)
        self.assertTrue(interviews.filter(id=self.interview.id).exists())

    def test_answers_unicode_rep(self):
        self._create_ussd_non_group_questions()
        n_question = Question.objects.filter(answer_type=NumericalAnswer.choice_name()).first()
        answer = NumericalAnswer.create(self.interview, n_question, 1)
        # unicode(answer) should mirror the textual representation
        self.assertEquals(str(answer.as_text), unicode(answer))
        # test update (since numeric makes use of the parent implementation)
        answer.update(2)
        self.assertEquals(answer.as_value, 2)
        # just test to_label also :)
        self.assertEquals(answer.to_label(), 2)
        # test pretty_print
        self.assertEquals(str(answer.pretty_print()), '2')

    def test_get_answer_class_with_doesnt_exist(self):
        # Unknown answer-type names must raise rather than fail silently.
        self.assertRaises(ValueError, Answer.get_class, 'Fake_Anwer')

    def _prep_answers(self):
        """Create non-group questions plus five answer records and return the
        saved interviews as a queryset. Also asserts the created counts."""
        self._create_test_non_group_questions(self.qset)
        answers = []
        n_quest = Question.objects.get(answer_type=NumericalAnswer.choice_name())
        t_quest = Question.objects.get(answer_type=TextAnswer.choice_name())
        m_quest = Question.objects.get(answer_type=MultiChoiceAnswer.choice_name())
        # first is numeric, then text, then multichoice;
        # the final record deliberately omits the multichoice answer
        answers = [{n_quest.id: 1, t_quest.id: 'Hey Man', m_quest.id: 'Y'},
                   {n_quest.id: 5, t_quest.id: 'Our Hey Boy', m_quest.id: 'Y'},
                   {n_quest.id: 27, t_quest.id: 'Hey Girl!', m_quest.id: 'N'},
                   {n_quest.id: 12, t_quest.id: 'Hey Raster!', m_quest.id: 'N'},
                   {n_quest.id: 19, t_quest.id: 'This bad boy'}
                   ]
        question_map = {n_quest.id: n_quest, t_quest.id: t_quest, m_quest.id: m_quest}
        interview = self.interview  # NOTE(review): assigned but never used
        interviews = Interview.save_answers(self.qset, self.survey, self.ea,
                                            self.access_channel, question_map, answers)
        # confirm the answers were created (5 numeric + 5 text + 4 multichoice)
        self.assertEquals(NumericalAnswer.objects.count(), 5)
        self.assertEquals(TextAnswer.objects.count(), 5)
        self.assertEquals(MultiChoiceAnswer.objects.count(), 4)
        self.assertEquals(TextAnswer.objects.first().to_text().lower(), 'Hey Man'.lower())
        self.assertEquals(MultiChoiceAnswer.objects.first().as_text.lower(), 'Y'.lower())
        multichoice = MultiChoiceAnswer.objects.first()
        self.assertEquals(multichoice.as_value,
                          str(QuestionOption.objects.get(text='Y', question=multichoice.question).order))
        return Interview.objects.filter(id__in=[i.id for i in interviews])

    def test_answer_qs_filters(self):
        interviews = self._prep_answers()
        fetched_interviews = Answer.fetch_contains('answer__as_value', 'Hey', qs=interviews)  # 4 interviews meet this
        self.assertEquals(fetched_interviews.count(), 4)
        fetched_interviews = Answer.fetch_starts_with('answer__as_value', 'Hey', qs=interviews)  # 3 interviews meet this
        self.assertEquals(fetched_interviews.count(), 3)
        fetched_interviews = Answer.fetch_ends_with('answer__as_value', 'boy', qs=interviews)  # 2 interviews meet this
        self.assertEquals(fetched_interviews.count(), 2)
        fetched_answers = Answer.fetch_contains('as_value', 'boy')  # 2 answers meet this
        self.assertEquals(fetched_answers.count(), 2)
        fetched_answers = Answer.fetch_starts_with('as_value', 'This')  # 1 answer meets this
        self.assertEquals(fetched_answers.count(), 1)
        fetched_answers = Answer.fetch_ends_with('as_value', 'boy')  # 2 answers meet this
        self.assertEquals(fetched_answers.count(), 2)

    def test_odk_answer_methods(self):
        # The odk_* helpers build ODK expression strings; verify exact formats
        # for the generic, numerical (unquoted) and date variants.
        path = '/qset/qset1/surveyQuestions/q1'
        value = 'me doing somthing'
        self.assertEquals(Answer.odk_contains(path, value), "regex(%s, '.*(%s).*')" % (path, value))
        self.assertEquals(Answer.odk_starts_with(path, value), "regex(%s, '^(%s).*')" % (path, value))
        self.assertEquals(Answer.odk_ends_with(path, value), "regex(%s, '.*(%s)$')" % (path, value))
        value = 4
        upperlmt = 10
        self.assertEquals(Answer.odk_greater_than(path, value), "%s > '%s'" % (path, value))
        self.assertEquals(Answer.odk_less_than(path, value), "%s < '%s'" % (path, value))
        self.assertEquals(Answer.odk_between(path, value, upperlmt),
                          "(%s > '%s') and (%s <= '%s')" % (path, value, path, upperlmt))
        # numerical comparisons emit unquoted operands
        self.assertEquals(NumericalAnswer.odk_less_than(path, value), "%s < %s" % (path, value))
        self.assertEquals(NumericalAnswer.odk_between(path, value, upperlmt),
                          "(%s > %s) and (%s <= %s)" % (path, value, path, upperlmt))
        value = '20-07-2017'
        self.assertEquals(DateAnswer.odk_greater_than(path, value),
                          "%s > %s" % (path, DateAnswer.to_odk_date(value)))
        self.assertEquals(DateAnswer.odk_less_than(path, value),
                          "%s < %s" % (path, DateAnswer.to_odk_date(value)))
        upperlmt = '25-08-2017'
        self.assertEquals(DateAnswer.odk_between(path, value, upperlmt),
                          "(%s > %s) and (%s <= %s)" % (path,
                                                        DateAnswer.to_odk_date(value),
                                                        path, DateAnswer.to_odk_date(upperlmt)))

    def test_answer_value_methods(self):
        # Static comparators shared by the answer type hierarchy.
        value = 'me doing somthing'
        test_answer1 = 'nothing good'
        self.assertFalse(Answer.equals(test_answer1, value))
        self.assertTrue(Answer.equals(value, value))
        self.assertTrue(Answer.starts_with(value, 'me d'))
        self.assertFalse(Answer.ends_with(value, 'no thing'))
        self.assertTrue(Answer.ends_with(value, 'somthing'))
        self.assertFalse(Answer.greater_than(5, 9))
        self.assertTrue(Answer.greater_than(9, 5))
        self.assertTrue(Answer.less_than(5, 9))
        self.assertFalse(Answer.less_than(9, 5))
        self.assertFalse(Answer.between(9, 5, 7))
        self.assertTrue(Answer.between(9, 5, 11))
        self.assertTrue(Answer.passes_test('17 > 10'))
        self.assertFalse(NumericalAnswer.greater_than(5, 9))
        self.assertTrue(NumericalAnswer.greater_than(9, 5))
        self.assertTrue(NumericalAnswer.less_than(5, 9))
        self.assertFalse(NumericalAnswer.less_than(9, 5))
        self.assertFalse(NumericalAnswer.between(9, 5, 7))
        self.assertTrue(NumericalAnswer.between(9, 5, 11))
        self.assertFalse(TextAnswer.equals(test_answer1, value))
        self.assertTrue(TextAnswer.equals(value, value))
        self.assertFalse(MultiChoiceAnswer.equals(test_answer1, value))
        self.assertTrue(MultiChoiceAnswer.equals(value, value))
        self.assertFalse(MultiSelectAnswer.equals(test_answer1, value))
        self.assertTrue(MultiSelectAnswer.equals(value, value))
        # date comparators accept 'dd-mm-yyyy' style strings, as used throughout
        self.assertFalse(DateAnswer.greater_than('12-09-2017', '12-09-2017'))
        self.assertTrue(DateAnswer.greater_than('13-09-2017', '12-09-2017'))
        self.assertFalse(DateAnswer.less_than('18-09-2017', '12-09-2017'))
        self.assertTrue(DateAnswer.less_than('13-09-2017', '17-09-2017'))
        self.assertFalse(DateAnswer.between('18-09-2017', '12-09-2017', '16-09-2017'))
        self.assertTrue(DateAnswer.between('14-09-2017', '12-09-2017', '16-09-2017'))

    def test_other_answer_methods(self):
        # _prep_answers is called for its side effects (creates answer records)
        interviews = self._prep_answers()
        m_answer = MultiChoiceAnswer.objects.last()
        self.assertEqual(m_answer.pretty_print(as_label=False), m_answer.value.text)
        self.assertEqual(m_answer.pretty_print(as_label=True), m_answer.value.order)
        multiselect_question = Question.objects.filter(answer_type=MultiSelectAnswer.choice_name()).last()
        MultiSelectAnswer.create(self.interview, multiselect_question, 'Y N')
        self.assertEqual(MultiSelectAnswer.objects.count(), 1)
        multiselect = MultiSelectAnswer.objects.last()
        self.assertEqual(multiselect.to_text(), ' and '.join(['Y', 'N']))
        self.assertEqual(multiselect.to_label(), ' and '.join(['1', '2']))
        self.assertEqual(multiselect.pretty_print(as_label=False), multiselect.to_text())
        self.assertEqual(multiselect.pretty_print(as_label=True), multiselect.to_label())
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import forms
from django import http
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.test import helpers as test
from horizon import workflows
# Opaque fixture identifiers used as seed/context values throughout the tests.
PROJECT_ID = "a23lkjre389fwenj"
INSTANCE_ID = "sdlkjhf9832roiw"
def local_callback_func(request, context):
    """Connection handler referenced by object in TestStepTwo.connections."""
    return "one"
def other_callback_func(request, context):
    """Connection handler referenced by dotted-path string in TestStepTwo."""
    return "two"
def extra_callback_func(request, context):
    """Connection handler used by TestExtraStep.connections."""
    return "extra"
class TestActionOne(workflows.Action):
    """Action exposing project and user choice fields."""
    project_id = forms.ChoiceField(label=_("Project"))
    user_id = forms.ChoiceField(label=_("User"))

    class Meta:
        name = _("Test Action One")
        slug = "test_action_one"

    def populate_project_id_choices(self, request, context):
        # A single fixed choice keeps the test assertions deterministic.
        return [(PROJECT_ID, "test_project")]

    def populate_user_id_choices(self, request, context):
        # Choices are derived from the authenticated request user.
        return [(request.user.id, request.user.username)]

    def handle(self, request, context):
        return {"foo": "bar"}
class TestActionTwo(workflows.Action):
    """Action with a single required instance field."""
    instance_id = forms.CharField(label=_("Instance"))

    class Meta:
        name = _("Test Action Two")
        slug = "test_action_two"
class TestActionThree(workflows.Action):
    """Action with a free-form textarea field."""
    extra = forms.CharField(widget=forms.widgets.Textarea)

    class Meta:
        name = _("Test Action Three")
        slug = "test_action_three"
class AdminAction(workflows.Action):
    """Action gated behind the 'horizon.test' permission."""
    admin_id = forms.CharField(label=_("Admin"))

    class Meta:
        name = _("Admin Action")
        slug = "admin_action"
        # Only users holding this permission see the corresponding step.
        permissions = ("horizon.test",)
class TestStepOne(workflows.Step):
    """Step contributing project_id and user_id; no connections."""
    action_class = TestActionOne
    contributes = ("project_id", "user_id")
class TestStepTwo(workflows.Step):
    """Step consuming project_id and contributing instance_id.

    Wires two connection handlers for project_id: one by function object and
    one by dotted-path string (both forms must be supported).
    """
    action_class = TestActionTwo
    depends_on = ("project_id",)
    contributes = ("instance_id",)
    connections = {"project_id": (local_callback_func,
                                  "horizon.test.tests.workflows.other_callback_func")}
class TestExtraStep(workflows.Step):
    """Optional step registered per-test; ordered between steps one and two."""
    action_class = TestActionThree
    depends_on = ("project_id",)
    contributes = ("extra_data",)
    connections = {"project_id": (extra_callback_func,)}
    # Ordering hints: placed after TestStepOne and before TestStepTwo.
    after = TestStepOne
    before = TestStepTwo
class AdminStep(workflows.Step):
    """Permission-gated step (via AdminAction) ordered between steps one and two."""
    action_class = AdminAction
    contributes = ("admin_id",)
    after = TestStepOne
    before = TestStepTwo
class TestWorkflow(workflows.Workflow):
    """Workflow under test; extra steps are registered per-test and the
    registry is reset in tearDown."""
    slug = "test_workflow"
    default_steps = (TestStepOne, TestStepTwo)
class TestWorkflowView(workflows.WorkflowView):
    """View wiring TestWorkflow to a template for the view tests."""
    workflow_class = TestWorkflow
    template_name = "workflow.html"
class WorkflowsTests(test.TestCase):
    """Tests for workflow/step construction, connection handlers, validation,
    rendering, permission filtering and dynamic step registration."""

    def setUp(self):
        super(WorkflowsTests, self).setUp()

    def tearDown(self):
        super(WorkflowsTests, self).tearDown()
        self._reset_workflow()

    def _reset_workflow(self):
        # register() mutates class-level state; wipe it so tests stay isolated.
        TestWorkflow._cls_registry = set([])

    def test_workflow_construction(self):
        TestWorkflow.register(TestExtraStep)
        flow = TestWorkflow(self.request)
        # The extra step slots between one and two per its after/before hints.
        self.assertQuerysetEqual(flow.steps,
                                 ['<TestStepOne: test_action_one>',
                                  '<TestExtraStep: test_action_three>',
                                  '<TestStepTwo: test_action_two>'])
        self.assertEqual(flow.depends_on, set(['project_id']))

    def test_step_construction(self):
        step_one = TestStepOne(TestWorkflow(self.request))
        # Action slug is moved from Meta by metaclass, and
        # Step inherits slug from action.
        self.assertEqual(step_one.name, TestActionOne.name)
        self.assertEqual(step_one.slug, TestActionOne.slug)
        # Handlers should be empty since there are no connections.
        self.assertEqual(step_one._handlers, {})
        step_two = TestStepTwo(TestWorkflow(self.request))
        # Handlers should be populated since we do have connections;
        # the dotted-path string must resolve to the real function object.
        self.assertEqual(step_two._handlers["project_id"],
                         [local_callback_func, other_callback_func])

    def test_step_invalid_callback(self):
        # A bare (non-dotted, non-callable) string is rejected at step init.
        class InvalidStep(TestStepTwo):
            connections = {"project_id": ('local_callback_func',)}
        with self.assertRaises(ValueError):
            InvalidStep(TestWorkflow(self.request))

    def test_connection_handlers_called(self):
        TestWorkflow.register(TestExtraStep)
        flow = TestWorkflow(self.request)
        # This should set the value without any errors, but trigger nothing
        flow.context['does_not_exist'] = False
        self.assertEqual(flow.context['does_not_exist'], False)
        # The order here is relevant. Note that we inserted "extra" between
        # steps one and two, and one has no handlers, so we should see
        # a response from extra, then one from each of step two's handlers.
        val = flow.context.set('project_id', PROJECT_ID)
        self.assertEqual(val, [('test_action_three', 'extra'),
                               ('test_action_two', 'one'),
                               ('test_action_two', 'two')])

    def test_workflow_validation(self):
        flow = TestWorkflow(self.request)
        # Missing items fail validation.
        with self.assertRaises(exceptions.WorkflowValidationError):
            flow.is_valid()
        # All required items pass validation.
        seed = {"project_id": PROJECT_ID,
                "user_id": self.user.id,
                "instance_id": INSTANCE_ID}
        req = self.factory.post("/", seed)
        req.user = self.user
        flow = TestWorkflow(req, context_seed={"project_id": PROJECT_ID})
        for step in flow.steps:
            if not step.action.is_valid():
                self.fail("Step %s was unexpectedly invalid: %s"
                          % (step.slug, step.action.errors))
        self.assertTrue(flow.is_valid())
        # Additional items shouldn't affect validation
        flow.context.set("extra_data", "foo")
        self.assertTrue(flow.is_valid())

    def test_workflow_finalization(self):
        flow = TestWorkflow(self.request)
        self.assertTrue(flow.finalize())

    def test_workflow_view(self):
        view = TestWorkflowView.as_view()
        req = self.factory.get("/")
        res = view(req)
        self.assertEqual(res.status_code, 200)

    def test_workflow_registration(self):
        req = self.factory.get("/foo")
        flow = TestWorkflow(req)
        self.assertQuerysetEqual(flow.steps,
                                 ['<TestStepOne: test_action_one>',
                                  '<TestStepTwo: test_action_two>'])
        # Steps registered after construction appear in freshly-built flows.
        TestWorkflow.register(TestExtraStep)
        flow = TestWorkflow(req)
        self.assertQuerysetEqual(flow.steps,
                                 ['<TestStepOne: test_action_one>',
                                  '<TestExtraStep: test_action_three>',
                                  '<TestStepTwo: test_action_two>'])

    def test_workflow_render(self):
        TestWorkflow.register(TestExtraStep)
        req = self.factory.get("/foo")
        flow = TestWorkflow(req)
        output = http.HttpResponse(flow.render())
        # Rendered markup includes the workflow name and every action name.
        self.assertContains(output, unicode(flow.name))
        self.assertContains(output, unicode(TestActionOne.name))
        self.assertContains(output, unicode(TestActionTwo.name))
        self.assertContains(output, unicode(TestActionThree.name))

    def test_has_permissions(self):
        self.assertQuerysetEqual(TestWorkflow._cls_registry, [])
        TestWorkflow.register(AdminStep)
        flow = TestWorkflow(self.request)
        step = AdminStep(flow)
        self.assertItemsEqual(step.permissions,
                              ("horizon.test",))
        # Without the permission, the admin step is filtered out of the flow.
        self.assertQuerysetEqual(flow.steps,
                                 ['<TestStepOne: test_action_one>',
                                  '<TestStepTwo: test_action_two>'])
        self.set_permissions(['test'])
        self.request.user = self.user
        flow = TestWorkflow(self.request)
        self.assertQuerysetEqual(flow.steps,
                                 ['<TestStepOne: test_action_one>',
                                  '<AdminStep: admin_action>',
                                  '<TestStepTwo: test_action_two>'])

    def test_entry_point(self):
        req = self.factory.get("/foo")
        flow = TestWorkflow(req)
        self.assertEqual(flow.get_entry_point(), "test_action_one")
        # An explicit entry point overrides the default (first step).
        flow = TestWorkflow(req, entry_point="test_action_two")
        self.assertEqual(flow.get_entry_point(), "test_action_two")
| |
"""Support for Sonarr."""
from datetime import datetime
import logging
import time
from pytz import timezone
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_PORT,
CONF_SSL,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# Platform-specific configuration keys.
CONF_DAYS = "days"
CONF_INCLUDED = "include_paths"
CONF_UNIT = "unit"
CONF_URLBASE = "urlbase"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8989
DEFAULT_URLBASE = ""
DEFAULT_DAYS = "1"
DEFAULT_UNIT = "GB"
# sensor key -> [friendly name, unit of measurement, icon]
SENSOR_TYPES = {
    "diskspace": ["Disk Space", "GB", "mdi:harddisk"],
    "queue": ["Queue", "Episodes", "mdi:download"],
    "upcoming": ["Upcoming", "Episodes", "mdi:television"],
    "wanted": ["Wanted", "Episodes", "mdi:television"],
    "series": ["Series", "Shows", "mdi:television"],
    "commands": ["Commands", "Commands", "mdi:code-braces"],
    "status": ["Status", "Status", "mdi:information"],
}
# URL templates: {0}=ssl suffix ('' or 's'), {1}=host, {2}=port, {3}=urlbase,
# {4}/{5}=start/end dates (used by 'upcoming' only).
ENDPOINTS = {
    "diskspace": "http{0}://{1}:{2}/{3}api/diskspace",
    "queue": "http{0}://{1}:{2}/{3}api/queue",
    "upcoming": "http{0}://{1}:{2}/{3}api/calendar?start={4}&end={5}",
    "wanted": "http{0}://{1}:{2}/{3}api/wanted/missing",
    "series": "http{0}://{1}:{2}/{3}api/series",
    "commands": "http{0}://{1}:{2}/{3}api/command",
    "status": "http{0}://{1}:{2}/{3}api/system/status",
}
# Support to Yottabytes for the future, why not
BYTE_SIZES = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Optional(CONF_DAYS, default=DEFAULT_DAYS): cv.string,
        vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
        vol.Optional(CONF_INCLUDED, default=[]): cv.ensure_list,
        vol.Optional(CONF_MONITORED_CONDITIONS, default=["upcoming"]): vol.All(
            cv.ensure_list, [vol.In(list(SENSOR_TYPES))]
        ),
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_SSL, default=False): cv.boolean,
        vol.Optional(CONF_UNIT, default=DEFAULT_UNIT): vol.In(BYTE_SIZES),
        vol.Optional(CONF_URLBASE, default=DEFAULT_URLBASE): cv.string,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Sonarr platform: one sensor per monitored condition."""
    monitored = config.get(CONF_MONITORED_CONDITIONS)
    sensors = []
    for sensor_type in monitored:
        sensors.append(SonarrSensor(hass, config, sensor_type))
    add_entities(sensors, True)
class SonarrSensor(Entity):
    """Implementation of the Sonarr sensor.

    One instance per monitored condition; ``update`` polls the matching
    Sonarr API endpoint over HTTP using the configured API key.
    """

    def __init__(self, hass, conf, sensor_type):
        """Create Sonarr entity."""
        self.conf = conf
        self.host = conf.get(CONF_HOST)
        self.port = conf.get(CONF_PORT)
        self.urlbase = conf.get(CONF_URLBASE)
        if self.urlbase:
            # Normalise to exactly one trailing slash for URL formatting.
            self.urlbase = "{}/".format(self.urlbase.strip("/"))
        self.apikey = conf.get(CONF_API_KEY)
        self.included = conf.get(CONF_INCLUDED)
        self.days = int(conf.get(CONF_DAYS))
        self.ssl = "s" if conf.get(CONF_SSL) else ""  # 'https' vs 'http'
        self._state = None
        self.data = []  # last payload fetched for this sensor type
        self._tz = timezone(str(hass.config.time_zone))
        self.type = sensor_type
        self._name = SENSOR_TYPES[self.type][0]
        if self.type == "diskspace":
            # Disk space unit is user-configurable; other types use fixed units.
            self._unit = conf.get(CONF_UNIT)
        else:
            self._unit = SENSOR_TYPES[self.type][1]
        self._icon = SENSOR_TYPES[self.type][2]
        self._available = False

    @property
    def name(self):
        """Return the name of the sensor."""
        return "{} {}".format("Sonarr", self._name)

    @property
    def state(self):
        """Return sensor state."""
        return self._state

    @property
    def available(self):
        """Return sensor availability."""
        return self._available

    @property
    def unit_of_measurement(self):
        """Return the unit of the sensor."""
        return self._unit

    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor.

        The attribute layout depends on the sensor type; each branch maps a
        human-readable key to a per-item summary built from ``self.data``.
        """
        attributes = {}
        if self.type == "upcoming":
            # "Series title" -> "SxxEyy"
            for show in self.data:
                attributes[show["series"]["title"]] = "S{:02d}E{:02d}".format(
                    show["seasonNumber"], show["episodeNumber"]
                )
        elif self.type == "queue":
            # "Series title SxxEyy" -> download completion percentage
            for show in self.data:
                remaining = 1 if show["size"] == 0 else show["sizeleft"] / show["size"]
                attributes[
                    show["series"]["title"]
                    + " S{:02d}E{:02d}".format(
                        show["episode"]["seasonNumber"],
                        show["episode"]["episodeNumber"],
                    )
                ] = "{:.2f}%".format(100 * (1 - (remaining)))
        elif self.type == "wanted":
            # "Series title SxxEyy" -> air date
            for show in self.data:
                attributes[
                    show["series"]["title"]
                    + " S{:02d}E{:02d}".format(
                        show["seasonNumber"], show["episodeNumber"]
                    )
                ] = show["airDate"]
        elif self.type == "commands":
            for command in self.data:
                attributes[command["name"]] = command["state"]
        elif self.type == "diskspace":
            # path -> "free/total<unit> (free%)"
            for data in self.data:
                attributes[data["path"]] = "{:.2f}/{:.2f}{} ({:.2f}%)".format(
                    to_unit(data["freeSpace"], self._unit),
                    to_unit(data["totalSpace"], self._unit),
                    self._unit,
                    (
                        to_unit(data["freeSpace"], self._unit)
                        / to_unit(data["totalSpace"], self._unit)
                        * 100
                    ),
                )
        elif self.type == "series":
            for show in self.data:
                if "episodeFileCount" not in show or "episodeCount" not in show:
                    attributes[show["title"]] = "N/A"
                else:
                    attributes[show["title"]] = "{}/{} Episodes".format(
                        show["episodeFileCount"], show["episodeCount"]
                    )
        elif self.type == "status":
            attributes = self.data
        return attributes

    @property
    def icon(self):
        """Return the icon of the sensor."""
        return self._icon

    def update(self):
        """Update the data for the sensor.

        Fetches the endpoint for this sensor type; on network failure the
        sensor is marked unavailable and the state cleared.
        """
        start = get_date(self._tz)
        end = get_date(self._tz, self.days)
        try:
            res = requests.get(
                ENDPOINTS[self.type].format(
                    self.ssl, self.host, self.port, self.urlbase, start, end
                ),
                headers={"X-Api-Key": self.apikey},
                timeout=10,
            )
        except OSError:
            _LOGGER.warning("Host %s is not available", self.host)
            self._available = False
            self._state = None
            return
        if res.status_code == 200:
            if self.type in ["upcoming", "queue", "series", "commands"]:
                if self.days == 1 and self.type == "upcoming":
                    # Sonarr API returns an empty array if start and end dates
                    # are the same, so we need to filter to just today
                    self.data = list(
                        filter(lambda x: x["airDate"] == str(start), res.json())
                    )
                else:
                    self.data = res.json()
                self._state = len(self.data)
            elif self.type == "wanted":
                # First request learns the total count, second fetches all
                # records in a single page of that size.
                data = res.json()
                res = requests.get(
                    "{}?pageSize={}".format(
                        ENDPOINTS[self.type].format(
                            self.ssl, self.host, self.port, self.urlbase
                        ),
                        data["totalRecords"],
                    ),
                    headers={"X-Api-Key": self.apikey},
                    timeout=10,
                )
                self.data = res.json()["records"]
                self._state = len(self.data)
            elif self.type == "diskspace":
                # If included paths are not provided, use all data
                if self.included == []:
                    self.data = res.json()
                else:
                    # Filter to only show lists that are included
                    self.data = list(
                        filter(lambda x: x["path"] in self.included, res.json())
                    )
                # State is total free space across the selected paths.
                self._state = "{:.2f}".format(
                    to_unit(sum([data["freeSpace"] for data in self.data]), self._unit)
                )
            elif self.type == "status":
                self.data = res.json()
                self._state = self.data["version"]
            self._available = True
def get_date(zone, offset=0):
    """Return today's date in *zone*, shifted forward by *offset* days."""
    seconds_per_day = 60 * 60 * 24
    shifted = time.time() + seconds_per_day * offset
    return datetime.fromtimestamp(shifted, tz=zone).date()
def to_unit(value, unit):
    """Convert a byte count *value* into *unit* (a member of BYTE_SIZES)."""
    exponent = BYTE_SIZES.index(unit)
    return value / (1024 ** exponent)
| |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
import tempfile
from typing import List, Any, Optional
from py4j.java_gateway import JavaObject
from pyflink.common import Configuration, WatermarkStrategy
from pyflink.common.execution_config import ExecutionConfig
from pyflink.common.job_client import JobClient
from pyflink.common.job_execution_result import JobExecutionResult
from pyflink.common.restart_strategy import RestartStrategies, RestartStrategyConfiguration
from pyflink.common.typeinfo import TypeInformation, Types
from pyflink.datastream import SlotSharingGroup
from pyflink.datastream.checkpoint_config import CheckpointConfig
from pyflink.datastream.checkpointing_mode import CheckpointingMode
from pyflink.datastream.connectors import Source
from pyflink.datastream.data_stream import DataStream
from pyflink.datastream.execution_mode import RuntimeExecutionMode
from pyflink.datastream.functions import SourceFunction
from pyflink.datastream.state_backend import _from_j_state_backend, StateBackend
from pyflink.datastream.time_characteristic import TimeCharacteristic
from pyflink.java_gateway import get_gateway
from pyflink.serializers import PickleSerializer
from pyflink.util.java_utils import load_java_class, add_jars_to_context_class_loader, \
invoke_method, get_field_value, is_local_deployment, get_j_env_configuration
__all__ = ['StreamExecutionEnvironment']
class StreamExecutionEnvironment(object):
"""
The StreamExecutionEnvironment is the context in which a streaming program is executed. A
*LocalStreamEnvironment* will cause execution in the attached JVM, a
*RemoteStreamEnvironment* will cause execution on a remote setup.
The environment provides methods to control the job execution (such as setting the parallelism
or the fault tolerance/checkpointing parameters) and to interact with the outside world (data
access).
"""
def __init__(self, j_stream_execution_environment, serializer=PickleSerializer()):
    # Wraps the Java StreamExecutionEnvironment; all public methods delegate
    # to it via py4j.
    # NOTE(review): the default serializer instance is shared across all
    # environments constructed without an explicit serializer — fine only if
    # PickleSerializer is stateless; confirm.
    self._j_stream_execution_environment = j_stream_execution_environment
    self.serializer = serializer
    # _open() is defined elsewhere in this class; presumably performs
    # gateway/session setup — verify before relying on side effects here.
    self._open()
def get_config(self) -> ExecutionConfig:
    """
    Gets the config object.

    :return: The :class:`~pyflink.common.ExecutionConfig` object.
    """
    j_config = self._j_stream_execution_environment.getConfig()
    return ExecutionConfig(j_config)
def set_parallelism(self, parallelism: int) -> 'StreamExecutionEnvironment':
    """
    Sets the default parallelism for operations executed through this
    environment: each operator (map, batchReduce, ...) runs with
    ``parallelism`` parallel instances. The *LocalStreamEnvironment*
    defaults to the number of hardware contexts (CPU cores / threads);
    when submitting from a JAR via the command line client, the configured
    default for that setup applies instead.

    :param parallelism: The parallelism.
    :return: This object.
    """
    j_env = self._j_stream_execution_environment.setParallelism(parallelism)
    self._j_stream_execution_environment = j_env
    return self
def set_max_parallelism(self, max_parallelism: int) -> 'StreamExecutionEnvironment':
    """
    Sets the maximum degree of parallelism defined for the program;
    the upper limit (inclusive) is 32767.

    This value bounds dynamic scaling and also fixes the number of key
    groups used for partitioned state.

    :param max_parallelism: Maximum degree of parallelism to be used for the
                            program, with 0 < maxParallelism <= 2^15 - 1.
    :return: This object.
    """
    j_env = self._j_stream_execution_environment.setMaxParallelism(max_parallelism)
    self._j_stream_execution_environment = j_env
    return self
def register_slot_sharing_group(self, slot_sharing_group: SlotSharingGroup) \
        -> 'StreamExecutionEnvironment':
    """
    Register a slot sharing group with its resource spec.

    A slot sharing group hints the scheduler that the grouped operators CAN
    be deployed into a shared slot; there is no guarantee that they always
    are. When grouped operators end up in separate slots, each slot's
    resources are derived from the specified group requirements.

    :param slot_sharing_group: Which contains name and its resource spec.
    :return: This object.
    """
    j_group = slot_sharing_group.get_java_slot_sharing_group()
    self._j_stream_execution_environment = \
        self._j_stream_execution_environment.registerSlotSharingGroup(j_group)
    return self
def get_parallelism(self) -> int:
    """
    Gets the parallelism with which operations are executed by default.
    Individual operations may override this value.

    :return: The parallelism used by operations, unless they override that value.
    """
    j_env = self._j_stream_execution_environment
    return j_env.getParallelism()
def get_max_parallelism(self) -> int:
    """
    Gets the maximum degree of parallelism defined for the program — the
    upper limit for dynamic scaling and the number of key groups used for
    partitioned state.

    :return: Maximum degree of parallelism.
    """
    j_env = self._j_stream_execution_environment
    return j_env.getMaxParallelism()
def set_runtime_mode(self, execution_mode: RuntimeExecutionMode):
    """
    Sets the runtime execution mode for the application
    :class:`~pyflink.datastream.execution_mode.RuntimeExecutionMode`. This
    is equivalent to setting the `execution.runtime-mode` in your application's
    configuration file.

    We recommend users to NOT use this method but set the `execution.runtime-mode` using
    the command-line when submitting the application. Keeping the application code
    configuration-free allows for more flexibility as the same application will be able to be
    executed in any execution mode.

    :param execution_mode: The desired execution mode.
    :return: The execution environment of your application.

    .. versionadded:: 1.13.0
    """
    # NOTE(review): unlike the other setters in this class, this returns the
    # raw result of the Java call rather than ``self`` — confirm callers do
    # not expect the Python environment for chaining.
    return self._j_stream_execution_environment.setRuntimeMode(
        execution_mode._to_j_execution_mode())
def set_buffer_timeout(self, timeout_millis: int) -> 'StreamExecutionEnvironment':
    """
    Sets the maximum time frequency (milliseconds) for flushing the output
    buffers. By default the buffers flush frequently to provide low latency
    and a smooth developer experience. The parameter selects one of three
    logical modes:

    - a positive integer triggers flushing periodically by that integer
    - 0 triggers flushing after every record, thus minimizing latency
    - -1 triggers flushing only when the output buffer is full, thus
      maximizing throughput

    :param timeout_millis: The maximum time between two output flushes.
    :return: This object.
    """
    j_env = self._j_stream_execution_environment.setBufferTimeout(timeout_millis)
    self._j_stream_execution_environment = j_env
    return self
def get_buffer_timeout(self) -> int:
    """
    Gets the maximum time frequency (milliseconds) for flushing the output
    buffers. For clarification on the extremal values see
    :func:`set_buffer_timeout`.

    :return: The timeout of the buffer.
    """
    j_env = self._j_stream_execution_environment
    return j_env.getBufferTimeout()
def disable_operator_chaining(self) -> 'StreamExecutionEnvironment':
    """
    Disables operator chaining for streaming operators. Chaining allows
    non-shuffle operations to be co-located in the same thread, fully
    avoiding serialization and de-serialization.

    :return: This object.
    """
    j_env = self._j_stream_execution_environment.disableOperatorChaining()
    self._j_stream_execution_environment = j_env
    return self
def is_chaining_enabled(self) -> bool:
    """
    Returns whether operator chaining is enabled.

    :return: True if chaining is enabled, false otherwise.
    """
    j_env = self._j_stream_execution_environment
    return j_env.isChainingEnabled()
def get_checkpoint_config(self) -> CheckpointConfig:
    """
    Gets the checkpoint config, which defines values like checkpoint
    interval, delay between checkpoints, etc.

    :return: The :class:`~pyflink.datastream.CheckpointConfig`.
    """
    return CheckpointConfig(
        self._j_stream_execution_environment.getCheckpointConfig())
def enable_checkpointing(self, interval: int, mode: CheckpointingMode = None) \
        -> 'StreamExecutionEnvironment':
    """
    Enables checkpointing for the streaming job. The distributed state of the
    streaming dataflow will be periodically snapshotted. In case of a failure,
    the streaming dataflow will be restarted from the latest completed
    checkpoint.

    The job draws checkpoints periodically, in the given interval. The system
    uses the given :class:`~pyflink.datastream.CheckpointingMode` for the
    checkpointing ("exactly once" vs "at least once"). The state will be
    stored in the configured state backend.

    .. note::
        Checkpointing iterative streaming dataflows is not properly supported
        at the moment. For that reason, iterative jobs will not be started if
        used with enabled checkpointing.

    Example:
    ::

        >>> env.enable_checkpointing(300000, CheckpointingMode.AT_LEAST_ONCE)

    :param interval: Time interval between state checkpoints in milliseconds.
    :param mode: The checkpointing mode, selecting between "exactly once" and
                 "at least once" guaranteed.
    :return: This object.
    """
    if mode is None:
        self._j_stream_execution_environment = \
            self._j_stream_execution_environment.enableCheckpointing(interval)
    else:
        j_checkpointing_mode = CheckpointingMode._to_j_checkpointing_mode(mode)
        # Keep the environment reference returned by the Java call, for
        # consistency with the single-argument branch above (previously the
        # returned reference was silently discarded here).
        self._j_stream_execution_environment = \
            self._j_stream_execution_environment.enableCheckpointing(
                interval,
                j_checkpointing_mode)
    return self
def get_checkpoint_interval(self) -> int:
    """
    Returns the checkpointing interval, or -1 when checkpointing is disabled.

    Shorthand for ``get_checkpoint_config().get_checkpoint_interval()``.

    :return: The checkpointing interval or -1.
    """
    interval = self._j_stream_execution_environment.getCheckpointInterval()
    return interval
def get_checkpointing_mode(self) -> CheckpointingMode:
    """
    Returns the checkpointing mode (exactly-once vs. at-least-once).

    Shorthand for ``get_checkpoint_config().get_checkpointing_mode()``.

    :return: The :class:`~pyflink.datastream.CheckpointingMode`.
    """
    j_mode = self._j_stream_execution_environment.getCheckpointingMode()
    return CheckpointingMode._from_j_checkpointing_mode(j_mode)
def get_state_backend(self) -> StateBackend:
    """
    Returns the state backend that defines how state is stored and
    checkpointed.

    .. seealso:: :func:`set_state_backend`

    :return: The :class:`StateBackend`.
    """
    return _from_j_state_backend(
        self._j_stream_execution_environment.getStateBackend())
def set_state_backend(self, state_backend: StateBackend) -> 'StreamExecutionEnvironment':
    """
    Sets the state backend that describes how operator state is stored and
    checkpointed. The backend determines both the in-flight representation of
    state during execution (for example hash tables, RocksDB, or other data
    stores) and where checkpointed data is persisted.

    For example, the :class:`~pyflink.datastream.MemoryStateBackend` keeps
    state as objects on the heap — lightweight and dependency-free, but only
    able to checkpoint small states (some counters). In contrast, the
    :class:`~pyflink.datastream.FsStateBackend` stores checkpoints of the
    state (also maintained as heap objects) in files. With a replicated file
    system (HDFS, S3, Alluxio, etc.) state is not lost on individual node
    failures, and the streaming program can be executed highly available and
    strongly consistent (assuming Flink runs in high-availability mode).

    The built-in state backends are
    :class:`~pyflink.datastream.MemoryStateBackend`,
    :class:`~pyflink.datastream.FsStateBackend` and
    :class:`~pyflink.datastream.RocksDBStateBackend`.

    .. seealso:: :func:`get_state_backend`

    Example:
    ::

        >>> env.set_state_backend(EmbeddedRocksDBStateBackend())

    :param state_backend: The :class:`StateBackend`.
    :return: This object.
    """
    j_backend = state_backend._j_state_backend
    self._j_stream_execution_environment = \
        self._j_stream_execution_environment.setStateBackend(j_backend)
    return self
def enable_changelog_state_backend(self, enabled: bool) -> 'StreamExecutionEnvironment':
    """
    Enables (or disables) the change log for the current state backend. The
    change log lets operators persist state changes in a very fine-grained
    manner. It currently applies to keyed state only; non-keyed operator
    state and channel state are persisted as usual. "State" below means
    "keyed state":

    * Stateful operators write state changes to the log (logging the state) \
      in addition to applying them to the state tables in RocksDB or the \
      in-mem Hashtable.
    * An operator can acknowledge a checkpoint as soon as the changes in the \
      log have reached the durable checkpoint storage.
    * The state tables are persisted periodically, independent of the \
      checkpoints — the materialization of the state on checkpoint storage.
    * Once the state is materialized on checkpoint storage, the state \
      changelog can be truncated to the corresponding point.

    This establishes a way to drastically reduce the checkpoint interval for
    streaming applications across state backends. See FLIP-158 for details.

    If this method is never called, no preference is expressed: change log
    enabling is then resolved by the configuration at its various levels
    (job/local/cluster).

    .. seealso:: :func:`is_changelog_state_backend_enabled`

    :param enabled: True to explicitly enable the change log for the state
                    backend, False to explicitly disable it.
    :return: This object.

    .. versionadded:: 1.14.0
    """
    updated_env = self._j_stream_execution_environment.enableChangelogStateBackend(enabled)
    self._j_stream_execution_environment = updated_env
    return self
def is_changelog_state_backend_enabled(self) -> Optional[bool]:
    """
    Returns the enable status of the change log for the state backend.

    .. seealso:: :func:`enable_changelog_state_backend`

    :return: An :class:`Optional[bool]`; ``None`` when the user never set a
             preference via :func:`enable_changelog_state_backend`.

    .. versionadded:: 1.14.0
    """
    ternary = self._j_stream_execution_environment.isChangelogStateBackendEnabled()
    return ternary.getAsBoolean()
def set_default_savepoint_directory(self, directory: str) -> 'StreamExecutionEnvironment':
    """
    Sets the default savepoint directory, used when a savepoint is triggered
    without an explicit target directory.

    Example:
    ::

        >>> env.set_default_savepoint_directory("hdfs://savepoints")

    :param directory: The savepoint directory.
    :return: This object.
    """
    j_env = self._j_stream_execution_environment
    j_env.setDefaultSavepointDirectory(directory)
    return self
def get_default_savepoint_directory(self) -> Optional[str]:
    """
    Returns the default savepoint directory for this job, or ``None`` when
    no default has been configured.
    """
    j_path = self._j_stream_execution_environment.getDefaultSavepointDirectory()
    return None if j_path is None else j_path.toString()
def set_restart_strategy(self, restart_strategy_configuration: RestartStrategyConfiguration):
    """
    Sets the restart strategy configuration. It specifies which restart
    strategy the execution graph uses in case of a restart.

    Example:
    ::

        >>> env.set_restart_strategy(RestartStrategies.no_restart())

    :param restart_strategy_configuration: Restart strategy configuration to be set.
    :return:
    """
    j_config = restart_strategy_configuration._j_restart_strategy_configuration
    self._j_stream_execution_environment.setRestartStrategy(j_config)
def get_restart_strategy(self) -> RestartStrategyConfiguration:
    """
    Returns the currently configured restart strategy.

    :return: The restart strategy configuration to be used.
    """
    j_config = self._j_stream_execution_environment.getRestartStrategy()
    return RestartStrategies._from_j_restart_strategy(j_config)
def add_default_kryo_serializer(self, type_class_name: str, serializer_class_name: str):
    """
    Registers a new Kryo default serializer with the runtime.

    Example:
    ::

        >>> env.add_default_kryo_serializer("com.aaa.bbb.TypeClass", "com.aaa.bbb.Serializer")

    :param type_class_name: The full-qualified java class name of the types
                            serialized with the given serializer.
    :param serializer_class_name: The full-qualified java class name of the
                                  serializer to use.
    """
    # Resolve both class names on the Java side before registration.
    j_type_clz = load_java_class(type_class_name)
    j_serializer_clz = load_java_class(serializer_class_name)
    self._j_stream_execution_environment.addDefaultKryoSerializer(
        j_type_clz, j_serializer_clz)
def register_type_with_kryo_serializer(self, type_class_name: str, serializer_class_name: str):
    """
    Registers the given serializer (by class) as a serializer for the given
    type at the KryoSerializer.

    Example:
    ::

        >>> env.register_type_with_kryo_serializer("com.aaa.bbb.TypeClass",
        ...                                        "com.aaa.bbb.Serializer")

    :param type_class_name: The full-qualified java class name of the types
                            serialized with the given serializer.
    :param serializer_class_name: The full-qualified java class name of the
                                  serializer to use.
    """
    j_type_clz = load_java_class(type_class_name)
    j_serializer_clz = load_java_class(serializer_class_name)
    self._j_stream_execution_environment.registerTypeWithKryoSerializer(
        j_type_clz, j_serializer_clz)
def register_type(self, type_class_name: str):
    """
    Registers the given type with the serialization stack. A type that ends
    up serialized as a POJO is registered with the POJO serializer; a type
    that ends up serialized with Kryo is registered at Kryo so that only tags
    are written.

    Example:
    ::

        >>> env.register_type("com.aaa.bbb.TypeClass")

    :param type_class_name: The full-qualified java class name of the type to register.
    """
    self._j_stream_execution_environment.registerType(
        load_java_class(type_class_name))
def set_stream_time_characteristic(self, characteristic: TimeCharacteristic):
    """
    Sets the time characteristic for all streams created from this
    environment, e.g. processing time, event time, or ingestion time.

    Setting the characteristic to IngestionTime or EventTime installs a
    default watermark update interval of 200 ms. If that does not suit your
    application, change it via
    :func:`pyflink.common.ExecutionConfig.set_auto_watermark_interval`.

    Example:
    ::

        >>> env.set_stream_time_characteristic(TimeCharacteristic.EventTime)

    :param characteristic: The time characteristic, which could be
                           :data:`TimeCharacteristic.ProcessingTime`,
                           :data:`TimeCharacteristic.IngestionTime`,
                           :data:`TimeCharacteristic.EventTime`.
    """
    self._j_stream_execution_environment.setStreamTimeCharacteristic(
        TimeCharacteristic._to_j_time_characteristic(characteristic))
def get_stream_time_characteristic(self) -> 'TimeCharacteristic':
    """
    Returns the time characteristic of this environment.

    .. seealso:: :func:`set_stream_time_characteristic`

    :return: The :class:`TimeCharacteristic`.
    """
    return TimeCharacteristic._from_j_time_characteristic(
        self._j_stream_execution_environment.getStreamTimeCharacteristic())
def configure(self, configuration: Configuration):
    """
    Applies all relevant options contained in the
    :class:`~pyflink.common.Configuration`, such as
    `pipeline.time-characteristic`. This reconfigures
    :class:`~pyflink.datastream.StreamExecutionEnvironment`,
    :class:`~pyflink.common.ExecutionConfig` and
    :class:`~pyflink.datastream.CheckpointConfig`.

    Only settings whose keys are present in `configuration` are changed;
    absent keys leave the current values untouched.

    :param configuration: a configuration to read the values from.

    .. versionadded:: 1.15.0
    """
    # The context class loader is handed over so the Java side can resolve
    # user classes referenced by the configuration.
    class_loader = get_gateway().jvm.Thread.currentThread().getContextClassLoader()
    self._j_stream_execution_environment.configure(
        configuration._j_configuration, class_loader)
def add_python_file(self, file_path: str):
    """
    Adds a python dependency — a python file, package or local directory —
    to the PYTHONPATH of the python UDF worker. Make sure these dependencies
    can be imported.

    :param file_path: The path of the python dependency.
    """
    jvm = get_gateway().jvm
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    files_key = jvm.PythonOptions.PYTHON_FILES.key()
    existing = env_config.getString(files_key, None)
    if existing is None:
        merged = file_path
    else:
        # The newly added path goes first in the delimiter-joined list.
        merged = jvm.PythonDependencyUtils.FILE_DELIMITER.join([file_path, existing])
    env_config.setString(files_key, merged)
def set_python_requirements(self, requirements_file_path: str,
                            requirements_cache_dir: str = None):
    """
    Specifies a requirements.txt file which defines the third-party
    dependencies. These dependencies will be installed to a temporary
    directory and added to the PYTHONPATH of the python UDF worker.

    For dependencies that cannot be accessed in the cluster, a directory
    containing their installation packages can be given via
    ``requirements_cache_dir``; it will be uploaded to the cluster to support
    offline installation.

    Example:
    ::

        # commands executed in shell
        $ echo numpy==1.16.5 > requirements.txt
        $ pip download -d cached_dir -r requirements.txt --no-binary :all:

        # python code
        >>> stream_env.set_python_requirements("requirements.txt", "cached_dir")

    .. note::

        Please make sure the installation packages matches the platform of the
        cluster and the python version used. These packages will be installed
        using pip, so also make sure the version of Pip (version >= 7.1.0)
        and the version of SetupTools (version >= 37.0.0).

    :param requirements_file_path: The path of "requirements.txt" file.
    :param requirements_cache_dir: The path of the local directory which
                                   contains the installation packages.
    """
    jvm = get_gateway().jvm
    requirements = requirements_file_path
    if requirements_cache_dir is not None:
        # File path and cache dir travel as one delimiter-joined value.
        requirements = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
            [requirements, requirements_cache_dir])
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    env_config.setString(jvm.PythonOptions.PYTHON_REQUIREMENTS.key(), requirements)
def add_python_archive(self, archive_path: str, target_dir: str = None):
    """
    Adds a python archive file. The file will be extracted to the working
    directory of the python UDF worker.

    If ``target_dir`` is specified, the archive is extracted to a directory
    of that name; otherwise to a directory named after the archive file.

    If a python UDF depends on a specific python version that does not exist
    in the cluster, this method can be used to upload a virtual environment.
    The path of the python interpreter contained in the uploaded environment
    should then be specified via
    :func:`pyflink.table.TableConfig.set_python_executable`. Files uploaded
    via this method are also accessible in UDFs via relative paths.

    Example:
    ::

        # command executed in shell
        # assert the relative path of python interpreter is py_env/bin/python
        $ zip -r py_env.zip py_env

        # python code
        >>> stream_env.add_python_archive("py_env.zip")
        >>> stream_env.set_python_executable("py_env.zip/py_env/bin/python")

        # or
        >>> stream_env.add_python_archive("py_env.zip", "myenv")
        >>> stream_env.set_python_executable("myenv/py_env/bin/python")

        # the files contained in the archive file can be accessed in UDF
        >>> def my_udf():
        ...     with open("myenv/py_env/data/data.txt") as f:
        ...         ...

    .. note::

        Please make sure the uploaded python environment matches the platform
        that the cluster is running on and that the python version must be
        3.6 or higher.

    .. note::

        Currently only zip-format is supported. i.e. zip, jar, whl, egg, etc.
        The other archive formats such as tar, tar.gz, 7z, rar, etc are not
        supported.

    :param archive_path: The archive file path.
    :param target_dir: Optional, the target dir name that the archive file extracted to.
    """
    jvm = get_gateway().jvm
    if target_dir is not None:
        # Archive path and target dir travel as one delimiter-joined value.
        archive_path = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
            [archive_path, target_dir])
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    archives_key = jvm.PythonOptions.PYTHON_ARCHIVES.key()
    existing_archives = env_config.getString(archives_key, None)
    if existing_archives is None:
        merged_archives = archive_path
    else:
        # Previously registered archives stay first; the new one is appended.
        merged_archives = jvm.PythonDependencyUtils.FILE_DELIMITER.join(
            [existing_archives, archive_path])
    env_config.setString(archives_key, merged_archives)
def set_python_executable(self, python_exec: str):
    """
    Sets the path of the python interpreter used to execute the python UDF
    workers, e.g. "/usr/local/bin/python3".

    If a python UDF depends on a specific python version that does not exist
    in the cluster, :func:`pyflink.datastream.StreamExecutionEnvironment.add_python_archive`
    can be used to upload a virtual environment, and the interpreter path
    inside that environment can then be given here.

    Example:
    ::

        # command executed in shell
        # assume that the relative path of python interpreter is py_env/bin/python
        $ zip -r py_env.zip py_env

        # python code
        >>> stream_env.add_python_archive("py_env.zip")
        >>> stream_env.set_python_executable("py_env.zip/py_env/bin/python")

    .. note::

        Please make sure the uploaded python environment matches the platform
        that the cluster is running on and that the python version must be
        3.6 or higher.

    .. note::

        The python udf worker depends on Apache Beam (version == 2.27.0).
        Please ensure that the specified environment meets the above
        requirements.

    :param python_exec: The path of python interpreter.
    """
    jvm = get_gateway().jvm
    config_util = jvm.org.apache.flink.python.util.PythonConfigUtil
    env_config = config_util.getEnvironmentConfig(self._j_stream_execution_environment)
    env_config.setString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), python_exec)
def add_jars(self, *jars_path: str):
    """
    Adds jar files that will be uploaded to the cluster and referenced by
    the job.

    :param jars_path: Path of jars.
    """
    add_jars_to_context_class_loader(jars_path)
    jvm = get_gateway().jvm
    jars_key = jvm.org.apache.flink.configuration.PipelineOptions.JARS.key()
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    previous = env_config.getString(jars_key, None)
    joined = ';'.join(jars_path)
    # Append to any jars configured earlier rather than overwriting them.
    if previous and previous.strip():
        joined = ';'.join([previous, joined])
    env_config.setString(jars_key, joined)
def add_classpaths(self, *classpaths: str):
    """
    Adds a list of URLs that are added to the classpath of each user code
    classloader of the program. Paths must specify a protocol (e.g. file://)
    and be accessible on all nodes.

    :param classpaths: Classpaths that will be added.
    """
    add_jars_to_context_class_loader(classpaths)
    jvm = get_gateway().jvm
    classpaths_key = jvm.org.apache.flink.configuration.PipelineOptions.CLASSPATHS.key()
    env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
        .getEnvironmentConfig(self._j_stream_execution_environment)
    old_classpaths = env_config.getString(classpaths_key, None)
    # str.join accepts the tuple directly; the intermediate list() was unnecessary.
    joined_classpaths = ';'.join(classpaths)
    # Preserve previously configured classpaths by appending to them.
    if old_classpaths and old_classpaths.strip():
        joined_classpaths = ';'.join([old_classpaths, joined_classpaths])
    env_config.setString(classpaths_key, joined_classpaths)
def get_default_local_parallelism(self) -> int:
    """
    Returns the default parallelism used for the local execution environment.

    :return: The default local parallelism.
    """
    parallelism = self._j_stream_execution_environment.getDefaultLocalParallelism()
    return parallelism
def set_default_local_parallelism(self, parallelism: int):
    """
    Sets the default parallelism used for the local execution environment.

    :param parallelism: The parallelism to use as the default local parallelism.
    """
    j_env = self._j_stream_execution_environment
    j_env.setDefaultLocalParallelism(parallelism)
def execute(self, job_name: str = None) -> JobExecutionResult:
    """
    Triggers the program execution. All parts of the program that resulted in
    a "sink" operation — e.g. printing results or forwarding them to a
    message queue — are executed. The execution is logged and displayed under
    the provided name.

    :param job_name: Desired name of the job, optional.
    :return: The result of the job execution, containing elapsed time and accumulators.
    """
    stream_graph = self._generate_stream_graph(clear_transformations=True, job_name=job_name)
    j_result = self._j_stream_execution_environment.execute(stream_graph)
    return JobExecutionResult(j_result)
def execute_async(self, job_name: str = 'Flink Streaming Job') -> JobClient:
    """
    Triggers the program execution asynchronously. All parts of the program
    that resulted in a "sink" operation — e.g. printing results or forwarding
    them to a message queue — are executed. The execution is logged and
    displayed with a generated default name.

    :param job_name: Desired name of the job.
    :return: A JobClient that can be used to communicate with the submitted
             job, completed on submission succeeded.
    """
    stream_graph = self._generate_stream_graph(clear_transformations=True, job_name=job_name)
    j_client = self._j_stream_execution_environment.executeAsync(stream_graph)
    return JobClient(j_job_client=j_client)
def get_execution_plan(self) -> str:
    """
    Creates the plan with which the system will execute the program and
    returns it as a JSON string describing the execution data flow graph.

    Note that this needs to be called before the plan is executed. If the
    compiler could not be instantiated, or the master could not be contacted
    to retrieve information relevant to the execution planning, an exception
    will be thrown.

    :return: The execution plan of the program, as a JSON String.
    """
    plan_graph = self._generate_stream_graph(False)
    return plan_graph.getStreamingPlanAsJSON()
@staticmethod
def get_execution_environment() -> 'StreamExecutionEnvironment':
    """
    Creates an execution environment that represents the context in which
    the program is currently executed. If the program is invoked standalone,
    this method returns a local execution environment.

    :return: The execution environment of the context in which the program is executed.
    """
    # Local name spelled correctly (original had "exection").
    j_env = get_gateway().jvm.org.apache.flink.streaming.api.environment \
        .StreamExecutionEnvironment.getExecutionEnvironment()
    return StreamExecutionEnvironment(j_env)
def add_source(self, source_func: SourceFunction, source_name: str = 'Custom Source',
               type_info: TypeInformation = None) -> 'DataStream':
    """
    Adds a data source to the streaming topology.

    :param source_func: the user defined function.
    :param source_name: name of the data source. Optional.
    :param type_info: type of the returned stream. Optional.
    :return: the data stream constructed.
    """
    j_type_info = type_info.get_java_type_info() if type_info else None
    j_data_stream = self._j_stream_execution_environment.addSource(
        source_func.get_java_function(), source_name, j_type_info)
    return DataStream(j_data_stream=j_data_stream)
def from_source(self,
                source: Source,
                watermark_strategy: WatermarkStrategy,
                source_name: str,
                type_info: TypeInformation = None) -> 'DataStream':
    """
    Adds a data :class:`~pyflink.datastream.connectors.Source` to the
    environment to get a :class:`~pyflink.datastream.DataStream`.

    The result will be either a bounded data stream (that can be processed in
    a batch way) or an unbounded data stream (that must be processed in a
    streaming way), based on the boundedness property of the source.

    This method takes an explicit type information for the produced data
    stream, so that callers can define directly what type/serializer will be
    used for the produced stream. For sources that describe their produced
    type, ``type_info`` should not be specified to avoid redundancy.

    .. versionadded:: 1.13.0
    """
    j_type_info = type_info.get_java_type_info() if type_info else None
    j_data_stream = self._j_stream_execution_environment.fromSource(
        source.get_java_function(),
        watermark_strategy._j_watermark_strategy,
        source_name,
        j_type_info)
    return DataStream(j_data_stream=j_data_stream)
def read_text_file(self, file_path: str, charset_name: str = "UTF-8") -> DataStream:
    """
    Reads the given file line by line and creates a DataStream containing one
    string per line, decoded with the named charset.

    Note that this interface is not fault tolerant; it is intended for test
    purposes.

    :param file_path: The path of the file, as a URI (e.g., "file:///some/local/file" or
                      "hdfs://host:port/file/path")
    :param charset_name: The name of the character set used to read the file.
    :return: The DataStream that represents the data read from the given file as text lines.
    """
    j_stream = self._j_stream_execution_environment.readTextFile(file_path, charset_name)
    return DataStream(j_stream)
def from_collection(self, collection: List[Any],
                    type_info: TypeInformation = None) -> DataStream:
    """
    Creates a data stream from the given non-empty collection. The type of
    the data stream is that of the elements in the collection.

    Note that this operation results in a non-parallel data stream source,
    i.e. a data stream source with parallelism one.

    :param collection: The collection of elements to create the data stream from.
    :param type_info: The TypeInformation for the produced data stream
    :return: the data stream representing the given collection.
    """
    if type_info is not None:
        # Convert every element to its internal representation first.
        collection = [type_info.to_internal_type(element) for element in collection]
    return self._from_collection(collection, type_info)
def _from_collection(self, elements: List[Any],
                     type_info: TypeInformation = None) -> DataStream:
    """
    Builds a bounded, non-parallel DataStream from *elements*.

    The elements are pickled into a temporary file which the JVM side reads
    back — either as raw pickled byte arrays (when no ``type_info`` is
    given) or as typed Python objects.

    :param elements: The objects to turn into a stream.
    :param type_info: Optional element type; when absent the stream carries
                      pickled byte arrays.
    :return: The DataStream wrapping the bounded collection source.
    """
    temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
    serializer = self.serializer
    try:
        with temp_file:
            # dumps elements to a temporary file by pickle serializer.
            serializer.serialize(elements, temp_file)
        gateway = get_gateway()
        # if user does not defined the element data types, read the pickled data as a byte array
        # list.
        if type_info is None:
            j_objs = gateway.jvm.PythonBridgeUtils.readPickledBytes(temp_file.name)
            out_put_type_info = Types.PICKLED_BYTE_ARRAY()  # type: TypeInformation
        else:
            j_objs = gateway.jvm.PythonBridgeUtils.readPythonObjects(temp_file.name)
            out_put_type_info = type_info
        # Since flink python module depends on table module, we can make use of utils of it when
        # implementing python DataStream API.
        PythonTableUtils = gateway.jvm\
            .org.apache.flink.table.utils.python.PythonTableUtils
        execution_config = self._j_stream_execution_environment.getConfig()
        j_input_format = PythonTableUtils.getCollectionInputFormat(
            j_objs,
            out_put_type_info.get_java_type_info(),
            execution_config
        )
        JInputFormatSourceFunction = gateway.jvm.org.apache.flink.streaming.api.functions.\
            source.InputFormatSourceFunction
        JBoundedness = gateway.jvm.org.apache.flink.api.connector.source.Boundedness
        # addSource is invoked reflectively with an explicit signature —
        # presumably to select the exact Java overload taking a Boundedness
        # argument; TODO confirm.
        j_data_stream_source = invoke_method(
            self._j_stream_execution_environment,
            "org.apache.flink.streaming.api.environment.StreamExecutionEnvironment",
            "addSource",
            [JInputFormatSourceFunction(j_input_format, out_put_type_info.get_java_type_info()),
             "Collection Source",
             out_put_type_info.get_java_type_info(),
             JBoundedness.BOUNDED],
            ["org.apache.flink.streaming.api.functions.source.SourceFunction",
             "java.lang.String",
             "org.apache.flink.api.common.typeinfo.TypeInformation",
             "org.apache.flink.api.connector.source.Boundedness"])
        # A collection source is inherently sequential.
        j_data_stream_source.forceNonParallel()
        return DataStream(j_data_stream=j_data_stream_source)
    finally:
        # NOTE(review): only the pickle file is unlinked; the mkdtemp()
        # directory created above is left behind — confirm whether intended.
        os.unlink(temp_file.name)
def _generate_stream_graph(self, clear_transformations: bool = False, job_name: str = None) \
        -> JavaObject:
    """
    Builds the Java StreamGraph for the transformations currently registered
    on this environment.

    :param clear_transformations: Passed through to ``getStreamGraph``;
        controls whether the recorded transformations are cleared.
    :param job_name: Optional job name stamped onto the generated graph.
    :return: The Java StreamGraph object.
    """
    gateway = get_gateway()
    JPythonConfigUtil = gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
    # Apply Python-specific operator configuration before graph generation.
    JPythonConfigUtil.configPythonOperator(self._j_stream_execution_environment)
    # Run the Python operator chaining optimizer over the environment.
    gateway.jvm.org.apache.flink.python.chain.PythonOperatorChainingOptimizer.apply(
        self._j_stream_execution_environment)
    # The "transformations" field is read reflectively from the Java object.
    JPythonConfigUtil.setPartitionCustomOperatorNumPartitions(
        get_field_value(self._j_stream_execution_environment, "transformations"))
    j_stream_graph = self._j_stream_execution_environment.getStreamGraph(clear_transformations)
    if job_name is not None:
        j_stream_graph.setJobName(job_name)
    return j_stream_graph
def _open(self):
    """
    Prepares Python worker execution based on '_python_worker_execution_mode'.

    When the mode is unset or 'loopback' and the job targets a local
    deployment, a Beam loopback worker pool server is started in this
    process and its address is written into the job configuration.

    :raises ValueError: If the mode is 'loopback' for a non-local deployment,
        or is set to anything other than 'loopback' or 'process'.
    """
    # start BeamFnLoopbackWorkerPoolServicer when executed in MiniCluster
    j_configuration = get_j_env_configuration(self._j_stream_execution_environment)

    def startup_loopback_server():
        # Imported lazily — presumably so Beam is only pulled in when the
        # loopback server is actually needed.
        from pyflink.common import Configuration
        from pyflink.fn_execution.beam.beam_worker_pool_service import \
            BeamFnLoopbackWorkerPoolServicer
        config = Configuration(j_configuration=j_configuration)
        # start() returns the server address, which workers read back from
        # the configuration.
        config.set_string(
            "PYFLINK_LOOPBACK_SERVER_ADDRESS", BeamFnLoopbackWorkerPoolServicer().start())

    python_worker_execution_mode = os.environ.get('_python_worker_execution_mode')
    if python_worker_execution_mode is None:
        # Default behaviour: loopback only for local deployments.
        if is_local_deployment(j_configuration):
            startup_loopback_server()
    elif python_worker_execution_mode == 'loopback':
        if is_local_deployment(j_configuration):
            startup_loopback_server()
        else:
            raise ValueError("Loopback mode is enabled, however the job wasn't configured to "
                             "run in local deployment mode")
    elif python_worker_execution_mode != 'process':
        raise ValueError(
            "It only supports to execute the Python worker in 'loopback' mode and 'process' "
            "mode, unknown mode '%s' is configured" % python_worker_execution_mode)
def is_unaligned_checkpoints_enabled(self):
    """
    Tells whether unaligned checkpoints are enabled.
    """
    enabled = self._j_stream_execution_environment.isUnalignedCheckpointsEnabled()
    return enabled
def is_force_unaligned_checkpoints(self):
    """
    Tells whether unaligned checkpoints are force-enabled.
    """
    forced = self._j_stream_execution_environment.isForceUnalignedCheckpoints()
    return forced
| |
# Python
import pytest
import mock
# DRF
from rest_framework import status
from rest_framework.response import Response
from rest_framework.exceptions import PermissionDenied
# AWX
from awx.api.generics import (
ParentMixin,
SubListCreateAttachDetachAPIView, SubListAttachDetachAPIView,
DeleteLastUnattachLabelMixin,
ResourceAccessList,
ListAPIView
)
from awx.main.models import Organization, Credential
@pytest.fixture
def get_object_or_404(mocker):
    """Patch ``awx.api.generics.get_object_or_404`` and yield the mock.

    No ``return_value`` is configured on purpose: tests rely on the
    MagicMock's auto-generated return value.
    """
    patched = mocker.patch('awx.api.generics.get_object_or_404')
    return patched
@pytest.fixture
def get_object_or_400(mocker):
    """Patch ``awx.api.generics.get_object_or_400`` and return the mock."""
    patched = mocker.patch('awx.api.generics.get_object_or_400')
    return patched
@pytest.fixture
def mock_response_new(mocker):
    """Patch ``Response.__new__`` so constructing a Response yields the mock itself."""
    patched = mocker.patch('awx.api.generics.Response.__new__')
    patched.return_value = patched
    return patched
@pytest.fixture
def mock_organization():
    """An unsaved Organization instance with a fixed primary key."""
    return Organization(name="Unsaved Org", pk=4)
@pytest.fixture
def parent_relationship_factory(mocker):
    """
    Factory fixture producing a ``(view, mock_parent)`` pair for attach/detach
    tests.

    The factory instantiates ``serializer_class``, wires its ``relationship``
    attribute, and patches ``ParentMixin.get_parent_object`` so the view sees
    ``mock_parent`` as its parent relationship holder.
    """
    def rf(serializer_class, relationship_name, relationship_value=None):
        # A fresh Mock default is created per call; a ``mocker.Mock()``
        # default argument would be evaluated once and shared by every
        # invocation of the factory.
        if relationship_value is None:
            relationship_value = mocker.Mock()
        mock_parent_relationship = mocker.MagicMock(
            **{'%s.add.return_value' % relationship_name: relationship_value})
        mocker.patch('awx.api.generics.ParentMixin.get_parent_object',
                     return_value=mock_parent_relationship)
        serializer = serializer_class()
        # Plain loop instead of a side-effect list comprehension.
        for attr in ('relationship', 'model', 'parent_model'):
            setattr(serializer, attr, '')
        serializer.relationship = relationship_name
        return (serializer, mock_parent_relationship)
    return rf
# TODO: Test create and associate failure (i.e. id doesn't exist, record already exists, permission denied)
# TODO: Mock and check return (Response)
class TestSubListCreateAttachDetachAPIView:
def test_attach_validate_ok(self, mocker):
    """A request whose data carries an integer id validates cleanly."""
    request = mocker.MagicMock(data={'id': 1})
    view = SubListCreateAttachDetachAPIView()
    sub_id, error = view.attach_validate(request)
    assert sub_id == 1
    assert error is None
def test_attach_validate_invalid_type(self, mocker):
    """A non-integer id is rejected with an error Response."""
    request = mocker.MagicMock(data={'id': 'foobar'})
    view = SubListCreateAttachDetachAPIView()
    _, error = view.attach_validate(request)
    assert type(error) is Response
def test_attach_create_and_associate(self, mocker, get_object_or_400, parent_relationship_factory, mock_response_new):
(serializer, mock_parent_relationship) = parent_relationship_factory(SubListCreateAttachDetachAPIView, 'wife')
create_return_value = mocker.MagicMock(status_code=status.HTTP_201_CREATED)
serializer.create = mocker.Mock(return_value=create_return_value)
mock_request = mocker.MagicMock(data=dict())
ret = serializer.attach(mock_request, None, None)
assert ret == mock_response_new
serializer.create.assert_called_with(mock_request, None, None)
mock_parent_relationship.wife.add.assert_called_with(get_object_or_400.return_value)
mock_response_new.assert_called_with(Response, create_return_value.data, status=status.HTTP_201_CREATED, headers={'Location': create_return_value['Location']})
def test_attach_associate_only(self, mocker, get_object_or_400, parent_relationship_factory, mock_response_new):
(serializer, mock_parent_relationship) = parent_relationship_factory(SubListCreateAttachDetachAPIView, 'wife')
serializer.create = mocker.Mock(return_value=mocker.MagicMock())
mock_request = mocker.MagicMock(data=dict(id=1))
ret = serializer.attach(mock_request, None, None)
assert ret == mock_response_new
serializer.create.assert_not_called()
mock_parent_relationship.wife.add.assert_called_with(get_object_or_400.return_value)
mock_response_new.assert_called_with(Response, status=status.HTTP_204_NO_CONTENT)
def test_unattach_validate_ok(self, mocker):
mock_request = mocker.MagicMock(data=dict(id=1))
serializer = SubListCreateAttachDetachAPIView()
(sub_id, res) = serializer.unattach_validate(mock_request)
assert sub_id == 1
assert res is None
def test_unattach_validate_invalid_type(self, mocker):
mock_request = mocker.MagicMock(data=dict(id='foobar'))
serializer = SubListCreateAttachDetachAPIView()
(sub_id, res) = serializer.unattach_validate(mock_request)
assert type(res) is Response
def test_unattach_validate_missing_id(self, mocker):
mock_request = mocker.MagicMock(data=dict())
serializer = SubListCreateAttachDetachAPIView()
(sub_id, res) = serializer.unattach_validate(mock_request)
assert sub_id is None
assert type(res) is Response
def test_unattach_by_id_ok(self, mocker, parent_relationship_factory, get_object_or_400):
(serializer, mock_parent_relationship) = parent_relationship_factory(SubListCreateAttachDetachAPIView, 'wife')
mock_request = mocker.MagicMock()
mock_sub = mocker.MagicMock(name="object to unattach")
get_object_or_400.return_value = mock_sub
res = serializer.unattach_by_id(mock_request, 1)
assert type(res) is Response
assert res.status_code == status.HTTP_204_NO_CONTENT
mock_parent_relationship.wife.remove.assert_called_with(mock_sub)
def test_unattach_ok(self, mocker):
mock_request = mocker.MagicMock()
mock_sub_id = mocker.MagicMock()
view = SubListCreateAttachDetachAPIView()
view.unattach_validate = mocker.MagicMock()
view.unattach_by_id = mocker.MagicMock()
view.unattach_validate.return_value = (mock_sub_id, None)
view.unattach(mock_request)
view.unattach_validate.assert_called_with(mock_request)
view.unattach_by_id.assert_called_with(mock_request, mock_sub_id)
def test_unattach_invalid(self, mocker):
mock_request = mocker.MagicMock()
mock_res = mocker.MagicMock()
view = SubListCreateAttachDetachAPIView()
view.unattach_validate = mocker.MagicMock()
view.unattach_by_id = mocker.MagicMock()
view.unattach_validate.return_value = (None, mock_res)
view.unattach(mock_request)
view.unattach_validate.assert_called_with(mock_request)
view.unattach_by_id.assert_not_called()
def test_attach_detatch_only(mocker):
    """SubListAttachDetachAPIView.post with no 'id' returns an error message
    naming the model's verbose_name.

    NOTE(review): 'detatch' is a typo for 'detach'; left unchanged because
    pytest discovers tests by name.
    """
    mock_request = mocker.MagicMock()
    mock_request.data = {'name': 'name for my new model'}
    view = SubListAttachDetachAPIView()
    view.model = mocker.MagicMock()
    view.model._meta = mocker.MagicMock()
    view.model._meta.verbose_name = "Foo Bar"
    resp = view.post(mock_request)
    assert 'Foo Bar' in resp.data['msg']
    assert 'field is missing' in resp.data['msg']
class TestDeleteLastUnattachLabelMixin:
    """Tests for DeleteLastUnattachLabelMixin.unattach.

    NOTE(review): patching '__builtin__.super' only works on Python 2; the
    module is named 'builtins' on Python 3.
    """

    @mock.patch('__builtin__.super')
    def test_unattach_ok(self, super, mocker):
        # After a successful unattach, a label left fully detached is deleted.
        mock_request = mocker.MagicMock()
        mock_sub_id = mocker.MagicMock()
        super.return_value = super
        super.unattach_validate = mocker.MagicMock(return_value=(mock_sub_id, None))
        super.unattach_by_id = mocker.MagicMock()
        mock_model = mocker.MagicMock()
        # The model mock doubles as the instance returned by objects.get().
        mock_model.objects.get.return_value = mock_model
        mock_model.is_detached.return_value = True
        view = DeleteLastUnattachLabelMixin()
        view.model = mock_model
        view.unattach(mock_request, None, None)
        super.unattach_validate.assert_called_with(mock_request)
        super.unattach_by_id.assert_called_with(mock_request, mock_sub_id)
        mock_model.is_detached.assert_called_with()
        mock_model.objects.get.assert_called_with(id=mock_sub_id)
        mock_model.delete.assert_called_with()

    @mock.patch('__builtin__.super')
    def test_unattach_fail(self, super, mocker):
        # Validation failure short-circuits: the error Response is returned.
        mock_request = mocker.MagicMock()
        mock_response = mocker.MagicMock()
        super.return_value = super
        super.unattach_validate = mocker.MagicMock(return_value=(None, mock_response))
        view = DeleteLastUnattachLabelMixin()
        res = view.unattach(mock_request, None, None)
        super.unattach_validate.assert_called_with(mock_request)
        assert mock_response == res
class TestParentMixin:
    """Tests for ParentMixin.get_parent_object lookup behavior."""

    def test_get_parent_object(self, mocker, get_object_or_404):
        # get_parent_object resolves the parent via get_object_or_404 using
        # the view's lookup kwargs.
        parent_mixin = ParentMixin()
        parent_mixin.lookup_field = 'foo'
        parent_mixin.kwargs = dict(foo='bar')
        parent_mixin.parent_model = 'parent_model'
        mock_parent_mixin = mocker.MagicMock(wraps=parent_mixin)
        return_value = mock_parent_mixin.get_parent_object()
        get_object_or_404.assert_called_with(parent_mixin.parent_model, **parent_mixin.kwargs)
        assert get_object_or_404.return_value == return_value
class TestResourceAccessList:
    """Tests for ResourceAccessList's permission check against the parent object."""

    def mock_request(self):
        # A GET request from a plain (non-anonymous, non-superuser) user.
        return mock.MagicMock(
            user=mock.MagicMock(
                is_anonymous=mock.MagicMock(return_value=False),
                is_superuser=False
            ), method='GET')

    def mock_view(self, parent=None):
        view = ResourceAccessList()
        view.parent_model = Organization
        view.kwargs = {'pk': 4}
        if parent:
            view.get_parent_object = lambda: parent
        return view

    def test_parent_access_check_failed(self, mocker, mock_organization):
        # can_read returning False must raise PermissionDenied.
        mock_access = mocker.MagicMock(__name__='for logger', return_value=False)
        with mocker.patch('awx.main.access.BaseAccess.can_read', mock_access):
            with pytest.raises(PermissionDenied):
                self.mock_view(parent=mock_organization).check_permissions(self.mock_request())
        mock_access.assert_called_once_with(mock_organization)

    def test_parent_access_check_worked(self, mocker, mock_organization):
        # can_read returning True lets check_permissions pass silently.
        mock_access = mocker.MagicMock(__name__='for logger', return_value=True)
        with mocker.patch('awx.main.access.BaseAccess.can_read', mock_access):
            self.mock_view(parent=mock_organization).check_permissions(self.mock_request())
        mock_access.assert_called_once_with(mock_organization)
def test_related_search_reverse_FK_field():
    """Reverse foreign-key relations are exposed as __search fields."""
    view = ListAPIView()
    view.model = Credential
    assert 'jobtemplates__search' in view.related_search_fields
| |
import pickle
from matplotlib import pyplot
import os
import numpy as np
from pandas.io.parsers import read_csv
from sklearn.utils import shuffle
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet, BatchIterator
# Paths to the Kaggle facial-keypoints CSV files.
FTRAIN = './data/training.csv'
FTEST = './data/test.csv'

# One entry per "specialist" model: the target columns it predicts and the
# (x, y) column-index pairs that must be swapped when an image is flipped
# horizontally.  An optional 'kwargs' dict overrides net hyper-parameters.
SPECIALIST_SETTINGS = [
    dict(
        columns=(
            'left_eye_center_x', 'left_eye_center_y',
            'right_eye_center_x', 'right_eye_center_y',
            ),
        flip_indices=((0, 2), (1, 3)),
        ),

    dict(
        columns=(
            'nose_tip_x', 'nose_tip_y',
            ),
        flip_indices=(),
        kwargs=dict(dropout2_p=0.3, dropout3_p=0.4),  # !
        ),

    dict(
        columns=(
            'mouth_left_corner_x', 'mouth_left_corner_y',
            'mouth_right_corner_x', 'mouth_right_corner_y',
            'mouth_center_top_lip_x', 'mouth_center_top_lip_y',
            ),
        flip_indices=((0, 2), (1, 3)),
        ),

    dict(
        columns=(
            'mouth_center_bottom_lip_x',
            'mouth_center_bottom_lip_y',
            ),
        flip_indices=(),
        ),

    dict(
        columns=(
            'left_eye_inner_corner_x', 'left_eye_inner_corner_y',
            'right_eye_inner_corner_x', 'right_eye_inner_corner_y',
            'left_eye_outer_corner_x', 'left_eye_outer_corner_y',
            'right_eye_outer_corner_x', 'right_eye_outer_corner_y',
            ),
        flip_indices=((0, 2), (1, 3), (4, 6), (5, 7)),
        ),

    dict(
        columns=(
            'left_eyebrow_inner_end_x', 'left_eyebrow_inner_end_y',
            'right_eyebrow_inner_end_x', 'right_eyebrow_inner_end_y',
            'left_eyebrow_outer_end_x', 'left_eyebrow_outer_end_y',
            'right_eyebrow_outer_end_x', 'right_eyebrow_outer_end_y',
            ),
        flip_indices=((0, 2), (1, 3), (4, 6), (5, 7)),
        ),
    ]
def load(test=False, cols=None):
    """Loads data from FTEST if *test* is True, otherwise from FTRAIN.

    Pass a list of *cols* if you're only interested in a subset of the
    target columns.

    Returns (X, y): X is float32 with pixel values scaled to [0, 1];
    y is float32 with coordinates scaled to [-1, 1], or None for the
    test set.  Rows with any missing target value are dropped.
    """
    fname = FTEST if test else FTRAIN
    df = read_csv(os.path.expanduser(fname))  # load pandas dataframe

    # The Image column has pixel values separated by space; convert
    # the values to numpy arrays:
    # NOTE(review): np.fromstring is deprecated in modern NumPy
    # (np.fromstring -> np.frombuffer / fromstring(sep=' ') -> fromstring
    # replacement); fine for the NumPy vintage this script targets.
    df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))

    if cols:  # get a subset of columns
        df = df[list(cols) + ['Image']]

    print(df.count())  # prints the number of values for each column
    df = df.dropna()  # drop all rows that have missing values in them

    X = np.vstack(df['Image'].values) / 255.  # scale pixel values to [0, 1]
    X = X.astype(np.float32)

    if not test:  # only FTRAIN has any target columns
        y = df[df.columns[:-1]].values
        y = (y - 48) / 48  # scale target coordinates to [-1, 1]
        X, y = shuffle(X, y, random_state=42)  # shuffle train data
        y = y.astype(np.float32)
    else:
        y = None

    return X, y
class FlipBatchIterator(BatchIterator):
    """Batch iterator that horizontally flips half of each training batch.

    For flipped samples the x coordinates are negated and paired
    left/right keypoint columns are swapped so targets still match
    the mirrored image.
    """

    # (a, b) pairs of target-column indices to swap after a horizontal
    # flip, e.g. left_eye_center_x <-> right_eye_center_x.
    flip_indices = [
        (0, 2), (1, 3),
        (4, 8), (5, 9), (6, 10), (7, 11),
        (12, 16), (13, 17), (14, 18), (15, 19),
        (22, 24), (23, 25),
        ]

    def transform(self, Xb, yb):
        Xb, yb = super(FlipBatchIterator, self).transform(Xb, yb)

        # Flip half of the images in this batch at random:
        bs = Xb.shape[0]
        # NOTE(review): `bs / 2` relies on Python 2 integer division; under
        # Python 3 this passes a float to np.random.choice.
        indices = np.random.choice(bs, bs / 2, replace=False)
        Xb[indices] = Xb[indices, :, :, ::-1]

        if yb is not None:
            # Horizontal flip of all x coordinates:
            yb[indices, ::2] = yb[indices, ::2] * -1

            # Swap places, e.g. left_eye_center_x -> right_eye_center_x
            for a, b in self.flip_indices:
                yb[indices, a], yb[indices, b] = (
                    yb[indices, b], yb[indices, a])

        return Xb, yb
import theano
def float32(k):
    """Cast *k* (scalar or array-like) to numpy float32.

    Uses np.float32 directly: the old ``np.cast['float32']`` alias behaved
    identically and was deprecated and removed in NumPy 2.0.
    """
    return np.float32(k)
class AdjustVariable(object):
    """on_epoch_finished hook that linearly anneals a shared net variable
    (e.g. ``update_learning_rate``) from *start* to *stop* over
    ``nn.max_epochs`` epochs."""

    def __init__(self, name, start=0.03, stop=0.001):
        self.name = name
        self.start, self.stop = start, stop
        self.ls = None  # lazily-built schedule, one value per epoch

    def __call__(self, nn, train_history):
        if self.ls is None:
            # Build the schedule on first call, once max_epochs is known.
            self.ls = np.linspace(self.start, self.stop, nn.max_epochs)

        epoch = train_history[-1]['epoch']
        new_value = float32(self.ls[epoch - 1])
        getattr(nn, self.name).set_value(new_value)
class EarlyStopping(object):
    """Stop training when validation loss has not improved for *patience*
    epochs, restoring the best weights seen so far."""

    def __init__(self, patience=100):
        self.patience = patience
        self.best_valid = np.inf
        self.best_valid_epoch = 0
        self.best_weights = None

    def __call__(self, nn, train_history):
        current_valid = train_history[-1]['valid_loss']
        current_epoch = train_history[-1]['epoch']
        if current_valid < self.best_valid:
            # New best: remember the epoch and snapshot the weights.
            self.best_valid = current_valid
            self.best_valid_epoch = current_epoch
            self.best_weights = nn.get_all_params_values()
        elif self.best_valid_epoch + self.patience < current_epoch:
            print("Early stopping.")
            print("Best valid loss was {:.6f} at epoch {}.".format(
                self.best_valid, self.best_valid_epoch))
            nn.load_params_from(self.best_weights)
            # StopIteration is the signal nolearn uses to end the fit loop.
            raise StopIteration()
def backupCNN(nn, train_history):
    """on_epoch_finished hook: checkpoint the network weights every epoch.

    Bug fix: the checkpoint is now written to 'net_epoch_backup.pickle',
    the file the resume logic at the bottom of this script loads from.
    It previously wrote 'net-epoch-step.pickle', which nothing ever read,
    so interrupted runs silently restarted from scratch.
    """
    nn.save_params_to('net_epoch_backup.pickle')
# Convolutional network: three (conv -> maxpool -> dropout) stages followed
# by two dense layers, regressing all 30 keypoint coordinates at once.
# Learning rate and momentum are theano shared variables so the
# AdjustVariable hooks can anneal them during training.
net = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('conv1', layers.Conv2DLayer),
        ('pool1', layers.MaxPool2DLayer),
        ('dropout1', layers.DropoutLayer),  # !
        ('conv2', layers.Conv2DLayer),
        ('pool2', layers.MaxPool2DLayer),
        ('dropout2', layers.DropoutLayer),  # !
        ('conv3', layers.Conv2DLayer),
        ('pool3', layers.MaxPool2DLayer),
        ('dropout3', layers.DropoutLayer),  # !
        ('hidden4', layers.DenseLayer),
        ('dropout4', layers.DropoutLayer),  # !
        ('hidden5', layers.DenseLayer),
        ('output', layers.DenseLayer),
        ],
    input_shape=(None, 1, 96, 96),
    conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),
    dropout1_p=0.1,  # !
    conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
    dropout2_p=0.2,  # !
    conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),
    dropout3_p=0.3,  # !
    hidden4_num_units=1000,  # !
    dropout4_p=0.5,
    hidden5_num_units=1000,  # !
    output_num_units=30, output_nonlinearity=None,

    update_learning_rate=theano.shared(float32(0.03)),
    update_momentum=theano.shared(float32(0.9)),

    regression=True,
    batch_iterator_train=FlipBatchIterator(batch_size=128),
    on_epoch_finished=[
        AdjustVariable('update_learning_rate', start=0.03, stop=0.0001),
        AdjustVariable('update_momentum', start=0.9, stop=0.999),
        EarlyStopping(patience=200),
        backupCNN,
        ],
    max_epochs=10000,
    verbose=1,
    )
def load2d(test=False, cols=None):
    """Like load(), but reshape X to (n, 1, 96, 96) for the conv net.

    Bug fix: *cols* is now forwarded to load(); previously it was accepted
    and silently ignored, so column subsets (e.g. for specialist models)
    had no effect.
    """
    X, y = load(test=test, cols=cols)
    X = X.reshape(-1, 1, 96, 96)
    return X, y
import sys
sys.setrecursionlimit(10000)  # pickling deep nolearn nets recurses heavily

X = None
y = None
# Cache the slow CSV parse: load once, then reuse the pickled arrays.
if os.path.exists('X.pickle') and os.path.exists('y.pickle'):
    X = pickle.load(open('X.pickle', 'rb'))
    y = pickle.load(open('y.pickle', 'rb'))
else:
    X, y = load2d()
    with open('X.pickle', 'wb') as f:
        pickle.dump(X, f, -1)
    with open('y.pickle', 'wb') as f:
        pickle.dump(y, f, -1)

if os.path.exists('net.pickle'):
    print('already learning end')
elif os.path.exists('net_epoch_backup.pickle'):
    # Resume training from the per-epoch checkpoint written by backupCNN.
    net.load_params_from('net_epoch_backup.pickle')
    net.fit(X, y)
else:
    net.fit(X, y)

if net is not None:
    with open('net.pickle', 'wb') as f:
        # Bug fix: previously this dumped `y` (the training targets) instead
        # of the trained network, so 'net.pickle' never contained the model.
        pickle.dump(net, f, -1)
| |
"""
Finite Discrete Random Variables - Prebuilt variable types
Contains
========
FiniteRV
DiscreteUniform
Die
Bernoulli
Coin
Binomial
Hypergeometric
"""
from __future__ import print_function, division
from sympy.core.compatibility import as_int
from sympy.core.logic import fuzzy_not, fuzzy_and
from sympy.stats.frv import (SingleFinitePSpace, SingleFiniteDistribution)
from sympy import (S, sympify, Rational, binomial, cacheit, Integer,
Dict, Basic)
__all__ = ['FiniteRV', 'DiscreteUniform', 'Die', 'Bernoulli', 'Coin',
'Binomial', 'Hypergeometric']
def rv(name, cls, *args):
    """Build a RandomSymbol from a distribution class and its arguments."""
    density = cls(*args)
    return SingleFinitePSpace(name, density).value
class FiniteDistributionHandmade(SingleFiniteDistribution):
    """Finite distribution defined directly by a user-supplied density dict."""

    @property
    def dict(self):
        # The density Dict is stored as the sole constructor argument.
        return self.args[0]

    def __new__(cls, density):
        density = Dict(density)
        return Basic.__new__(cls, density)
def FiniteRV(name, density):
    """
    Create a Finite Random Variable given a dict representing the density.

    Returns a RandomSymbol.

    *density* maps each outcome to its probability; the values are taken
    as given and are not checked to sum to 1.

    >>> from sympy.stats import FiniteRV, P, E

    >>> density = {0: .1, 1: .2, 2: .3, 3: .4}
    >>> X = FiniteRV('X', density)

    >>> E(X)
    2.00000000000000
    >>> P(X>=2)
    0.700000000000000
    """
    return rv(name, FiniteDistributionHandmade, density)
class DiscreteUniformDistribution(SingleFiniteDistribution):
    """Uniform distribution over the finite set given as constructor args."""

    @property
    def p(self):
        # Equal probability 1/n for each of the n items.
        return Rational(1, len(self.args))

    @property
    @cacheit
    def dict(self):
        return dict((k, self.p) for k in self.set)

    @property
    def set(self):
        return self.args

    def pdf(self, x):
        if x in self.args:
            return self.p
        else:
            return S.Zero
def DiscreteUniform(name, items):
    """
    Create a Finite Random Variable representing a uniform distribution over
    the input set.

    Returns a RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import DiscreteUniform, density
    >>> from sympy import symbols

    >>> X = DiscreteUniform('X', symbols('a b c')) # equally likely over a, b, c
    >>> density(X).dict
    {a: 1/3, b: 1/3, c: 1/3}

    >>> Y = DiscreteUniform('Y', list(range(5))) # distribution over a range
    >>> density(Y).dict
    {0: 1/5, 1: 1/5, 2: 1/5, 3: 1/5, 4: 1/5}
    """
    # Each item becomes one constructor argument of the distribution.
    return rv(name, DiscreteUniformDistribution, *items)
class DieDistribution(SingleFiniteDistribution):
    """Distribution of a fair die: uniform over the integers 1..sides."""

    _argnames = ('sides',)

    def __new__(cls, sides):
        sides_sym = sympify(sides)
        # Reject anything provably not a positive integer; symbolic values of
        # unknown integrality pass through (three-valued fuzzy logic).
        if fuzzy_not(fuzzy_and((sides_sym.is_integer, sides_sym.is_positive))):
            raise ValueError("'sides' must be a positive integer.")
        else:
            return super(DieDistribution, cls).__new__(cls, sides)

    @property
    @cacheit
    def dict(self):
        # Validation only: as_int raises ValueError for a symbolic side
        # count, for which an explicit density dict cannot be enumerated.
        as_int(self.sides)
        return super(DieDistribution, self).dict

    @property
    def set(self):
        return list(map(Integer, list(range(1, self.sides + 1))))

    def pdf(self, x):
        x = sympify(x)
        if x.is_Integer and x >= 1 and x <= self.sides:
            return Rational(1, self.sides)
        else:
            # Bug fix: return SymPy's Zero rather than the Python int 0,
            # consistent with DiscreteUniformDistribution.pdf.
            return S.Zero
def Die(name, sides=6):
    """
    Create a Finite Random Variable representing a fair die (6-sided by
    default).

    Returns a RandomSymbol.

    >>> from sympy.stats import Die, density

    >>> D6 = Die('D6', 6) # Six sided Die
    >>> density(D6).dict
    {1: 1/6, 2: 1/6, 3: 1/6, 4: 1/6, 5: 1/6, 6: 1/6}

    >>> D4 = Die('D4', 4) # Four sided Die
    >>> density(D4).dict
    {1: 1/4, 2: 1/4, 3: 1/4, 4: 1/4}
    """
    return rv(name, DieDistribution, sides)
class BernoulliDistribution(SingleFiniteDistribution):
    """Two-point distribution: P(succ) = p, P(fail) = 1 - p."""

    _argnames = ('p', 'succ', 'fail')

    @property
    @cacheit
    def dict(self):
        return {self.succ: self.p, self.fail: 1 - self.p}
def Bernoulli(name, p, succ=1, fail=0):
    """
    Create a Finite Random Variable representing a Bernoulli process.

    Returns a RandomSymbol.  *succ* and *fail* are the outcome values
    (1 and 0 by default).

    >>> from sympy.stats import Bernoulli, density
    >>> from sympy import S

    >>> X = Bernoulli('X', S(3)/4) # 1-0 Bernoulli variable, probability = 3/4
    >>> density(X).dict
    {0: 1/4, 1: 3/4}

    >>> X = Bernoulli('X', S.Half, 'Heads', 'Tails') # A fair coin toss
    >>> density(X).dict
    {Heads: 1/2, Tails: 1/2}
    """
    return rv(name, BernoulliDistribution, p, succ, fail)
def Coin(name, p=S.Half):
    """
    Create a Finite Random Variable representing a Coin toss.

    Probability p is the chance of getting "Heads." Half by default

    Returns a RandomSymbol.

    >>> from sympy.stats import Coin, density
    >>> from sympy import Rational

    >>> C = Coin('C') # A fair coin toss
    >>> density(C).dict
    {H: 1/2, T: 1/2}

    >>> C2 = Coin('C2', Rational(3, 5)) # An unfair coin
    >>> density(C2).dict
    {H: 3/5, T: 2/5}
    """
    # A Coin is a Bernoulli variable with string outcomes 'H' and 'T'.
    return rv(name, BernoulliDistribution, p, 'H', 'T')
class BinomialDistribution(SingleFiniteDistribution):
    """Number of successes in n Bernoulli(p) trials, with each outcome k
    mapped onto the value k*succ + (n - k)*fail."""

    _argnames = ('n', 'p', 'succ', 'fail')

    def __new__(cls, *args):
        n = args[BinomialDistribution._argnames.index('n')]
        p = args[BinomialDistribution._argnames.index('p')]
        n_sym = sympify(n)
        p_sym = sympify(p)

        # n must be a nonnegative integer (n = 0 is a valid degenerate case);
        # symbolic values of unknown integrality are allowed through.
        if fuzzy_not(fuzzy_and((n_sym.is_integer, n_sym.is_nonnegative))):
            # Bug fix: the message previously said n had to be *positive*,
            # although the check above correctly accepts n = 0.
            raise ValueError("'n' must be nonnegative integer. n = %s." % str(n))
        elif fuzzy_not(fuzzy_and((p_sym.is_nonnegative, (p_sym - 1).is_nonpositive))):
            raise ValueError("'p' must be: 0 <= p <= 1 . p = %s" % str(p))
        else:
            return super(BinomialDistribution, cls).__new__(cls, *args)

    @property
    @cacheit
    def dict(self):
        n, p, succ, fail = self.n, self.p, self.succ, self.fail
        # An explicit integer n is required to enumerate the outcomes.
        n = as_int(n)
        return dict((k*succ + (n - k)*fail,
                     binomial(n, k) * p**k * (1 - p)**(n - k)) for k in range(0, n + 1))
def Binomial(name, n, p, succ=1, fail=0):
    """
    Create a Finite Random Variable representing a binomial distribution.

    Returns a RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Binomial, density
    >>> from sympy import S

    >>> X = Binomial('X', 4, S.Half) # Four "coin flips"
    >>> density(X).dict
    {0: 1/16, 1: 1/4, 2: 3/8, 3: 1/4, 4: 1/16}
    """
    return rv(name, BinomialDistribution, n, p, succ, fail)
class HypergeometricDistribution(SingleFiniteDistribution):
    """Number of successes when drawing n items without replacement from a
    population of N items containing m successes."""

    _argnames = ('N', 'm', 'n')

    @property
    @cacheit
    def dict(self):
        N, m, n = self.N, self.m, self.n
        N, m, n = list(map(sympify, (N, m, n)))
        # The support runs over max(0, n + m - N) <= k <= min(m, n).
        density = dict((sympify(k),
                        Rational(binomial(m, k) * binomial(N - m, n - k),
                                 binomial(N, n)))
                       for k in range(max(0, n + m - N), min(m, n) + 1))
        return density
def Hypergeometric(name, N, m, n):
    """
    Create a Finite Random Variable representing a hypergeometric distribution.

    Returns a RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Hypergeometric, density
    >>> from sympy import S

    >>> X = Hypergeometric('X', 10, 5, 3) # 10 marbles, 5 white (success), 3 draws
    >>> density(X).dict
    {0: 1/12, 1: 5/12, 2: 5/12, 3: 1/12}
    """
    return rv(name, HypergeometricDistribution, N, m, n)
class RademacherDistribution(SingleFiniteDistribution):
    """Distribution taking the values -1 and 1 with probability 1/2 each."""

    @property
    @cacheit
    def dict(self):
        return {-1: S.Half, 1: S.Half}
def Rademacher(name):
    """
    Create a Finite Random Variable representing a Rademacher distribution.

    Return a RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Rademacher, density

    >>> X = Rademacher('X')
    >>> density(X).dict
    {-1: 1/2, 1: 1/2}

    See Also
    ========

    sympy.stats.Bernoulli

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Rademacher_distribution
    """
    return rv(name, RademacherDistribution)
| |
"""Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
#
# License: BSD, (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import atleast2d_or_csr
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
                           SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing the k-nearest neighbors vote.

    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`k_neighbors` queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`scipy.spatial.cKDtree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    warn_on_equidistant : boolean, optional.  Defaults to True.
        Generate a warning if equidistant neighbors are discarded.
        For classification or regression based on k-neighbors, if
        neighbor k and neighbor k+1 have identical distances but
        different labels, then the result will be dependent on the
        ordering of the training data.
        If the fit method is ``'kd_tree'``, no warnings will be generated.

    p: integer, optional (default = 2)
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> neigh = KNeighborsClassifier(n_neighbors=3)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    KNeighborsClassifier(...)
    >>> print(neigh.predict([[1.1]]))
    [0]
    >>> print(neigh.predict_proba([[0.9]]))
    [[ 0.66666667  0.33333333]]

    See also
    --------
    RadiusNeighborsClassifier
    KNeighborsRegressor
    RadiusNeighborsRegressor
    NearestNeighbors

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5,
                 weights='uniform',
                 algorithm='auto', leaf_size=30,
                 warn_on_equidistant=True, p=2):
        self._init_params(n_neighbors=n_neighbors,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          warn_on_equidistant=warn_on_equidistant,
                          p=p)
        # Validate/normalize the weights argument once, up front.
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the class labels for the provided data

        Parameters
        ----------
        X: array
            A 2-D array representing the test points.

        Returns
        -------
        labels: array
            List of class labels (one for each data sample).
        """
        X = atleast2d_or_csr(X)

        neigh_dist, neigh_ind = self.kneighbors(X)
        # Labels of each sample's k nearest training points.
        pred_labels = self._y[neigh_ind]

        weights = _get_weights(neigh_dist, self.weights)

        if weights is None:
            # Unweighted vote: plain mode over the neighbor labels.
            mode, _ = stats.mode(pred_labels, axis=1)
        else:
            mode, _ = weighted_mode(pred_labels, weights, axis=1)

        return self.classes_.take(mode.flatten().astype(np.int))

    def predict_proba(self, X):
        """Return probability estimates for the test data X.

        Parameters
        ----------
        X: array, shape = (n_samples, n_features)
            A 2-D array representing the test points.

        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Probabilities of the samples for each class in the model,
            where classes are ordered arithmetically.
        """
        X = atleast2d_or_csr(X)

        neigh_dist, neigh_ind = self.kneighbors(X)
        pred_labels = self._y[neigh_ind]

        weights = _get_weights(neigh_dist, self.weights)
        if weights is None:
            weights = np.ones_like(pred_labels)

        probabilities = np.zeros((X.shape[0], self.classes_.size))

        # a simple ':' index doesn't work right
        all_rows = np.arange(X.shape[0])
        for i, idx in enumerate(pred_labels.T):  # loop is O(n_neighbors)
            # Accumulate each neighbor's weight into its class column.
            probabilities[all_rows, idx] += weights[:, i]

        # normalize 'votes' into real [0,1] probabilities
        probabilities = (probabilities.T / probabilities.sum(axis=1)).T

        return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
                                SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing a vote among neighbors within a given radius

    Parameters
    ----------
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth`radius_neighbors`
        queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`scipy.spatial.cKDtree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    p: integer, optional (default = 2)
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    outlier_label: int, optional (default = None)
        Label, which is given for outlier samples (samples with no
        neighbors on given radius).
        If set to None, ValueError is raised, when outlier is detected.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsClassifier
    >>> neigh = RadiusNeighborsClassifier(radius=1.0)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    RadiusNeighborsClassifier(...)
    >>> print(neigh.predict([[1.5]]))
    [0]

    See also
    --------
    KNeighborsClassifier
    RadiusNeighborsRegressor
    KNeighborsRegressor
    NearestNeighbors

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, radius=1.0, weights='uniform',
                 algorithm='auto', leaf_size=30, p=2, outlier_label=None):
        self._init_params(radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          p=p)
        self.weights = _check_weights(weights)
        self.outlier_label = outlier_label

    def predict(self, X):
        """Predict the class labels for the provided data

        Parameters
        ----------
        X: array
            A 2-D array representing the test points.

        Returns
        -------
        labels: array
            List of class labels (one for each data sample).
        """
        X = atleast2d_or_csr(X)

        neigh_dist, neigh_ind = self.radius_neighbors(X)
        pred_labels = [self._y[ind] for ind in neigh_ind]

        # Bug fix: compare against None explicitly.  The previous truthiness
        # test (`if self.outlier_label:`) silently disabled outlier handling
        # whenever the outlier label was 0, a perfectly valid class label.
        if self.outlier_label is not None:
            outlier_label = np.array((self.outlier_label, ))
            small_value = np.array((1e-6, ))
            for i, pl in enumerate(pred_labels):
                # Check that all have at least 1 neighbor
                if len(pl) < 1:
                    pred_labels[i] = outlier_label
                    neigh_dist[i] = small_value
        else:
            for pl in pred_labels:
                # Check that all have at least 1 neighbor
                if len(pl) < 1:
                    raise ValueError('no neighbors found for a test sample, '
                                     'you can try using larger radius, '
                                     'give a label for outliers, '
                                     'or consider removing them in your '
                                     'dataset')

        weights = _get_weights(neigh_dist, self.weights)

        if weights is None:
            mode = np.array([stats.mode(pl)[0] for pl in pred_labels],
                            dtype=np.int)
        else:
            mode = np.array([weighted_mode(pl, w)[0]
                             for (pl, w) in zip(pred_labels, weights)],
                            dtype=np.int)

        mode = mode.flatten().astype(np.int)

        # map indices to classes
        prediction = self.classes_.take(mode)

        if self.outlier_label is not None:
            # reset outlier label
            prediction[mode == outlier_label] = self.outlier_label

        return prediction
| |
"""Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import u
from sympy.printing.pretty.stringpict import prettyForm, stringPict
from sympsi.qexpr import QExpr, dispatch_method
__all__ = [
'KetBase',
'BraBase',
'StateBase',
'State',
'Ket',
'Bra',
'TimeDepState',
'TimeDepBra',
'TimeDepKet',
'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------

# Bracket glyphs used by the pretty/str printers of the state classes below.

# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"

# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u("\u27E8")
_rbracket_ucode = u("\u27E9")
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u("\u2758")

# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
    @classmethod
    def _operators_to_state(self, ops, **options):
        """ Returns the eigenstate instance for the passed operators.

        This method should be overridden in subclasses. It will handle being
        passed either an Operator instance or set of Operator instances. It
        should return the corresponding state INSTANCE or simply raise a
        NotImplementedError. See cartesian.py for an example.

        NOTE(review): as a classmethod, the first parameter is conventionally
        named ``cls``, not ``self``.
        """
        raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
    def _state_to_operators(self, op_classes, **options):
        """ Returns the operators which this state instance is an eigenstate
        of.

        This method should be overridden in subclasses. It will be called on
        state instances and be passed the operator classes that we wish to make
        into instances. The state instance will then transform the classes
        appropriately, or raise a NotImplementedError if it cannot return
        operator instances. See cartesian.py for examples.
        """
        raise NotImplementedError(
            "Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
"""Return the operator(s) that this state is an eigenstate of"""
from .operatorset import state_to_operators # import internally to avoid circular import errors
return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
"""Return the class used to construt the dual."""
raise NotImplementedError(
'dual_class must be implemented in a subclass'
)
def _eval_adjoint(self):
"""Compute the dagger of this state using the dual."""
return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
# Return pretty printed brackets for the state
# Ideally, this could be done by pform.parens but it does not support the angled < and >
# Setup for unicode vs ascii
if use_unicode:
lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
slash, bslash, vert = u('\u2571'), u('\u2572'), u('\u2502')
else:
lbracket, rbracket = self.lbracket, self.rbracket
slash, bslash, vert = '/', '\\', '|'
# If height is 1, just return brackets
if height == 1:
return stringPict(lbracket), stringPict(rbracket)
# Make height even
height += (height % 2)
brackets = []
for bracket in lbracket, rbracket:
# Create left bracket
if bracket in set([_lbracket, _lbracket_ucode]):
bracket_args = [ ' ' * (height//2 - i - 1) +
slash for i in range(height // 2)]
bracket_args.extend(
[ ' ' * i + bslash for i in range(height // 2)])
# Create right bracket
elif bracket in set([_rbracket, _rbracket_ucode]):
bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
bracket_args.extend([ ' ' * (
height//2 - i - 1) + slash for i in range(height // 2)])
# Create straight bracket
elif bracket in set([_straight_bracket, _straight_bracket_ucode]):
bracket_args = [vert for i in range(height)]
else:
raise ValueError(bracket)
brackets.append(
stringPict('\n'.join(bracket_args), baseline=height//2))
return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (self.lbracket, contents, self.rbracket)
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
# Get brackets
pform = self._print_contents_pretty(printer, *args)
lbracket, rbracket = self._pretty_brackets(
pform.height(), printer._use_unicode)
# Put together state
pform = prettyForm(*pform.left(lbracket))
pform = prettyForm(*pform.right(rbracket))
return pform
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
# The extra {} brackets are needed to get matplotlib's latex
# rendered to render this properly.
return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
    """Base class for Kets.

    Defines the bra dual and the bracket glyphs used when printing. This is
    an abstract base class; instantiate Ket (or a subclass) instead.
    """

    lbracket = _straight_bracket
    rbracket = _rbracket
    lbracket_ucode = _straight_bracket_ucode
    rbracket_ucode = _rbracket_ucode
    lbracket_latex = r'\left|'
    rbracket_latex = r'\right\rangle '

    @classmethod
    def default_args(self):
        # Label used when a ket is constructed without arguments.
        return ("psi",)

    @classmethod
    def dual_class(self):
        return BraBase

    def __mul__(self, other):
        """KetBase*other"""
        from sympsi.operator import OuterProduct
        if not isinstance(other, BraBase):
            return Expr.__mul__(self, other)
        # |ket><bra| forms an outer product.
        return OuterProduct(self, other)

    def __rmul__(self, other):
        """other*KetBase"""
        from sympsi.innerproduct import InnerProduct
        if not isinstance(other, BraBase):
            return Expr.__rmul__(self, other)
        # <bra|ket> forms an inner product.
        return InnerProduct(other, self)

    #-------------------------------------------------------------------------
    # _eval_* methods
    #-------------------------------------------------------------------------

    def _eval_innerproduct(self, bra, **hints):
        """Evaluate the inner product between this ket and a bra.

        Computes <bra|ket> with ``self`` as the ket, by dispatching to a
        sub-method of the form::

            ``def _eval_innerproduct_BraClass(self, **hints):``

        Subclasses define one such method per BraClass they know how to take
        inner products with.
        """
        return dispatch_method(self, '_eval_innerproduct', bra, **hints)

    def _apply_operator(self, op, **options):
        """Apply an Operator to this Ket.

        Dispatches to a sub-method of the form::

            ``def _apply_operator_OperatorName(op, **options):``

        Subclasses define one such method per OperatorName to teach the Ket
        how operators act on it.

        Parameters
        ==========

        op : Operator
            The Operator that is acting on the Ket.
        options : dict
            A dict of key/value pairs that control how the operator is applied
            to the Ket.
        """
        return dispatch_method(self, '_apply_operator', op, **options)
class BraBase(StateBase):
    """Base class for Bras.

    Defines the ket dual and the bracket glyphs used when printing. This is
    an abstract base class; instantiate Bra (or a subclass) instead.
    """

    lbracket = _lbracket
    rbracket = _straight_bracket
    lbracket_ucode = _lbracket_ucode
    rbracket_ucode = _straight_bracket_ucode
    lbracket_latex = r'\left\langle '
    rbracket_latex = r'\right|'

    @classmethod
    def _operators_to_state(self, ops, **options):
        # Build the eigen-ket for these operators, then dualize it.
        ket = self.dual_class().operators_to_state(ops, **options)
        return ket.dual

    def _state_to_operators(self, op_classes, **options):
        # Delegate to the dual ket's implementation.
        return self.dual._state_to_operators(op_classes, **options)

    def _enumerate_state(self, num_states, **options):
        # Enumerate through the dual kets and dualize each result back.
        kets = self.dual._enumerate_state(num_states, **options)
        return [ket.dual for ket in kets]

    @classmethod
    def default_args(self):
        return self.dual_class().default_args()

    @classmethod
    def dual_class(self):
        return KetBase

    def __mul__(self, other):
        """BraBase*other"""
        from sympsi.innerproduct import InnerProduct
        if not isinstance(other, KetBase):
            return Expr.__mul__(self, other)
        # <bra|ket> forms an inner product.
        return InnerProduct(self, other)

    def __rmul__(self, other):
        """other*BraBase"""
        from sympsi.operator import OuterProduct
        if not isinstance(other, KetBase):
            return Expr.__rmul__(self, other)
        # |ket><bra| forms an outer product.
        return OuterProduct(other, self)

    def _represent(self, **options):
        """A default represent that uses the Ket's version."""
        from sympsi.dagger import Dagger
        return Dagger(self.dual._represent(**options))
class State(StateBase):
    """General abstract quantum state used as a base class for Ket and Bra.

    Adds no behavior of its own; it exists so the time-independent Ket and
    Bra share a common ancestor distinct from TimeDepState.
    """
    pass
class Ket(State, KetBase):
    """A general time-independent Ket in quantum mechanics.

    Inherits from State and KetBase. This class should be used as the base
    class for all physical, time-independent Kets in a system. This class
    and its subclasses will be the main classes that users will use for
    expressing Kets in Dirac notation [1]_.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the
        ket. This will usually be its symbol or its quantum numbers. For
        time-dependent state, this will include the time.

    Examples
    ========

    Create a simple Ket and look at its properties::

        >>> from sympsi import Ket, Bra
        >>> from sympy import symbols, I
        >>> k = Ket('psi')
        >>> k
        |psi>
        >>> k.hilbert_space
        H
        >>> k.is_commutative
        False
        >>> k.label
        (psi,)

    Kets know about their associated bra::

        >>> k.dual
        <psi|
        >>> k.dual_class()
        <class 'sympsi.state.Bra'>

    Take a linear combination of two kets::

        >>> k0 = Ket(0)
        >>> k1 = Ket(1)
        >>> 2*I*k0 - 4*k1
        2*I*|0> - 4*|1>

    Compound labels are passed as tuples::

        >>> n, m = symbols('n,m')
        >>> k = Ket(n,m)
        >>> k
        |nm>

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
    """

    @classmethod
    def dual_class(self):
        """Return Bra, the class used to construct this ket's dual."""
        return Bra
class Bra(State, BraBase):
    """A general time-independent Bra in quantum mechanics.

    Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
    class and its subclasses will be the main classes that users will use for
    expressing Bras in Dirac notation.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the
        ket. This will usually be its symbol or its quantum numbers. For
        time-dependent state, this will include the time.

    Examples
    ========

    Create a simple Bra and look at its properties::

        >>> from sympsi import Ket, Bra
        >>> from sympy import symbols, I
        >>> b = Bra('psi')
        >>> b
        <psi|
        >>> b.hilbert_space
        H
        >>> b.is_commutative
        False

    Bras know about their dual Kets::

        >>> b.dual
        |psi>
        >>> b.dual_class()
        <class 'sympsi.state.Ket'>

    Like Kets, Bras can have compound labels and be manipulated in a similar
    manner::

        >>> n, m = symbols('n,m')
        >>> b = Bra(n,m) - I*Bra(m,n)
        >>> b
        -I*<mn| + <nm|

    Symbols in a Bra can be substituted using ``.subs``::

        >>> b.subs(n,m)
        <mm| - I*<mm|

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
    """

    @classmethod
    def dual_class(self):
        """Return Ket, the class used to construct this bra's dual."""
        return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
    """Base class for a general time-dependent quantum state.

    Any time-dependent state derives from this class. Unlike the
    time-independent states, the constructor takes the time as an extra,
    final argument after the usual label arguments.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For time-dependent
        state, this will include the time as the final argument.
    """

    #-------------------------------------------------------------------------
    # Initialization
    #-------------------------------------------------------------------------

    @classmethod
    def default_args(self):
        # Default label plus default time symbol.
        return ("psi", "t")

    #-------------------------------------------------------------------------
    # Properties
    #-------------------------------------------------------------------------

    @property
    def label(self):
        """The label of the state."""
        # Everything except the trailing time argument.
        return self.args[:-1]

    @property
    def time(self):
        """The time of the state."""
        return self.args[-1]

    #-------------------------------------------------------------------------
    # Printing
    #-------------------------------------------------------------------------

    def _print_time(self, printer, *args):
        return printer._print(self.time, *args)

    # str/repr and latex render the time the same way as plain printing.
    _print_time_repr = _print_time
    _print_time_latex = _print_time

    def _print_time_pretty(self, printer, *args):
        return printer._print(self.time, *args)

    def _print_contents(self, printer, *args):
        # Contents look like "<label>;<time>".
        return '%s;%s' % (self._print_label(printer, *args),
                          self._print_time(printer, *args))

    def _print_label_repr(self, printer, *args):
        return '%s,%s' % (self._print_sequence(self.label, ',', printer, *args),
                          self._print_time_repr(printer, *args))

    def _print_contents_pretty(self, printer, *args):
        parts = (self._print_label_pretty(printer, *args),
                 self._print_time_pretty(printer, *args))
        return printer._print_seq(parts, delimiter=';')

    def _print_contents_latex(self, printer, *args):
        body = self._print_sequence(
            self.label, self._label_separator, printer, *args)
        return '%s;%s' % (body, self._print_time_latex(printer, *args))
class TimeDepKet(TimeDepState, KetBase):
    """General time-dependent Ket in quantum mechanics.

    This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
    that should be used for Kets that vary with time. Its dual is a
    ``TimeDepBra``.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For time-dependent
        state, this will include the time as the final argument.

    Examples
    ========

    Create a TimeDepKet and look at its attributes::

        >>> from sympsi import TimeDepKet
        >>> k = TimeDepKet('psi', 't')
        >>> k
        |psi;t>
        >>> k.time
        t
        >>> k.label
        (psi,)
        >>> k.hilbert_space
        H

    TimeDepKets know about their dual bra::

        >>> k.dual
        <psi;t|
        >>> k.dual_class()
        <class 'sympsi.state.TimeDepBra'>
    """

    @classmethod
    def dual_class(self):
        """Return TimeDepBra, the class used to construct the dual."""
        return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
    """General time-dependent Bra in quantum mechanics.

    This inherits from TimeDepState and BraBase and is the main class that
    should be used for Bras that vary with time. Its dual is a TimeDepKet.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For time-dependent
        state, this will include the time as the final argument.

    Examples
    ========

        >>> from sympsi import TimeDepBra
        >>> from sympy import symbols, I
        >>> b = TimeDepBra('psi', 't')
        >>> b
        <psi;t|
        >>> b.time
        t
        >>> b.label
        (psi,)
        >>> b.hilbert_space
        H
        >>> b.dual
        |psi;t>
    """

    @classmethod
    def dual_class(self):
        """Return TimeDepKet, the class used to construct the dual."""
        return TimeDepKet
class Wavefunction(Function):
    """Class for representations in continuous bases

    This class takes an expression and coordinates in its constructor. It can
    be used to easily calculate normalizations and probabilities.

    Parameters
    ==========

    expr : Expr
        The expression representing the functional form of the w.f.

    coords : Symbol or tuple
        The coordinates to be integrated over, and their bounds

    Examples
    ========

    Particle in a box, specifying bounds in the more primitive way of using
    Piecewise:

    >>> from sympy import Symbol, Piecewise, pi, N
    >>> from sympy.functions import sqrt, sin
    >>> from sympsi.state import Wavefunction
    >>> x = Symbol('x', real=True)
    >>> n = 1
    >>> L = 1
    >>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
    >>> f = Wavefunction(g, x)
    >>> f.norm
    1
    >>> f.is_normalized
    True
    >>> p = f.prob()
    >>> p(0)
    0
    >>> p(L)
    0
    >>> p(0.5)
    2
    >>> p(0.85*L)
    2*sin(0.85*pi)**2
    >>> N(p(0.85*L))
    0.412214747707527

    Additionally, you can specify the bounds of the function and the indices in
    a more compact way:

    >>> from sympy import symbols, pi, diff
    >>> from sympy.functions import sqrt, sin
    >>> from sympsi.state import Wavefunction
    >>> x, L = symbols('x,L', positive=True)
    >>> n = symbols('n', integer=True, positive=True)
    >>> g = sqrt(2/L)*sin(n*pi*x/L)
    >>> f = Wavefunction(g, (x, 0, L))
    >>> f.norm
    1
    >>> f(L+1)
    0
    >>> f(L-1)
    sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
    >>> f(-1)
    0
    >>> f(0.85)
    sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
    >>> f(0.85, n=1, L=1)
    sqrt(2)*sin(0.85*pi)
    >>> f.is_commutative
    False

    All arguments are automatically sympified, so you can define the variables
    as strings rather than symbols:

    >>> expr = x**2
    >>> f = Wavefunction(expr, 'x')
    >>> type(f.variables[0])
    <class 'sympy.core.symbol.Symbol'>

    Derivatives of Wavefunctions will return Wavefunctions:

    >>> diff(f, x)
    Wavefunction(2*x, x)
    """

    def __new__(cls, *args, **options):
        # Any passed tuples for coordinates and their bounds need to be
        # converted to Tuples before Function's constructor is called, to
        # avoid errors from calling is_Float in the constructor.
        new_args = [Tuple(*arg) if isinstance(arg, tuple) else arg
                    for arg in args]
        return super(Function, cls).__new__(cls, *new_args, **options)

    def __call__(self, *args, **options):
        """Evaluate the functional form at the given coordinate values.

        Returns 0 when a coordinate value lies outside its declared bounds.
        Keyword arguments substitute free symbols by name, allowing calls
        like ``f(2, 4, m=1, n=1)``.
        """
        var = self.variables

        if len(args) != len(var):
            raise NotImplementedError(
                "Incorrect number of arguments to function!")

        # If the passed value is outside the specified bounds, return 0.
        # BUGFIX: the positional index used to be a manually-managed counter
        # that was NOT advanced when the symbolic-argument check below hit
        # ``continue``, so subsequent coordinates were compared against the
        # wrong argument. enumerate keeps index and coordinate in lockstep.
        for ct, v in enumerate(var):
            lower, upper = self.limits[v]

            # Do the comparison to limits only if the passed symbol is
            # actually a symbol present in the limits; had problems with a
            # comparison of x > L otherwise.
            if isinstance(args[ct], Expr) and \
                    not (lower in args[ct].free_symbols
                         or upper in args[ct].free_symbols):
                continue

            if (args[ct] < lower) == True or (args[ct] > upper) == True:
                return 0

        expr = self.expr

        # Allows user to make a call like f(2, 4, m=1, n=1): substitute
        # keyword-named free symbols before the positional coordinates.
        for symbol in list(expr.free_symbols):
            if str(symbol) in options.keys():
                val = options[str(symbol)]
                expr = expr.subs(symbol, val)

        return expr.subs(zip(var, args))

    def _eval_derivative(self, symbol):
        # Differentiate the functional form; keep the same coordinates/bounds.
        expr = self.expr
        deriv = expr._eval_derivative(symbol)
        return Wavefunction(deriv, *self.args[1:])

    def _eval_conjugate(self):
        return Wavefunction(conjugate(self.expr), *self.args[1:])

    def _eval_transpose(self):
        return self

    @property
    def free_symbols(self):
        return self.expr.free_symbols

    @property
    def is_commutative(self):
        """
        Override Function's is_commutative so that order is preserved in
        represented expressions
        """
        return False

    @classmethod
    def eval(self, *args):
        # Prevent automatic evaluation; Wavefunction instances remain
        # unevaluated symbolic objects.
        return None

    @property
    def variables(self):
        """
        Return the coordinates which the wavefunction depends on

        Examples
        ========

        >>> from sympsi.state import Wavefunction
        >>> from sympy import symbols
        >>> x,y = symbols('x,y')
        >>> f = Wavefunction(x*y, x, y)
        >>> f.variables
        (x, y)
        >>> g = Wavefunction(x*y, x)
        >>> g.variables
        (x,)
        """
        # A Tuple argument is (symbol, lower, upper); a bare symbol has no
        # explicit bounds.
        var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
        return tuple(var)

    @property
    def limits(self):
        """
        Return the limits of the coordinates which the w.f. depends on If no
        limits are specified, defaults to ``(-oo, oo)``.

        Examples
        ========

        >>> from sympsi.state import Wavefunction
        >>> from sympy import symbols
        >>> x, y = symbols('x, y')
        >>> f = Wavefunction(x**2, (x, 0, 1))
        >>> f.limits
        {x: (0, 1)}
        >>> f = Wavefunction(x**2, x)
        >>> f.limits
        {x: (-oo, oo)}
        >>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
        >>> f.limits
        {x: (-oo, oo), y: (-1, 2)}
        """
        limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
                  for g in self._args[1:]]
        return dict(zip(self.variables, tuple(limits)))

    @property
    def expr(self):
        """
        Return the expression which is the functional form of the Wavefunction

        Examples
        ========

        >>> from sympsi.state import Wavefunction
        >>> from sympy import symbols
        >>> x, y = symbols('x, y')
        >>> f = Wavefunction(x**2, x)
        >>> f.expr
        x**2
        """
        return self._args[0]

    @property
    def is_normalized(self):
        """
        Returns true if the Wavefunction is properly normalized

        Examples
        ========

        >>> from sympy import symbols, pi
        >>> from sympy.functions import sqrt, sin
        >>> from sympsi.state import Wavefunction
        >>> x, L = symbols('x,L', positive=True)
        >>> n = symbols('n', integer=True, positive=True)
        >>> g = sqrt(2/L)*sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.is_normalized
        True
        """
        return (self.norm == 1.0)

    @property
    @cacheit
    def norm(self):
        """
        Return the normalization of the specified functional form.

        This function integrates over the coordinates of the Wavefunction, with
        the bounds specified.

        Examples
        ========

        >>> from sympy import symbols, pi
        >>> from sympy.functions import sqrt, sin
        >>> from sympsi.state import Wavefunction
        >>> x, L = symbols('x,L', positive=True)
        >>> n = symbols('n', integer=True, positive=True)
        >>> g = sqrt(2/L)*sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.norm
        1
        >>> g = sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.norm
        sqrt(2)*sqrt(L)/2
        """
        exp = self.expr*conjugate(self.expr)
        var = self.variables
        limits = self.limits

        # Integrate |psi|^2 over each coordinate within its bounds.
        for v in var:
            curr_limits = limits[v]
            exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))

        return sqrt(exp)

    def normalize(self):
        """
        Return a normalized version of the Wavefunction

        Examples
        ========

        >>> from sympy import symbols, pi
        >>> from sympy.functions import sqrt, sin
        >>> from sympsi.state import Wavefunction
        >>> x = symbols('x', real=True)
        >>> L = symbols('L', positive=True)
        >>> n = symbols('n', integer=True, positive=True)
        >>> g = sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.normalize()
        Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
        """
        const = self.norm
        if const == oo:
            raise NotImplementedError("The function is not normalizable!")
        else:
            return Wavefunction((const)**(-1)*self.expr, *self.args[1:])

    def prob(self):
        r"""
        Return the absolute magnitude of the w.f., `|\psi(x)|^2`

        Examples
        ========

        >>> from sympy import symbols, pi
        >>> from sympy.functions import sqrt, sin
        >>> from sympsi.state import Wavefunction
        >>> x, L = symbols('x,L', real=True)
        >>> n = symbols('n', integer=True)
        >>> g = sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.prob()
        Wavefunction(sin(pi*n*x/L)**2, x)
        """
        # NOTE: bounds are intentionally dropped here (only the bare
        # variables are passed through), matching the documented output.
        return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
| |
from mrjob.job import MRJob
from mrjob.protocol import PickleProtocol, PickleValueProtocol
import numpy as np
import lxmls.readers.pos_corpus as pcc
from lxmls.sequences.hmm import HMM
import pickle
def load_sequence(s, word_dict, tag_dict):
    """
    seq = load_sequence(s, word_dict, tag_dict)

    Load a sequence from a single line.

    word_dict & tag_dict should be loaded from the file ``word_tag_dict.pkl``

    Parameters
    ----------
    s : str
        One sentence of ``word_tag`` pairs separated by spaces.
    word_dict : dict
    tag_dict : dict

    Returns
    -------
    seq : Sequence object
    """
    from lxmls.sequences.sequence_list import SequenceList
    container = SequenceList(word_dict, tag_dict)
    # Each token has the form "word_tag".
    tokens = [pair.split('_') for pair in s.rstrip().split(' ')]
    words = [tok[0] for tok in tokens]
    tags = [tok[1] for tok in tokens]
    container.add_sequence(words, tags)
    return container[0]
def predict_sequence(sequence, hmm):
    """
    log_likelihood, initial_counts, transition_counts, final_counts,\
    emission_counts = predict_sequence(seq, hmm)

    Run forward-backward on a single sentence and accumulate the expected
    counts (the E-step contribution of one sequence).

    Parameters
    ----------
    sequence : Sequence object
    hmm : HMM object

    Returns
    -------
    log_likelihood : float
    initial_counts : np.ndarray
    transition_counts : ndarray
    final_counts : ndarray
    emission_counts : ndarray
    """
    n_states = hmm.get_num_states()         # Number of states.
    n_obs = hmm.get_num_observations()      # Number of observation symbols.
    seq_len = len(sequence.x)               # Length of the sequence.

    # Score the observation sequence, then get posteriors via
    # forward-backward.
    initial_scores, transition_scores, final_scores, emission_scores = \
        hmm.compute_scores(sequence)
    state_posteriors, transition_posteriors, log_likelihood = \
        hmm.compute_posteriors(initial_scores,
                               transition_scores,
                               final_scores,
                               emission_scores)

    emission_counts = np.zeros((n_obs, n_states))
    initial_counts = np.zeros(n_states)
    transition_counts = np.zeros((n_states, n_states))
    final_counts = np.zeros(n_states)

    # Initial position counts.
    for y in range(n_states):
        initial_counts[y] += state_posteriors[0, y]

    # Emission and transition counts.
    for pos, x in enumerate(sequence.x):
        for y in range(n_states):
            emission_counts[x, y] += state_posteriors[pos, y]
            if pos > 0:
                for y_prev in range(n_states):
                    transition_counts[y, y_prev] += \
                        transition_posteriors[pos - 1, y, y_prev]

    # Final position counts.
    for y in range(n_states):
        final_counts[y] += state_posteriors[seq_len - 1, y]

    return log_likelihood, initial_counts, transition_counts, final_counts, emission_counts
def load_parameters(filename, hmm, smoothing):
    """
    load_parameters(filename, hmm, smoothing)

    Load the HMM counts stored in a text file and recompute the parameters.

    Useful lines have the form ``(<event> <label> [<label>])\\t<count>``.
    Lines without a tab (e.g. headers) and unrecognized event types are
    ignored.

    Parameters
    ----------
    filename : str
        Filename
    hmm : HMM object
        Will be overwritten
    smoothing : float
        Smoothing factor to use
    """
    hmm.clear_counts(smoothing)
    # FIX: use a context manager so the file is closed even if a malformed
    # line raises (the original only closed on the success path).
    with open(filename) as f:
        for line in f:
            if '\t' not in line:
                continue
            event, count = line.strip().split('\t')
            count = float(count)
            # Strip the parentheses around the event tuple.
            event = event[1:-1]
            fields = event.split(' ')
            kind = fields[0]
            if kind == 'initial':
                y = hmm.state_labels.get_label_id(fields[1])
                hmm.initial_counts[y] += count
            elif kind == 'transition':
                y = hmm.state_labels.get_label_id(fields[1])
                y_prev = hmm.state_labels.get_label_id(fields[2])
                hmm.transition_counts[y][y_prev] += count
            elif kind == 'final':
                y = hmm.state_labels.get_label_id(fields[1])
                hmm.final_counts[y] += count
            elif kind == 'emission':
                # NOTE(review): 'string-escape' decoding is Python 2 only;
                # the surrounding mrjob script is Python 2 code.
                x = hmm.observation_labels.get_label_id(fields[1].decode('string-escape'))
                y = hmm.state_labels.get_label_id(fields[2])
                hmm.emission_counts[x][y] += count
    hmm.compute_parameters()
# The students need to write this:
def combine_partials(counts, hmm):
    """
    combine_partials(counts, hmm)

    Sum the per-sentence results of ``predict_sequence`` and store the totals
    as attributes on the hmm object.

    Parameters
    ----------
    counts : iterable of tuples
        Each tuple is (log_likelihood, initial_counts, transition_counts,
        final_counts, emission_counts) as returned by ``predict_sequence``.
    hmm : HMM object
        Receives the summed values on the attributes of the same names.
    """
    # ``counts`` may be a one-shot iterator (mrjob reducer input), so make a
    # single pass over it, accumulating all five components together.
    totals = [0, 0, 0, 0, 0]
    for partial in counts:
        for i in range(5):
            totals[i] = totals[i] + partial[i]
    (hmm.log_likelihood, hmm.initial_counts, hmm.transition_counts,
     hmm.final_counts, hmm.emission_counts) = totals
# A single iteration of the distributed EM algorithm: mappers perform the
# E-step per sentence, the reducer combines partial counts and performs the
# M-step.
class EMStep(MRJob):
    # Pickle intermediate key/value pairs (numpy arrays must survive the
    # shuffle); pickle only the value of the final output (the HMM object).
    INTERNAL_PROTOCOL = PickleProtocol
    OUTPUT_PROTOCOL = PickleValueProtocol

    def __init__(self, *args, **kwargs):
        """Load the HMM from ``hmm.pkl`` if present, else initialize randomly."""
        MRJob.__init__(self, *args, **kwargs)
        from os import path
        filename = 'hmm.pkl'
        if path.exists(filename):
            # NOTE(review): 'string-escape' decoding is Python 2 only — this
            # job is written for Python 2.
            self.hmm = pickle.loads(open(filename).read().decode('string-escape'))
        else:
            # Initialize the HMM parameters randomly.
            self.hmm = HMM(word_dict, tag_dict)
            self.hmm.initialize_random()
        # Per-mapper partial sums, accumulated over every sequence this
        # mapper sees and emitted once in mapper_final.
        self.log_likelihood = 0
        self.initial_counts = 0
        self.emission_counts = 0
        self.transition_counts = 0
        self.final_counts = 0

    def mapper(self, key, s):
        # E-step for one sentence: accumulate its expected counts locally.
        seq = load_sequence(s, self.hmm.observation_labels, self.hmm.state_labels)
        log_likelihood, initial_counts, transition_counts, final_counts, emission_counts = predict_sequence(
            seq, self.hmm)
        self.log_likelihood += log_likelihood
        self.initial_counts += initial_counts
        self.emission_counts += emission_counts
        self.transition_counts += transition_counts
        self.final_counts += final_counts

    def mapper_final(self):
        # Emit one combined record per mapper under a shared key so a single
        # reducer receives all partial counts.
        yield 'result', (self.log_likelihood,
                         self.initial_counts,
                         self.transition_counts,
                         self.final_counts,
                         self.emission_counts)

    def reducer(self, key, counts):
        # M-step: combine the mappers' partial counts, then recompute and
        # emit the updated HMM.
        combine_partials(counts, self.hmm)
        self.hmm.compute_parameters()
        yield 'hmm', self.hmm
# Load the word and tag dictionaries.
# NOTE(review): this runs at import time on every node and assumes
# ``word_tag_dict.pkl`` is present in the working directory — the EMStep
# constructor above depends on these globals.
word_dict, tag_dict = pickle.load(open('word_tag_dict.pkl'))

if __name__ == '__main__':
    em_step = EMStep()
    em_step.run()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sequence-to-sequence model with an attention mechanism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import data_utils
class Seq2SeqModel(object):
"""Sequence-to-sequence model with attention and for multiple buckets.
This class implements a multi-layer recurrent neural network as encoder,
and an attention-based decoder. This is the same as the model described in
this paper: http://arxiv.org/abs/1412.7449 - please look there for details,
or into the seq2seq library for complete model implementation.
This class also allows to use GRU cells in addition to LSTM cells, and
sampled softmax to handle large output vocabulary size. A single-layer
version of this model, but with bi-directional encoder, was presented in
http://arxiv.org/abs/1409.0473
and sampled softmax is described in Section 3 of the following paper.
http://arxiv.org/abs/1412.2007
"""
  def __init__(self,
               source_vocab_size,
               target_vocab_size,
               buckets,
               size,
               num_layers,
               max_gradient_norm,
               batch_size,
               learning_rate,
               learning_rate_decay_factor,
               use_lstm=False,
               num_samples=512,
               forward_only=False,
               dtype=tf.float32):
    """Create the model.

    Args:
      source_vocab_size: size of the source vocabulary.
      target_vocab_size: size of the target vocabulary.
      buckets: a list of pairs (I, O), where I specifies maximum input length
        that will be processed in that bucket, and O specifies maximum output
        length. Training instances that have inputs longer than I or outputs
        longer than O will be pushed to the next bucket and padded accordingly.
        We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
      size: number of units in each layer of the model.
      num_layers: number of layers in the model.
      max_gradient_norm: gradients will be clipped to maximally this norm.
      batch_size: the size of the batches used during training;
        the model construction is independent of batch_size, so it can be
        changed after initialization if this is convenient, e.g., for decoding.
      learning_rate: learning rate to start with.
      learning_rate_decay_factor: decay learning rate by this much when needed.
      use_lstm: if true, we use LSTM cells instead of GRU cells.
      num_samples: number of samples for sampled softmax.
      forward_only: if set, we do not construct the backward pass in the model.
      dtype: the data type to use to store internal variables.
    """
    self.source_vocab_size = source_vocab_size
    self.target_vocab_size = target_vocab_size
    self.buckets = buckets
    self.batch_size = batch_size
    # The learning rate lives in the graph (non-trainable) so it can be
    # decayed by running learning_rate_decay_op.
    self.learning_rate = tf.Variable(
        float(learning_rate), trainable=False, dtype=dtype)
    self.learning_rate_decay_op = self.learning_rate.assign(
        self.learning_rate * learning_rate_decay_factor)
    self.global_step = tf.Variable(0, trainable=False)

    # If we use sampled softmax, we need an output projection.
    output_projection = None
    softmax_loss_function = None
    # Sampled softmax only makes sense if we sample less than vocabulary size.
    if num_samples > 0 and num_samples < self.target_vocab_size:
      w_t = tf.get_variable("proj_w", [self.target_vocab_size, size], dtype=dtype)
      w = tf.transpose(w_t)
      b = tf.get_variable("proj_b", [self.target_vocab_size], dtype=dtype)
      output_projection = (w, b)

      def sampled_loss(inputs, labels):
        # NOTE(review): the (inputs, labels) argument order matches the
        # pre-1.0 tf.nn.sampled_softmax_loss signature — confirm against
        # the TF version pinned for this file.
        labels = tf.reshape(labels, [-1, 1])
        # We need to compute the sampled_softmax_loss using 32bit floats to
        # avoid numerical instabilities.
        local_w_t = tf.cast(w_t, tf.float32)
        local_b = tf.cast(b, tf.float32)
        local_inputs = tf.cast(inputs, tf.float32)
        return tf.cast(
            tf.nn.sampled_softmax_loss(local_w_t, local_b, local_inputs, labels,
                                       num_samples, self.target_vocab_size),
            dtype)
      softmax_loss_function = sampled_loss

    # Create the internal multi-layer cell for our RNN.
    single_cell = tf.nn.rnn_cell.GRUCell(size)
    if use_lstm:
      single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)
    cell = single_cell
    if num_layers > 1:
      cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)

    # The seq2seq function: we use embedding for the input and attention.
    def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
      return tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(
          encoder_inputs,
          decoder_inputs,
          cell,
          num_encoder_symbols=source_vocab_size,
          num_decoder_symbols=target_vocab_size,
          embedding_size=size,
          output_projection=output_projection,
          feed_previous=do_decode,
          dtype=dtype)

    # Feeds for inputs.
    self.encoder_inputs = []
    self.decoder_inputs = []
    self.target_weights = []
    for i in xrange(buckets[-1][0]):  # Last bucket is the biggest one.
      self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
                                                name="encoder{0}".format(i)))
    # One extra decoder slot: targets are decoder inputs shifted by one.
    for i in xrange(buckets[-1][1] + 1):
      self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
                                                name="decoder{0}".format(i)))
      self.target_weights.append(tf.placeholder(dtype, shape=[None],
                                                name="weight{0}".format(i)))

    # Our targets are decoder inputs shifted by one.
    targets = [self.decoder_inputs[i + 1]
               for i in xrange(len(self.decoder_inputs) - 1)]

    # Training outputs and losses.
    if forward_only:
      self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
          self.encoder_inputs, self.decoder_inputs, targets,
          self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True),
          softmax_loss_function=softmax_loss_function)
      # If we use output projection, we need to project outputs for decoding.
      if output_projection is not None:
        for b in xrange(len(buckets)):
          self.outputs[b] = [
              tf.matmul(output, output_projection[0]) + output_projection[1]
              for output in self.outputs[b]
          ]
    else:
      self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
          self.encoder_inputs, self.decoder_inputs, targets,
          self.target_weights, buckets,
          lambda x, y: seq2seq_f(x, y, False),
          softmax_loss_function=softmax_loss_function)

    # Gradients and SGD update operation for training the model.
    params = tf.trainable_variables()
    if not forward_only:
      self.gradient_norms = []
      self.updates = []
      opt = tf.train.GradientDescentOptimizer(self.learning_rate)
      for b in xrange(len(buckets)):
        gradients = tf.gradients(self.losses[b], params)
        clipped_gradients, norm = tf.clip_by_global_norm(gradients,
                                                         max_gradient_norm)
        self.gradient_norms.append(norm)
        self.updates.append(opt.apply_gradients(
            zip(clipped_gradients, params), global_step=self.global_step))

    # NOTE(review): tf.all_variables() is the pre-1.0 spelling (renamed to
    # tf.global_variables() in TF >= 1.0) — keep consistent with the TF
    # version this file targets.
    self.saver = tf.train.Saver(tf.all_variables())
def step(self, session, encoder_inputs, decoder_inputs, target_weights,
         bucket_id, forward_only):
    """Run a step of the model feeding the given inputs.

    Args:
      session: tensorflow session to use.
      encoder_inputs: list of numpy int vectors to feed as encoder inputs.
      decoder_inputs: list of numpy int vectors to feed as decoder inputs.
      target_weights: list of numpy float vectors to feed as target weights.
      bucket_id: which bucket of the model to use.
      forward_only: whether to do the backward step or only forward.

    Returns:
      A triple consisting of gradient norm (or None if we did not do backward),
      average perplexity, and the outputs.

    Raises:
      ValueError: if length of encoder_inputs, decoder_inputs, or
        target_weights disagrees with bucket size for the specified bucket_id.
    """
    # Check if the sizes match the bucket's (encoder_size, decoder_size).
    encoder_size, decoder_size = self.buckets[bucket_id]
    if len(encoder_inputs) != encoder_size:
        raise ValueError("Encoder length must be equal to the one in bucket,"
                         " %d != %d." % (len(encoder_inputs), encoder_size))
    if len(decoder_inputs) != decoder_size:
        raise ValueError("Decoder length must be equal to the one in bucket,"
                         " %d != %d." % (len(decoder_inputs), decoder_size))
    if len(target_weights) != decoder_size:
        raise ValueError("Weights length must be equal to the one in bucket,"
                         " %d != %d." % (len(target_weights), decoder_size))
    # Input feed: encoder inputs, decoder inputs, target_weights, as provided.
    # Keyed by placeholder *name* rather than the tensor object itself.
    input_feed = {}
    for l in xrange(encoder_size):
        input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
    for l in xrange(decoder_size):
        input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
        input_feed[self.target_weights[l].name] = target_weights[l]
    # Since our targets are decoder inputs shifted by one, we need one more
    # decoder input; feed zeros so the final (unused) target is defined.
    last_target = self.decoder_inputs[decoder_size].name
    input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)
    # Output feed: depends on whether we do a backward step or not.
    if not forward_only:
        output_feed = [self.updates[bucket_id],  # Update Op that does SGD.
                       self.gradient_norms[bucket_id],  # Gradient norm.
                       self.losses[bucket_id]]  # Loss for this batch.
    else:
        output_feed = [self.losses[bucket_id]]  # Loss for this batch.
        for l in xrange(decoder_size):  # Output logits.
            output_feed.append(self.outputs[bucket_id][l])
    outputs = session.run(output_feed, input_feed)
    # Return shape depends on forward_only; indices below match output_feed.
    if not forward_only:
        return outputs[1], outputs[2], None  # Gradient norm, loss, no outputs.
    else:
        return None, outputs[0], outputs[1:]  # No gradient norm, loss, outputs.
def get_batch(self, data, bucket_id):
    """Get a random batch of data from the specified bucket, prepare for step.

    To feed data in step(..) it must be a list of batch-major vectors, while
    data here contains single length-major cases, so the bulk of this method
    re-indexes the sampled cases into the format step(...) expects.

    Args:
      data: a tuple of size len(self.buckets) in which each element contains
        lists of pairs of input and output data that we use to create a batch.
      bucket_id: integer, which bucket to get the batch for.

    Returns:
      The triple (encoder_inputs, decoder_inputs, target_weights) for
      the constructed batch that has the proper format to call step(...) later.
    """
    enc_size, dec_size = self.buckets[bucket_id]
    enc_rows, dec_rows = [], []
    # Sample batch_size random (input, output) pairs from the bucket.
    # Encoder side: pad to enc_size, then reverse. Decoder side: prepend the
    # GO symbol and pad to dec_size.
    for _ in xrange(self.batch_size):
        src, tgt = random.choice(data[bucket_id])
        src_pad = [data_utils.PAD_ID] * (enc_size - len(src))
        enc_rows.append(list(reversed(src + src_pad)))
        tgt_pad = [data_utils.PAD_ID] * (dec_size - len(tgt) - 1)
        dec_rows.append([data_utils.GO_ID] + tgt + tgt_pad)
    # Transpose from batch-major rows into length-major numpy vectors.
    batch_enc = [np.array([row[pos] for row in enc_rows], dtype=np.int32)
                 for pos in xrange(enc_size)]
    batch_dec, batch_w = [], []
    for pos in xrange(dec_size):
        batch_dec.append(np.array([row[pos] for row in dec_rows],
                                  dtype=np.int32))
        # Weights are 1 except where the corresponding target (the decoder
        # input shifted forward by one) is PAD, or at the final position.
        weight = np.ones(self.batch_size, dtype=np.float32)
        for row_idx in xrange(self.batch_size):
            if pos < dec_size - 1:
                target = dec_rows[row_idx][pos + 1]
            if pos == dec_size - 1 or target == data_utils.PAD_ID:
                weight[row_idx] = 0.0
        batch_w.append(weight)
    return batch_enc, batch_dec, batch_w
| |
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import importutils
storops = importutils.try_import('storops')
if storops:
# pylint: disable=import-error
from storops import exception as storops_ex
from storops.unity import enums
from manila.common import constants as const
from manila import exception
from manila.i18n import _
from manila.share.drivers.dell_emc.common.enas import utils as enas_utils
from manila.share.drivers.dell_emc.plugins.unity import utils
LOG = log.getLogger(__name__)
class UnityClient(object):
    """Client wrapping a storops ``UnitySystem`` for manila share operations.

    Provides create/delete/access helpers for CIFS/NFS shares, filesystems,
    snapshots, NAS servers and tenants on an EMC Unity array. Most creation
    methods translate "already exists" storops errors into idempotent
    behavior by returning the existing resource, and most deletion methods
    treat "not found" as success.
    """

    def __init__(self, host, username, password):
        """Connect to the Unity system at ``host`` with the given credentials."""
        # NOTE(review): if storops is not importable we only log here; the
        # next line then fails with AttributeError. Raising a clearer
        # exception would change the error type callers observe, so the
        # original behavior is preserved.
        if storops is None:
            LOG.error('StorOps is required to run EMC Unity driver.')
        self.system = storops.UnitySystem(host, username, password)

    def create_cifs_share(self, resource, share_name):
        """Create CIFS share from the resource.

        :param resource: either UnityFilesystem or UnitySnap object
        :param share_name: CIFS share name
        :return: UnityCifsShare object
        """
        try:
            return resource.create_cifs_share(share_name)
        except storops_ex.UnitySmbShareNameExistedError:
            # Idempotent: the share already exists, return it.
            return self.get_share(share_name, 'CIFS')

    def create_nfs_share(self, resource, share_name):
        """Create NFS share from the resource.

        :param resource: either UnityFilesystem or UnitySnap object
        :param share_name: NFS share name
        :return: UnityNfsShare object
        """
        try:
            return resource.create_nfs_share(share_name)
        except storops_ex.UnityNfsShareNameExistedError:
            # Idempotent: the share already exists, return it.
            return self.get_share(share_name, 'NFS')

    def create_nfs_filesystem_and_share(self, pool, nas_server,
                                        share_name, size_gb):
        """Create filesystem and share from pool/NAS server.

        :param pool: pool for file system creation
        :param nas_server: nas server for file system creation
        :param share_name: file system and share name
        :param size_gb: file system size
        """
        size = utils.gib_to_byte(size_gb)
        pool.create_nfs_share(
            nas_server, share_name, size, user_cap=True)

    def get_share(self, name, share_proto):
        """Look up a share by name for the given protocol ('CIFS' or 'NFS').

        :raises exception.BadConfigurationException: for any other protocol.
        """
        # Validate the share protocol
        proto = share_proto.upper()
        if proto == 'CIFS':
            return self.system.get_cifs_share(name=name)
        elif proto == 'NFS':
            return self.system.get_nfs_share(name=name)
        else:
            raise exception.BadConfigurationException(
                reason=_('Invalid NAS protocol supplied: %s.') % share_proto)

    @staticmethod
    def delete_share(share):
        """Delete the given share object."""
        share.delete()

    def create_filesystem(self, pool, nas_server, share_name, size_gb, proto):
        """Create a filesystem; return the existing one if the name is taken."""
        try:
            size = utils.gib_to_byte(size_gb)
            return pool.create_filesystem(nas_server,
                                          share_name,
                                          size,
                                          proto=proto,
                                          user_cap=True)
        except storops_ex.UnityFileSystemNameAlreadyExisted:
            LOG.debug('Filesystem %s already exists, '
                      'ignoring filesystem creation.', share_name)
            return self.system.get_filesystem(name=share_name)

    @staticmethod
    def delete_filesystem(filesystem):
        """Delete a filesystem; treat "not found" as already deleted."""
        try:
            filesystem.delete()
        except storops_ex.UnityResourceNotFoundError:
            LOG.info('Filesystem %s is already removed.', filesystem.name)

    def create_nas_server(self, name, sp, pool, tenant=None):
        """Create a NAS server; return the existing one if the name is taken."""
        try:
            return self.system.create_nas_server(name, sp, pool,
                                                 tenant=tenant)
        except storops_ex.UnityNasServerNameUsedError:
            LOG.info('Share server %s already exists, ignoring share '
                     'server creation.', name)
            return self.get_nas_server(name)

    def get_nas_server(self, name):
        """Look up a NAS server by name; log and re-raise if not found."""
        try:
            return self.system.get_nas_server(name=name)
        except storops_ex.UnityResourceNotFoundError:
            LOG.info('NAS server %s not found.', name)
            raise

    def delete_nas_server(self, name, username=None, password=None):
        """Delete a NAS server and, if one was attached, its unused tenant."""
        tenant = None
        try:
            nas_server = self.get_nas_server(name=name)
            tenant = nas_server.tenant
            nas_server.delete(username=username, password=password)
        except storops_ex.UnityResourceNotFoundError:
            LOG.info('NAS server %s not found.', name)
        # Clean up the tenant only after the NAS server is gone.
        if tenant is not None:
            self._delete_tenant(tenant)

    @staticmethod
    def _delete_tenant(tenant):
        """Delete a tenant unless other NAS servers still belong to it."""
        if tenant.nas_servers:
            LOG.debug('There are NAS servers belonging to the tenant %s. '
                      'Do not delete it.',
                      tenant.get_id())
            return
        try:
            tenant.delete(delete_hosts=True)
        except storops_ex.UnityException as ex:
            # Best effort: a leftover tenant is harmless, so only warn.
            LOG.warning('Delete tenant %(tenant)s failed with error: '
                        '%(ex)s. Leave the tenant on the system.',
                        {'tenant': tenant.get_id(),
                         'ex': ex})

    @staticmethod
    def create_dns_server(nas_server, domain, dns_ip):
        """Create a DNS server on the NAS server (one per NAS server)."""
        try:
            nas_server.create_dns_server(domain, dns_ip)
        except storops_ex.UnityOneDnsPerNasServerError:
            LOG.info('DNS server %s already exists, '
                     'ignoring DNS server creation.', domain)

    @staticmethod
    def create_interface(nas_server, ip_addr, netmask, gateway, port_id,
                         vlan_id=None, prefix_length=None):
        """Create a file interface on the NAS server.

        :raises exception.IPAddressInUse: if ``ip_addr`` is already taken.
        """
        try:
            nas_server.create_file_interface(port_id,
                                             ip_addr,
                                             netmask=netmask,
                                             v6_prefix_length=prefix_length,
                                             gateway=gateway,
                                             vlan_id=vlan_id)
        except storops_ex.UnityIpAddressUsedError:
            raise exception.IPAddressInUse(ip=ip_addr)

    @staticmethod
    def enable_cifs_service(nas_server, domain, username, password):
        """Enable the CIFS service on the NAS server, joining ``domain``."""
        try:
            nas_server.enable_cifs_service(
                nas_server.file_interface,
                domain=domain,
                domain_username=username,
                domain_password=password)
        except storops_ex.UnitySmbNameInUseError:
            LOG.info('CIFS service on NAS server %s is '
                     'already enabled.', nas_server.name)

    @staticmethod
    def enable_nfs_service(nas_server):
        """Enable the NFS service on the NAS server (idempotent)."""
        try:
            nas_server.enable_nfs_service()
        except storops_ex.UnityNfsAlreadyEnabledError:
            LOG.info('NFS service on NAS server %s is '
                     'already enabled.', nas_server.name)

    @staticmethod
    def create_snapshot(filesystem, name):
        """Create a checkpoint snapshot of a filesystem (idempotent)."""
        access_type = enums.FilesystemSnapAccessTypeEnum.CHECKPOINT
        try:
            return filesystem.create_snap(name, fs_access_type=access_type)
        except storops_ex.UnitySnapNameInUseError:
            LOG.info('Snapshot %(snap)s on Filesystem %(fs)s already '
                     'exists.', {'snap': name, 'fs': filesystem.name})

    def create_snap_of_snap(self, src_snap, dst_snap_name):
        """Create a snapshot of a snapshot.

        ``src_snap`` may be a snapshot name (str) or a UnitySnap object.
        """
        if isinstance(src_snap, str):
            snap = self.get_snapshot(name=src_snap)
        else:
            snap = src_snap
        try:
            return snap.create_snap(dst_snap_name)
        except storops_ex.UnitySnapNameInUseError:
            return self.get_snapshot(dst_snap_name)

    def get_snapshot(self, name):
        """Look up a snapshot by name."""
        return self.system.get_snap(name=name)

    @staticmethod
    def delete_snapshot(snap):
        """Delete a snapshot; treat "not found" as already deleted."""
        try:
            snap.delete()
        except storops_ex.UnityResourceNotFoundError:
            LOG.info('Snapshot %s is already removed.', snap.name)

    def get_pool(self, name=None):
        """Return the pool with ``name``, or all pools when name is None."""
        return self.system.get_pool(name=name)

    def get_storage_processor(self, sp_id=None):
        """Return the SP with ``sp_id``, or all existing SPs when it is None."""
        sp = self.system.get_sp(sp_id)
        if sp_id is None:
            # `sp` is a list of SPA and SPB.
            return [s for s in sp if s is not None and s.existed]
        else:
            return sp if sp.existed else None

    def cifs_clear_access(self, share_name, white_list=None):
        """Remove all CIFS ACEs except the users in ``white_list``."""
        share = self.system.get_cifs_share(name=share_name)
        share.clear_access(white_list)

    def nfs_clear_access(self, share_name, white_list=None):
        """Remove all NFS access except the hosts in ``white_list``."""
        share = self.system.get_nfs_share(name=share_name)
        share.clear_access(white_list, force_create_host=True)

    def cifs_allow_access(self, share_name, user_name, access_level):
        """Grant a CIFS user read-only or read-write access to the share."""
        share = self.system.get_cifs_share(name=share_name)
        if access_level == const.ACCESS_LEVEL_RW:
            cifs_access = enums.ACEAccessLevelEnum.WRITE
        else:
            cifs_access = enums.ACEAccessLevelEnum.READ
        share.add_ace(user=user_name, access_level=cifs_access)

    def nfs_allow_access(self, share_name, host_ip, access_level):
        """Grant a host read-only or read-write (plus root) NFS access."""
        share = self.system.get_nfs_share(name=share_name)
        host_ip = enas_utils.convert_ipv6_format_if_needed(host_ip)
        if access_level == const.ACCESS_LEVEL_RW:
            share.allow_read_write_access(host_ip, force_create_host=True)
            share.allow_root_access(host_ip, force_create_host=True)
        else:
            share.allow_read_only_access(host_ip, force_create_host=True)

    def cifs_deny_access(self, share_name, user_name):
        """Revoke a CIFS user's access; missing ACL user is not an error."""
        share = self.system.get_cifs_share(name=share_name)
        try:
            share.delete_ace(user=user_name)
        except storops_ex.UnityAclUserNotFoundError:
            LOG.debug('ACL User "%(user)s" does not exist.',
                      {'user': user_name})

    def nfs_deny_access(self, share_name, host_ip):
        """Revoke a host's NFS access; missing host is not an error."""
        share = self.system.get_nfs_share(name=share_name)
        try:
            share.delete_access(host_ip)
        except storops_ex.UnityHostNotFoundException:
            LOG.info('%(host)s access to %(share)s is already removed.',
                     {'host': host_ip, 'share': share_name})

    def get_file_ports(self):
        """Return link-up, externally-usable file ports on the system."""
        ports = self.system.get_file_port()
        link_up_ports = []
        for port in ports:
            if port.is_link_up and self._is_external_port(port.id):
                link_up_ports.append(port)
        return link_up_ports

    def extend_filesystem(self, fs, new_size_gb):
        """Extend a filesystem to ``new_size_gb``; return the byte size."""
        size = utils.gib_to_byte(new_size_gb)
        try:
            fs.extend(size, user_cap=True)
        except storops_ex.UnityNothingToModifyError:
            # Already at the requested size; nothing to do.
            LOG.debug('The size of the file system %(id)s is %(size)s '
                      'bytes.', {'id': fs.get_id(), 'size': size})
        return size

    def shrink_filesystem(self, share_id, fs, new_size_gb):
        """Shrink a filesystem to ``new_size_gb``; return the byte size.

        :raises exception.ShareShrinkingPossibleDataLoss: if used space
            exceeds the requested size.
        """
        size = utils.gib_to_byte(new_size_gb)
        try:
            fs.shrink(size, user_cap=True)
        except storops_ex.UnityNothingToModifyError:
            # Already at the requested size; nothing to do.
            LOG.debug('The size of the file system %(id)s is %(size)s '
                      'bytes.', {'id': fs.get_id(), 'size': size})
        except storops_ex.UnityShareShrinkSizeTooSmallError:
            # Fixed log message: added missing space and "date" -> "data".
            LOG.error('The used size of the file system %(id)s is '
                      'bigger than input shrink size, '
                      'it may cause data loss.', {'id': fs.get_id()})
            raise exception.ShareShrinkingPossibleDataLoss(share_id=share_id)
        return size

    @staticmethod
    def _is_external_port(port_id):
        """True for Ethernet ports and link aggregations (not internal ports)."""
        return 'eth' in port_id or '_la' in port_id

    def get_tenant(self, name, vlan_id):
        """Get or create the tenant that owns ``vlan_id``.

        Returns None for flat networks (no VLAN) or when the system does not
        support tenants.
        """
        if not vlan_id:
            # Do not create vlan for flat network
            return None
        tenant = None
        try:
            tenant_name = "vlan_%(vlan_id)s_%(name)s" % {'vlan_id': vlan_id,
                                                         'name': name}
            tenant = self.system.create_tenant(tenant_name, vlans=[vlan_id])
        except (storops_ex.UnityVLANUsedByOtherTenantError,
                storops_ex.UnityTenantNameInUseError,
                storops_ex.UnityVLANAlreadyHasInterfaceError):
            # The VLAN may already belong to a tenant; reuse it if so,
            # otherwise re-raise the original error.
            with excutils.save_and_reraise_exception() as exc:
                tenant = self.system.get_tenant_use_vlan(vlan_id)
                if tenant is not None:
                    LOG.debug("The VLAN %s is already added into a tenant. "
                              "Use the existing VLAN tenant.", vlan_id)
                    exc.reraise = False
        except storops_ex.SystemAPINotSupported:
            LOG.info("This system doesn't support tenant.")
        return tenant

    def restore_snapshot(self, snap_name):
        """Restore a filesystem from the named snapshot."""
        snap = self.get_snapshot(snap_name)
        return snap.restore(delete_backup=True)
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements a FloatWithUnit, which is a subclass of float. It
also defines supported units for some commonly used units for energy, length,
temperature, time and charge. FloatWithUnit also support conversion to one
another, and additions and subtractions perform automatic conversion if
units are detected. An ArrayWithUnit is also implemented, which is a subclass
of numpy's ndarray with similar unit features.
"""
import collections
import numbers
from functools import partial
import numpy as np
import scipy.constants as const
__author__ = "Shyue Ping Ong, Matteo Giantomassi"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong, Matteo Giantomassi"
__status__ = "Production"
__date__ = "Aug 30, 2013"
"""
Some conversion factors
"""
Ha_to_eV = 1 / const.physical_constants["electron volt-hartree relationship"][0]
eV_to_Ha = 1 / Ha_to_eV
Ry_to_eV = Ha_to_eV / 2
amu_to_kg = const.physical_constants["atomic mass unit-kilogram relationship"][0]
mile_to_meters = const.mile
bohr_to_angstrom = const.physical_constants["Bohr radius"][0] * 1e10
bohr_to_ang = bohr_to_angstrom
ang_to_bohr = 1 / bohr_to_ang
"""
Definitions of supported units. Values below are essentially scaling and
conversion factors. What matters is the relative values, not the absolute.
The SI units must have factor 1.
"""
BASE_UNITS = {
"length": {
"m": 1,
"km": 1000,
"mile": mile_to_meters,
"ang": 1e-10,
"cm": 1e-2,
"pm": 1e-12,
"bohr": bohr_to_angstrom * 1e-10,
},
"mass": {
"kg": 1,
"g": 1e-3,
"amu": amu_to_kg,
},
"time": {
"s": 1,
"min": 60,
"h": 3600,
"d": 3600 * 24,
},
"current": {
"A": 1
},
"temperature": {
"K": 1,
},
"amount": {
"mol": 1,
"atom": 1 / const.N_A
},
"intensity": {
"cd": 1
},
"memory": {
"byte": 1,
"Kb": 1024,
"Mb": 1024 ** 2,
"Gb": 1024 ** 3,
"Tb": 1024 ** 4,
},
}
# Accept kb, mb, gb ... as well.
BASE_UNITS["memory"].update({k.lower(): v
for k, v in BASE_UNITS["memory"].items()})
# This current list are supported derived units defined in terms of powers of
# SI base units and constants.
DERIVED_UNITS = {
"energy": {
"eV": {"kg": 1, "m": 2, "s": -2, const.e: 1},
"meV": {"kg": 1, "m": 2, "s": -2, const.e * 1e-3: 1},
"Ha": {"kg": 1, "m": 2, "s": -2, const.e * Ha_to_eV: 1},
"Ry": {"kg": 1, "m": 2, "s": -2, const.e * Ry_to_eV: 1},
"J": {"kg": 1, "m": 2, "s": -2},
"kJ": {"kg": 1, "m": 2, "s": -2, 1000: 1}
},
"charge": {
"C": {"A": 1, "s": 1},
"e": {"A": 1, "s": 1, const.e: 1},
},
"force": {
"N": {"kg": 1, "m": 1, "s": -2},
"KN": {"kg": 1, "m": 1, "s": -2, 1000: 1},
"MN": {"kg": 1, "m": 1, "s": -2, 1e6: 1},
"GN": {"kg": 1, "m": 1, "s": -2, 1e9: 1},
},
"frequency": {
"Hz": {"s": -1},
"KHz": {"s": -1, 1000: 1},
"MHz": {"s": -1, 1e6: 1},
"GHz": {"s": -1, 1e9: 1},
"THz": {"s": -1, 1e12: 1},
},
"pressure": {
"Pa": {"kg": 1, "m": -1, "s": -2},
"KPa": {"kg": 1, "m": -1, "s": -2, 1000: 1},
"MPa": {"kg": 1, "m": -1, "s": -2, 1e6: 1},
"GPa": {"kg": 1, "m": -1, "s": -2, 1e9: 1}
},
"power": {
"W": {"m": 2, "kg": 1, "s": -3},
"KW": {"m": 2, "kg": 1, "s": -3, 1000: 1},
"MW": {"m": 2, "kg": 1, "s": -3, 1e6: 1},
"GW": {"m": 2, "kg": 1, "s": -3, 1e9: 1}
},
"emf": {
"V": {"m": 2, "kg": 1, "s": -3, "A": -1}
},
"capacitance": {
"F": {"m": -2, "kg": -1, "s": 4, "A": 2}
},
"resistance": {
"ohm": {"m": 2, "kg": 1, "s": -3, "A": -2}
},
"conductance": {
"S": {"m": -2, "kg": -1, "s": 3, "A": 2}
},
"magnetic_flux": {
"Wb": {"m": 2, "kg": 1, "s": -2, "A": -1}
},
"cross_section": {
"barn": {"m": 2, 1E-28: 1},
"mbarn": {"m": 2, 1E-31: 1}
}
}
ALL_UNITS = dict(list(BASE_UNITS.items()) + list(DERIVED_UNITS.items())) # type: ignore
SUPPORTED_UNIT_NAMES = tuple([i for d in ALL_UNITS.values() for i in d.keys()])
# Mapping unit name --> unit type (unit names must be unique).
_UNAME2UTYPE = {} # type: ignore
for utype, d in ALL_UNITS.items():
assert not set(d.keys()).intersection(_UNAME2UTYPE.keys())
_UNAME2UTYPE.update({uname: utype for uname in d})
del utype, d
def _get_si_unit(unit):
    """Return ``(si_unit_name, factor)`` for a base unit name.

    The SI unit of a type is the entry with scaling factor 1 in BASE_UNITS;
    ``factor`` converts ``unit`` into that SI unit.
    """
    utype = _UNAME2UTYPE[unit]
    scale_table = BASE_UNITS[utype]
    si_names = [name for name, factor in scale_table.items() if factor == 1]
    return si_names[0], scale_table[unit]
class UnitError(BaseException):
    """
    Exception class for unit errors.

    NOTE(review): subclasses BaseException rather than Exception, so a bare
    ``except Exception`` will not catch it; kept as-is since callers may
    depend on the existing hierarchy.
    """
def _check_mappings(u):
    """Collapse a power mapping into a single derived unit when possible.

    If ``u`` is exactly equal (compared in both directions, treating missing
    keys as power 0) to the definition of some derived unit, return
    ``{name: 1}`` for that unit; otherwise return ``u`` unchanged.
    """
    for definitions in DERIVED_UNITS.values():
        for name, powers in definitions.items():
            forward = all(powers.get(base, 0) == exp for base, exp in u.items())
            backward = all(u.get(base, 0) == exp for base, exp in powers.items())
            if forward and backward:
                return {name: 1}
    return u
class Unit(collections.abc.Mapping):
    """
    Represents a unit, e.g., "m" for meters, etc. Supports compound units.
    Only integer powers are supported for units.
    """

    Error = UnitError

    def __init__(self, unit_def):
        """
        Constructs a unit.

        Args:
            unit_def: A definition for the unit. Either a mapping of unit to
                powers, e.g., {"m": 2, "s": -1} represents "m^2 s^-1",
                or simply as a string "kg m^2 s^-1". Note that the supported
                format uses "^" as the power operator and all units must be
                space-separated.
        """
        if isinstance(unit_def, str):
            # Parse e.g. "kg m^2 s^-1"; a repeated unit accumulates its powers.
            unit = collections.defaultdict(int)
            import re
            for m in re.finditer(r"([A-Za-z]+)\s*\^*\s*([\-0-9]*)", unit_def):
                p = m.group(2)
                # Missing exponent means power 1.
                p = 1 if not p else int(p)
                k = m.group(1)
                unit[k] += p
        else:
            # Drop zero powers so equivalent units compare equal.
            unit = {k: v for k, v in dict(unit_def).items() if v != 0}
        # Collapse to a single derived unit if the mapping matches one
        # exactly (e.g. {"kg": 1, "m": 1, "s": -2} -> {"N": 1}).
        self._unit = _check_mappings(unit)

    def __mul__(self, other):
        # Multiplying units adds their powers.
        new_units = collections.defaultdict(int)
        for k, v in self.items():
            new_units[k] += v
        for k, v in other.items():
            new_units[k] += v
        return Unit(new_units)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __div__(self, other):
        # Dividing units subtracts the divisor's powers.
        new_units = collections.defaultdict(int)
        for k, v in self.items():
            new_units[k] += v
        for k, v in other.items():
            new_units[k] -= v
        return Unit(new_units)

    def __truediv__(self, other):
        return self.__div__(other)

    def __pow__(self, i):
        return Unit({k: v * i for k, v in self.items()})

    def __iter__(self):
        return self._unit.__iter__()

    def __getitem__(self, i):
        return self._unit[i]

    def __len__(self):
        return len(self._unit)

    def __repr__(self):
        # Render highest powers first (ties alphabetical); omit "^1" and
        # zero-power entries, e.g. "kg m^2 s^-2".
        sorted_keys = sorted(self._unit.keys(),
                             key=lambda k: (-self._unit[k], k))
        return " ".join(["{}^{}".format(k, self._unit[k])
                         if self._unit[k] != 1 else k
                         for k in sorted_keys if self._unit[k] != 0])

    def __str__(self):
        return self.__repr__()

    @property
    def as_base_units(self):
        """
        Converts all units to base SI units, including derived units.

        Returns:
            (base_units_dict, scaling factor). base_units_dict will not
            contain any constants, which are gathered in the scaling factor.
        """
        b = collections.defaultdict(int)
        factor = 1
        for k, v in self.items():
            derived = False
            for d in DERIVED_UNITS.values():
                if k in d:
                    # Expand the derived unit: numeric keys are pure scale
                    # factors, string keys are base-unit powers.
                    for k2, v2 in d[k].items():
                        if isinstance(k2, numbers.Number):
                            factor *= k2 ** (v2 * v)
                        else:
                            b[k2] += v2 * v
                    derived = True
                    break
            if not derived:
                # Base unit: fold its scale into the overall factor.
                si, f = _get_si_unit(k)
                b[si] += v
                factor *= f ** v
        return {k: v for k, v in b.items() if v != 0}, factor

    def get_conversion_factor(self, new_unit):
        """
        Returns a conversion factor between this unit and a new unit.
        Compound units are supported, but must have the same powers in each
        unit type.

        Args:
            new_unit: The new unit.

        Raises:
            UnitError: if the two units do not have matching powers per type.
        """
        uo_base, ofactor = self.as_base_units
        un_base, nfactor = Unit(new_unit).as_base_units
        # Align the two base-unit lists by unit type before comparing powers.
        units_new = sorted(un_base.items(),
                           key=lambda d: _UNAME2UTYPE[d[0]])
        units_old = sorted(uo_base.items(),
                           key=lambda d: _UNAME2UTYPE[d[0]])
        factor = ofactor / nfactor
        for uo, un in zip(units_old, units_new):
            if uo[1] != un[1]:
                raise UnitError("Units %s and %s are not compatible!" % (uo, un))
            c = ALL_UNITS[_UNAME2UTYPE[uo[0]]]
            factor *= (c[uo[0]] / c[un[0]]) ** uo[1]
        return factor
class FloatWithUnit(float):
    """
    Subclasses float to attach a unit type. Typically, you should use the
    pre-defined unit type subclasses such as Energy, Length, etc. instead of
    using FloatWithUnit directly.

    Supports conversion, addition and subtraction of the same unit type. E.g.,
    1 m + 20 cm will be automatically converted to 1.2 m (units follow the
    leftmost quantity). Note that FloatWithUnit does not override the eq
    method for float, i.e., units are not checked when testing for equality.
    The reason is to allow this class to be used transparently wherever floats
    are expected.

    >>> e = Energy(1.1, "Ha")
    >>> a = Energy(1.1, "Ha")
    >>> b = Energy(3, "eV")
    >>> c = a + b
    >>> print(c)
    1.2102479761938871 Ha
    >>> c.to("eV")
    32.932522246000005 eV
    """

    Error = UnitError

    @classmethod
    def from_string(cls, s):
        """
        Initialize a FloatWithUnit from a string. Example Memory.from_string("1. Mb")
        """
        # Extract num and unit string.
        s = s.strip()
        # The unit starts at the first alphabetic or whitespace character.
        for i, char in enumerate(s):
            if char.isalpha() or char.isspace():
                break
        else:
            raise Exception("Unit is missing in string %s" % s)
        num, unit = float(s[:i]), s[i:]
        # Find unit type (set it to None if it cannot be detected)
        for unit_type, d in BASE_UNITS.items():
            if unit in d:
                break
        else:
            unit_type = None
        return cls(num, unit, unit_type=unit_type)

    def __new__(cls, val, unit, unit_type=None):
        """Overrides __new__ since we are subclassing a Python primitive."""
        new = float.__new__(cls, val)
        new._unit = Unit(unit)
        new._unit_type = unit_type
        return new

    def __init__(self, val, unit, unit_type=None):
        """
        Initializes a float with unit.

        Args:
            val (float): Value
            unit (Unit): A unit. E.g., "C".
            unit_type (str): A type of unit. E.g., "charge"

        Raises:
            UnitError: if ``unit`` is not a supported unit for ``unit_type``.
        """
        if unit_type is not None and str(unit) not in ALL_UNITS[unit_type]:
            raise UnitError(
                "{} is not a supported unit for {}".format(unit, unit_type))
        self._unit = Unit(unit)
        self._unit_type = unit_type

    def __repr__(self):
        return super().__repr__()

    def __str__(self):
        s = super().__str__()
        return "{} {}".format(s, self._unit)

    def __add__(self, other):
        # Plain numbers fall back to float addition (no unit checking).
        if not hasattr(other, "unit_type"):
            return super().__add__(other)
        if other.unit_type != self._unit_type:
            raise UnitError("Adding different types of units is not allowed")
        val = other
        # Units follow the leftmost quantity: convert `other` to our unit.
        if other.unit != self._unit:
            val = other.to(self._unit)
        return FloatWithUnit(float(self) + val, unit_type=self._unit_type,
                             unit=self._unit)

    def __sub__(self, other):
        # Plain numbers fall back to float subtraction (no unit checking).
        if not hasattr(other, "unit_type"):
            return super().__sub__(other)
        if other.unit_type != self._unit_type:
            raise UnitError("Subtracting different units is not allowed")
        val = other
        # Units follow the leftmost quantity: convert `other` to our unit.
        if other.unit != self._unit:
            val = other.to(self._unit)
        return FloatWithUnit(float(self) - val, unit_type=self._unit_type,
                             unit=self._unit)

    def __mul__(self, other):
        # Scalar multiply keeps our unit; unit * unit forms a compound unit
        # whose type cannot be determined, hence unit_type=None.
        if not isinstance(other, FloatWithUnit):
            return FloatWithUnit(float(self) * other,
                                 unit_type=self._unit_type,
                                 unit=self._unit)
        return FloatWithUnit(float(self) * other, unit_type=None,
                             unit=self._unit * other._unit)

    def __rmul__(self, other):
        if not isinstance(other, FloatWithUnit):
            return FloatWithUnit(float(self) * other,
                                 unit_type=self._unit_type,
                                 unit=self._unit)
        return FloatWithUnit(float(self) * other, unit_type=None,
                             unit=self._unit * other._unit)

    def __pow__(self, i):
        return FloatWithUnit(float(self) ** i, unit_type=None,
                             unit=self._unit ** i)

    def __truediv__(self, other):
        val = super().__truediv__(other)
        if not isinstance(other, FloatWithUnit):
            return FloatWithUnit(val, unit_type=self._unit_type,
                                 unit=self._unit)
        return FloatWithUnit(val, unit_type=None,
                             unit=self._unit / other._unit)

    def __neg__(self):
        return FloatWithUnit(super().__neg__(),
                             unit_type=self._unit_type,
                             unit=self._unit)

    def __getnewargs__(self):
        """Function used by pickle to recreate object."""
        # print(self.__dict__)
        # FIXME
        # There's a problem with _unit_type if we try to unpickle objects from file.
        # since self._unit_type might not be defined. I think this is due to
        # the use of decorators (property and unitized). In particular I have problems with "amu"
        # likely due to weight in core.composition
        if hasattr(self, "_unit_type"):
            args = float(self), self._unit, self._unit_type
        else:
            args = float(self), self._unit, None
        return args

    def __getstate__(self):
        state = self.__dict__.copy()
        state["val"] = float(self)
        # print("in getstate %s" % state)
        return state

    def __setstate__(self, state):
        # print("in setstate %s" % state)
        # NOTE(review): only _unit is restored here; _unit_type comes from
        # __getnewargs__ during unpickling — confirm before relying on it.
        self._unit = state["_unit"]

    @property
    def unit_type(self) -> str:
        """
        :return: The type of unit. Energy, Charge, etc.
        """
        return self._unit_type

    @property
    def unit(self) -> str:
        """
        :return: The unit, e.g., "eV".
        """
        return self._unit

    def to(self, new_unit):
        """
        Conversion to a new_unit. Right now, only supports 1 to 1 mapping of
        units of each type.

        Args:
            new_unit: New unit type.

        Returns:
            A FloatWithUnit object in the new units.

        Example usage:
        >>> e = Energy(1.1, "eV")
        >>> e = Energy(1.1, "Ha")
        >>> e.to("eV")
        29.932522246 eV
        """
        return FloatWithUnit(
            self * self.unit.get_conversion_factor(new_unit),
            unit_type=self._unit_type,
            unit=new_unit)

    @property
    def as_base_units(self):
        """
        Returns this FloatWithUnit in base SI units, including derived units.

        Returns:
            A FloatWithUnit object in base SI units
        """
        return self.to(self.unit.as_base_units[0])

    @property
    def supported_units(self):
        """
        Supported units for specific unit type.
        """
        return tuple(ALL_UNITS[self._unit_type].keys())
class ArrayWithUnit(np.ndarray):
"""
Subclasses `numpy.ndarray` to attach a unit type. Typically, you should
use the pre-defined unit type subclasses such as EnergyArray,
LengthArray, etc. instead of using ArrayWithFloatWithUnit directly.
Supports conversion, addition and subtraction of the same unit type. E.g.,
1 m + 20 cm will be automatically converted to 1.2 m (units follow the
leftmost quantity).
>>> a = EnergyArray([1, 2], "Ha")
>>> b = EnergyArray([1, 2], "eV")
>>> c = a + b
>>> print(c)
[ 1.03674933 2.07349865] Ha
>>> c.to("eV")
array([ 28.21138386, 56.42276772]) eV
"""
Error = UnitError
def __new__(cls, input_array, unit, unit_type=None):
"""
Override __new__.
"""
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array).view(cls)
# add the new attributes to the created instance
obj._unit = Unit(unit)
obj._unit_type = unit_type
return obj
def __array_finalize__(self, obj):
"""
See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html for
comments.
"""
if obj is None:
return
self._unit = getattr(obj, "_unit", None)
self._unit_type = getattr(obj, "_unit_type", None)
@property
def unit_type(self) -> str:
"""
:return: The type of unit. Energy, Charge, etc.
"""
return self._unit_type
@property
def unit(self) -> str:
"""
:return: The unit, e.g., "eV".
"""
return self._unit
def __reduce__(self):
# print("in reduce")
reduce = list(super().__reduce__())
# print("unit",self._unit)
# print(reduce[2])
reduce[2] = {"np_state": reduce[2], "_unit": self._unit}
return tuple(reduce)
def __setstate__(self, state):
# print("in setstate %s" % str(state))
super().__setstate__(state["np_state"])
self._unit = state["_unit"]
def __repr__(self):
return "{} {}".format(np.array(self).__repr__(), self.unit)
def __str__(self):
return "{} {}".format(np.array(self).__str__(), self.unit)
def __add__(self, other):
if hasattr(other, "unit_type"):
if other.unit_type != self.unit_type:
raise UnitError("Adding different types of units is"
" not allowed")
if other.unit != self.unit:
other = other.to(self.unit)
return self.__class__(np.array(self) + np.array(other),
unit_type=self.unit_type, unit=self.unit)
def __sub__(self, other):
if hasattr(other, "unit_type"):
if other.unit_type != self.unit_type:
raise UnitError("Subtracting different units is not allowed")
if other.unit != self.unit:
other = other.to(self.unit)
return self.__class__(np.array(self) - np.array(other),
unit_type=self.unit_type, unit=self.unit)
def __mul__(self, other):
# FIXME
# Here we have the most important difference between FloatWithUnit and
# ArrayWithFloatWithUnit:
# If other does not have units, I return an object with the same units
# as self.
# if other *has* units, I return an object *without* units since
# taking into account all the possible derived quantities would be
# too difficult.
# Moreover Energy(1.0) * Time(1.0, "s") returns 1.0 Ha that is a
# bit misleading.
# Same protocol for __div__
if not hasattr(other, "unit_type"):
return self.__class__(np.array(self).__mul__(np.array(other)),
unit_type=self._unit_type, unit=self._unit)
# Cannot use super since it returns an instance of self.__class__
# while here we want a bare numpy array.
return self.__class__(
np.array(self).__mul__(np.array(other)),
unit=self.unit * other.unit)
def __rmul__(self, other):
if not hasattr(other, "unit_type"):
return self.__class__(np.array(self).__rmul__(np.array(other)),
unit_type=self._unit_type, unit=self._unit)
return self.__class__(
np.array(self).__rmul__(np.array(other)),
unit=self.unit * other.unit)
def __div__(self, other):
    # Python 2 division operator, kept for backwards compatibility;
    # Python 3 uses __truediv__ below. Same unit protocol as __mul__:
    # a unit-less divisor keeps self's unit, a united divisor yields a
    # quotient unit with no unit_type.
    if not hasattr(other, "unit_type"):
        return self.__class__(np.array(self).__div__(np.array(other)),
                              unit_type=self._unit_type, unit=self._unit)
    return self.__class__(
        np.array(self).__div__(np.array(other)),
        unit=self.unit / other.unit)
def __truediv__(self, other):
    # True division; same unit protocol as __mul__: a unit-less divisor
    # keeps self's unit, a united divisor yields a quotient unit with no
    # unit_type attached.
    if not hasattr(other, "unit_type"):
        return self.__class__(np.array(self).__truediv__(np.array(other)),
                              unit_type=self._unit_type, unit=self._unit)
    return self.__class__(
        np.array(self).__truediv__(np.array(other)),
        unit=self.unit / other.unit)
def __neg__(self):
    """Element-wise negation, preserving the unit metadata."""
    negated = -np.array(self)
    return self.__class__(negated,
                          unit_type=self.unit_type, unit=self.unit)
def to(self, new_unit):
    """
    Conversion to a new_unit.
    Args:
        new_unit:
            New unit type.
    Returns:
        An ArrayWithUnit object in the new units.
    Example usage:
    >>> e = EnergyArray([1, 1.1], "Ha")
    >>> e.to("eV")
    array([ 27.21138386,  29.93252225]) eV
    """
    # Pure scaling: multiply values by the unit conversion factor and
    # re-wrap with the new unit; unit_type is unchanged by definition.
    return self.__class__(
        np.array(self) * self.unit.get_conversion_factor(new_unit),
        unit_type=self.unit_type, unit=new_unit)
@property
def as_base_units(self):
    """
    Returns this ArrayWithUnit in base SI units, including derived units.
    Returns:
        An ArrayWithUnit object in base SI units
    """
    # as_base_units (on the unit) returns a pair; index 0 is the unit.
    return self.to(self.unit.as_base_units[0])
# TODO abstract base class property?
@property
def supported_units(self):
    """
    Supported units for specific unit type.
    """
    # ALL_UNITS maps unit_type -> collection of unit names (module-level).
    return ALL_UNITS[self.unit_type]
# TODO abstract base class method?
def conversions(self):
    """
    Returns a string showing the available conversions.
    Useful tool in interactive mode.
    """
    converted = [str(self.to(unit)) for unit in self.supported_units]
    return "\n".join(converted)
def _my_partial(func, *args, **kwargs):
    """
    functools.partial returns a partial object, which does not inherit the
    class methods defined on FloatWithUnit. Build the partial, then patch
    ``from_string`` onto it before returning so the alias keeps that
    alternate constructor.
    """
    wrapped = partial(func, *args, **kwargs)
    # monkey patch: expose FloatWithUnit.from_string on the partial object
    wrapped.from_string = FloatWithUnit.from_string
    return wrapped
Energy = partial(FloatWithUnit, unit_type="energy")
"""
A float with an energy unit.
Args:
val (float): Value
unit (Unit): E.g., eV, kJ, etc. Must be valid unit or UnitError is raised.
"""
EnergyArray = partial(ArrayWithUnit, unit_type="energy")
Length = partial(FloatWithUnit, unit_type="length")
"""
A float with a length unit.
Args:
val (float): Value
unit (Unit): E.g., m, ang, bohr, etc. Must be valid unit or UnitError is
raised.
"""
LengthArray = partial(ArrayWithUnit, unit_type="length")
Mass = partial(FloatWithUnit, unit_type="mass")
"""
A float with a mass unit.
Args:
val (float): Value
unit (Unit): E.g., amu, kg, etc. Must be valid unit or UnitError is
raised.
"""
MassArray = partial(ArrayWithUnit, unit_type="mass")
Temp = partial(FloatWithUnit, unit_type="temperature")
"""
A float with a temperature unit.
Args:
val (float): Value
unit (Unit): E.g., K. Only K (kelvin) is supported.
"""
TempArray = partial(ArrayWithUnit, unit_type="temperature")
Time = partial(FloatWithUnit, unit_type="time")
"""
A float with a time unit.
Args:
val (float): Value
unit (Unit): E.g., s, min, h. Must be valid unit or UnitError is
raised.
"""
TimeArray = partial(ArrayWithUnit, unit_type="time")
Charge = partial(FloatWithUnit, unit_type="charge")
"""
A float with a charge unit.
Args:
val (float): Value
unit (Unit): E.g., C, e (electron charge). Must be valid unit or UnitError
is raised.
"""
ChargeArray = partial(ArrayWithUnit, unit_type="charge")
Memory = _my_partial(FloatWithUnit, unit_type="memory")
"""
A float with a memory unit.
Args:
val (float): Value
unit (Unit): E.g., Kb, Mb, Gb, Tb. Must be valid unit or UnitError
is raised.
"""
def obj_with_unit(obj, unit):
    """
    Returns a `FloatWithUnit` instance if obj is scalar, a dictionary of
    objects with units if obj is a dict, else an instance of
    `ArrayWithUnit`.

    Args:
        obj: scalar number, mapping, or array-like value to attach units to.
        unit: Specific units (eV, Ha, m, ang, etc.).
    """
    unit_type = _UNAME2UTYPE[unit]
    if isinstance(obj, numbers.Number):
        return FloatWithUnit(obj, unit=unit, unit_type=unit_type)
    # FIX: collections.Mapping was removed in Python 3.10; use the abc
    # module (consistent with `unitized` in this file).
    if isinstance(obj, collections.abc.Mapping):
        # Recurse so nested values each get wrapped with the same unit.
        return {k: obj_with_unit(v, unit) for k, v in obj.items()}
    return ArrayWithUnit(obj, unit=unit, unit_type=unit_type)
def unitized(unit):
    """
    Useful decorator to assign units to the output of a function. You can also
    use it to standardize the output units of a function that already returns
    a FloatWithUnit or ArrayWithUnit. For sequences, all values in the sequences
    are assigned the same unit. It works with Python sequences only. The creation
    of numpy arrays loses all unit information. For mapping types, the values
    are assigned units.
    Args:
        unit: Specific unit (eV, Ha, m, ang, etc.).
    Example usage::
        @unitized(unit="kg")
        def get_mass():
            return 123.45
    """
    def wrap(f):
        def wrapped_f(*args, **kwargs):
            val = f(*args, **kwargs)
            unit_type = _UNAME2UTYPE[unit]
            if isinstance(val, (FloatWithUnit, ArrayWithUnit)):
                # Already united: just standardize to the requested unit.
                return val.to(unit)
            if isinstance(val, collections.abc.Sequence):
                # TODO: why don't we return a ArrayWithUnit?
                # This complicated way is to ensure the sequence type is
                # preserved (list or tuple).
                return val.__class__([FloatWithUnit(i, unit_type=unit_type,
                                                    unit=unit) for i in val])
            if isinstance(val, collections.abc.Mapping):
                # NOTE(review): mutates the mapping returned by f in place,
                # then falls through to the final `return val`.
                for k, v in val.items():
                    val[k] = FloatWithUnit(v, unit_type=unit_type, unit=unit)
            elif isinstance(val, numbers.Number):
                return FloatWithUnit(val, unit_type=unit_type, unit=unit)
            elif val is None:
                # None passes through untouched.
                pass
            else:
                raise TypeError("Don't know how to assign units to %s" % str(val))
            return val
        return wrapped_f
    return wrap
if __name__ == "__main__":
    # Run the doctests embedded in this module (e.g. the `to` example).
    import doctest
    doctest.testmod()
| |
"""
Flask-Cloudy
"""
import os
import datetime
import base64
import hmac
import hashlib
import warnings
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
from importlib import import_module
from flask import send_file, abort, url_for, request
import shortuuid
from libcloud.storage.types import Provider, ObjectDoesNotExistError
from libcloud.storage.providers import DRIVERS, get_driver
from libcloud.storage.base import Object as BaseObject, StorageDriver
from libcloud.storage.drivers import local
from six.moves.urllib.parse import urlparse, urlunparse, urljoin, urlencode
import slugify
# Name of the Flask endpoint registered by the local file server.
SERVER_ENDPOINT = "FLASK_CLOUDY_SERVER"
# File-extension groups keyed by coarse content type; used for upload
# whitelisting and for Object.type classification.
EXTENSIONS = {
    "TEXT": ["txt", "md"],
    "DOCUMENT": ["rtf", "odf", "ods", "gnumeric", "abw", "doc", "docx", "xls", "xlsx"],
    "IMAGE": ["jpg", "jpeg", "jpe", "png", "gif", "svg", "bmp", "webp"],
    "AUDIO": ["wav", "mp3", "aac", "ogg", "oga", "flac"],
    "DATA": ["csv", "ini", "json", "plist", "xml", "yaml", "yml"],
    "SCRIPT": ["js", "php", "pl", "py", "rb", "sh"],
    "ARCHIVE": ["gz", "bz2", "zip", "tar", "tgz", "txz", "7z"]
}
# NOTE(review): despite the name, ALL_EXTENSIONS omits the SCRIPT group —
# presumably a deliberate safety default; confirm before "fixing".
ALL_EXTENSIONS = EXTENSIONS["TEXT"] \
                 + EXTENSIONS["DOCUMENT"] \
                 + EXTENSIONS["IMAGE"] \
                 + EXTENSIONS["AUDIO"] \
                 + EXTENSIONS["DATA"] \
                 + EXTENSIONS["ARCHIVE"]
class InvalidExtensionError(Exception):
    """Raised when an uploaded file's extension is not in the allowed list."""
def get_file_name(filename):
    """
    Return only the final path component of *filename* (no directories).
    :param filename: path or bare file name
    :return: str
    """
    _, tail = os.path.split(filename)
    return tail
def get_file_extension(filename):
    """
    Return the lowercased extension of *filename* without the leading dot.
    :param filename:
    :return: str ("" when there is no extension)
    """
    root_and_ext = os.path.splitext(filename)
    return root_and_ext[1][1:].lower()
def get_file_extension_type(filename):
    """
    Return the extension-group name ("IMAGE", "AUDIO", ...) for *filename*,
    or "OTHER" when the extension is missing or unknown.
    :param filename:
    :return: str
    """
    ext = get_file_extension(filename)
    if not ext:
        return "OTHER"
    return next((name for name, group in EXTENSIONS.items() if ext in group),
                "OTHER")
def get_driver_class(provider):
    """
    Resolve and return the libcloud driver class.
    :param provider: str - either a Provider constant name (e.g. "S3") or a
        dotted path "package.module.ClassName" to a custom driver
    :return: driver class
    """
    if "." in provider:
        # Dotted path: import the module and look up the class by name.
        path, kls = provider.rsplit(".", 1)
        module = import_module(path)
        if not hasattr(module, kls):
            raise ImportError('{0} provider not found at {1}'.format(
                kls,
                path))
        driver = getattr(module, kls)
    else:
        # Plain name: resolve through libcloud's Provider constants.
        driver = getattr(Provider, provider.upper())
    return get_driver(driver)
def get_provider_name(driver):
    """
    Map a driver instance back to its libcloud provider name.
    :param driver: obj - driver instance
    :return: str, or None when the driver class is not in DRIVERS
    """
    kls = driver.__class__.__name__
    return next((name for name, prop in DRIVERS.items() if prop[1] == kls),
                None)
class Storage(object):
    """Facade over a libcloud storage driver and a single container."""
    # NOTE(review): `config` is a mutable class attribute shared by all
    # Storage instances; init_app mutates it in place.
    container = None
    driver = None
    config = {}
    # Default upload whitelist: every group except SCRIPT and ARCHIVE.
    allowed_extensions = EXTENSIONS["TEXT"] \
                         + EXTENSIONS["DOCUMENT"] \
                         + EXTENSIONS["IMAGE"] \
                         + EXTENSIONS["AUDIO"] \
                         + EXTENSIONS["DATA"]

    def __init__(self,
                 provider=None,
                 key=None,
                 secret=None,
                 container=None,
                 allowed_extensions=None,
                 app=None,
                 **kwargs):
        """
        Initiate the storage
        :param provider: str - provider name
        :param key: str - provider key
        :param secret: str - provider secret
        :param container: str - the name of the container (bucket or a dir name if local)
        :param allowed_extensions: list - extensions allowed for upload
        :param app: object - Flask instance
        :param kwargs: any other params will pass to the provider initialization
        :return:
        """
        if app:
            # init_app re-enters __init__ with config pulled from the app.
            self.init_app(app)
        if provider:
            if allowed_extensions:
                self.allowed_extensions = allowed_extensions
            kwparams = {
                "key": key,
                "secret": secret
            }
            if "local" in provider.lower():
                # Local driver quirk: the directory path is passed as "key"
                # and the container name must be empty.
                kwparams["key"] = container
                container = ""
            kwparams.update(kwargs)
            self.driver = get_driver_class(provider)(**kwparams)
            if not isinstance(self.driver, StorageDriver):
                raise AttributeError("Invalid Driver")
            self.container = self.driver.get_container(container)

    def __iter__(self):
        """
        ie: `for item in storage`
        Iterate over all the objects in the container
        :return: generator
        """
        for obj in self.container.iterate_objects():
            yield Object(obj=obj)

    def __len__(self):
        """
        ie: `len(storage)`
        Return the total objects in the container
        :return: int
        """
        return len(self.container.list_objects())

    def __contains__(self, object_name):
        """
        ie: `if name in storage` or `if name not in storage`
        Test if object exists
        :param object_name: the object name
        :return bool:
        """
        try:
            self.driver.get_object(self.container.name, object_name)
            return True
        except ObjectDoesNotExistError:
            return False

    def init_app(self, app):
        """
        To initiate with Flask
        :param app: Flask object
        :return:
        """
        provider = app.config.get("STORAGE_PROVIDER", None)
        key = app.config.get("STORAGE_KEY", None)
        secret = app.config.get("STORAGE_SECRET", None)
        container = app.config.get("STORAGE_CONTAINER", None)
        allowed_extensions = app.config.get("STORAGE_ALLOWED_EXTENSIONS", None)
        serve_files = app.config.get("STORAGE_SERVER", False)
        serve_files_url = app.config.get("STORAGE_SERVER_URL", "files")
        self.config["serve_files"] = serve_files
        self.config["serve_files_url"] = serve_files_url
        if not provider:
            raise ValueError("'STORAGE_PROVIDER' is missing")
        if provider.upper() == "LOCAL":
            # Fail early: the local driver needs an existing directory.
            if not os.path.isdir(container):
                raise IOError("Local Container (directory) '%s' is not a "
                              "directory or doesn't exist for LOCAL provider" % container)
        self.__init__(provider=provider,
                      key=key,
                      secret=secret,
                      container=container,
                      allowed_extensions=allowed_extensions)
        self._register_file_server(app)

    def get(self, object_name):
        """
        Return an object or None if it doesn't exist
        :param object_name:
        :return: Object
        """
        if object_name in self:
            return Object(obj=self.container.get_object(object_name))
        return None

    def create(self, object_name, size=0, hash=None, extra=None, meta_data=None):
        """
        create a new object
        :param object_name:
        :param size:
        :param hash:
        :param extra:
        :param meta_data:
        :return: Object
        """
        # NOTE(review): `hash` shadows the builtin, but the name is part of
        # the public signature and is kept for compatibility.
        obj = BaseObject(container=self.container,
                         driver=self.driver,
                         name=object_name,
                         size=size,
                         hash=hash,
                         extra=extra,
                         meta_data=meta_data)
        return Object(obj=obj)

    def upload(self,
               file,
               name=None,
               prefix=None,
               allowed_extensions=None,
               overwrite=False,
               public=False,
               **kwargs):
        """
        To upload file
        :param file: FileStorage object or string location
        :param name: The name of the object.
        :param prefix: A prefix for the object. Can be in the form of directory tree
        :param allowed_extensions: list of extensions to allow
        :param overwrite: bool - To overwrite if file exists
        :param public: bool - To set acl to private or public-read. Having acl in kwargs will override it
        :param kwargs: extra params: ie: acl, meta_data etc.
        :return: Object
        """
        if "acl" not in kwargs:
            kwargs["acl"] = "public-read" if public else "private"
        extra = kwargs
        # coming from an upload object
        if isinstance(file, FileStorage):
            extension = get_file_extension(file.filename)
            if not name:
                # Derive a slug from the original filename, minus extension.
                fname = get_file_name(file.filename).split("." + extension)[0]
                name = slugify.slugify(fname)
        else:
            extension = get_file_extension(file)
            if not name:
                name = get_file_name(file)
        # Re-append the extension if the chosen name lacks one.
        if len(get_file_extension(name).strip()) == 0:
            name += "." + extension
        name = name.strip("/").strip()
        if isinstance(self.driver, local.LocalStorageDriver):
            # Local filesystem: sanitize to prevent path traversal.
            name = secure_filename(name)
        if prefix:
            name = prefix.strip("/") + "/" + name
        if not overwrite:
            # Append a short UUID until the name is unique in the container.
            name = self._safe_object_name(name)
        if not allowed_extensions:
            allowed_extensions = self.allowed_extensions
        if extension.lower() not in allowed_extensions:
            raise InvalidExtensionError("Invalid file extension: '.%s' " % extension)
        if isinstance(file, FileStorage):
            obj = self.container.upload_object_via_stream(iterator=file,
                                                          object_name=name,
                                                          extra=extra)
        else:
            obj = self.container.upload_object(file_path=file,
                                               object_name=name,
                                               extra=extra)
        return Object(obj=obj)

    def _safe_object_name(self, object_name):
        """ Add a UUID if to a object name if it exists. To prevent overwrites
        :param object_name:
        :return str:
        """
        extension = get_file_extension(object_name)
        file_name = os.path.splitext(object_name)[0]
        # Loop until the candidate name is free in the container.
        while object_name in self:
            uuid = shortuuid.uuid()
            object_name = "%s__%s.%s" % (file_name, uuid, extension)
        return object_name

    def _register_file_server(self, app):
        """
        File server
        Only local files can be served
        It's recommended to serve static files through NGINX instead of Python
        Use this for development only
        :param app: Flask app instance
        """
        if isinstance(self.driver, local.LocalStorageDriver) \
                and self.config["serve_files"]:
            server_url = self.config["serve_files_url"].strip("/").strip()
            if server_url:
                url = "/%s/<path:object_name>" % server_url

                @app.route(url, endpoint=SERVER_ENDPOINT)
                def files_server(object_name):
                    obj = self.get(object_name)
                    if obj:
                        dl = request.args.get("dl")
                        name = request.args.get("name", obj.name)
                        if get_file_extension(name) != obj.extension:
                            name += ".%s" % obj.extension
                        _url = obj.get_cdn_url()
                        # NOTE(review): `attachment_filename` was renamed to
                        # `download_name` in Flask 2.2 — confirm the Flask
                        # version this project targets before upgrading.
                        return send_file(_url,
                                         as_attachment=True if dl else False,
                                         attachment_filename=name,
                                         conditional=True)
                    else:
                        abort(404)
            else:
                # NOTE(review): the message names 'STORAGE_SERVER_FILES_URL'
                # but the config key read above is 'STORAGE_SERVER_URL'.
                warnings.warn("Flask-Cloudy can't serve files. 'STORAGE_SERVER_FILES_URL' is not set")
class Object(object):
    """
    Wrapper around a libcloud storage object.

    Unknown attributes are proxied to the wrapped object (via __getattr__),
    so the usual libcloud properties are available:
        name, size, hash, extra, meta_data, driver, container
    Methods proxied from libcloud:
        download()  (use save_to() instead)
        delete()
    """
    _obj = None

    def __init__(self, obj, **kwargs):
        # `obj` is the underlying libcloud object; extra kwargs are stored
        # untouched for subclasses/callers.
        self._obj = obj
        self._kwargs = kwargs

    def __getattr__(self, item):
        # Delegate anything not defined here to the wrapped object.
        return getattr(self._obj, item)

    def __len__(self):
        # `len(obj)` is the object's size in bytes (proxied `size`).
        return self.size

    def get_url(self, secure=False, longurl=False):
        """
        Return the url
        :param secure: bool - To use https
        :param longurl: bool - On local, reference the local path with the domain
        ie: http://site.com/files/object.png otherwise /files/object.png
        :return: str
        """
        driver_name = self.driver.name.lower()
        try:
            # Currently only Cloudfiles and Local supports it
            url = self._obj.get_cdn_url()
            if "local" in driver_name:
                url = url_for(SERVER_ENDPOINT,
                              object_name=self.name,
                              _external=longurl)
        except NotImplementedError as e:
            # Build a provider-specific public URL by hand.
            object_path = '%s/%s' % (self.container.name, self.name)
            if 's3' in driver_name:
                base_url = 'http://%s' % self.driver.connection.host
                url = urljoin(base_url, object_path)
            elif 'google' in driver_name:
                url = urljoin('http://storage.googleapis.com', object_path)
            elif 'azure' in driver_name:
                base_url = ('http://%s.blob.core.windows.net' % self.driver.key)
                url = urljoin(base_url, object_path)
            else:
                raise e
        if secure:
            if 'cloudfiles' in driver_name:
                parsed_url = urlparse(url)
                if parsed_url.scheme != 'http':
                    return url
                split_netloc = parsed_url.netloc.split('.')
                split_netloc[1] = 'ssl'
                # FIX: urlunparse takes a single 6-component sequence; the
                # old call passed six positional args and raised TypeError.
                url = urlunparse((
                    'https',
                    '.'.join(split_netloc),
                    parsed_url.path,
                    parsed_url.params, parsed_url.query,
                    parsed_url.fragment
                ))
            if ('s3' in driver_name or
                    'google' in driver_name or
                    'azure' in driver_name):
                url = url.replace('http://', 'https://')
        return url

    @property
    def url(self):
        """
        Returns the url of the object.
        For Local it will return it without the domain name
        :return: str
        """
        return self.get_url()

    @property
    def full_url(self):
        """
        Returns the full url with the domain, specially for Local storage
        :return: str
        """
        return self.get_url(longurl=True)

    @property
    def secure_url(self):
        """
        Return the full url with https
        :return:
        """
        return self.get_url(secure=True, longurl=True)

    @property
    def extension(self):
        """
        Return the extension of the object
        :return:
        """
        return get_file_extension(self.name)

    @property
    def type(self):
        """
        Return the object type (IMAGE, AUDIO,...) or OTHER
        :return:
        """
        return get_file_extension_type(self.name)

    @property
    def provider_name(self):
        """
        Return the provider name
        :return: str
        """
        return get_provider_name(self.driver)

    @property
    def path(self):
        """
        Return the object path
        :return: str
        """
        return "%s/%s" % (self.container.name, self.name)

    def save_to(self, destination, name=None, overwrite=False, delete_on_failure=True):
        """
        To save the object in a local path
        :param destination: str - The directory to save the object to
        :param name: str - To rename the file name. Do not add extension
        :param overwrite:
        :param delete_on_failure:
        :return: The new location of the file or None
        """
        if not os.path.isdir(destination):
            # FIX: the format placeholder was never filled in.
            raise IOError("'%s' is not a valid directory" % destination)
        obj_path = "%s/%s" % (destination, self._obj.name)
        if name:
            obj_path = "%s/%s.%s" % (destination, name, self.extension)
        file = self._obj.download(obj_path,
                                  overwrite_existing=overwrite,
                                  delete_on_failure=delete_on_failure)
        return obj_path if file else None

    def download_url(self, timeout=60, name=None):
        """
        Return a URL that triggers a browser download
        :param timeout: int - Time in seconds to expire the download
        :param name: str - for LOCAL only, to rename the file being downloaded
        :return: str
        """
        if "local" in self.driver.name.lower():
            return url_for(SERVER_ENDPOINT,
                           object_name=self.name,
                           dl=1,
                           name=name,
                           _external=True)
        else:
            driver_name = self.driver.name.lower()
            # FIX: strftime("%s") is a non-portable glibc extension; compute
            # the epoch expiry explicitly (same value on Linux, works
            # everywhere).
            expires = str(int((datetime.datetime.now()
                               + datetime.timedelta(seconds=timeout)).timestamp()))
            if 's3' in driver_name or 'google' in driver_name:
                # String-to-sign per the S3/GCS query-string auth scheme.
                s2s = "GET\n\n\n{expires}\n/{object_name}"\
                    .format(expires=expires, object_name=self.path)
                # FIX: hmac requires bytes on Python 3, and
                # base64.encodestring was removed in Python 3.9 — use
                # encodebytes and decode the result for urlencode.
                secret = self.driver.secret
                if isinstance(secret, str):
                    secret = secret.encode("utf-8")
                h = hmac.new(secret, s2s.encode("utf-8"), hashlib.sha1)
                s = base64.encodebytes(h.digest()).strip().decode("ascii")
                _keyIdName = "AWSAccessKeyId" if "s3" in driver_name else "GoogleAccessId"
                params = {
                    _keyIdName: self.driver.key,
                    "Expires": expires,
                    "Signature": s
                }
                urlkv = urlencode(params)
                return "%s?%s" % (self.secure_url, urlkv)
            elif 'cloudfiles' in driver_name:
                return self.driver.ex_get_object_temp_url(self._obj,
                                                          method="GET",
                                                          timeout=expires)
            else:
                # FIX: `raise NotImplemented(...)` raised a TypeError —
                # NotImplemented is a constant, not an exception class.
                raise NotImplementedError("This provider '%s' doesn't support or "
                                          "doesn't have a signed url "
                                          "implemented yet" % self.provider_name)

    @property
    def short_url(self):
        """
        DEPRECATED
        Returns the url of the object
        For local it will return it WITHOUT the domain name
        :return:
        """
        warnings.warn("DEPRECATED: flask_cloudy.Object.short_url has been deprecated, use flask_cloudy.Object.url or flask_cloudy.Object.full_url")
        return self.get_url()
| |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
import os
import pytest
import numpy as np
from pyspark.sql.types import ArrayType, DoubleType
from bigdl.orca import init_orca_context, stop_orca_context
from bigdl.orca.data import SparkXShards
from bigdl.orca.data.image.utils import chunks
from bigdl.orca.learn.utils import convert_predict_rdd_to_dataframe, _dataframe_to_xshards, \
convert_predict_xshards_to_dataframe, convert_predict_rdd_to_xshard, update_predict_xshards
# Directory holding shared test resource files, relative to this test file.
resource_path = os.path.join(os.path.split(__file__)[0], "../resources")
class TestUtil(TestCase):
    """Tests for bigdl.orca.learn.utils prediction/xshards conversion helpers."""

    def setUp(self):
        """ setup any state tied to the execution of the given method in a
        class. setup_method is invoked for every test method of a class.
        """
        self.sc = init_orca_context(cores=4)

        # SQL UDFs used by the assertions below to compare a VectorUDT
        # prediction column against a plain array column.
        def to_array_(v):
            return v.toArray().tolist()

        def flatten_(v):
            # Concatenate a list of vectors into one flat list of floats.
            result = []
            for elem in v:
                result.extend(elem.toArray().tolist())
            return result

        from pyspark.sql import SparkSession
        spark = SparkSession(self.sc)
        spark.udf.register("to_array", to_array_, ArrayType(DoubleType()))
        spark.udf.register("flatten", flatten_, ArrayType(DoubleType()))

    def tearDown(self):
        """ teardown any state that was previously setup with a setup_method
        call.
        """
        stop_orca_context()

    def test_convert_predict_rdd_to_dataframe(self):
        # Predictions equal to the feature column => zero mismatches.
        rdd = self.sc.range(0, 100)
        df = rdd.map(lambda x: ([float(x)] * 50,
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        pred_rdd = rdd.map(lambda x: np.array([float(x)] * 50))
        result_df = convert_predict_rdd_to_dataframe(df, pred_rdd)
        expr = "sum(cast(feature <> to_array(prediction) as int)) as error"
        assert result_df.selectExpr(expr).first()["error"] == 0

    def test_convert_predict_rdd_to_dataframe_multi_output(self):
        # Multi-output predictions: flatten both halves and compare.
        rdd = self.sc.range(0, 100)
        df = rdd.map(lambda x: ([float(x)] * 50,
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        pred_rdd = rdd.map(lambda x: [np.array([float(x)] * 25), np.array([float(x)] * 25)])
        result_df = convert_predict_rdd_to_dataframe(df, pred_rdd)
        expr = "sum(cast(feature <> flatten(prediction) as int)) as error"
        assert result_df.selectExpr(expr).first()["error"] == 0

    def test_convert_predict_rdd_to_xshard(self):
        rdd = self.sc.range(0, 110).map(lambda x: np.array([x]*50))
        # Group rows into shards of 5 so shard boundaries are exercised.
        shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(lambda x: {"x": np.stack(x)})
        shards = SparkXShards(shards)
        pred_rdd = self.sc.range(0, 110).map(lambda x: np.array([x]*50))
        result_shards = convert_predict_rdd_to_xshard(shards, pred_rdd)
        result = np.concatenate([shard["prediction"] for shard in result_shards.collect()])
        expected_result = np.concatenate([shard["x"] for shard in result_shards.collect()])
        assert np.array_equal(result, expected_result)

    def test_convert_predict_rdd_to_xshard_multi_output(self):
        rdd = self.sc.range(0, 110).map(lambda x: np.array([x]*50))
        shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(lambda x: {"x": np.stack(x)})
        shards = SparkXShards(shards)
        # Two outputs of width 24 + 26 that concatenate back to width 50.
        pred_rdd = self.sc.range(0, 110).map(lambda x: [np.array([x]*24), np.array([x]*26)])
        result_shards = convert_predict_rdd_to_xshard(shards, pred_rdd)
        result = np.concatenate([np.concatenate(shard["prediction"], axis=1)
                                 for shard in result_shards.collect()])
        expected_result = np.concatenate([shard["x"] for shard in result_shards.collect()])
        assert np.array_equal(result, expected_result)

    def test_update_predict_xshard(self):
        def get_xshards(key):
            # Build shards of 5 rows keyed by `key`.
            rdd = self.sc.range(0, 110).map(lambda x: np.array([x] * 50))
            shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(
                lambda x: {key: np.stack(x)})
            shards = SparkXShards(shards)
            return shards

        data_shards = get_xshards("x")
        pred_shards = get_xshards("prediction")
        result_shards = update_predict_xshards(data_shards, pred_shards)
        result = np.concatenate([shard["prediction"] for shard in result_shards.collect()])
        expected_result = np.concatenate([shard["x"] for shard in result_shards.collect()])
        assert np.array_equal(result, expected_result)

    def test_update_predict_xshard_multi_output(self):
        def get_data_xshards(key):
            rdd = self.sc.range(0, 110).map(lambda x: np.array([x] * 50))
            shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(
                lambda x: {key: np.stack(x)})
            shards = SparkXShards(shards)
            return shards

        def get_pred_xshards(key):
            # Same data, but split column-wise into two arrays (24 + 26).
            rdd = self.sc.range(0, 110).map(lambda x: np.array([x] * 50))
            shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(
                lambda x: {key: np.stack(x)}).map(lambda x: {key: [x[key][:, :24], x[key][:, 24:]]})
            shards = SparkXShards(shards)
            return shards

        data_shards = get_data_xshards("x")
        pred_shards = get_pred_xshards("prediction")
        result_shards = update_predict_xshards(data_shards, pred_shards)
        result = np.concatenate([np.concatenate(shard["prediction"], axis=1)
                                 for shard in result_shards.collect()])
        expected_result = np.concatenate([shard["x"] for shard in result_shards.collect()])
        assert np.array_equal(result, expected_result)

    def test_convert_predict_xshards_to_dataframe(self):
        rdd = self.sc.range(0, 100)
        df = rdd.map(lambda x: ([float(x)] * 50,
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        # Use the feature column itself as the "prediction".
        pred_shards = _dataframe_to_xshards(df, feature_cols=["feature"]).transform_shard(
            lambda x: {"prediction": x["x"]})
        result_df = convert_predict_xshards_to_dataframe(df, pred_shards)
        expr = "sum(cast(feature <> to_array(prediction) as int)) as error"
        assert result_df.selectExpr(expr).first()["error"] == 0

    def test_convert_predict_xshards_to_dataframe_multi_output(self):
        rdd = self.sc.range(0, 100)
        df = rdd.map(lambda x: ([float(x)] * 50,
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        pred_shards = _dataframe_to_xshards(df, feature_cols=["feature"]).transform_shard(
            lambda x: {"prediction": [x["x"][:, :25], x["x"][:, 25:]]})
        result_df = convert_predict_xshards_to_dataframe(df, pred_shards)
        expr = "sum(cast(feature <> flatten(prediction) as int)) as error"
        assert result_df.selectExpr(expr).first()["error"] == 0

    def test_array2dict(self):
        from bigdl.orca.learn.utils import arrays2dict
        record_num = 100
        shard_size = 30
        data = [(np.float32(np.random.randn(1, 50)), np.float32([np.random.randint(0, 2,)]))
                for i in range(record_num)]
        result = arrays2dict(data, feature_cols=["feature"], label_cols=["label"],
                             shard_size=shard_size)
        for i, d in enumerate(result):
            # Every shard is full-sized except possibly the last remainder.
            if (record_num % shard_size == 0) or (i != record_num // shard_size):
                assert d['x'].shape[0] == shard_size
                assert d['y'].shape[0] == shard_size
            else:
                assert d['x'].shape[0] == record_num % shard_size
                assert d['y'].shape[0] == record_num % shard_size

    def test_array2dict_shard_size_none(self):
        from bigdl.orca.learn.utils import arrays2dict
        record_num = 100
        data = [(np.float32(np.random.randn(1, 50)), np.float32([np.random.randint(0, 2,)]))
                for i in range(record_num)]
        # shard_size=None => one shard containing everything.
        result = arrays2dict(data, feature_cols=["feature"], label_cols=["label"], shard_size=None)
        for i, d in enumerate(result):
            assert d['x'].shape[0] == record_num
            assert d['y'].shape[0] == record_num

    def test_dataframe_to_xshards(self):
        rdd = self.sc.range(0, 100)
        df = rdd.map(lambda x: ([float(x)] * 50,
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        num_partitions = df.rdd.getNumPartitions()
        # test shard_size = None
        shards = _dataframe_to_xshards(df, feature_cols=["feature"], label_cols=["label"])
        num_shards = shards.rdd.count()
        assert num_shards == num_partitions
        from bigdl.orca import OrcaContext
        # shard_size=1 => one shard per row.
        OrcaContext._shard_size = 1
        shards = _dataframe_to_xshards(df, feature_cols=["feature"], label_cols=["label"])
        num_shards = shards.rdd.count()
        assert num_shards == df.rdd.count()
        # Reset the global so other tests are unaffected.
        OrcaContext._shard_size = None
if __name__ == "__main__":
    # Allow running this test module directly (delegates to pytest).
    pytest.main([__file__])
| |
"""
Operation for decentralized buildings
"""
import time
from math import ceil
import numpy as np
import pandas as pd
from itertools import repeat
import cea.config
import cea.inputlocator
from cea.optimization.master.emissions_model import calc_emissions_Whyr_to_tonCO2yr
import cea.technologies.boiler as boiler
import cea.technologies.burner as burner
import cea.technologies.chiller_absorption as chiller_absorption
import cea.technologies.chiller_vapor_compression as chiller_vapor_compression
import cea.technologies.cooling_tower as cooling_tower
import cea.technologies.direct_expansion_units as dx
import cea.technologies.solar.solar_collector as solar_collector
import cea.technologies.substation as substation
from cea.constants import HEAT_CAPACITY_OF_WATER_JPERKGK
from cea.optimization.constants import (T_GENERATOR_FROM_FP_C, T_GENERATOR_FROM_ET_C,
Q_LOSS_DISCONNECTED, ACH_TYPE_SINGLE, VCC_CODE_DECENTRALIZED)
from cea.optimization.lca_calculations import LcaCalculations
from cea.optimization.preprocessing.decentralized_buildings_heating import get_unique_keys_from_dicts
from cea.technologies.thermal_network.thermal_network import calculate_ground_temperature
from cea.technologies.supply_systems_database import SupplySystemsDatabase
import cea.utilities.parallel
def disconnected_buildings_cooling_main(locator, building_names, total_demand, config, prices, lca):
    """
    Computes the parameters for the operation of disconnected buildings output results in csv files.
    There is no optimization at this point. The different cooling energy supply system configurations are calculated
    and compared 1 to 1 to each other. it is a classical combinatorial problem.
    The six supply system configurations include:
    (VCC: Vapor Compression Chiller, ACH: Absorption Chiller, CT: Cooling Tower, Boiler)
    (AHU: Air Handling Units, ARU: Air Recirculation Units, SCU: Sensible Cooling Units)
    - config 0: Direct Expansion / Mini-split units (NOTE: this configuration is not fully built yet)
    - config 1: VCC_to_AAS (AHU + ARU + SCU) + CT
    - config 2: FP + single-effect ACH_to_AAS (AHU + ARU + SCU) + Boiler + CT
    - config 3: ET + single-effect ACH_to_AAS (AHU + ARU + SCU) + Boiler + CT
    - config 4: VCC_to_AA (AHU + ARU) + VCC_to_S (SCU) + CT
    - config 5: VCC_to_AA (AHU + ARU) + single effect ACH_S (SCU) + CT + Boiler
    Note:
    1. Only cooling supply configurations are compared here. The demand for electricity is supplied from the grid,
    and the demand for domestic hot water is supplied from electric boilers.
    2. Single-effect chillers are coupled with flat-plate solar collectors, and the double-effect chillers are coupled
    with evacuated tube solar collectors.
    :param locator: locator class with paths to input/output files
    :param building_names: list with names of buildings
    :param total_demand: per-building demand data passed through to each worker
        (presumably the total-demand dataframe -- confirm with caller)
    :param config: cea.config
    :param prices: prices class
    :param lca: emissions/life-cycle-analysis data object passed to each worker
    :return: one .csv file with results of operations of disconnected buildings; one .csv file with operation of the
    best configuration (Cost, CO2, Primary Energy)
    """
    t0 = time.perf_counter()
    supply_systems = SupplySystemsDatabase(locator)
    n = len(building_names)
    # Evaluate every building independently; vectorize() fans the calls out
    # over the configured number of processes.
    cea.utilities.parallel.vectorize(disconnected_cooling_for_building, config.get_number_of_processes())(
        building_names,
        repeat(supply_systems, n),
        repeat(lca, n),
        repeat(locator, n),
        repeat(prices, n),
        repeat(total_demand, n))
    print(time.perf_counter() - t0, "seconds process time for the decentralized Building Routine \n")
def disconnected_cooling_for_building(building_name, supply_systems, lca, locator, prices, total_demand):
    """Simulate and cost every decentralized cooling supply configuration for one building.

    Six supply-system configurations (see module docstring) are simulated hour by
    hour. For each one, variable operating cost, GHG emissions and a system COP
    are accumulated in ``operation_results``; investment costs are then added,
    the configurations are ranked, and two csv files are written: the full
    comparison table and the hourly dispatch of the best configuration.

    ``operation_results`` columns: 0 nominal cooling load [W];
    1-6 installed capacities (DX_AS, BaseVCC_AS, VCCHT_AS, ACH_SC_FP, ACH_SC_ET,
    ACHHT_SC_FP) [W]; 7 variable Opex [USD]; 8 GHG [ton CO2]; 9 system COP [-].

    :param building_name: name of the building to simulate
    :param supply_systems: SupplySystemsDatabase with conversion-technology data
    :param lca: LcaCalculations with emission factors (EL_TO_CO2_EQ, NG_TO_CO2_EQ)
    :param locator: InputLocator with paths to input/output files
    :param prices: Prices object (ELEC_PRICE, NG_PRICE)
    :param total_demand: dataframe with the total demand of all buildings
    """
    chiller_prop = supply_systems.Absorption_chiller
    boiler_cost_data = supply_systems.Boiler

    # technologies are sized at building scale (as opposed to district scale)
    scale = 'BUILDING'
    VCC_chiller = chiller_vapor_compression.VaporCompressionChiller(locator, scale)

    ## Calculate cooling loads for different combinations
    # SENSIBLE COOLING UNIT
    Qc_nom_SCU_W, \
    T_re_SCU_K, \
    T_sup_SCU_K, \
    mdot_SCU_kgpers = calc_combined_cooling_loads(building_name, locator, total_demand,
                                                  cooling_configuration=['scu'])
    # AIR HANDLING UNIT + AIR RECIRCULATION UNIT
    Qc_nom_AHU_ARU_W, \
    T_re_AHU_ARU_K, \
    T_sup_AHU_ARU_K, \
    mdot_AHU_ARU_kgpers = calc_combined_cooling_loads(building_name, locator, total_demand,
                                                      cooling_configuration=['ahu', 'aru'])
    # SENSIBLE COOLING UNIT + AIR HANDLING UNIT + AIR RECIRCULATION UNIT
    Qc_nom_AHU_ARU_SCU_W, \
    T_re_AHU_ARU_SCU_K, \
    T_sup_AHU_ARU_SCU_K, \
    mdot_AHU_ARU_SCU_kgpers = calc_combined_cooling_loads(building_name, locator, total_demand,
                                                          cooling_configuration=['ahu', 'aru', 'scu'])

    ## Get hourly hot water supply condition of Solar Collectors (SC)
    # Flate Plate Solar Collectors
    SC_FP_data, T_hw_in_FP_C, el_aux_SC_FP_Wh, q_sc_gen_FP_Wh = get_SC_data(building_name, locator, panel_type="FP")
    Capex_a_SC_FP_USD, Opex_SC_FP_USD, Capex_SC_FP_USD = solar_collector.calc_Cinv_SC(SC_FP_data['Area_SC_m2'][0],
                                                                                      locator,
                                                                                      panel_type="FP")
    # Evacuated Tube Solar Collectors
    SC_ET_data, T_hw_in_ET_C, el_aux_SC_ET_Wh, q_sc_gen_ET_Wh = get_SC_data(building_name, locator, panel_type="ET")
    Capex_a_SC_ET_USD, Opex_SC_ET_USD, Capex_SC_ET_USD = solar_collector.calc_Cinv_SC(SC_ET_data['Area_SC_m2'][0],
                                                                                      locator,
                                                                                      panel_type="ET")

    ## Calculate ground temperatures to estimate cold water supply temperatures for absorption chiller
    T_ground_K = calculate_ground_temperature(locator)  # FIXME: change to outlet temperature from the cooling towers

    ## Initialize table to save results
    # save costs of all supply configurations
    operation_results = initialize_result_tables_for_supply_configurations(Qc_nom_SCU_W)
    # save supply system activation of all supply configurations
    cooling_dispatch = {}

    ## HOURLY OPERATION
    print('{building_name} decentralized cooling supply system simulations...'.format(building_name=building_name))
    # hours with no load report a return temperature of 0 K; substitute the supply temperature to avoid spurious loads
    T_re_AHU_ARU_SCU_K = np.where(T_re_AHU_ARU_SCU_K > 0.0, T_re_AHU_ARU_SCU_K, T_sup_AHU_ARU_SCU_K)

    ## 0. DX operation
    print('{building_name} Config 0: Direct Expansion Units -> AHU,ARU,SCU'.format(building_name=building_name))
    el_DX_hourly_Wh, \
    q_DX_chw_Wh = np.vectorize(dx.calc_DX)(mdot_AHU_ARU_SCU_kgpers, T_sup_AHU_ARU_SCU_K, T_re_AHU_ARU_SCU_K)

    # add electricity costs, CO2, PE
    operation_results[0][7] += sum(prices.ELEC_PRICE * el_DX_hourly_Wh)
    operation_results[0][8] += sum(calc_emissions_Whyr_to_tonCO2yr(el_DX_hourly_Wh, lca.EL_TO_CO2_EQ))  # ton CO2
    # activation
    cooling_dispatch[0] = {'Q_DX_AS_gen_directload_W': q_DX_chw_Wh,
                           'E_DX_AS_req_W': el_DX_hourly_Wh,
                           'E_cs_cre_cdata_req_W': el_DX_hourly_Wh,
                           }
    # capacity of cooling technologies
    operation_results[0][0] = Qc_nom_AHU_ARU_SCU_W
    operation_results[0][1] = Qc_nom_AHU_ARU_SCU_W  # 1: DX_AS
    # NOTE(review): config 0 uses the median hourly COP while configs 1-3 use a
    # load-weighted average -- confirm this difference is intentional
    system_COP = np.nanmedian(np.divide(q_DX_chw_Wh[None, :], el_DX_hourly_Wh[None, :]).flatten())
    operation_results[0][9] += system_COP

    ## 1. VCC (AHU + ARU + SCU) + CT
    print('{building_name} Config 1: Vapor Compression Chillers -> AHU,ARU,SCU'.format(building_name=building_name))
    # VCC operation
    el_VCC_Wh, q_VCC_cw_Wh, q_VCC_chw_Wh = calc_VCC_operation(T_re_AHU_ARU_SCU_K, T_sup_AHU_ARU_SCU_K,
                                                              mdot_AHU_ARU_SCU_kgpers, VCC_chiller)

    # CT operation: the cooling tower rejects all condenser heat from the VCC
    q_CT_VCC_to_AHU_ARU_SCU_Wh = q_VCC_cw_Wh
    Q_nom_CT_VCC_to_AHU_ARU_SCU_W, el_CT_Wh = calc_CT_operation(q_CT_VCC_to_AHU_ARU_SCU_Wh)

    # add costs
    el_total_Wh = el_VCC_Wh + el_CT_Wh
    operation_results[1][7] += sum(prices.ELEC_PRICE * el_total_Wh)  # CHF
    operation_results[1][8] += sum(calc_emissions_Whyr_to_tonCO2yr(el_total_Wh, lca.EL_TO_CO2_EQ))  # ton CO2
    system_COP_list = np.divide(q_VCC_chw_Wh[None, :], el_total_Wh[None, :]).flatten()
    system_COP = np.nansum(q_VCC_chw_Wh[None, :] * system_COP_list) / np.nansum(
        q_VCC_chw_Wh[None, :])  # weighted average of the system efficiency
    operation_results[1][9] += system_COP
    cooling_dispatch[1] = {'Q_BaseVCC_AS_gen_directload_W': q_VCC_chw_Wh,
                           'E_BaseVCC_AS_req_W': el_VCC_Wh,
                           'E_CT_req_W': el_CT_Wh,
                           'E_cs_cre_cdata_req_W': el_total_Wh,
                           }
    # capacity of cooling technologies
    operation_results[1][0] = Qc_nom_AHU_ARU_SCU_W
    operation_results[1][2] = Qc_nom_AHU_ARU_SCU_W  # 2: BaseVCC_AS

    ## 2: SC_FP + single-effect ACH (AHU + ARU + SCU) + CT + Boiler + SC_FP
    print(
        '{building_name} Config 2: Flat-plate Solar Collectors + Single-effect Absorption chillers -> AHU,ARU,SCU'.format(
            building_name=building_name))
    # ACH operation
    T_hw_out_single_ACH_K, \
    el_single_ACH_Wh, \
    q_cw_single_ACH_Wh, \
    q_hw_single_ACH_Wh, \
    q_chw_single_ACH_Wh = calc_ACH_operation(T_ground_K, T_hw_in_FP_C, T_re_AHU_ARU_SCU_K, T_sup_AHU_ARU_SCU_K,
                                             chiller_prop, mdot_AHU_ARU_SCU_kgpers, ACH_TYPE_SINGLE)

    # CT operation
    q_CT_FP_to_single_ACH_to_AHU_ARU_SCU_Wh = q_cw_single_ACH_Wh
    Q_nom_CT_FP_to_single_ACH_to_AHU_ARU_SCU_W, el_CT_Wh = calc_CT_operation(
        q_CT_FP_to_single_ACH_to_AHU_ARU_SCU_Wh)

    # boiler operation: backs up the solar collectors when they cannot cover the generator's hot water demand
    q_gas_Boiler_FP_to_single_ACH_to_AHU_ARU_SCU_Wh, \
    Q_nom_Boiler_FP_to_single_ACH_to_AHU_ARU_SCU_W, \
    q_load_Boiler_FP_to_single_ACH_to_AHU_ARU_SCU_Wh = calc_boiler_operation(Qc_nom_AHU_ARU_SCU_W,
                                                                             T_hw_out_single_ACH_K,
                                                                             q_hw_single_ACH_Wh,
                                                                             q_sc_gen_FP_Wh)

    # add electricity costs
    el_total_Wh = el_single_ACH_Wh + el_aux_SC_FP_Wh + el_CT_Wh
    operation_results[2][7] += sum(prices.ELEC_PRICE * el_total_Wh)  # CHF
    operation_results[2][8] += sum(calc_emissions_Whyr_to_tonCO2yr(el_total_Wh, lca.EL_TO_CO2_EQ))  # ton CO2
    # add gas costs
    q_gas_total_Wh = q_gas_Boiler_FP_to_single_ACH_to_AHU_ARU_SCU_Wh
    operation_results[2][7] += sum(prices.NG_PRICE * q_gas_total_Wh)  # CHF
    operation_results[2][8] += sum(calc_emissions_Whyr_to_tonCO2yr(q_gas_total_Wh, lca.NG_TO_CO2_EQ))  # ton CO2
    # add activation
    cooling_dispatch[2] = {'Q_ACH_gen_directload_W': q_chw_single_ACH_Wh,
                           'Q_Boiler_NG_ACH_W': q_load_Boiler_FP_to_single_ACH_to_AHU_ARU_SCU_Wh,
                           'Q_SC_FP_ACH_W': q_sc_gen_FP_Wh,
                           'E_ACH_req_W': el_single_ACH_Wh,
                           'E_CT_req_W': el_CT_Wh,
                           'E_SC_FP_req_W': el_aux_SC_FP_Wh,
                           'E_cs_cre_cdata_req_W': el_total_Wh,
                           'NG_Boiler_req': q_gas_Boiler_FP_to_single_ACH_to_AHU_ARU_SCU_Wh,
                           }
    # capacity of cooling technologies
    operation_results[2][0] = Qc_nom_AHU_ARU_SCU_W
    operation_results[2][4] = Qc_nom_AHU_ARU_SCU_W  # 4: ACH_SC_FP
    q_total_load = q_chw_single_ACH_Wh[None, :] + q_sc_gen_FP_Wh[None,
                                                  :] + q_load_Boiler_FP_to_single_ACH_to_AHU_ARU_SCU_Wh[None, :]
    system_COP_list = np.divide(q_total_load, (
            el_total_Wh[None, :] + q_gas_Boiler_FP_to_single_ACH_to_AHU_ARU_SCU_Wh[None, :])).flatten()
    system_COP = np.nansum(q_total_load * system_COP_list) / np.nansum(
        q_total_load)  # weighted average of the system efficiency
    operation_results[2][9] += system_COP

    # 3: SC_ET + single-effect ACH (AHU + ARU + SCU) + CT + Boiler + SC_ET
    print(
        '{building_name} Config 3: Evacuated Tube Solar Collectors + Single-effect Absorption chillers -> AHU,ARU,SCU'.format(
            building_name=building_name))
    # ACH operation
    T_hw_out_single_ACH_K, \
    el_single_ACH_Wh, \
    q_cw_single_ACH_Wh, \
    q_hw_single_ACH_Wh, \
    q_chw_single_ACH_Wh = calc_ACH_operation(T_ground_K, T_hw_in_ET_C, T_re_AHU_ARU_SCU_K, T_sup_AHU_ARU_SCU_K,
                                             chiller_prop, mdot_AHU_ARU_SCU_kgpers, ACH_TYPE_SINGLE)

    # CT operation
    q_CT_ET_to_single_ACH_to_AHU_ARU_SCU_W = q_cw_single_ACH_Wh
    Q_nom_CT_ET_to_single_ACH_to_AHU_ARU_SCU_W, el_CT_Wh = calc_CT_operation(q_CT_ET_to_single_ACH_to_AHU_ARU_SCU_W)

    # burner operation
    q_gas_for_burner_Wh, \
    Q_nom_Burner_ET_to_single_ACH_to_AHU_ARU_SCU_W, \
    q_burner_load_Wh = calc_burner_operation(Qc_nom_AHU_ARU_SCU_W, q_hw_single_ACH_Wh, q_sc_gen_ET_Wh)

    # add electricity costs
    el_total_Wh = el_single_ACH_Wh + el_aux_SC_ET_Wh + el_CT_Wh
    operation_results[3][7] += sum(prices.ELEC_PRICE * el_total_Wh)  # CHF
    operation_results[3][8] += sum(calc_emissions_Whyr_to_tonCO2yr(el_total_Wh, lca.EL_TO_CO2_EQ))  # ton CO2
    # add gas costs
    operation_results[3][7] += sum(prices.NG_PRICE * q_gas_for_burner_Wh)  # CHF
    operation_results[3][8] += sum(calc_emissions_Whyr_to_tonCO2yr(q_gas_for_burner_Wh, lca.NG_TO_CO2_EQ))  # ton CO2
    # add activation
    cooling_dispatch[3] = {'Q_ACH_gen_directload_W': q_chw_single_ACH_Wh,
                           'Q_Burner_NG_ACH_W': q_burner_load_Wh,
                           'Q_SC_ET_ACH_W': q_sc_gen_ET_Wh,
                           'E_ACH_req_W': el_single_ACH_Wh,
                           'E_CT_req_W': el_CT_Wh,
                           'E_SC_ET_req_W': el_aux_SC_ET_Wh,
                           'E_cs_cre_cdata_req_W': el_total_Wh,
                           'NG_Burner_req': q_gas_for_burner_Wh,
                           }
    # capacity of cooling technologies
    operation_results[3][0] = Qc_nom_AHU_ARU_SCU_W
    operation_results[3][5] = Qc_nom_AHU_ARU_SCU_W
    q_total_load = (q_burner_load_Wh[None, :] + q_chw_single_ACH_Wh[None, :] + q_sc_gen_ET_Wh[None, :])
    system_COP_list = np.divide(q_total_load, (el_total_Wh[None, :] + q_gas_for_burner_Wh[None, :])).flatten()
    system_COP = np.nansum(q_total_load * system_COP_list) / np.nansum(
        q_total_load)  # weighted average of the system efficiency
    operation_results[3][9] += system_COP

    # these two configurations are only activated when SCU is in use
    if Qc_nom_SCU_W > 0.0:
        # 4: VCC (AHU + ARU) + VCC (SCU) + CT
        print(
            '{building_name} Config 4: Vapor Compression Chillers(HT) -> SCU & Vapor Compression Chillers(LT) -> AHU,ARU'.format(
                building_name=building_name))
        # VCC (AHU + ARU) operation
        el_VCC_to_AHU_ARU_Wh, \
        q_cw_VCC_to_AHU_ARU_Wh, \
        q_chw_VCC_to_AHU_ARU_Wh = calc_VCC_operation(T_re_AHU_ARU_K, T_sup_AHU_ARU_K, mdot_AHU_ARU_kgpers, VCC_chiller)

        # VCC(SCU) operation
        el_VCC_to_SCU_Wh, \
        q_cw_VCC_to_SCU_Wh, \
        q_chw_VCC_to_SCU_Wh = calc_VCC_operation(T_re_SCU_K, T_sup_SCU_K, mdot_SCU_kgpers, VCC_chiller)

        # CT operation
        q_CT_VCC_to_AHU_ARU_and_VCC_to_SCU_W = q_cw_VCC_to_AHU_ARU_Wh + q_cw_VCC_to_SCU_Wh
        Q_nom_CT_VCC_to_AHU_ARU_and_VCC_to_SCU_W, el_CT_Wh = calc_CT_operation(q_CT_VCC_to_AHU_ARU_and_VCC_to_SCU_W)

        # add el costs
        el_total_Wh = el_VCC_to_AHU_ARU_Wh + el_VCC_to_SCU_Wh + el_CT_Wh
        operation_results[4][7] += sum(prices.ELEC_PRICE * el_total_Wh)  # CHF
        operation_results[4][8] += sum(calc_emissions_Whyr_to_tonCO2yr(el_total_Wh, lca.EL_TO_CO2_EQ))  # ton CO2
        # add activation
        cooling_dispatch[4] = {'Q_BaseVCC_AS_gen_directload_W': q_chw_VCC_to_AHU_ARU_Wh,
                               'Q_BaseVCCHT_AS_gen_directload_W': q_chw_VCC_to_SCU_Wh,
                               'E_BaseVCC_req_W': el_VCC_to_AHU_ARU_Wh,
                               'E_VCC_HT_req_W': el_VCC_to_SCU_Wh,
                               'E_CT_req_W': el_CT_Wh,
                               'E_cs_cre_cdata_req_W': el_total_Wh
                               }
        # capacity of cooling technologies
        operation_results[4][0] = Qc_nom_AHU_ARU_SCU_W
        operation_results[4][2] = Qc_nom_AHU_ARU_W  # 2: BaseVCC_AS
        operation_results[4][3] = Qc_nom_SCU_W  # 3: VCCHT_AS
        # NOTE(review): the COP weighting below uses the heat rejected at the
        # cooling tower, not the chilled water delivered (as configs 1-3 do) --
        # confirm this is intentional before relying on the COP column
        system_COP_list = np.divide(q_CT_VCC_to_AHU_ARU_and_VCC_to_SCU_W[None, :], el_total_Wh[None, :]).flatten()
        system_COP = np.nansum(q_CT_VCC_to_AHU_ARU_and_VCC_to_SCU_W[None, :] * system_COP_list) / np.nansum(
            q_CT_VCC_to_AHU_ARU_and_VCC_to_SCU_W[None, :])  # weighted average of the system efficiency
        operation_results[4][9] += system_COP

        # 5: VCC (AHU + ARU) + ACH (SCU) + CT
        print(
            '{building_name} Config 5: Vapor Compression Chillers(LT) -> AHU,ARU & Flate-place SC + Absorption Chillers(HT) -> SCU'.format(
                building_name=building_name))
        # ACH (SCU) operation
        T_hw_FP_ACH_to_SCU_K, \
        el_FP_ACH_to_SCU_Wh, \
        q_cw_FP_ACH_to_SCU_Wh, \
        q_hw_FP_ACH_to_SCU_Wh, \
        q_chw_FP_ACH_to_SCU_Wh = calc_ACH_operation(T_ground_K, T_hw_in_FP_C, T_re_SCU_K, T_sup_SCU_K, chiller_prop,
                                                    mdot_SCU_kgpers, ACH_TYPE_SINGLE)

        # boiler operation
        q_gas_for_boiler_Wh, \
        Q_nom_boiler_VCC_to_AHU_ARU_and_FP_to_single_ACH_to_SCU_W, \
        q_load_from_boiler_Wh = calc_boiler_operation(Qc_nom_SCU_W, T_hw_FP_ACH_to_SCU_K,
                                                      q_hw_FP_ACH_to_SCU_Wh, q_sc_gen_FP_Wh)

        # CT operation
        q_CT_VCC_to_AHU_ARU_and_single_ACH_to_SCU_Wh = q_cw_VCC_to_AHU_ARU_Wh + q_cw_FP_ACH_to_SCU_Wh
        Q_nom_CT_VCC_to_AHU_ARU_and_FP_to_single_ACH_to_SCU_W, \
        el_CT_Wh = calc_CT_operation(q_CT_VCC_to_AHU_ARU_and_single_ACH_to_SCU_Wh)

        # add electricity costs
        el_total_Wh = el_VCC_to_AHU_ARU_Wh + el_FP_ACH_to_SCU_Wh + el_aux_SC_FP_Wh + el_CT_Wh
        operation_results[5][7] += sum(prices.ELEC_PRICE * el_total_Wh)  # CHF
        operation_results[5][8] += sum(calc_emissions_Whyr_to_tonCO2yr(el_total_Wh, lca.EL_TO_CO2_EQ))  # ton CO2
        # add gas costs
        q_gas_total_Wh = q_gas_for_boiler_Wh
        operation_results[5][7] += sum(prices.NG_PRICE * q_gas_total_Wh)  # CHF
        operation_results[5][8] += sum(calc_emissions_Whyr_to_tonCO2yr(q_gas_total_Wh, lca.NG_TO_CO2_EQ))  # ton CO2
        # add activation
        cooling_dispatch[5] = {'Q_BaseVCC_AS_gen_directload_W': q_chw_VCC_to_AHU_ARU_Wh,
                               'Q_ACHHT_AS_gen_directload_W': q_chw_FP_ACH_to_SCU_Wh,
                               'E_BaseVCC_req_W': el_VCC_to_AHU_ARU_Wh,
                               'E_ACHHT_req_W': el_FP_ACH_to_SCU_Wh,
                               'E_SC_FP_ACH_req_W': el_aux_SC_FP_Wh,
                               'E_CT_req_W': el_CT_Wh,
                               'E_cs_cre_cdata_req_W': el_total_Wh,
                               'Q_BaseBoiler_NG_req': q_gas_for_boiler_Wh,
                               }
        # capacity of cooling technologies
        operation_results[5][0] = Qc_nom_AHU_ARU_SCU_W
        operation_results[5][2] = Qc_nom_AHU_ARU_W  # 2: BaseVCC_AS
        operation_results[5][6] = Qc_nom_SCU_W  # 6: ACHHT_SC_FP
        # NOTE(review): "q_total_load" below mixes rejected heat and gas input
        # rather than cooling delivered -- confirm before relying on this COP
        q_total_load = q_CT_VCC_to_AHU_ARU_and_single_ACH_to_SCU_Wh[None, :] + q_gas_for_boiler_Wh[None, :]
        system_COP_list = np.divide(q_total_load, el_total_Wh[None, :]).flatten()
        system_COP = np.nansum(q_total_load * system_COP_list) / np.nansum(
            q_total_load)  # weighted average of the system efficiency
        operation_results[5][9] += system_COP

    ## Calculate Capex/Opex
    # Initialize arrays
    number_of_configurations = len(operation_results)
    Capex_a_USD = np.zeros((number_of_configurations, 1))
    Capex_total_USD = np.zeros((number_of_configurations, 1))
    Opex_a_fixed_USD = np.zeros((number_of_configurations, 1))

    print('{building_name} Cost calculation...'.format(building_name=building_name))
    # 0: DX
    Capex_a_DX_USD, Opex_fixed_DX_USD, Capex_DX_USD = dx.calc_Cinv_DX(Qc_nom_AHU_ARU_SCU_W)
    # add costs
    Capex_a_USD[0][0] = Capex_a_DX_USD
    Capex_total_USD[0][0] = Capex_DX_USD
    Opex_a_fixed_USD[0][0] = Opex_fixed_DX_USD

    # 1: VCC + CT
    Capex_a_VCC_USD, Opex_fixed_VCC_USD, Capex_VCC_USD = chiller_vapor_compression.calc_Cinv_VCC(
        Qc_nom_AHU_ARU_SCU_W, locator, 'CH3')
    Capex_a_CT_USD, Opex_fixed_CT_USD, Capex_CT_USD = cooling_tower.calc_Cinv_CT(
        Q_nom_CT_VCC_to_AHU_ARU_SCU_W, locator, 'CT1')
    # add costs
    Capex_a_USD[1][0] = Capex_a_CT_USD + Capex_a_VCC_USD
    Capex_total_USD[1][0] = Capex_CT_USD + Capex_VCC_USD
    Opex_a_fixed_USD[1][0] = Opex_fixed_CT_USD + Opex_fixed_VCC_USD

    # 2: single effect ACH + CT + Boiler + SC_FP
    Capex_a_ACH_USD, Opex_fixed_ACH_USD, Capex_ACH_USD = chiller_absorption.calc_Cinv_ACH(
        Qc_nom_AHU_ARU_SCU_W, supply_systems.Absorption_chiller, ACH_TYPE_SINGLE)
    Capex_a_CT_USD, Opex_fixed_CT_USD, Capex_CT_USD = cooling_tower.calc_Cinv_CT(
        Q_nom_CT_FP_to_single_ACH_to_AHU_ARU_SCU_W, locator, 'CT1')
    Capex_a_boiler_USD, Opex_fixed_boiler_USD, Capex_boiler_USD = boiler.calc_Cinv_boiler(
        Q_nom_Boiler_FP_to_single_ACH_to_AHU_ARU_SCU_W, 'BO1', boiler_cost_data)
    Capex_a_USD[2][0] = Capex_a_CT_USD + Capex_a_ACH_USD + Capex_a_boiler_USD + Capex_a_SC_FP_USD
    Capex_total_USD[2][0] = Capex_CT_USD + Capex_ACH_USD + Capex_boiler_USD + Capex_SC_FP_USD
    Opex_a_fixed_USD[2][
        0] = Opex_fixed_CT_USD + Opex_fixed_ACH_USD + Opex_fixed_boiler_USD + Opex_SC_FP_USD

    # 3: double effect ACH + CT + Boiler + SC_ET
    Capex_a_ACH_USD, Opex_fixed_ACH_USD, Capex_ACH_USD = chiller_absorption.calc_Cinv_ACH(
        Qc_nom_AHU_ARU_SCU_W, supply_systems.Absorption_chiller, ACH_TYPE_SINGLE)
    Capex_a_CT_USD, Opex_fixed_CT_USD, Capex_CT_USD = cooling_tower.calc_Cinv_CT(
        Q_nom_CT_ET_to_single_ACH_to_AHU_ARU_SCU_W, locator, 'CT1')
    Capex_a_burner_USD, Opex_fixed_burner_USD, Capex_burner_USD = burner.calc_Cinv_burner(
        Q_nom_Burner_ET_to_single_ACH_to_AHU_ARU_SCU_W, boiler_cost_data, 'BO1')
    Capex_a_USD[3][0] = Capex_a_CT_USD + Capex_a_ACH_USD + Capex_a_burner_USD + Capex_a_SC_ET_USD
    Capex_total_USD[3][0] = Capex_CT_USD + Capex_ACH_USD + Capex_burner_USD + Capex_SC_ET_USD
    Opex_a_fixed_USD[3][
        0] = Opex_fixed_CT_USD + Opex_fixed_ACH_USD + Opex_fixed_burner_USD + Opex_SC_ET_USD

    # these two configurations are only activated when SCU is in use
    if Qc_nom_SCU_W > 0.0:
        # 4: VCC (AHU + ARU) + VCC (SCU) + CT
        Capex_a_VCC_AA_USD, Opex_VCC_AA_USD, Capex_VCC_AA_USD = chiller_vapor_compression.calc_Cinv_VCC(
            Qc_nom_AHU_ARU_W, locator, 'CH3')
        Capex_a_VCC_S_USD, Opex_VCC_S_USD, Capex_VCC_S_USD = chiller_vapor_compression.calc_Cinv_VCC(
            Qc_nom_SCU_W, locator, 'CH3')
        Capex_a_CT_USD, Opex_fixed_CT_USD, Capex_CT_USD = cooling_tower.calc_Cinv_CT(
            Q_nom_CT_VCC_to_AHU_ARU_and_VCC_to_SCU_W, locator, 'CT1')
        Capex_a_USD[4][0] = Capex_a_CT_USD + Capex_a_VCC_AA_USD + Capex_a_VCC_S_USD
        Capex_total_USD[4][0] = Capex_CT_USD + Capex_VCC_AA_USD + Capex_VCC_S_USD
        Opex_a_fixed_USD[4][0] = Opex_fixed_CT_USD + Opex_VCC_AA_USD + Opex_VCC_S_USD

        # 5: VCC (AHU + ARU) + ACH (SCU) + CT + Boiler + SC_FP
        Capex_a_ACH_S_USD, Opex_fixed_ACH_S_USD, Capex_ACH_S_USD = chiller_absorption.calc_Cinv_ACH(
            Qc_nom_SCU_W, supply_systems.Absorption_chiller, ACH_TYPE_SINGLE)
        Capex_a_CT_USD, Opex_fixed_CT_USD, Capex_CT_USD = cooling_tower.calc_Cinv_CT(
            Q_nom_CT_VCC_to_AHU_ARU_and_FP_to_single_ACH_to_SCU_W, locator, 'CT1')
        Capex_a_boiler_USD, Opex_fixed_boiler_USD, Capex_boiler_USD = boiler.calc_Cinv_boiler(
            Q_nom_boiler_VCC_to_AHU_ARU_and_FP_to_single_ACH_to_SCU_W, 'BO1', boiler_cost_data)
        Capex_a_USD[5][0] = Capex_a_CT_USD + Capex_a_VCC_AA_USD + Capex_a_ACH_S_USD + \
                            Capex_a_SC_FP_USD + Capex_a_boiler_USD
        Capex_total_USD[5][0] = Capex_CT_USD + Capex_VCC_AA_USD + Capex_ACH_S_USD + \
                                Capex_SC_FP_USD + Capex_boiler_USD
        Opex_a_fixed_USD[5][0] = Opex_fixed_CT_USD + Opex_VCC_AA_USD + Opex_fixed_ACH_S_USD + \
                                 Opex_SC_FP_USD + Opex_fixed_boiler_USD

    ## write all results from the configurations into TotalCosts, TotalCO2, TotalPrim
    Opex_a_USD, TAC_USD, TotalCO2, TotalPrim = compile_TAC_CO2_Prim(Capex_a_USD, Opex_a_fixed_USD,
                                                                    number_of_configurations, operation_results)

    ## Determine the best configuration
    Best, indexBest = rank_results(TAC_USD, TotalCO2, TotalPrim, number_of_configurations)

    # Save results in csv file
    # NOTE(review): "Capaticy_ACH_SC_ET_W" is misspelled but kept as-is because
    # downstream readers may depend on the exact column name
    performance_results = {
        "Nominal heating load": operation_results[:, 0],
        "Capacity_DX_AS_W": operation_results[:, 1],
        "Capacity_BaseVCC_AS_W": operation_results[:, 2],
        "Capacity_VCCHT_AS_W": operation_results[:, 3],
        "Capacity_ACH_SC_FP_W": operation_results[:, 4],
        "Capaticy_ACH_SC_ET_W": operation_results[:, 5],
        "Capacity_ACHHT_FP_W": operation_results[:, 6],
        "Capex_a_USD": Capex_a_USD[:, 0],
        "Capex_total_USD": Capex_total_USD[:, 0],
        "Opex_fixed_USD": Opex_a_fixed_USD[:, 0],
        "Opex_var_USD": operation_results[:, 7],
        "GHG_tonCO2": operation_results[:, 8],
        "TAC_USD": TAC_USD[:, 1],
        "Best configuration": Best[:, 0],
        "system_COP": operation_results[:, 9],
    }
    performance_results_df = pd.DataFrame(performance_results)
    performance_results_df.to_csv(
        locator.get_optimization_decentralized_folder_building_result_cooling(building_name), index=False)

    # save activation for the best supply system configuration
    best_activation_df = pd.DataFrame.from_dict(cooling_dispatch[indexBest])  #
    cooling_dispatch_columns = get_unique_keys_from_dicts(cooling_dispatch)
    cooling_dispatch_df = pd.DataFrame(columns=cooling_dispatch_columns, index=range(len(best_activation_df)))
    cooling_dispatch_df.update(best_activation_df)
    cooling_dispatch_df.to_csv(
        locator.get_optimization_decentralized_folder_building_result_cooling_activation(building_name),
        index=False, na_rep='nan')
def calc_VCC_operation(T_chw_re_K, T_chw_sup_K, mdot_kgpers, VCC_chiller):
    """Simulate hourly operation of a vapor compression chiller (VCC).

    The chiller is sized at the peak hourly chilled-water load.

    :return: tuple of numpy arrays (electricity demand [Wh],
             heat rejected to the condenser water loop [Wh],
             chilled water delivered [Wh])
    """
    from cea.optimization.constants import VCC_T_COOL_IN

    # hourly chilled-water load from mass flow and supply/return temperature lift
    chilled_water_load_Wh = HEAT_CAPACITY_OF_WATER_JPERKGK * mdot_kgpers * (T_chw_re_K - T_chw_sup_K)
    design_load_W = np.nanmax(chilled_water_load_Wh)
    hourly_operation = np.vectorize(chiller_vapor_compression.calc_VCC)(design_load_W,
                                                                        chilled_water_load_Wh,
                                                                        T_chw_sup_K,
                                                                        T_chw_re_K,
                                                                        VCC_T_COOL_IN,
                                                                        VCC_chiller)
    condenser_heat_Wh = np.asarray([hour['q_cw_W'] for hour in hourly_operation])
    electricity_Wh = np.asarray([hour['wdot_W'] for hour in hourly_operation])
    return electricity_Wh, condenser_heat_Wh, chilled_water_load_Wh
def calc_CT_operation(q_CT_load_Wh):
    """Size a cooling tower at the peak heat-rejection load and simulate its hourly electricity use.

    :param q_CT_load_Wh: hourly heat to be rejected by the cooling tower [Wh]
    :return: (nominal tower capacity [W], hourly electricity demand [Wh])
    """
    design_capacity_W = np.max(q_CT_load_Wh)
    hourly_electricity_Wh = np.vectorize(cooling_tower.calc_CT)(q_CT_load_Wh, design_capacity_W)
    return design_capacity_W, hourly_electricity_Wh
def calc_boiler_operation(Q_ACH_size_W, T_hw_out_from_ACH_K, q_hw_single_ACH_Wh, q_sc_gen_FP_Wh):
    """Simulate the back-up boiler covering the absorption chiller's hot-water demand.

    The boiler supplies whatever generator heat the solar collectors cannot.

    :param Q_ACH_size_W: installed absorption chiller capacity [W]; 0 disables the boiler
    :param T_hw_out_from_ACH_K: hot water return temperature from the chiller generator [K]
    :param q_hw_single_ACH_Wh: hourly hot water demand of the chiller generator [Wh]
    :param q_sc_gen_FP_Wh: hourly heat yield of the flat-plate solar collectors [Wh]
    :return: (gas demand [Wh], nominal boiler capacity [W], boiler heat output [Wh])
    """
    if np.isclose(Q_ACH_size_W, 0.0):
        # no absorption chiller installed: the boiler never runs
        return np.zeros(len(q_hw_single_ACH_Wh)), 0.0, 0.0

    # residual hot-water demand after the solar collector contribution
    boiler_load_Wh = q_hw_single_ACH_Wh - q_sc_gen_FP_Wh
    nominal_capacity_W = np.max(boiler_load_Wh)
    efficiency = np.vectorize(boiler.calc_Cop_boiler)(boiler_load_Wh, nominal_capacity_W, T_hw_out_from_ACH_K)
    # guard against zero efficiency hours to avoid division warnings
    gas_demand_Wh = np.divide(boiler_load_Wh, efficiency,
                              out=np.zeros_like(boiler_load_Wh), where=efficiency != 0.0)
    return gas_demand_Wh, nominal_capacity_W, boiler_load_Wh
def calc_burner_operation(Q_ACH_size_W, q_hw_single_ACH_Wh, q_sc_gen_ET_Wh):
    """Simulate the back-up burner covering the absorption chiller's hot-water demand.

    The burner supplies whatever generator heat the evacuated tube solar
    collectors cannot.

    :param Q_ACH_size_W: installed absorption chiller capacity [W]; 0 disables the burner
    :param q_hw_single_ACH_Wh: hourly hot water demand of the chiller generator [Wh]
    :param q_sc_gen_ET_Wh: hourly heat yield of the evacuated tube solar collectors [Wh]
    :return: (gas demand [Wh], nominal burner capacity [W], burner heat output [Wh])
    """
    if not np.isclose(Q_ACH_size_W, 0.0):
        # residual hot-water demand after the solar collector contribution
        q_burner_load_Wh = q_hw_single_ACH_Wh - q_sc_gen_ET_Wh
        Q_nom_Burners_W = np.max(q_burner_load_Wh)
        burner_eff = np.vectorize(burner.calc_cop_burner)(q_burner_load_Wh, Q_nom_Burners_W)
        # FIX: variable was previously misspelled "q_gas_for_burber_Wh"
        q_gas_for_burner_Wh = np.divide(q_burner_load_Wh, burner_eff,
                                        out=np.zeros_like(q_burner_load_Wh), where=burner_eff != 0)
    else:
        q_burner_load_Wh = 0.0
        Q_nom_Burners_W = 0.0
        q_gas_for_burner_Wh = np.zeros(len(q_hw_single_ACH_Wh))

    return q_gas_for_burner_Wh, Q_nom_Burners_W, q_burner_load_Wh
def compile_TAC_CO2_Prim(Capex_a_USD, Opex_a_fixed_USD, number_of_configurations, operation_results):
    """Assemble the per-configuration annual totals used for ranking.

    Each returned array has one row per configuration, with column 0 holding
    the configuration index and column 1 the metric value.

    :return: (annual Opex [USD], total annualized cost [USD],
              GHG emissions [ton CO2], primary energy metric)
    """
    # column 0: configuration index for every table
    config_ids = np.arange(number_of_configurations).reshape(-1, 1)

    # annual Opex = fixed Opex + variable operation cost (column 7)
    Opex_a_USD = np.hstack((config_ids, Opex_a_fixed_USD[:, 0:1] + operation_results[:, 7:8]))
    # total annualized cost = annualized Capex + annual Opex
    TAC_USD = np.hstack((config_ids, Capex_a_USD[:, 0:1] + Opex_a_USD[:, 1:2]))
    TotalCO2 = np.hstack((config_ids, operation_results[:, 8:9]))
    TotalPrim = np.hstack((config_ids, operation_results[:, 9:10]))
    return Opex_a_USD, TAC_USD, TotalCO2, TotalPrim
def rank_results(TAC_USD, TotalCO2, TotalPrim, number_of_configurations):
    """Rank configurations by cost, emissions and primary energy and flag the best one.

    Walks down the three sorted rankings in lockstep; the first configuration
    that has appeared in all three rankings wins.

    :param TAC_USD: (n, 2) array [config index, total annualized cost]
    :param TotalCO2: (n, 2) array [config index, GHG emissions]
    :param TotalPrim: (n, 2) array [config index, primary energy]
    :return: (Best, indexBest) where Best is an (n, 1) 0/1 array with a single
             1 at the winning configuration, and indexBest is its row index
    """
    Best = np.zeros((number_of_configurations, 1))
    # rank each objective independently (column 0 carries the configuration index)
    CostsS = TAC_USD[np.argsort(TAC_USD[:, 1])]
    CO2S = TotalCO2[np.argsort(TotalCO2[:, 1])]
    PrimS = TotalPrim[np.argsort(TotalPrim[:, 1])]
    el = len(CostsS)
    rank = 0
    Bestfound = False

    # each configuration starts with a budget of 3 (one per objective); the
    # first counter to reach 0 has been seen in all three rankings
    optsearch = np.empty(el)
    optsearch.fill(3)
    indexBest = 0

    while not Bestfound and rank < el:
        optsearch[int(CostsS[rank][0])] -= 1
        optsearch[int(CO2S[rank][0])] -= 1
        # BUGFIX: the primary-energy ranking was previously never decremented,
        # so with a fill value of 3 no counter could reach 0 (each index was
        # hit at most twice) and configuration 0 was always reported as best
        optsearch[int(PrimS[rank][0])] -= 1
        if np.count_nonzero(optsearch) != el:
            Bestfound = True
            indexBest = np.where(optsearch == 0)[0][0]
        rank += 1

    # flag the best option according to the ranking
    Best[indexBest][0] = 1
    return Best, indexBest
def initialize_result_tables_for_supply_configurations(Qc_nom_SCU_W):
    """Allocate the zero-filled results table for the supply configurations.

    Columns 1-6 hold the capacities of the cooling technologies:
        0: DX -> AHU,ARU,SCU
        1: VCC -> AHU,ARU,SCU
        2: FP + ACH -> AHU,ARU,SCU
        3: ET + ACH -> AHU,ARU,SCU
        4: VCC -> AHU,ARU
        5: VCC -> SCU
        6: FP + ACH -> SCU
    Configurations 4 and 5 require a sensible cooling unit, so buildings
    without an SCU load only get rows for configurations 0-3.

    :param Qc_nom_SCU_W: nominal sensible cooling unit load [W]
    :return: (4, 10) or (6, 10) numpy array of zeros
    """
    number_of_configurations = 6 if Qc_nom_SCU_W > 0.0 else 4
    return np.zeros((number_of_configurations, 10))
def calc_ACH_operation(T_ground_K, T_SC_hw_in_C, T_chw_re_K, T_chw_sup_K, absorption_chiller, mdot_chw_kgpers,
                       ACH_type):
    """Simulate hourly operation of an absorption chiller driven by solar hot water.

    :return: tuple of numpy arrays (generator hot water outlet temperature [K],
             electricity demand [Wh], condenser heat rejection [Wh],
             generator hot water demand [Wh], chilled water delivered [Wh])
    """
    chiller = chiller_absorption.AbsorptionChiller(absorption_chiller, ACH_type)
    hourly_operation = np.vectorize(chiller_absorption.calc_chiller_main)(mdot_chw_kgpers,
                                                                          T_chw_sup_K,
                                                                          T_chw_re_K,
                                                                          T_SC_hw_in_C,
                                                                          T_ground_K,
                                                                          chiller)

    def _column(key):
        # collect one result field across all simulated hours
        return np.asarray([hour[key] for hour in hourly_operation])

    el_ACH_Wh = _column('wdot_W')
    q_chw_ACH_Wh = _column('q_chw_W')
    q_cw_ACH_Wh = _column('q_cw_W')
    q_hw_ACH_Wh = _column('q_hw_W')
    T_hw_out_ACH_K = _column('T_hw_out_C') + 273.15  # convert Celsius to Kelvin
    return T_hw_out_ACH_K, el_ACH_Wh, q_cw_ACH_Wh, q_hw_ACH_Wh, q_chw_ACH_Wh
def get_SC_data(building_name, locator, panel_type):
    """Load hourly solar collector (SC) simulation results for one building.

    :param building_name: name of the building
    :param locator: InputLocator with paths to the SC result files
    :param panel_type: "FP" (flat plate) or "ET" (evacuated tube)
    :return: (SC results dataframe, hot water inlet temperatures [C],
              auxiliary electricity demand [Wh], heat generated [Wh])
    :raises ValueError: if panel_type is not "FP" or "ET"
    """
    SC_data = pd.read_csv(locator.SC_results(building_name, panel_type),
                          usecols=["T_SC_sup_C", "T_SC_re_C", "mcp_SC_kWperC", "Q_SC_gen_kWh", "Area_SC_m2",
                                   "Eaux_SC_kWh"])
    q_sc_gen_Wh = SC_data['Q_SC_gen_kWh'] * 1000
    # clip negative generation (hours when the collector loses heat) to zero
    q_sc_gen_Wh = np.where(q_sc_gen_Wh < 0.0, 0.0, q_sc_gen_Wh)
    el_aux_SC_Wh = SC_data['Eaux_SC_kWh'] * 1000
    # the generator inlet temperature cannot drop below the panel type's minimum
    if panel_type == "FP":
        T_hw_in_C = [x if x > T_GENERATOR_FROM_FP_C else T_GENERATOR_FROM_FP_C for x in SC_data['T_SC_re_C']]
    elif panel_type == "ET":
        T_hw_in_C = [x if x > T_GENERATOR_FROM_ET_C else T_GENERATOR_FROM_ET_C for x in SC_data['T_SC_re_C']]
    else:
        # BUGFIX: previously only printed a warning and then crashed with an
        # UnboundLocalError on T_hw_in_C at the return; fail fast instead
        raise ValueError('invalid panel type: %s' % panel_type)
    return SC_data, T_hw_in_C, el_aux_SC_Wh, q_sc_gen_Wh
def calc_combined_cooling_loads(building_name, locator, total_demand, cooling_configuration):
    """Aggregate the cooling load of the requested units for one building.

    Runs the substation model for a single building and reads back the combined
    supply/return conditions of the selected cooling units.

    :param cooling_configuration: list of unit tags, e.g. ['ahu', 'aru', 'scu']
    :return: (design (peak) cooling load [W], return temperatures [K],
              supply temperatures [K], mass flows [kg/s])
    """
    # get combined cooling supply/return conditions using the substation script
    substation.substation_main_cooling(locator, total_demand, [building_name], cooling_configuration)

    supply_col = "T_supply_DC_space_cooling_data_center_and_refrigeration_result_K"
    return_col = "T_return_DC_space_cooling_data_center_and_refrigeration_result_K"
    mdot_col = "mdot_space_cooling_data_center_and_refrigeration_result_kgpers"
    substation_operation = pd.read_csv(
        locator.get_optimization_substations_results_file(building_name, "DC", ""),
        usecols=[supply_col, return_col, mdot_col])

    T_re_K = substation_operation[return_col].values
    T_sup_K = substation_operation[supply_col].values
    mdot_kgpers = substation_operation[mdot_col].values

    # calculate the combined hourly load and its design (peak) value
    Qc_load_W = np.vectorize(calc_new_load)(mdot_kgpers, T_sup_K, T_re_K)
    return Qc_load_W.max(), T_re_K, T_sup_K, mdot_kgpers
def get_tech_unit_size_and_number(Qc_nom_W, max_tech_size_W):
    """Split a nominal capacity into equally-sized units below the technology's maximum size.

    :param Qc_nom_W: required total capacity [W]
    :param max_tech_size_W: maximum size of a single unit [W]
    :return: (capacity per installed unit [W], number of units)
    """
    if Qc_nom_W > max_tech_size_W:
        # distribute the load evenly over the minimum number of units that fit
        number_of_installation = int(ceil(Qc_nom_W / max_tech_size_W))
        return Qc_nom_W / number_of_installation, number_of_installation
    return Qc_nom_W, 1
# ============================
# other functions
# ============================
def calc_new_load(mdot_kgpers, T_sup_K, T_re_K):
    """Calculate the hourly cooling load on the distribution side, including losses.

    :param mdot_kgpers: chilled water mass flow [kg/s]
    :param T_sup_K: chilled water supply temperature [K]
    :param T_re_K: chilled water return temperature [K]
    :return: cooling load of the distribution [W]
    :rtype: float
    :raises ValueError: if the temperatures imply a negative load
    """
    if mdot_kgpers <= 0:
        # no flow means no load
        return 0

    # sensible load plus the distribution-loss allowance
    Q_cooling_load_W = mdot_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK * (T_re_K - T_sup_K) * (
            1 + Q_LOSS_DISCONNECTED)
    if Q_cooling_load_W < 0:
        raise ValueError('Q_cooling_load less than zero, check temperatures!')
    return Q_cooling_load_W
# ============================
# test
# ============================
def main(config):
    """Run the whole decentralized cooling preprocessing routine for a scenario.

    :param config: cea.config.Configuration with the scenario to process
    """
    from cea.optimization.prices import Prices as Prices

    print("Running decentralized model for buildings with scenario = %s" % config.scenario)

    locator = cea.inputlocator.InputLocator(scenario=config.scenario)
    supply_systems = SupplySystemsDatabase(locator)
    total_demand = pd.read_csv(locator.get_total_demand())
    prices = Prices(supply_systems)
    lca = LcaCalculations(supply_systems)

    disconnected_buildings_cooling_main(locator, total_demand.Name, total_demand, config, prices, lca)

    print("test_decentralized_buildings_cooling() succeeded")
# script entry point: run the routine with the default CEA configuration
if __name__ == '__main__':
    main(cea.config.Configuration())
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_subscription_request(
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build a GET request listing all SSH public keys in the subscription.

    :param subscription_id: Azure subscription ID, substituted into the URL path.
    :keyword template_url: override for the URL template (pipeline use only).
    :return: an :class:`~azure.core.rest.HttpRequest` ready to be sent by the
     client pipeline; remaining ``kwargs`` are forwarded to it.
    """
    api_version = "2021-07-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/sshPublicKeys')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_by_resource_group_request(
    resource_group_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build a GET request listing the SSH public keys in one resource group.

    :param resource_group_name: name of the resource group, substituted into the URL path.
    :param subscription_id: Azure subscription ID, substituted into the URL path.
    :keyword template_url: override for the URL template (pipeline use only).
    :return: an :class:`~azure.core.rest.HttpRequest` ready to be sent by the
     client pipeline; remaining ``kwargs`` are forwarded to it.
    """
    api_version = "2021-07-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_create_request(
    resource_group_name: str,
    ssh_public_key_name: str,
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build a PUT request that creates (or overwrites) an SSH public key resource.

    :param resource_group_name: name of the resource group, substituted into the URL path.
    :param ssh_public_key_name: name of the SSH public key resource.
    :param subscription_id: Azure subscription ID, substituted into the URL path.
    :keyword json: serializable request body; mutually exclusive with ``content``.
    :keyword content: raw request body; mutually exclusive with ``json``.
    :keyword content_type: value for the Content-Type header, if any.
    :keyword template_url: override for the URL template (pipeline use only).
    :return: an :class:`~azure.core.rest.HttpRequest` ready to be sent by the
     client pipeline; remaining ``kwargs`` are forwarded to it.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    api_version = "2021-07-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "sshPublicKeyName": _SERIALIZER.url("ssh_public_key_name", ssh_public_key_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_update_request(
    resource_group_name: str,
    ssh_public_key_name: str,
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    # Build the HTTP PATCH request that updates an existing SSH public key resource.
    content_type = kwargs.pop('content_type', None) # type: Optional[str]
    api_version = "2021-07-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "sshPublicKeyName": _SERIALIZER.url("ssh_public_key_name", ssh_public_key_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    # Content-Type is only emitted when the caller supplied a body content type.
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PATCH",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request(
    resource_group_name: str,
    ssh_public_key_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    # Build the HTTP DELETE request for an SSH public key resource.
    # Note: no Accept header — DELETE returns no body to deserialize.
    api_version = "2021-07-01"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "sshPublicKeyName": _SERIALIZER.url("ssh_public_key_name", ssh_public_key_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    return HttpRequest(
        method="DELETE",
        url=url,
        params=query_parameters,
        **kwargs
    )
def build_get_request(
    resource_group_name: str,
    ssh_public_key_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    # Build the HTTP GET request that retrieves a single SSH public key resource.
    api_version = "2021-07-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "sshPublicKeyName": _SERIALIZER.url("ssh_public_key_name", ssh_public_key_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_generate_key_pair_request(
    resource_group_name: str,
    ssh_public_key_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    # Build the HTTP POST request for the generateKeyPair action on an SSH
    # public key resource (no request body is sent).
    api_version = "2021-07-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}/generateKeyPair')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "sshPublicKeyName": _SERIALIZER.url("ssh_public_key_name", ssh_public_key_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class SshPublicKeysOperations(object):
    """SshPublicKeysOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2021_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Expose the generated model classes on the operation group for convenience.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list_by_subscription(
        self,
        **kwargs: Any
    ) -> Iterable["_models.SshPublicKeysGroupListResult"]:
        """Lists all of the SSH public keys in the subscription. Use the nextLink property in the response
        to get the next page of SSH public keys.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SshPublicKeysGroupListResult or the result of
        cls(response)
        :rtype:
        ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_07_01.models.SshPublicKeysGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeysGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Builds the request for the first page (template URL) or a
        # continuation page (next_link returned by the service).
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_subscription_request(
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_subscription.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_subscription_request(
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        # Deserializes one page into (continuation token, iterator of items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize("SshPublicKeysGroupListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Runs the pipeline for one page and maps HTTP errors to typed exceptions.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/sshPublicKeys'} # type: ignore
    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> Iterable["_models.SshPublicKeysGroupListResult"]:
        """Lists all of the SSH public keys in the specified resource group. Use the nextLink property in
        the response to get the next page of SSH public keys.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SshPublicKeysGroupListResult or the result of
        cls(response)
        :rtype:
        ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_07_01.models.SshPublicKeysGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeysGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Same paging pattern as list_by_subscription, scoped to a resource group.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize("SshPublicKeysGroupListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys'} # type: ignore
    @distributed_trace
    def create(
        self,
        resource_group_name: str,
        ssh_public_key_name: str,
        parameters: "_models.SshPublicKeyResource",
        **kwargs: Any
    ) -> "_models.SshPublicKeyResource":
        """Creates a new SSH public key resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ssh_public_key_name: The name of the SSH public key.
        :type ssh_public_key_name: str
        :param parameters: Parameters supplied to create the SSH public key.
        :type parameters: ~azure.mgmt.compute.v2021_07_01.models.SshPublicKeyResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SshPublicKeyResource, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2021_07_01.models.SshPublicKeyResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeyResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
        _json = self._serialize.body(parameters, 'SshPublicKeyResource')
        request = build_create_request(
            resource_group_name=resource_group_name,
            ssh_public_key_name=ssh_public_key_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.create.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both 200 (updated) and 201 (created) carry an SshPublicKeyResource body.
        if response.status_code == 200:
            deserialized = self._deserialize('SshPublicKeyResource', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('SshPublicKeyResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}'} # type: ignore
    @distributed_trace
    def update(
        self,
        resource_group_name: str,
        ssh_public_key_name: str,
        parameters: "_models.SshPublicKeyUpdateResource",
        **kwargs: Any
    ) -> "_models.SshPublicKeyResource":
        """Updates a new SSH public key resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ssh_public_key_name: The name of the SSH public key.
        :type ssh_public_key_name: str
        :param parameters: Parameters supplied to update the SSH public key.
        :type parameters: ~azure.mgmt.compute.v2021_07_01.models.SshPublicKeyUpdateResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SshPublicKeyResource, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2021_07_01.models.SshPublicKeyResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeyResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
        _json = self._serialize.body(parameters, 'SshPublicKeyUpdateResource')
        request = build_update_request(
            resource_group_name=resource_group_name,
            ssh_public_key_name=ssh_public_key_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('SshPublicKeyResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}'} # type: ignore
    @distributed_trace
    def delete(
        self,
        resource_group_name: str,
        ssh_public_key_name: str,
        **kwargs: Any
    ) -> None:
        """Delete an SSH public key.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ssh_public_key_name: The name of the SSH public key.
        :type ssh_public_key_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request(
            resource_group_name=resource_group_name,
            ssh_public_key_name=ssh_public_key_name,
            subscription_id=self._config.subscription_id,
            template_url=self.delete.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 204 indicates the resource was already absent; both are success.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}'} # type: ignore
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        ssh_public_key_name: str,
        **kwargs: Any
    ) -> "_models.SshPublicKeyResource":
        """Retrieves information about an SSH public key.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ssh_public_key_name: The name of the SSH public key.
        :type ssh_public_key_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SshPublicKeyResource, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2021_07_01.models.SshPublicKeyResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeyResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            resource_group_name=resource_group_name,
            ssh_public_key_name=ssh_public_key_name,
            subscription_id=self._config.subscription_id,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('SshPublicKeyResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}'} # type: ignore
    @distributed_trace
    def generate_key_pair(
        self,
        resource_group_name: str,
        ssh_public_key_name: str,
        **kwargs: Any
    ) -> "_models.SshPublicKeyGenerateKeyPairResult":
        """Generates and returns a public/private key pair and populates the SSH public key resource with
        the public key. The length of the key will be 3072 bits. This operation can only be performed
        once per SSH public key resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ssh_public_key_name: The name of the SSH public key.
        :type ssh_public_key_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SshPublicKeyGenerateKeyPairResult, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2021_07_01.models.SshPublicKeyGenerateKeyPairResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.SshPublicKeyGenerateKeyPairResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_generate_key_pair_request(
            resource_group_name=resource_group_name,
            ssh_public_key_name=ssh_public_key_name,
            subscription_id=self._config.subscription_id,
            template_url=self.generate_key_pair.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('SshPublicKeyGenerateKeyPairResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    generate_key_pair.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}/generateKeyPair'} # type: ignore
| |
"""Test script for the gzip module.
"""
import unittest
from test import support
from test.support import bigmemtest, _4G
import os
import io
import struct
import array
# Import gzip through support so the whole test module is skipped cleanly
# when the underlying zlib support is unavailable.
gzip = support.import_module('gzip')
# Arbitrary C source snippets used as compressible payloads by the tests.
data1 = b""" int length=DEFAULTALLOC, err = Z_OK;
PyObject *RetVal;
int flushmode = Z_FINISH;
unsigned long start_total_out;
"""
data2 = b"""/* zlibmodule.c -- gzip-compatible data compression */
/* See http://www.gzip.org/zlib/
/* See http://www.winimage.com/zLibDll for Windows */
"""
class UnseekableIO(io.BytesIO):
    """In-memory byte stream that refuses every seek/tell operation.

    Used to exercise GzipFile's code paths for non-seekable file objects.
    """
    def seekable(self):
        # Advertise the stream as non-seekable.
        return False
    def seek(self, *args):
        raise io.UnsupportedOperation
    def tell(self):
        raise io.UnsupportedOperation
class BaseTest(unittest.TestCase):
    """Shared fixture: a scratch filename removed before and after each test."""
    # Per-run temporary file name provided by the test support machinery.
    filename = support.TESTFN
    def setUp(self):
        # Remove any stale file left over from a previous (failed) run.
        support.unlink(self.filename)
    def tearDown(self):
        support.unlink(self.filename)
class TestGzip(BaseTest):
def write_and_read_back(self, data, mode='b'):
b_data = bytes(data)
with gzip.GzipFile(self.filename, 'w'+mode) as f:
l = f.write(data)
self.assertEqual(l, len(b_data))
with gzip.GzipFile(self.filename, 'r'+mode) as f:
self.assertEqual(f.read(), b_data)
def test_write(self):
with gzip.GzipFile(self.filename, 'wb') as f:
f.write(data1 * 50)
# Try flush and fileno.
f.flush()
f.fileno()
if hasattr(os, 'fsync'):
os.fsync(f.fileno())
f.close()
# Test multiple close() calls.
f.close()
# The following test_write_xy methods test that write accepts
# the corresponding bytes-like object type as input
# and that the data written equals bytes(xy) in all cases.
def test_write_memoryview(self):
self.write_and_read_back(memoryview(data1 * 50))
m = memoryview(bytes(range(256)))
data = m.cast('B', shape=[8,8,4])
self.write_and_read_back(data)
def test_write_bytearray(self):
self.write_and_read_back(bytearray(data1 * 50))
def test_write_array(self):
self.write_and_read_back(array.array('I', data1 * 40))
def test_write_incompatible_type(self):
# Test that non-bytes-like types raise TypeError.
# Issue #21560: attempts to write incompatible types
# should not affect the state of the fileobject
with gzip.GzipFile(self.filename, 'wb') as f:
with self.assertRaises(TypeError):
f.write('')
with self.assertRaises(TypeError):
f.write([])
f.write(data1)
with gzip.GzipFile(self.filename, 'rb') as f:
self.assertEqual(f.read(), data1)
def test_read(self):
self.test_write()
# Try reading.
with gzip.GzipFile(self.filename, 'r') as f:
d = f.read()
self.assertEqual(d, data1*50)
def test_read1(self):
self.test_write()
blocks = []
nread = 0
with gzip.GzipFile(self.filename, 'r') as f:
while True:
d = f.read1()
if not d:
break
blocks.append(d)
nread += len(d)
# Check that position was updated correctly (see issue10791).
self.assertEqual(f.tell(), nread)
self.assertEqual(b''.join(blocks), data1 * 50)
@bigmemtest(size=_4G, memuse=1)
def test_read_large(self, size):
# Read chunk size over UINT_MAX should be supported, despite zlib's
# limitation per low-level call
compressed = gzip.compress(data1, compresslevel=1)
f = gzip.GzipFile(fileobj=io.BytesIO(compressed), mode='rb')
self.assertEqual(f.read(size), data1)
def test_io_on_closed_object(self):
# Test that I/O operations on closed GzipFile objects raise a
# ValueError, just like the corresponding functions on file objects.
# Write to a file, open it for reading, then close it.
self.test_write()
f = gzip.GzipFile(self.filename, 'r')
fileobj = f.fileobj
self.assertFalse(fileobj.closed)
f.close()
self.assertTrue(fileobj.closed)
with self.assertRaises(ValueError):
f.read(1)
with self.assertRaises(ValueError):
f.seek(0)
with self.assertRaises(ValueError):
f.tell()
# Open the file for writing, then close it.
f = gzip.GzipFile(self.filename, 'w')
fileobj = f.fileobj
self.assertFalse(fileobj.closed)
f.close()
self.assertTrue(fileobj.closed)
with self.assertRaises(ValueError):
f.write(b'')
with self.assertRaises(ValueError):
f.flush()
def test_append(self):
self.test_write()
# Append to the previous file
with gzip.GzipFile(self.filename, 'ab') as f:
f.write(data2 * 15)
with gzip.GzipFile(self.filename, 'rb') as f:
d = f.read()
self.assertEqual(d, (data1*50) + (data2*15))
def test_many_append(self):
# Bug #1074261 was triggered when reading a file that contained
# many, many members. Create such a file and verify that reading it
# works.
with gzip.GzipFile(self.filename, 'wb', 9) as f:
f.write(b'a')
for i in range(0, 200):
with gzip.GzipFile(self.filename, "ab", 9) as f: # append
f.write(b'a')
# Try reading the file
with gzip.GzipFile(self.filename, "rb") as zgfile:
contents = b""
while 1:
ztxt = zgfile.read(8192)
contents += ztxt
if not ztxt: break
self.assertEqual(contents, b'a'*201)
def test_exclusive_write(self):
with gzip.GzipFile(self.filename, 'xb') as f:
f.write(data1 * 50)
with gzip.GzipFile(self.filename, 'rb') as f:
self.assertEqual(f.read(), data1 * 50)
with self.assertRaises(FileExistsError):
gzip.GzipFile(self.filename, 'xb')
def test_buffered_reader(self):
# Issue #7471: a GzipFile can be wrapped in a BufferedReader for
# performance.
self.test_write()
with gzip.GzipFile(self.filename, 'rb') as f:
with io.BufferedReader(f) as r:
lines = [line for line in r]
self.assertEqual(lines, 50 * data1.splitlines(keepends=True))
def test_readline(self):
self.test_write()
# Try .readline() with varying line lengths
with gzip.GzipFile(self.filename, 'rb') as f:
line_length = 0
while 1:
L = f.readline(line_length)
if not L and line_length != 0: break
self.assertTrue(len(L) <= line_length)
line_length = (line_length + 1) % 50
def test_readlines(self):
self.test_write()
# Try .readlines()
with gzip.GzipFile(self.filename, 'rb') as f:
L = f.readlines()
with gzip.GzipFile(self.filename, 'rb') as f:
while 1:
L = f.readlines(150)
if L == []: break
def test_seek_read(self):
self.test_write()
# Try seek, read test
with gzip.GzipFile(self.filename) as f:
while 1:
oldpos = f.tell()
line1 = f.readline()
if not line1: break
newpos = f.tell()
f.seek(oldpos) # negative seek
if len(line1)>10:
amount = 10
else:
amount = len(line1)
line2 = f.read(amount)
self.assertEqual(line1[:amount], line2)
f.seek(newpos) # positive seek
def test_seek_whence(self):
self.test_write()
# Try seek(whence=1), read test
with gzip.GzipFile(self.filename) as f:
f.read(10)
f.seek(10, whence=1)
y = f.read(10)
self.assertEqual(y, data1[20:30])
def test_seek_write(self):
# Try seek, write test
with gzip.GzipFile(self.filename, 'w') as f:
for pos in range(0, 256, 16):
f.seek(pos)
f.write(b'GZ\n')
def test_mode(self):
self.test_write()
with gzip.GzipFile(self.filename, 'r') as f:
self.assertEqual(f.myfileobj.mode, 'rb')
support.unlink(self.filename)
with gzip.GzipFile(self.filename, 'x') as f:
self.assertEqual(f.myfileobj.mode, 'xb')
def test_1647484(self):
for mode in ('wb', 'rb'):
with gzip.GzipFile(self.filename, mode) as f:
self.assertTrue(hasattr(f, "name"))
self.assertEqual(f.name, self.filename)
def test_paddedfile_getattr(self):
self.test_write()
with gzip.GzipFile(self.filename, 'rb') as f:
self.assertTrue(hasattr(f.fileobj, "name"))
self.assertEqual(f.fileobj.name, self.filename)
def test_mtime(self):
mtime = 123456789
with gzip.GzipFile(self.filename, 'w', mtime = mtime) as fWrite:
fWrite.write(data1)
with gzip.GzipFile(self.filename) as fRead:
self.assertTrue(hasattr(fRead, 'mtime'))
self.assertIsNone(fRead.mtime)
dataRead = fRead.read()
self.assertEqual(dataRead, data1)
self.assertEqual(fRead.mtime, mtime)
def test_metadata(self):
mtime = 123456789
with gzip.GzipFile(self.filename, 'w', mtime = mtime) as fWrite:
fWrite.write(data1)
with open(self.filename, 'rb') as fRead:
# see RFC 1952: http://www.faqs.org/rfcs/rfc1952.html
idBytes = fRead.read(2)
self.assertEqual(idBytes, b'\x1f\x8b') # gzip ID
cmByte = fRead.read(1)
self.assertEqual(cmByte, b'\x08') # deflate
flagsByte = fRead.read(1)
self.assertEqual(flagsByte, b'\x08') # only the FNAME flag is set
mtimeBytes = fRead.read(4)
self.assertEqual(mtimeBytes, struct.pack('<i', mtime)) # little-endian
xflByte = fRead.read(1)
self.assertEqual(xflByte, b'\x02') # maximum compression
osByte = fRead.read(1)
self.assertEqual(osByte, b'\xff') # OS "unknown" (OS-independent)
# Since the FNAME flag is set, the zero-terminated filename follows.
# RFC 1952 specifies that this is the name of the input file, if any.
# However, the gzip module defaults to storing the name of the output
# file in this field.
expected = self.filename.encode('Latin-1') + b'\x00'
nameBytes = fRead.read(len(expected))
self.assertEqual(nameBytes, expected)
# Since no other flags were set, the header ends here.
# Rather than process the compressed data, let's seek to the trailer.
fRead.seek(os.stat(self.filename).st_size - 8)
crc32Bytes = fRead.read(4) # CRC32 of uncompressed data [data1]
self.assertEqual(crc32Bytes, b'\xaf\xd7d\x83')
isizeBytes = fRead.read(4)
self.assertEqual(isizeBytes, struct.pack('<i', len(data1)))
def test_with_open(self):
# GzipFile supports the context management protocol
with gzip.GzipFile(self.filename, "wb") as f:
f.write(b"xxx")
f = gzip.GzipFile(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with gzip.GzipFile(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
def test_zero_padded_file(self):
with gzip.GzipFile(self.filename, "wb") as f:
f.write(data1 * 50)
# Pad the file with zeroes
with open(self.filename, "ab") as f:
f.write(b"\x00" * 50)
with gzip.GzipFile(self.filename, "rb") as f:
d = f.read()
self.assertEqual(d, data1 * 50, "Incorrect data in file")
def test_non_seekable_file(self):
uncompressed = data1 * 50
buf = UnseekableIO()
with gzip.GzipFile(fileobj=buf, mode="wb") as f:
f.write(uncompressed)
compressed = buf.getvalue()
buf = UnseekableIO(compressed)
with gzip.GzipFile(fileobj=buf, mode="rb") as f:
self.assertEqual(f.read(), uncompressed)
def test_peek(self):
uncompressed = data1 * 200
with gzip.GzipFile(self.filename, "wb") as f:
f.write(uncompressed)
def sizes():
while True:
for n in range(5, 50, 10):
yield n
with gzip.GzipFile(self.filename, "rb") as f:
f.max_read_chunk = 33
nread = 0
for n in sizes():
s = f.peek(n)
if s == b'':
break
self.assertEqual(f.read(len(s)), s)
nread += len(s)
self.assertEqual(f.read(100), b'')
self.assertEqual(nread, len(uncompressed))
def test_textio_readlines(self):
# Issue #10791: TextIOWrapper.readlines() fails when wrapping GzipFile.
lines = (data1 * 50).decode("ascii").splitlines(keepends=True)
self.test_write()
with gzip.GzipFile(self.filename, 'r') as f:
with io.TextIOWrapper(f, encoding="ascii") as t:
self.assertEqual(t.readlines(), lines)
def test_fileobj_from_fdopen(self):
# Issue #13781: Opening a GzipFile for writing fails when using a
# fileobj created with os.fdopen().
fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT)
with os.fdopen(fd, "wb") as f:
with gzip.GzipFile(fileobj=f, mode="w") as g:
pass
def test_bytes_filename(self):
str_filename = self.filename
try:
bytes_filename = str_filename.encode("ascii")
except UnicodeEncodeError:
self.skipTest("Temporary file name needs to be ASCII")
with gzip.GzipFile(bytes_filename, "wb") as f:
f.write(data1 * 50)
with gzip.GzipFile(bytes_filename, "rb") as f:
self.assertEqual(f.read(), data1 * 50)
# Sanity check that we are actually operating on the right file.
with gzip.GzipFile(str_filename, "rb") as f:
self.assertEqual(f.read(), data1 * 50)
def test_decompress_limited(self):
"""Decompressed data buffering should be limited"""
bomb = gzip.compress(b'\0' * int(2e6), compresslevel=9)
self.assertLess(len(bomb), io.DEFAULT_BUFFER_SIZE)
bomb = io.BytesIO(bomb)
decomp = gzip.GzipFile(fileobj=bomb)
self.assertEqual(decomp.read(1), b'\0')
max_decomp = 1 + io.DEFAULT_BUFFER_SIZE
self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp,
"Excessive amount of data was decompressed")
# Testing compress/decompress shortcut functions
def test_compress(self):
for data in [data1, data2]:
for args in [(), (1,), (6,), (9,)]:
datac = gzip.compress(data, *args)
self.assertEqual(type(datac), bytes)
with gzip.GzipFile(fileobj=io.BytesIO(datac), mode="rb") as f:
self.assertEqual(f.read(), data)
def test_decompress(self):
for data in (data1, data2):
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode="wb") as f:
f.write(data)
self.assertEqual(gzip.decompress(buf.getvalue()), data)
# Roundtrip with compress
datac = gzip.compress(data)
self.assertEqual(gzip.decompress(datac), data)
def test_read_truncated(self):
data = data1*50
# Drop the CRC (4 bytes) and file size (4 bytes).
truncated = gzip.compress(data)[:-8]
with gzip.GzipFile(fileobj=io.BytesIO(truncated)) as f:
self.assertRaises(EOFError, f.read)
with gzip.GzipFile(fileobj=io.BytesIO(truncated)) as f:
self.assertEqual(f.read(len(data)), data)
self.assertRaises(EOFError, f.read, 1)
# Incomplete 10-byte header.
for i in range(2, 10):
with gzip.GzipFile(fileobj=io.BytesIO(truncated[:i])) as f:
self.assertRaises(EOFError, f.read, 1)
def test_read_with_extra(self):
# Gzip data with an extra field
gzdata = (b'\x1f\x8b\x08\x04\xb2\x17cQ\x02\xff'
b'\x05\x00Extra'
b'\x0bI-.\x01\x002\xd1Mx\x04\x00\x00\x00')
with gzip.GzipFile(fileobj=io.BytesIO(gzdata)) as f:
self.assertEqual(f.read(), b'Test')
    def test_prepend_error(self):
        # See issue #20875: calling prepend() on a fresh reader with no
        # pending data must not raise.
        # NOTE(review): reaches into private internals (_buffer.raw._fp,
        # a _PaddedFile) -- tightly coupled to the gzip implementation.
        with gzip.open(self.filename, "wb") as f:
            f.write(data1)
        with gzip.open(self.filename, "rb") as f:
            f._buffer.raw._fp.prepend()
class TestOpen(BaseTest):
    """Tests for the gzip.open() convenience function: explicit and implicit
    binary modes, text modes, fileobj inputs, invalid parameter combinations,
    encodings, error handlers and newline translation."""

    def test_binary_modes(self):
        uncompressed = data1 * 50
        # "wb" writes a fresh member; verify via raw-file decompression.
        with gzip.open(self.filename, "wb") as f:
            f.write(uncompressed)
        with open(self.filename, "rb") as f:
            file_data = gzip.decompress(f.read())
            self.assertEqual(file_data, uncompressed)
        with gzip.open(self.filename, "rb") as f:
            self.assertEqual(f.read(), uncompressed)
        # "ab" appends a second member instead of truncating.
        with gzip.open(self.filename, "ab") as f:
            f.write(uncompressed)
        with open(self.filename, "rb") as f:
            file_data = gzip.decompress(f.read())
            self.assertEqual(file_data, uncompressed * 2)
        # "xb" (exclusive create) fails while the file exists ...
        with self.assertRaises(FileExistsError):
            gzip.open(self.filename, "xb")
        support.unlink(self.filename)
        # ... and succeeds once it is gone.
        with gzip.open(self.filename, "xb") as f:
            f.write(uncompressed)
        with open(self.filename, "rb") as f:
            file_data = gzip.decompress(f.read())
            self.assertEqual(file_data, uncompressed)

    def test_implicit_binary_modes(self):
        # Test implicit binary modes (no "b" or "t" in mode string).
        uncompressed = data1 * 50
        with gzip.open(self.filename, "w") as f:
            f.write(uncompressed)
        with open(self.filename, "rb") as f:
            file_data = gzip.decompress(f.read())
            self.assertEqual(file_data, uncompressed)
        with gzip.open(self.filename, "r") as f:
            self.assertEqual(f.read(), uncompressed)
        with gzip.open(self.filename, "a") as f:
            f.write(uncompressed)
        with open(self.filename, "rb") as f:
            file_data = gzip.decompress(f.read())
            self.assertEqual(file_data, uncompressed * 2)
        with self.assertRaises(FileExistsError):
            gzip.open(self.filename, "x")
        support.unlink(self.filename)
        with gzip.open(self.filename, "x") as f:
            f.write(uncompressed)
        with open(self.filename, "rb") as f:
            file_data = gzip.decompress(f.read())
            self.assertEqual(file_data, uncompressed)

    def test_text_modes(self):
        uncompressed = data1.decode("ascii") * 50
        # On disk, "\n" is translated to the platform line separator.
        uncompressed_raw = uncompressed.replace("\n", os.linesep)
        with gzip.open(self.filename, "wt") as f:
            f.write(uncompressed)
        with open(self.filename, "rb") as f:
            file_data = gzip.decompress(f.read()).decode("ascii")
            self.assertEqual(file_data, uncompressed_raw)
        with gzip.open(self.filename, "rt") as f:
            self.assertEqual(f.read(), uncompressed)
        with gzip.open(self.filename, "at") as f:
            f.write(uncompressed)
        with open(self.filename, "rb") as f:
            file_data = gzip.decompress(f.read()).decode("ascii")
            self.assertEqual(file_data, uncompressed_raw * 2)

    def test_fileobj(self):
        # gzip.open() accepts a file object instead of a filename.
        uncompressed_bytes = data1 * 50
        uncompressed_str = uncompressed_bytes.decode("ascii")
        compressed = gzip.compress(uncompressed_bytes)
        with gzip.open(io.BytesIO(compressed), "r") as f:
            self.assertEqual(f.read(), uncompressed_bytes)
        with gzip.open(io.BytesIO(compressed), "rb") as f:
            self.assertEqual(f.read(), uncompressed_bytes)
        with gzip.open(io.BytesIO(compressed), "rt") as f:
            self.assertEqual(f.read(), uncompressed_str)

    def test_bad_params(self):
        # Test invalid parameter combinations.
        with self.assertRaises(TypeError):
            gzip.open(123.456)
        with self.assertRaises(ValueError):
            gzip.open(self.filename, "wbt")
        with self.assertRaises(ValueError):
            gzip.open(self.filename, "xbt")
        # Text-mode-only arguments are rejected in binary mode.
        with self.assertRaises(ValueError):
            gzip.open(self.filename, "rb", encoding="utf-8")
        with self.assertRaises(ValueError):
            gzip.open(self.filename, "rb", errors="ignore")
        with self.assertRaises(ValueError):
            gzip.open(self.filename, "rb", newline="\n")

    def test_encoding(self):
        # Test non-default encoding.
        uncompressed = data1.decode("ascii") * 50
        uncompressed_raw = uncompressed.replace("\n", os.linesep)
        with gzip.open(self.filename, "wt", encoding="utf-16") as f:
            f.write(uncompressed)
        with open(self.filename, "rb") as f:
            file_data = gzip.decompress(f.read()).decode("utf-16")
            self.assertEqual(file_data, uncompressed_raw)
        with gzip.open(self.filename, "rt", encoding="utf-16") as f:
            self.assertEqual(f.read(), uncompressed)

    def test_encoding_error_handler(self):
        # Test with non-default encoding error handler: the undecodable
        # \xff byte is dropped by errors="ignore".
        with gzip.open(self.filename, "wb") as f:
            f.write(b"foo\xffbar")
        with gzip.open(self.filename, "rt", encoding="ascii", errors="ignore") \
                as f:
            self.assertEqual(f.read(), "foobar")

    def test_newline(self):
        # Test with explicit newline (universal newline mode disabled):
        # reading with newline="\r" yields the whole payload as one line.
        uncompressed = data1.decode("ascii") * 50
        with gzip.open(self.filename, "wt", newline="\n") as f:
            f.write(uncompressed)
        with gzip.open(self.filename, "rt", newline="\r") as f:
            self.assertEqual(f.readlines(), [uncompressed])
def test_main(verbose=None):
    # Run both test classes under the regrtest helper.
    # `verbose` is accepted for regrtest-style entry-point compatibility
    # (presumably) but is not used here.
    support.run_unittest(TestGzip, TestOpen)

if __name__ == "__main__":
    test_main(verbose=True)
| |
__author__ = 'adeb'
import numpy as np
import theano
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv, conv3d2d
from theano.tensor.shared_randomstreams import RandomStreams
from spynet.utils.utilities import share, get_h5file_data
from spynet.models.max_pool_3d import max_pool_3d
class LayerBlock():
    """
    Abstract class that represents a function from an input space to an
    output space; the building block of a Layer object.
    """
    name = None

    def __init__(self):
        # Trainable parameters exposed to the optimizer (none by default).
        self.params = []

    def forward(self, x, batch_size, run_time):
        """Return the output of the layer block.

        Args:
            x (theano.tensor.TensorType): input of the layer block
            batch_size (int): size of the batch of data being processed by
                the network
            run_time (boolean): True when the function is used at runtime,
                False during training (useful for dropout).

        Returns:
            (theano.tensor.TensorType): output of the layer block
        """
        raise NotImplementedError

    def save_parameters(self, h5file, name):
        """Save all parameters of the layer block in a hdf5 file."""
        pass

    def load_parameters(self, h5file, name):
        """Load all parameters of the layer block from a hdf5 file."""
        pass

    def update_params(self):
        pass

    def __str__(self):
        return "[{}] \n".format(self.name)
class LayerBlockIdentity(LayerBlock):
    """Layer block implementing the identity function."""

    name = "Identity Layer block"

    def __init__(self):
        LayerBlock.__init__(self)

    def forward(self, x, batch_size, run_time):
        # Pass the input straight through, untouched.
        return x
class LayerBlockNoise(LayerBlock):
    """
    Base class for noise layer blocks that add a random signal on the fly.
    """

    def __init__(self):
        LayerBlock.__init__(self)
        # Seed the theano RNG from a fixed numpy seed for reproducibility.
        seed_rng = np.random.RandomState(123)
        self.theano_rng = RandomStreams(seed_rng.randint(2**30))
class LayerBlockNoiseDropoutBernoulli(LayerBlockNoise):
    """
    Noise layer block that applies Bernoulli (dropout-style) noise on the fly.
    """

    name = "Bernoulli Layer block"

    def __init__(self, bernoulli_p):
        LayerBlockNoise.__init__(self)
        # Probability used both for sampling the mask and for rescaling.
        self.bernoulli_p = bernoulli_p

    def forward(self, x, batch_size, run_time):
        if not run_time:
            # Training: multiply by a freshly-sampled binary mask.
            mask = self.theano_rng.binomial(size=x.shape, n=1,
                                            p=self.bernoulli_p,
                                            dtype=theano.config.floatX)
            return x * mask
        # Runtime: scale by the keep probability instead of sampling.
        return x * self.bernoulli_p
class LayerBlockGaussianNoise(LayerBlockNoise):
    """
    Noise layer block that adds Gaussian noise on the fly.
    """

    name = "Gaussian noise Layer block"

    def __init__(self):
        LayerBlockNoise.__init__(self)

    def forward(self, x, batch_size, run_time):
        # Zero-mean Gaussian noise with fixed standard deviation 0.2.
        noise = self.theano_rng.normal(size=x.shape, avg=0, std=0.2,
                                       dtype=theano.config.floatX)
        return x + noise
class LayerBlockMultiplication(LayerBlock):
    """
    Block that multiplies the input elementwise by a fixed vector of the
    same size.
    """

    name = "Multiplication Layer block"

    def __init__(self, vec):
        LayerBlock.__init__(self)
        # Kept as a theano shared variable.
        self.vec = share(vec)

    def forward(self, x, batch_size, run_time):
        return x * self.vec
class LayerBlockNormalization(LayerBlock):
    """
    Block that rescales the input so its elements sum to one.
    """

    name = "Normalization Layer block"

    def __init__(self):
        LayerBlock.__init__(self)

    def forward(self, x, batch_size, run_time):
        total = theano.tensor.sum(x)
        return x / total
class LayerBlockOfNeurons(LayerBlock):
    """
    Abstract class defining a group of neurons.
    Attributes:
        name (string): Name of the layer block (used for printing or writing)
        w (theano shared numpy array): Weights of the layer block
        b (theano shared numpy array): Biases of the layer block
        params (list): [w,b]
        neuron_type (NeuronType object): defines the type of the neurons of the layer block
    """
    def __init__(self, neuron_type):
        LayerBlock.__init__(self)
        # w and b are created later by init_parameters(), once the subclass
        # knows its shapes.
        self.w = None
        self.b = None
        self.neuron_type = neuron_type

    def init_parameters(self, w_shape, b_shape):
        # Bound of the symmetric uniform init is delegated to the subclass
        # (it depends on fan-in/fan-out).
        w_bound = self.compute_bound_parameters_virtual()
        # initialize weights with random weights
        self.w = share(np.asarray(
            np.random.uniform(low=-w_bound, high=w_bound, size=w_shape),
            dtype=theano.config.floatX), "w")
        # the bias is a 1D tensor -- one bias per output feature map
        b_values = 0.1 + np.zeros(b_shape, dtype=theano.config.floatX)  # Slightly positive for RELU units
        self.b = share(b_values, "b")
        self.update_params()

    def compute_bound_parameters_virtual(self):
        # Subclass hook: return the uniform-initialization bound for w.
        raise NotImplementedError

    def save_parameters(self, h5file, name):
        # One dataset per tensor, namespaced under `name`.
        h5file.create_dataset(name + "/w", data=self.w.get_value(), dtype='f')
        h5file.create_dataset(name + "/b", data=self.b.get_value(), dtype='f')

    def load_parameters(self, h5file, name):
        self.w.set_value(get_h5file_data(h5file, name + "/w"), borrow=True)
        self.b.set_value(get_h5file_data(h5file, name + "/b"), borrow=True)

    def update_params(self):
        self.params = [self.w, self.b]

    def __str__(self):
        msg = "[{}] with [{}] \n".format(self.name, self.neuron_type)
        msg += self.print_virtual()
        # Count the total number of scalar parameters across all tensors.
        n_parameters = 0
        for p in self.params:
            n_parameters += p.get_value().size
        msg += "Number of parameters: {} \n".format(n_parameters)
        return msg

    def print_virtual(self):
        return ""
class LayerBlockFullyConnected(LayerBlockOfNeurons):
    """
    Layer block in which each input is connected to all the block neurons.
    """

    name = "Fully connected layer block"

    def __init__(self, neuron_type, n_in, n_out):
        LayerBlockOfNeurons.__init__(self, neuron_type)
        self.n_in = n_in
        self.n_out = n_out
        self.init_parameters((self.n_in, self.n_out), (self.n_out,))

    def compute_bound_parameters_virtual(self):
        # Glorot-style bound for a dense layer.
        fan_sum = self.n_in + self.n_out
        return np.sqrt(6. / fan_sum)

    def set_w(self, new_w):
        # Replace the weight matrix, keeping the recorded sizes in sync.
        self.w.set_value(new_w, borrow=True)
        self.n_in, self.n_out = new_w.shape

    def forward(self, x, batch_size, run_time):
        pre_activation = theano.tensor.dot(x, self.w) + self.b
        return self.neuron_type.activation_function(pre_activation)

    def print_virtual(self):
        return "Number of inputs: {} \nNumber of outputs: {}\n".format(
            self.n_in, self.n_out)
class LayerBlockConv2DAbstract(LayerBlockOfNeurons):
    """
    Abstract class defining common components of LayerConv2D and LayerConvPool2D
    """
    def __init__(self, neuron_type, in_shape, flt_shape):
        """
        Args:
            in_shape (tuple or list of length 3):
                (num input feature maps, image height, image width)
            flt_shape (tuple or list of length 4):
                (number of filters, num input feature maps, filter height, filter width)
        """
        LayerBlockOfNeurons.__init__(self, neuron_type)
        self.in_shape = in_shape
        self.filter_shape = flt_shape
        # The filters' channel count must match the input feature maps.
        if in_shape[0] != flt_shape[1]:
            raise Exception("The number of feature maps is not consistent")
        self.init_parameters(flt_shape, (flt_shape[0],))

    def forward(self, x, batch_size, run_time):
        # Reassemble the flat input rows into a 4D image batch.
        img_batch_shape = (batch_size,) + self.in_shape
        x = x.reshape(img_batch_shape)
        # Convolve input feature maps with filters
        conv_out = conv.conv2d(input=x,
                               filters=self.w,
                               image_shape=img_batch_shape,
                               filter_shape=self.filter_shape)
        # Subclasses decide what happens after the convolution (bias +
        # activation, optionally pooling).
        return self.forward_virtual(conv_out)

    def forward_virtual(self, conv_out):
        raise NotImplementedError

    def print_virtual(self):
        return "Image shape: {}\nFilter shape: {}\n".format(self.in_shape, self.filter_shape)
class LayerBlockConv2D(LayerBlockConv2DAbstract):
    """
    2D convolutional layer block (convolution only, no pooling).
    """

    name = "2D convolutional layer block"

    def __init__(self, neuron_type, in_shape, flt_shape):
        LayerBlockConv2DAbstract.__init__(self, neuron_type, in_shape, flt_shape)

    def compute_bound_parameters_virtual(self):
        # fan_in: inputs feeding one unit; fan_out: units fed by one input.
        fan_in = np.prod(self.filter_shape[1:])
        fan_out = self.filter_shape[0] * np.prod(self.filter_shape[2:])
        return np.sqrt(6. / (fan_in + fan_out))

    def forward_virtual(self, conv_out):
        # Broadcast the per-feature-map bias, apply the nonlinearity, then
        # flatten each sample to a row vector.
        biased = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return self.neuron_type.activation_function(biased).flatten(2)
class LayerBlockConvPool2D(LayerBlockConv2DAbstract):
    """
    2D convolutional layer + pooling layer. The reason for not having a
    separate pooling layer is that the combination of the two blocks can be
    optimized.
    """

    name = "2D convolutional + pooling layer"

    def __init__(self, neuron_type, in_shape, flt_shape, poolsize=(2, 2)):
        # poolsize must be set before the base __init__ runs, because the
        # base triggers init_parameters(), which calls
        # compute_bound_parameters_virtual() below.
        self.poolsize = poolsize
        LayerBlockConv2DAbstract.__init__(self, neuron_type, in_shape, flt_shape)

    def compute_bound_parameters_virtual(self):
        fan_in = np.prod(self.filter_shape[1:])
        fan_out = (self.filter_shape[0] * np.prod(self.filter_shape[2:])
                   / np.prod(self.poolsize))
        return np.sqrt(6. / (fan_in + fan_out))

    def forward_virtual(self, conv_out):
        # Downsample each feature map individually, using maxpooling.
        pooled = downsample.max_pool_2d(input=conv_out,
                                        ds=self.poolsize,
                                        ignore_border=True)
        biased = pooled + self.b.dimshuffle('x', 0, 'x', 'x')
        return self.neuron_type.activation_function(biased).flatten(2)

    def print_virtual(self):
        base = LayerBlockConv2DAbstract.print_virtual(self)
        return base + "Pool size: {}\n".format(self.poolsize)
class LayerBlockConvPool3D(LayerBlockOfNeurons):
    """
    3D convolutional layer block + pooling layer block
    """
    name = "3D convolutional + pooling layer block"
    def __init__(self, neuron_type, in_channels, in_shape, flt_channels, flt_shape, poolsize):
        """
        Args:
            in_channels (int): number of input channels
            in_shape (tuple of length 3): shape of the input (in_width, in_height, in_depth)
            flt_channels (int): number of filters
            flt_shape (tuple of length 3): shape of the filters
                (flt_width, flt_height, flt_depth) -- this matches the
                unpacking below; a previous version of this docstring stated
                the reversed order.
            poolsize (tuple of length 3): window of the pooling operation
        """
        LayerBlockOfNeurons.__init__(self, neuron_type)
        in_width, in_height, in_depth = self.in_shape = in_shape
        flt_width, flt_height, flt_depth = self.flt_shape = flt_shape
        self.in_channels = in_channels
        self.flt_channels = flt_channels
        # Layout handed to conv3d2d as signals_shape: (depth, channels,
        # height, width), batch dimension prepended in forward().
        self.image_shape = (in_depth, in_channels, in_height, in_width)
        self.filter_shape = (flt_channels, flt_depth, in_channels, flt_height, flt_width)
        self.poolsize = poolsize
        self.init_parameters(self.filter_shape, (self.filter_shape[0],))

    def compute_bound_parameters_virtual(self):
        fan_in = np.prod(self.in_shape)
        fan_out = self.flt_channels * np.prod(self.flt_shape) / np.prod(self.poolsize)
        return np.sqrt(6. / (fan_in + fan_out))

    def forward(self, x, batch_size, run_time):
        img_batch_shape = (batch_size,) + self.image_shape
        x = x.reshape(img_batch_shape)
        # Convolve input feature maps with filters
        conv_out = conv3d2d.conv3d(signals=x,
                                   filters=self.w,
                                   signals_shape=img_batch_shape,
                                   filters_shape=self.filter_shape,
                                   border_mode='valid')
        perm = [0, 2, 1, 3, 4]  # Permutation is needed due to the pooling function prototype
        pooled_out = max_pool_3d(conv_out.dimshuffle(perm), self.poolsize, ignore_border=True)
        # Permute back, add the broadcast bias, activate, flatten per sample.
        return self.neuron_type.activation_function(pooled_out.dimshuffle(perm)
                                                    + self.b.dimshuffle('x', 'x', 0, 'x', 'x')).flatten(2)

    def print_virtual(self):
        return "Image shape: {} \n Filter shape: {} \n Pool size: {} \n".format(
            self.image_shape, self.filter_shape, self.poolsize)
| |
#!/usr/bin/env python
"Makes working with XML feel like you are working with JSON"
try:
from defusedexpat import pyexpat as expat
except ImportError:
from xml.parsers import expat
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
try: # pragma no cover
from cStringIO import StringIO
except ImportError: # pragma no cover
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try: # pragma no cover
from collections import OrderedDict
except ImportError: # pragma no cover
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
try: # pragma no cover
_basestring = basestring
except NameError: # pragma no cover
_basestring = str
try: # pragma no cover
_unicode = unicode
except NameError: # pragma no cover
_unicode = str
__author__ = 'Martin Blech'
__version__ = '0.11.0'
__license__ = 'MIT'
class ParsingInterrupted(Exception):
    """Raised to abort parsing when an item callback returns a falsy value."""
class _DictSAXHandler(object):
    """SAX-style handler that incrementally builds a nested dictionary from
    expat events. One instance is used per parse() call."""

    def __init__(self,
                 item_depth=0,
                 item_callback=lambda *args: True,
                 xml_attribs=True,
                 attr_prefix='@',
                 cdata_key='#text',
                 force_cdata=False,
                 cdata_separator='',
                 postprocessor=None,
                 dict_constructor=OrderedDict,
                 strip_whitespace=True,
                 namespace_separator=':',
                 namespaces=None,
                 force_list=None):
        # path: (name, attrs) pairs from the root down to the open element.
        self.path = []
        # stack: saved (item, data) of ancestors while a child is open.
        self.stack = []
        # data: text fragments collected for the current element.
        self.data = []
        # item: dict under construction for the current element (or None).
        self.item = None
        self.item_depth = item_depth
        self.xml_attribs = xml_attribs
        self.item_callback = item_callback
        self.attr_prefix = attr_prefix
        self.cdata_key = cdata_key
        self.force_cdata = force_cdata
        self.cdata_separator = cdata_separator
        self.postprocessor = postprocessor
        self.dict_constructor = dict_constructor
        self.strip_whitespace = strip_whitespace
        self.namespace_separator = namespace_separator
        self.namespaces = namespaces
        # Namespace declarations seen since the last startElement; attached
        # to that element under the 'xmlns' key, then reset.
        self.namespace_declarations = OrderedDict()
        self.force_list = force_list

    def _build_name(self, full_name):
        # Collapse an expat "uri<sep>local" name through the user-supplied
        # namespace mapping (identity when no mapping is configured).
        if not self.namespaces:
            return full_name
        i = full_name.rfind(self.namespace_separator)
        if i == -1:
            return full_name
        namespace, name = full_name[:i], full_name[i+1:]
        short_namespace = self.namespaces.get(namespace, namespace)
        if not short_namespace:
            return name
        else:
            return self.namespace_separator.join((short_namespace, name))

    def _attrs_to_dict(self, attrs):
        # expat with ordered_attributes delivers a flat [k1, v1, k2, v2, ...]
        # list; fold it into pairs. Some parsers already provide a dict.
        if isinstance(attrs, dict):
            return attrs
        return self.dict_constructor(zip(attrs[0::2], attrs[1::2]))

    def startNamespaceDecl(self, prefix, uri):
        # Default namespace has no prefix; record it under ''.
        self.namespace_declarations[prefix or ''] = uri

    def startElement(self, full_name, attrs):
        name = self._build_name(full_name)
        attrs = self._attrs_to_dict(attrs)
        if attrs and self.namespace_declarations:
            attrs['xmlns'] = self.namespace_declarations
            self.namespace_declarations = OrderedDict()
        self.path.append((name, attrs or None))
        if len(self.path) > self.item_depth:
            # Deeper than the streaming depth: build a dict node. Save the
            # parent's in-progress state first.
            self.stack.append((self.item, self.data))
            if self.xml_attribs:
                attr_entries = []
                for key, value in attrs.items():
                    key = self.attr_prefix+self._build_name(key)
                    if self.postprocessor:
                        # Postprocessor may rename, transform, or (by
                        # returning a falsy value) drop the attribute.
                        entry = self.postprocessor(self.path, key, value)
                    else:
                        entry = (key, value)
                    if entry:
                        attr_entries.append(entry)
                attrs = self.dict_constructor(attr_entries)
            else:
                attrs = None
            self.item = attrs or None
            self.data = []

    def endElement(self, full_name):
        name = self._build_name(full_name)
        if len(self.path) == self.item_depth:
            # Streaming mode: hand the completed item to the callback.
            item = self.item
            if item is None:
                item = (None if not self.data
                        else self.cdata_separator.join(self.data))

            should_continue = self.item_callback(self.path, item)
            if not should_continue:
                raise ParsingInterrupted()
        if len(self.stack):
            # Fold the finished child into its parent's item.
            data = (None if not self.data
                    else self.cdata_separator.join(self.data))
            item = self.item
            self.item, self.data = self.stack.pop()
            if self.strip_whitespace and data:
                data = data.strip() or None
            if data and self.force_cdata and item is None:
                item = self.dict_constructor()
            if item is not None:
                if data:
                    self.push_data(item, self.cdata_key, data)
                self.item = self.push_data(self.item, name, item)
            else:
                self.item = self.push_data(self.item, name, data)
        else:
            # Back at the root in streaming mode: discard accumulated state.
            self.item = None
            self.data = []
        self.path.pop()

    def characters(self, data):
        if not self.data:
            self.data = [data]
        else:
            self.data.append(data)

    def push_data(self, item, key, data):
        # Insert key -> data into item, converting repeated keys to lists.
        if self.postprocessor is not None:
            result = self.postprocessor(self.path, key, data)
            if result is None:
                # Postprocessor vetoed this entry.
                return item
            key, data = result
        if item is None:
            item = self.dict_constructor()
        try:
            value = item[key]
            if isinstance(value, list):
                value.append(data)
            else:
                # Second occurrence of the key: promote the value to a list.
                item[key] = [value, data]
        except KeyError:
            if self._should_force_list(key, data):
                item[key] = [data]
            else:
                item[key] = data
        return item

    def _should_force_list(self, key, value):
        # force_list may be a collection of keys or a callable predicate.
        if not self.force_list:
            return False
        try:
            return key in self.force_list
        except TypeError:
            return self.force_list(self.path[:-1], key, value)
def parse(xml_input, encoding=None, expat=expat, process_namespaces=False,
          namespace_separator=':', disable_entities=True, **kwargs):
    """Parse the given XML input and convert it into a dictionary.

    `xml_input` can either be a `string` or a file-like object.

    If `xml_attribs` is `True`, element attributes are put in the dictionary
    among regular child elements, using `@` as a prefix to avoid collisions. If
    set to `False`, they are just ignored.

    Simple example::

        >>> import xmltodict
        >>> doc = xmltodict.parse(\"\"\"
        ... <a prop="x">
        ...   <b>1</b>
        ...   <b>2</b>
        ... </a>
        ... \"\"\")
        >>> doc['a']['@prop']
        u'x'
        >>> doc['a']['b']
        [u'1', u'2']

    If `item_depth` is `0`, the function returns a dictionary for the root
    element (default behavior). Otherwise, it calls `item_callback` every time
    an item at the specified depth is found and returns `None` in the end
    (streaming mode).

    The callback function receives two parameters: the `path` from the document
    root to the item (name-attribs pairs), and the `item` (dict). If the
    callback's return value is false-ish, parsing will be stopped with the
    :class:`ParsingInterrupted` exception.

    Streaming example::

        >>> def handle(path, item):
        ...     print('path:%s item:%s' % (path, item))
        ...     return True
        ...
        >>> xmltodict.parse(\"\"\"
        ... <a prop="x">
        ...   <b>1</b>
        ...   <b>2</b>
        ... </a>\"\"\", item_depth=2, item_callback=handle)
        path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:1
        path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:2

    The optional argument `postprocessor` is a function that takes `path`,
    `key` and `value` as positional arguments and returns a new `(key, value)`
    pair where both `key` and `value` may have changed. Usage example::

        >>> def postprocessor(path, key, value):
        ...     try:
        ...         return key + ':int', int(value)
        ...     except (ValueError, TypeError):
        ...         return key, value
        >>> xmltodict.parse('<a><b>1</b><b>2</b><b>x</b></a>',
        ...                 postprocessor=postprocessor)
        OrderedDict([(u'a', OrderedDict([(u'b:int', [1, 2]), (u'b', u'x')]))])

    You can pass an alternate version of `expat` (such as `defusedexpat`) by
    using the `expat` parameter. E.g:

        >>> import defusedexpat
        >>> xmltodict.parse('<a>hello</a>', expat=defusedexpat.pyexpat)
        OrderedDict([(u'a', u'hello')])

    You can use the force_list argument to force lists to be created even
    when there is only a single child of a given level of hierarchy. The
    force_list argument is a tuple of keys. If the key for a given level
    of hierarchy is in the force_list argument, that level of hierarchy
    will have a list as a child (even if there is only one sub-element).
    The index_keys operation takes precedence over this. This is applied
    after any user-supplied postprocessor has already run.

    For example, given this input:
    <servers>
      <server>
        <name>host1</name>
        <os>Linux</os>
        <interfaces>
          <interface>
            <name>em0</name>
            <ip_address>10.0.0.1</ip_address>
          </interface>
        </interfaces>
      </server>
    </servers>

    If called with force_list=('interface',), it will produce
    this dictionary:
    {'servers':
      {'server':
        {'name': 'host1',
         'os': 'Linux'},
         'interfaces':
          {'interface':
            [ {'name': 'em0', 'ip_address': '10.0.0.1' } ] } } }

    `force_list` can also be a callable that receives `path`, `key` and
    `value`. This is helpful in cases where the logic that decides whether
    a list should be forced is more complex.
    """
    handler = _DictSAXHandler(namespace_separator=namespace_separator,
                              **kwargs)
    # expat wants bytes; encode unicode input up front.
    if isinstance(xml_input, _unicode):
        if not encoding:
            encoding = 'utf-8'
        xml_input = xml_input.encode(encoding)
    if not process_namespaces:
        namespace_separator = None
    parser = expat.ParserCreate(
        encoding,
        namespace_separator
    )
    try:
        parser.ordered_attributes = True
    except AttributeError:
        # Jython's expat does not support ordered_attributes
        pass
    parser.StartNamespaceDeclHandler = handler.startNamespaceDecl
    parser.StartElementHandler = handler.startElement
    parser.EndElementHandler = handler.endElement
    parser.CharacterDataHandler = handler.characters
    parser.buffer_text = True
    if disable_entities:
        try:
            # Attempt to disable DTD in Jython's expat parser (Xerces-J).
            feature = "http://apache.org/xml/features/disallow-doctype-decl"
            parser._reader.setFeature(feature, True)
        except AttributeError:
            # For CPython / expat parser.
            # Anything not handled ends up here and entities aren't expanded.
            parser.DefaultHandler = lambda x: None
            # Expects an integer return; zero means failure -> expat.ExpatError.
            parser.ExternalEntityRefHandler = lambda *x: 1
    if hasattr(xml_input, 'read'):
        parser.ParseFile(xml_input)
    else:
        parser.Parse(xml_input, True)
    return handler.item
def _process_namespace(name, namespaces, ns_sep=':', attr_prefix='@'):
if not namespaces:
return name
try:
ns, name = name.rsplit(ns_sep, 1)
except ValueError:
pass
else:
ns_res = namespaces.get(ns.strip(attr_prefix))
name = '{0}{1}{2}{3}'.format(
attr_prefix if ns.startswith(attr_prefix) else '',
ns_res, ns_sep, name) if ns_res else name
return name
def _emit(key, value, content_handler,
          attr_prefix='@',
          cdata_key='#text',
          depth=0,
          preprocessor=None,
          pretty=False,
          newl='\n',
          indent='\t',
          namespace_separator=':',
          namespaces=None,
          full_document=True):
    # Recursively emit SAX events for one key/value pair (reverse of parse()).
    key = _process_namespace(key, namespaces, namespace_separator, attr_prefix)
    if preprocessor is not None:
        result = preprocessor(key, value)
        if result is None:
            # Preprocessor vetoed this node entirely.
            return
        key, value = result
    # Normalize scalars/dicts to a one-element list so repeated elements and
    # single elements share the same code path below.
    if (not hasattr(value, '__iter__')
            or isinstance(value, _basestring)
            or isinstance(value, dict)):
        value = [value]
    for index, v in enumerate(value):
        if full_document and depth == 0 and index > 0:
            raise ValueError('document with multiple roots')
        if v is None:
            v = OrderedDict()
        elif isinstance(v, bool):
            # Booleans serialize as 'true'/'false'.
            if v:
                v = _unicode('true')
            else:
                v = _unicode('false')
        elif not isinstance(v, dict):
            v = _unicode(v)
        if isinstance(v, _basestring):
            # Bare text becomes {cdata_key: text} so it is handled uniformly.
            v = OrderedDict(((cdata_key, v),))
        cdata = None
        attrs = OrderedDict()
        children = []
        # Partition the node into character data, attributes and children.
        for ik, iv in v.items():
            if ik == cdata_key:
                cdata = iv
                continue
            if ik.startswith(attr_prefix):
                ik = _process_namespace(ik, namespaces, namespace_separator,
                                        attr_prefix)
                if ik == '@xmlns' and isinstance(iv, dict):
                    # NOTE(review): this inner loop rebinds `v` while
                    # v.items() is being iterated; the existing iterator is
                    # unaffected, but it is easy to trip over when editing.
                    for k, v in iv.items():
                        attr = 'xmlns{0}'.format(':{0}'.format(k) if k else '')
                        attrs[attr] = _unicode(v)
                    continue
                if not isinstance(iv, _unicode):
                    iv = _unicode(iv)
                attrs[ik[len(attr_prefix):]] = iv
                continue
            children.append((ik, iv))
        if pretty:
            content_handler.ignorableWhitespace(depth * indent)
        content_handler.startElement(key, AttributesImpl(attrs))
        if pretty and children:
            content_handler.ignorableWhitespace(newl)
        for child_key, child_value in children:
            _emit(child_key, child_value, content_handler,
                  attr_prefix, cdata_key, depth+1, preprocessor,
                  pretty, newl, indent, namespaces=namespaces,
                  namespace_separator=namespace_separator)
        if cdata is not None:
            content_handler.characters(cdata)
        if pretty and children:
            content_handler.ignorableWhitespace(depth * indent)
        content_handler.endElement(key)
        if pretty and depth:
            content_handler.ignorableWhitespace(newl)
def unparse(input_dict, output=None, encoding='utf-8', full_document=True,
            short_empty_elements=False,
            **kwargs):
    """Emit an XML document for the given `input_dict` (reverse of `parse`).

    The resulting XML document is returned as a string, but if `output` (a
    file-like object) is specified, it is written there instead.

    Dictionary keys prefixed with `attr_prefix` (default=`'@'`) are interpreted
    as XML node attributes, whereas keys equal to `cdata_key`
    (default=`'#text'`) are treated as character data.

    The `pretty` parameter (default=`False`) enables pretty-printing. In this
    mode, lines are terminated with `'\n'` and indented with `'\t'`, but this
    can be customized with the `newl` and `indent` parameters.
    """
    if full_document and len(input_dict) != 1:
        raise ValueError('Document must have exactly one root.')
    # When no output is given, collect into a string buffer and return it.
    sink = output if output is not None else StringIO()
    if short_empty_elements:
        content_handler = XMLGenerator(sink, encoding, True)
    else:
        content_handler = XMLGenerator(sink, encoding)
    if full_document:
        content_handler.startDocument()
    for key, value in input_dict.items():
        _emit(key, value, content_handler, full_document=full_document,
              **kwargs)
    if full_document:
        content_handler.endDocument()
    if output is None:
        result = sink.getvalue()
        try:  # pragma no cover
            # Python 2 StringIO may hold bytes; return text.
            result = result.decode(encoding)
        except AttributeError:  # pragma no cover
            pass
        return result
if __name__ == '__main__':  # pragma: no cover
    # CLI usage: read XML on stdin, stream (path, item) pairs to stdout in
    # `marshal` format; argv[1] is the streaming item_depth.
    import sys
    import marshal

    try:
        # Python 3: marshal needs the byte-oriented buffer objects.
        stdin = sys.stdin.buffer
        stdout = sys.stdout.buffer
    except AttributeError:
        # Python 2: std streams are already byte-oriented.
        stdin = sys.stdin
        stdout = sys.stdout

    (item_depth,) = sys.argv[1:]
    item_depth = int(item_depth)

    def handle_item(path, item):
        marshal.dump((path, item), stdout)
        # Returning True keeps the streaming parse going.
        return True

    try:
        root = parse(stdin,
                     item_depth=item_depth,
                     item_callback=handle_item,
                     dict_constructor=dict)
        if item_depth == 0:
            # Non-streaming mode: the whole document arrived in `root`.
            handle_item([], root)
    except KeyboardInterrupt:
        pass
| |
from _pydev_bundle import pydev_log
from _pydevd_bundle.pydevd_utils import hasattr_checked, DAPGrouper
try:
import StringIO
except:
import io as StringIO
import traceback
from os.path import basename
from functools import partial
from _pydevd_bundle.pydevd_constants import dict_iter_items, dict_keys, xrange, IS_PY36_OR_GREATER, \
MethodWrapperType, RETURN_VALUES_DICT, DebugInfoHolder, IS_PYPY, GENERATED_LEN_ATTR_NAME
from _pydevd_bundle.pydevd_safe_repr import SafeRepr
# Note: 300 is already a lot to see in the outline (after that the user should really use the shell to get things)
# and this also means we'll pass less information to the client side (which makes debugging faster).
MAX_ITEMS_TO_HANDLE = 300
TOO_LARGE_MSG = 'Too large to show contents. Max items to show: ' + str(MAX_ITEMS_TO_HANDLE)
TOO_LARGE_ATTR = 'Unable to handle:'
#=======================================================================================================================
# UnableToResolveVariableException
#=======================================================================================================================
class UnableToResolveVariableException(Exception):
    """Raised when a resolver cannot resolve a variable's contents."""
try:
from collections import OrderedDict
except:
OrderedDict = dict
try:
import java.lang # @UnresolvedImport
except:
pass
#=======================================================================================================================
# See: pydevd_extension_api module for resolver interface
#=======================================================================================================================
def sorted_attributes_key(attr_name):
    """Sort key that groups attribute names for display: regular names
    first, then _single-underscore, then __double-underscore, and
    __dunder__ names last (alphabetical within each group)."""
    if attr_name.startswith('__'):
        rank = 3 if attr_name.endswith('__') else 2
    elif attr_name.startswith('_'):
        rank = 1
    else:
        rank = 0
    return (rank, attr_name)
#=======================================================================================================================
# DefaultResolver
#=======================================================================================================================
class DefaultResolver:
    '''
    DefaultResolver is the class that'll actually resolve how to show some variable.

    It collects an object's attributes (via dir()/__dict__ on CPython, or Java
    reflection on Jython) and presents them as (name, value, evaluate-name) entries.
    '''

    def resolve(self, var, attribute):
        # The displayed key is the plain attribute name, so a simple getattr works.
        return getattr(var, attribute)

    def get_contents_debug_adapter_protocol(self, obj, fmt=None):
        # MethodWrapperType is only available on CPython; when falsy we assume Jython.
        if MethodWrapperType:
            dct, used___dict__ = self._get_py_dictionary(obj)
        else:
            # NOTE(review): on this (Jython) path used___dict__ is never assigned,
            # which would raise NameError below -- presumably this branch is not
            # taken in practice; confirm before relying on it.
            dct = self._get_jy_dictionary(obj)[0]
        lst = sorted(dict_iter_items(dct), key=lambda tup: sorted_attributes_key(tup[0]))
        # When attributes came from __dict__ the evaluate-name must use
        # subscription instead of plain attribute access.
        if used___dict__:
            eval_name = '.__dict__[%s]'
        else:
            eval_name = '.%s'
        ret = []
        for attr_name, attr_value in lst:
            entry = (attr_name, attr_value, eval_name % attr_name)
            ret.append(entry)
        return ret

    def get_dictionary(self, var, names=None, used___dict__=False):
        # Returns {attr_name: value}; dispatches on CPython vs Jython.
        if MethodWrapperType:
            return self._get_py_dictionary(var, names, used___dict__=used___dict__)[0]
        else:
            return self._get_jy_dictionary(var)[0]

    def _get_jy_dictionary(self, obj):
        # Jython-only: gather methods/fields of a Java class through reflection.
        ret = {}
        found = java.util.HashMap()
        original = obj
        if hasattr_checked(obj, '__class__') and obj.__class__ == java.lang.Class:
            # get info about superclasses
            classes = []
            classes.append(obj)
            c = obj.getSuperclass()
            while c != None:
                classes.append(c)
                c = c.getSuperclass()
            # get info about interfaces
            interfs = []
            for obj in classes:
                interfs.extend(obj.getInterfaces())
            classes.extend(interfs)
            # now is the time when we actually get info on the declared methods and fields
            for obj in classes:
                declaredMethods = obj.getDeclaredMethods()
                declaredFields = obj.getDeclaredFields()
                for i in xrange(len(declaredMethods)):
                    name = declaredMethods[i].getName()
                    ret[name] = declaredMethods[i].toString()
                    found.put(name, 1)
                for i in xrange(len(declaredFields)):
                    name = declaredFields[i].getName()
                    found.put(name, 1)
                    # if declaredFields[i].isAccessible():
                    declaredFields[i].setAccessible(True)
                    # ret[name] = declaredFields[i].get( declaredFields[i] )
                    try:
                        ret[name] = declaredFields[i].get(original)
                    except:
                        ret[name] = declaredFields[i].toString()
        # this simple dir does not always get all the info, that's why we have the part before
        # (e.g.: if we do a dir on String, some methods that are from other interfaces such as
        # charAt don't appear)
        try:
            d = dir(original)
            for name in d:
                if found.get(name) != 1:
                    ret[name] = getattr(original, name)
        except:
            # sometimes we're unable to do a dir
            pass
        return ret

    def get_names(self, var):
        """Return (names, used___dict__); falls back to __dict__ keys when dir() yields nothing."""
        used___dict__ = False
        try:
            names = dir(var)
        except Exception:
            names = []
        if not names:
            if hasattr_checked(var, '__dict__'):
                names = dict_keys(var.__dict__)
                used___dict__ = True
        return names, used___dict__

    def _get_py_dictionary(self, var, names=None, used___dict__=False):
        '''
        :return tuple(names, used___dict__), where used___dict__ means we have to access
        using obj.__dict__[name] instead of getattr(obj, name)
        '''
        # On PyPy we never show functions. This is because of a corner case where PyPy becomes
        # absurdly slow -- it takes almost half a second to introspect a single numpy function (so,
        # the related test, "test_case_16_resolve_numpy_array", times out... this probably isn't
        # specific to numpy, but to any library where the CPython bridge is used, but as we
        # can't be sure in the debugger, we play it safe and don't show it at all).
        filter_function = IS_PYPY
        if not names:
            names, used___dict__ = self.get_names(var)
        d = {}
        # Be aware that the order in which the filters are applied attempts to
        # optimize the operation by removing as many items as possible in the
        # first filters, leaving fewer items for later filters
        for name in names:
            try:
                # Non-string keys (possible when iterating __dict__) are shown via repr().
                name_as_str = name
                if name_as_str.__class__ != str:
                    name_as_str = '%r' % (name_as_str,)
                if not used___dict__:
                    attr = getattr(var, name)
                else:
                    attr = var.__dict__[name]
                # filter functions?
                if filter_function:
                    if inspect.isroutine(attr) or isinstance(attr, MethodWrapperType):
                        continue
            except:
                # if some error occurs getting it, let's put it to the user.
                strIO = StringIO.StringIO()
                traceback.print_exc(file=strIO)
                attr = strIO.getvalue()
            d[name_as_str] = attr
        return d, used___dict__
class DAPGrouperResolver:
    """Resolver for DAP grouper scopes, which already know how to list their own contents."""

    def get_contents_debug_adapter_protocol(self, obj, fmt=None):
        # The grouper object carries its own entries; just delegate.
        return obj.get_contents_debug_adapter_protocol()
# Types whose repr() evaluates back to an equal object, so their repr can be
# safely embedded in an evaluate-name expression (e.g. dct[<repr>]).
_basic_immutable_types = (int, float, complex, str, bytes, type(None), bool, frozenset)
try:
    _basic_immutable_types += (long, unicode)  # Py2 types
except NameError:
    pass
def _does_obj_repr_evaluate_to_obj(obj):
'''
If obj is an object where evaluating its representation leads to
the same object, return True, otherwise, return False.
'''
try:
if isinstance(obj, tuple):
for o in obj:
if not _does_obj_repr_evaluate_to_obj(o):
return False
return True
else:
return isinstance(obj, _basic_immutable_types)
except:
return False
#=======================================================================================================================
# DictResolver
#=======================================================================================================================
class DictResolver:

    # Dicts preserve insertion order on 3.6+, so only sort keys on older versions.
    sort_keys = not IS_PY36_OR_GREATER

    def resolve(self, dict, key):
        """Return the value for the key shown to the user (None for synthetic entries)."""
        if key in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR):
            return None
        if '(' not in key:
            # we have to treat that because the dict resolver is also used to directly resolve the global and local
            # scopes (which already have the items directly)
            try:
                return dict[key]
            except:
                return getattr(dict, key)
        # ok, we have to iterate over the items to find the one that matches the id, because that's the only way
        # to actually find the reference from the string we have before.
        expected_id = int(key.split('(')[-1][:-1])
        for key, val in dict_iter_items(dict):
            if id(key) == expected_id:
                return val
        raise UnableToResolveVariableException()

    def key_to_str(self, key, fmt=None):
        # Honor the client's hex-formatting preference when rendering the key.
        if fmt is not None:
            if fmt.get('hex', False):
                safe_repr = SafeRepr()
                safe_repr.convert_to_hex = True
                return safe_repr(key)
        return '%r' % (key,)

    def init_dict(self):
        # Subclasses override this to choose the mapping type (e.g. OrderedDict).
        return {}

    def get_contents_debug_adapter_protocol(self, dct, fmt=None):
        '''
        This method is to be used in the case where the variables are all saved by its id (and as
        such don't need to have the `resolve` method called later on, so, keys don't need to
        embed the reference in the key).

        Note that the return should be ordered.

        :return list(tuple(name:str, value:object, evaluateName:str))
        '''
        ret = []
        i = 0
        found_representations = set()
        for key, val in dict_iter_items(dct):
            i += 1
            key_as_str = self.key_to_str(key, fmt)
            if key_as_str not in found_representations:
                found_representations.add(key_as_str)
            else:
                # If the key would be a duplicate, add the key id (otherwise
                # VSCode won't show all keys correctly).
                # See: https://github.com/microsoft/debugpy/issues/148
                key_as_str = '%s (id: %s)' % (key_as_str, id(key))
                found_representations.add(key_as_str)
            # Only keys whose repr() round-trips can be used in an evaluate-name.
            if _does_obj_repr_evaluate_to_obj(key):
                s = self.key_to_str(key)  # do not format the key
                eval_key_str = '[%s]' % (s,)
            else:
                eval_key_str = None
            ret.append((key_as_str, val, eval_key_str))
            if i > MAX_ITEMS_TO_HANDLE:
                ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG, None))
                break
        # in case the class extends built-in type and has some additional fields
        from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(dct, fmt)
        if from_default_resolver:
            ret = from_default_resolver + ret
        if self.sort_keys:
            ret = sorted(ret, key=lambda tup: sorted_attributes_key(tup[0]))
        # `partial` comes from functools (imported at module level in the full file).
        ret.append((GENERATED_LEN_ATTR_NAME, len(dct), partial(_apply_evaluate_name, evaluate_name='len(%s)')))
        return ret

    def get_dictionary(self, dict):
        ret = self.init_dict()
        i = 0
        for key, val in dict_iter_items(dict):
            i += 1
            # we need to add the id because otherwise we cannot find the real object to get its contents later on.
            key = '%s (%s)' % (self.key_to_str(key), id(key))
            ret[key] = val
            if i > MAX_ITEMS_TO_HANDLE:
                ret[TOO_LARGE_ATTR] = TOO_LARGE_MSG
                break
        # in case if the class extends built-in type and has some additional fields
        additional_fields = defaultResolver.get_dictionary(dict)
        ret.update(additional_fields)
        ret[GENERATED_LEN_ATTR_NAME] = len(dict)
        return ret
def _apply_evaluate_name(parent_name, evaluate_name):
return evaluate_name % (parent_name,)
#=======================================================================================================================
# TupleResolver
#=======================================================================================================================
class TupleResolver:  # to enumerate tuples and lists

    def resolve(self, var, attribute):
        '''
        @param var: that's the original attribute
        @param attribute: that's the key passed in the dict (as a string)
        '''
        if attribute in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR):
            return None
        try:
            return var[int(attribute)]
        except:
            return getattr(var, attribute)

    def get_contents_debug_adapter_protocol(self, lst, fmt=None):
        '''
        This method is to be used in the case where the variables are all saved by its id (and as
        such don't need to have the `resolve` method called later on, so, keys don't need to
        embed the reference in the key).

        Note that the return should be ordered.

        :return list(tuple(name:str, value:object, evaluateName:str))
        '''
        l = len(lst)
        ret = []
        # Zero-pad indices to the width of the largest index so they display in order.
        format_str = '%0' + str(int(len(str(l - 1)))) + 'd'
        if fmt is not None and fmt.get('hex', False):
            # NOTE(review): lstrip('0x') strips a *set* of characters; it behaves as
            # intended here only because the first hex digit of a positive number
            # is never '0' (degenerate for l == 0).
            format_str = '0x%0' + str(int(len(hex(l).lstrip('0x')))) + 'x'
        for i, item in enumerate(lst):
            ret.append((format_str % i, item, '[%s]' % i))
            if i > MAX_ITEMS_TO_HANDLE:
                ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG, None))
                break
        # Needed in case the class extends the built-in type and has some additional fields.
        from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(lst, fmt=fmt)
        if from_default_resolver:
            ret = from_default_resolver + ret
        ret.append((GENERATED_LEN_ATTR_NAME, len(lst), partial(_apply_evaluate_name, evaluate_name='len(%s)')))
        return ret

    def get_dictionary(self, var, fmt={}):
        # NOTE: the mutable default is safe here because fmt is only read, never mutated.
        l = len(var)
        d = {}
        format_str = '%0' + str(int(len(str(l - 1)))) + 'd'
        if fmt is not None and fmt.get('hex', False):
            format_str = '0x%0' + str(int(len(hex(l).lstrip('0x')))) + 'x'
        for i, item in enumerate(var):
            d[format_str % i] = item
            if i > MAX_ITEMS_TO_HANDLE:
                d[TOO_LARGE_ATTR] = TOO_LARGE_MSG
                break
        # in case if the class extends built-in type and has some additional fields
        additional_fields = defaultResolver.get_dictionary(var)
        d.update(additional_fields)
        d[GENERATED_LEN_ATTR_NAME] = len(var)
        return d
#=======================================================================================================================
# SetResolver
#=======================================================================================================================
class SetResolver:
    '''
    Resolves a set as dict id(object)->object
    '''

    def get_contents_debug_adapter_protocol(self, obj, fmt=None):
        # Sets are unordered, so each entry is keyed by the id() of the element.
        ret = []
        for i, item in enumerate(obj):
            ret.append((str(id(item)), item, None))
            if i > MAX_ITEMS_TO_HANDLE:
                ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG, None))
                break
        # Needed in case the class extends the built-in type and has some additional fields.
        from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(obj, fmt=fmt)
        if from_default_resolver:
            ret = from_default_resolver + ret
        ret.append((GENERATED_LEN_ATTR_NAME, len(obj), partial(_apply_evaluate_name, evaluate_name='len(%s)')))
        return ret

    def resolve(self, var, attribute):
        """Map a displayed key (the element's id as a string) back to the element."""
        if attribute in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR):
            return None
        try:
            attribute = int(attribute)
        except:
            # Not an id: treat it as a regular attribute of the set object.
            return getattr(var, attribute)
        for v in var:
            if id(v) == attribute:
                return v
        raise UnableToResolveVariableException('Unable to resolve %s in %s' % (attribute, var))

    def get_dictionary(self, var):
        d = {}
        for i, item in enumerate(var):
            d[str(id(item))] = item
            if i > MAX_ITEMS_TO_HANDLE:
                d[TOO_LARGE_ATTR] = TOO_LARGE_MSG
                break
        # in case if the class extends built-in type and has some additional fields
        additional_fields = defaultResolver.get_dictionary(var)
        d.update(additional_fields)
        d[GENERATED_LEN_ATTR_NAME] = len(var)
        return d

    def change_var_from_name(self, container, name, new_value):
        # The name given in this case must be the id(item), so, we can actually
        # iterate in the set and see which item matches the given id.
        try:
            # Check that the new value can actually be added to a set (i.e.: it's hashable/comparable).
            set().add(new_value)
        except:
            return None
        for item in container:
            if str(id(item)) == name:
                container.remove(item)
                container.add(new_value)
                # Return the new key so the client can re-select the element.
                return str(id(new_value))
        return None
#=======================================================================================================================
# InstanceResolver
#=======================================================================================================================
class InstanceResolver:
    """Jython-only resolver: inspects a Java object's fields through reflection."""

    def resolve(self, var, attribute):
        # Look the field up on the Java class and force accessibility.
        field = var.__class__.getDeclaredField(attribute)
        field.setAccessible(True)
        return field.get(var)

    def get_dictionary(self, obj):
        ret = {}
        declaredFields = obj.__class__.getDeclaredFields()
        for i in xrange(len(declaredFields)):
            name = declaredFields[i].getName()
            try:
                declaredFields[i].setAccessible(True)
                ret[name] = declaredFields[i].get(obj)
            except:
                # Some fields cannot be made accessible; log and skip them.
                pydev_log.exception()
        return ret
#=======================================================================================================================
# JyArrayResolver
#=======================================================================================================================
class JyArrayResolver:
    '''
    This resolves a regular Object[] array from java
    '''

    def resolve(self, var, attribute):
        # Keys are the integer indexes (plus the synthetic length entry).
        if attribute == GENERATED_LEN_ATTR_NAME:
            return None
        return var[int(attribute)]

    def get_dictionary(self, obj):
        ret = {}
        for i in xrange(len(obj)):
            ret[i] = obj[i]
        ret[GENERATED_LEN_ATTR_NAME] = len(obj)
        return ret
#=======================================================================================================================
# MultiValueDictResolver
#=======================================================================================================================
class MultiValueDictResolver(DictResolver):
    """Resolver for Django's MultiValueDict: values are fetched with getlist()."""

    def resolve(self, dict, key):
        if key in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR):
            return None
        # ok, we have to iterate over the items to find the one that matches the id, because that's the only way
        # to actually find the reference from the string we have before.
        expected_id = int(key.split('(')[-1][:-1])
        for dict_key in dict_keys(dict):
            if id(dict_key) == expected_id:
                # Fetch the value list only for the matching key (the previous
                # version called getlist() for every key before checking the id,
                # which was wasteful). Also avoids shadowing the `key` parameter.
                return dict.getlist(dict_key)
        raise UnableToResolveVariableException()
#=======================================================================================================================
# DjangoFormResolver
#=======================================================================================================================
class DjangoFormResolver(DefaultResolver):
    """Resolver for Django forms: shows `_errors` instead of triggering validation."""

    def get_dictionary(self, var, names=None):
        # Do not call self.errors because it is a property and has side effects.
        # NOTE(review): the incoming `names` parameter is ignored (recomputed
        # below); it is kept only for signature compatibility with DefaultResolver.
        names, used___dict__ = self.get_names(var)
        has_errors_attr = False
        if "errors" in names:
            has_errors_attr = True
            names.remove("errors")
        d = defaultResolver.get_dictionary(var, names=names, used___dict__=used___dict__)
        if has_errors_attr:
            try:
                # Read the cached attribute rather than the side-effecting property.
                errors_attr = getattr(var, "_errors")
            except:
                errors_attr = None
            d["errors"] = errors_attr
        return d
#=======================================================================================================================
# DequeResolver
#=======================================================================================================================
class DequeResolver(TupleResolver):
    """Resolver for collections.deque: tuple-like contents plus the 'maxlen' attribute."""

    def get_dictionary(self, var):
        contents = TupleResolver.get_dictionary(self, var)
        # deques expose 'maxlen'; getattr keeps this safe for deque-likes that don't.
        contents['maxlen'] = getattr(var, 'maxlen', None)
        return contents
#=======================================================================================================================
# OrderedDictResolver
#=======================================================================================================================
class OrderedDictResolver(DictResolver):

    # An OrderedDict already has a meaningful order: never re-sort its keys.
    sort_keys = False

    def init_dict(self):
        # Preserve insertion order in the returned mapping as well.
        return OrderedDict()
#=======================================================================================================================
# FrameResolver
#=======================================================================================================================
class FrameResolver:
    '''
    Resolves a frame object, exposing its internals, its call stack and its locals.
    '''

    def resolve(self, obj, attribute):
        """Return one of the synthetic frame attributes, or None when unknown."""
        if attribute == 'f_locals':
            return obj.f_locals
        elif attribute == 'stack':
            return self.get_frame_stack(obj)
        elif attribute == '__internals__':
            return defaultResolver.get_dictionary(obj)
        return None

    def get_dictionary(self, obj):
        """Expose the frame as its internals, its stack and its locals."""
        return {
            '__internals__': defaultResolver.get_dictionary(obj),
            'stack': self.get_frame_stack(obj),
            'f_locals': obj.f_locals,
        }

    def get_frame_stack(self, frame):
        """Names of the given frame and every frame beneath it (innermost first)."""
        names = []
        current = frame
        while current is not None:
            names.append(self.get_frame_name(current))
            current = current.f_back
        return names

    def get_frame_name(self, frame):
        """One-line description of a frame; never raises."""
        if frame is None:
            return 'None'
        try:
            name = basename(frame.f_code.co_filename)
            return 'frame: %s [%s:%s] id:%s' % (frame.f_code.co_name, name, frame.f_lineno, id(frame))
        except:
            return 'frame object'
# Module-level singleton resolver instances used throughout pydevd.
defaultResolver = DefaultResolver()
dictResolver = DictResolver()
tupleResolver = TupleResolver()
instanceResolver = InstanceResolver()
jyArrayResolver = JyArrayResolver()
setResolver = SetResolver()
multiValueDictResolver = MultiValueDictResolver()
djangoFormResolver = DjangoFormResolver()
dequeResolver = DequeResolver()
orderedDictResolver = OrderedDictResolver()
frameResolver = FrameResolver()
dapGrouperResolver = DAPGrouperResolver()
class InspectStub:
    """Minimal stand-in for the `inspect` module when it cannot be imported.

    Both predicates conservatively answer False, so nothing gets filtered out.
    """

    def isbuiltin(self, _args):
        return False

    def isroutine(self, object):
        return False
# Fall back to the stub when the real inspect module is unavailable.
try:
    import inspect
except:
    inspect = InspectStub()
def get_var_scope(attr_name, attr_value, evaluate_name, handle_return_values):
    """Classify a variable into a DAP presentation scope.

    Returns one of the DAPGrouper.SCOPE_* constants, or '' for variables that
    belong in the regular (ungrouped) scope.
    """
    # Keys produced by DictResolver arrive quoted (possibly with an id suffix);
    # strip the quoting so the underscore checks below see the raw name.
    if attr_name.startswith("'"):
        if attr_name.endswith("'"):
            attr_name = attr_name[1:-1]
        else:
            i = attr_name.find("__' (")
            if i >= 0:
                # Handle attr_name such as: >>'__name__' (1732494379184)<<
                attr_name = attr_name[1: i + 2]
    # Synthetic entries (return values dict, generated len) are never grouped.
    if handle_return_values and attr_name == RETURN_VALUES_DICT:
        return ''
    elif attr_name == GENERATED_LEN_ATTR_NAME:
        return ''
    if attr_name.startswith('__') and attr_name.endswith('__'):
        return DAPGrouper.SCOPE_SPECIAL_VARS
    if attr_name.startswith('_') or attr_name.endswith('__'):
        return DAPGrouper.SCOPE_PROTECTED_VARS
    try:
        if inspect.isroutine(attr_value) or isinstance(attr_value, MethodWrapperType):
            return DAPGrouper.SCOPE_FUNCTION_VARS
        elif inspect.isclass(attr_value):
            return DAPGrouper.SCOPE_CLASS_VARS
    except:
        # It's possible that isinstance throws an exception when dealing with user-code.
        if DebugInfoHolder.DEBUG_TRACE_LEVEL > 0:
            pydev_log.exception()
    return ''
| |
#!/usr/bin/env python3
# nlhtree_py/testDropFromU.py
""" Test the drop_from_u_dir functionality. """
import os
import sys
import time
import unittest
from binascii import hexlify
import hashlib
from rnglib import SimpleRNG
from nlhtree import NLHTree, NLHLeaf
from xlattice import HashTypes
from xlu import UDir, DirStruc
if sys.version_info < (3, 6):
# pylint: disable=unused-import
import sha3 # monkey-patches hashlib
assert sha3 # prevent flakes warning
class TestDropFromU(unittest.TestCase):
    """ Test the drop_from_u_dir functionality. """

    def setUp(self):
        # Seed the RNG from the clock so each run exercises fresh data.
        self.rng = SimpleRNG(time.time())

    def tearDown(self):
        pass

    def populate_tree(self, tree, data_path, u_dir, hashtype):
        """
        Generate nnn unique random values (nnn is at least 16), write each
        one as a file under data_path, insert a matching leaf into the tree,
        and store the datum in u_dir keyed by its content hash.

        Returns (values, hashes): the raw data and their binary hashes.
        """
        nnn = 16 + self.rng.next_int16(16)
        # DEBUG
        # print("nnn = %d" % nnn)
        # END
        values = []
        hashes = []
        for count in range(nnn):
            # generate datum ------------------------------
            datum = self.rng.some_bytes(32 + self.rng.next_int16(32))
            values.append(datum)
            # generate hash = bin_key ----------------------
            if hashtype == HashTypes.SHA1:
                sha = hashlib.sha1()
            elif hashtype == HashTypes.SHA2:
                sha = hashlib.sha256()
            elif hashtype == HashTypes.SHA3:
                sha = hashlib.sha3_256()
            elif hashtype == HashTypes.BLAKE2B:
                sha = hashlib.blake2b(digest_size=32)
            else:
                raise NotImplementedError
            sha.update(datum)
            bin_key = sha.digest()
            hex_key = sha.hexdigest()
            hashes.append(bin_key)
            # write data file -----------------------------
            file_name = 'value%04d' % count
            path_to_file = os.path.join(data_path, file_name)
            with open(path_to_file, 'wb') as file:
                # DEBUG
                # print("writing %s to %s" % (hex_key, path_to_file))
                # END
                file.write(datum)
            # insert leaf into tree -----------------------
            # path_from_top = os.path.join(top_name, file_name)
            leaf = NLHLeaf(file_name, bin_key, hashtype)
            tree.insert(leaf)
            # DEBUG
            # print("  inserting <%s %s>" % (leaf.name, leaf.hex_hash))
            # END
            # write data into uDir ------------------------
            u_dir.put_data(datum, hex_key)
        return values, hashes

    def generate_udt(self, struc, hashtype):
        """
        Generate under ./tmp a data directory with random content,
        a uDir containing the same data, and an NLHTree that matches.

        uDir has the directory structure (DIR_FLAT, DIR16x16, DIR256x256,
        etc) requested; hashes use the hashtype requested.

        values is a list of binary values, each the content of a file
        under dataDir.  Each value contains a non-zero number of bytes.

        hashes is a list of the SHA hashes of the values.  Each hash
        is a binary value.  If using SHA1 it consists of 20 bytes.

        return uPath, data_path, tree, hashes, values
        """
        # make a unique U directory under ./tmp/
        os.makedirs('tmp', mode=0o755, exist_ok=True)
        u_root_name = self.rng.next_file_name(8)
        u_path = os.path.join('tmp', u_root_name)
        while os.path.exists(u_path):
            u_root_name = self.rng.next_file_name(8)
            u_path = os.path.join('tmp', u_root_name)
        # DEBUG
        # print("u_root_name = %s" % u_root_name)
        # END
        # create uDir and the NLHTree
        u_dir = UDir(u_path, struc, hashtype)
        self.assertTrue(os.path.exists(u_path))
        # make a unique data directory under tmp/
        data_tmp = self.rng.next_file_name(8)
        tmp_path = os.path.join('tmp', data_tmp)
        while os.path.exists(tmp_path):
            data_tmp = self.rng.next_file_name(8)
            tmp_path = os.path.join('tmp', data_tmp)
        # dataDir must have same base name as NLHTree
        top_name = self.rng.next_file_name(8)
        data_path = os.path.join(tmp_path, top_name)
        os.makedirs(data_path, mode=0o755)
        # DEBUG
        # print("data_tmp = %s" % data_tmp)
        # print("top_name = %s" % top_name)
        # print('data_path = %s' % data_path)
        # END
        tree = NLHTree(top_name, hashtype)
        values, hashes = self.populate_tree(tree, data_path, u_dir, hashtype)
        return u_path, data_path, tree, hashes, values

    # ---------------------------------------------------------------

    def do_test_with_ephemeral_tree(self, struc, hashtype):
        """
        Generate a tmp/ subdirectory containing a quasi-random data
        directory and corresponding uDir and NLHTree serialization.

        We use the directory strucure (struc) and hash type (hashtype)
        indicated, running various consistency tests on the three.
        """
        u_path, data_path, tree, hashes, values = self.generate_udt(
            struc, hashtype)
        # DEBUG
        # print("TREE:\n%s" % tree)
        # END
        # verify that the dataDir matches the nlhTree
        tree2 = NLHTree.create_from_file_system(data_path, hashtype)
        # DEBUG
        # print("TREE2:\n%s" % tree2)
        # END
        self.assertEqual(tree2, tree)
        nnn = len(values)  # number of values present
        hex_hashes = []
        for count in range(nnn):
            hex_hashes.append(hexlify(hashes[count]).decode('ascii'))
        ndxes = [ndx for ndx in range(nnn)]  # indexes into lists
        self.rng.shuffle(ndxes)  # shuffled
        kkk = self.rng.next_int16(nnn)  # we will drop this many indexes
        # DEBUG
        # print("dropping %d from %d elements" % (kkk, nnn))
        # END
        drop_me = ndxes[0:kkk]  # indexes of values to drop
        keep_me = ndxes[kkk:]  # of those which should still be present
        # construct an NLHTree containing values to be dropped from uDir
        clone = tree.clone()
        for count in keep_me:
            name = 'value%04d' % count
            clone.delete(name)  # the parameter is a glob !
        # these values should be absent from the clone: they won't be dropped from uDir
        for count in keep_me:
            name = 'value%04d' % count
            xxx = clone.find(name)
            self.assertEqual(len(xxx), 0)
        # these values shd still be present in clone: they'll be dropped from
        # UDir
        for count in drop_me:
            name = 'value%04d' % count
            xxx = clone.find(name)
            self.assertEqual(len(xxx), 1)
        # the clone subtree contains those elements which will be dropped
        # from uDir
        unmatched = clone.drop_from_u_dir(u_path)  # was unmatched
        # DEBUG
        # for x in unmatched:  # (relPath, hash)
        #     print("unmatched: %s %s" % (x[0], x[1]))
        # END
        self.assertEqual(len(unmatched), 0)
        u_dir = UDir(u_path, struc, hashtype)
        self.assertTrue(os.path.exists(u_path))
        # these values should still be present in uDir
        for count in keep_me:
            hex_hash = hex_hashes[count]
            self.assertTrue(u_dir.exists(hex_hash))
        # these values should NOT be present in UDir
        for count in drop_me:
            hex_hash = hex_hashes[count]
            self.assertFalse(u_dir.exists(hex_hash))

    def test_with_ephemeral_tree(self):
        """
        Generate tmp/ subdirectories containing a quasi-random data
        directory and corresponding uDir and NLHTree serialization,
        using various directory structures and hash types.
        """
        # Exercise every (directory structure, hash type) combination.
        for struc in DirStruc:
            for hashtype in HashTypes:
                self.do_test_with_ephemeral_tree(struc, hashtype)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
"""Blocking and non-blocking HTTP client interfaces.
This module defines a common interface shared by two implementations,
``simple_httpclient`` and ``curl_httpclient``. Applications may either
instantiate their chosen implementation class directly or use the
`AsyncHTTPClient` class from this module, which selects an implementation
that can be overridden with the `AsyncHTTPClient.configure` method.
The default implementation is ``simple_httpclient``, and this is expected
to be suitable for most users' needs. However, some applications may wish
to switch to ``curl_httpclient`` for reasons such as the following:
* ``curl_httpclient`` has some features not found in ``simple_httpclient``,
including support for HTTP proxies and the ability to use a specified
network interface.
* ``curl_httpclient`` is more likely to be compatible with sites that are
not-quite-compliant with the HTTP spec, or sites that use little-exercised
features of HTTP.
* ``curl_httpclient`` is faster.
Note that if you are using ``curl_httpclient``, it is highly
recommended that you use a recent version of ``libcurl`` and
``pycurl``. Currently the minimum supported version of libcurl is
7.22.0, and the minimum version of pycurl is 7.18.2. It is highly
recommended that your ``libcurl`` installation is built with
asynchronous DNS resolver (threaded or c-ares), otherwise you may
encounter various problems with request timeouts (for more
information, see
http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS
and comments in curl_httpclient.py).
To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
"""
import datetime
import functools
from io import BytesIO
import ssl
import time
import weakref
from tornado.concurrent import (
Future,
future_set_result_unless_cancelled,
future_set_exception_unless_cancelled,
)
from tornado.escape import utf8, native_str
from tornado import gen, httputil
from tornado.ioloop import IOLoop
from tornado.util import Configurable
from typing import Type, Any, Union, Dict, Callable, Optional, cast, Awaitable
class HTTPClient(object):
    """A blocking HTTP client.

    This interface is provided to make it easier to share code between
    synchronous and asynchronous applications. Applications that are
    running an `.IOLoop` must use `AsyncHTTPClient` instead.

    Typical usage looks like this::

        http_client = httpclient.HTTPClient()
        try:
            response = http_client.fetch("http://www.google.com/")
            print(response.body)
        except httpclient.HTTPError as e:
            # HTTPError is raised for non-200 responses; the response
            # can be found in e.response.
            print("Error: " + str(e))
        except Exception as e:
            # Other errors are possible, such as IOError.
            print("Error: " + str(e))
        http_client.close()

    .. versionchanged:: 5.0

       Due to limitations in `asyncio`, it is no longer possible to
       use the synchronous ``HTTPClient`` while an `.IOLoop` is running.
       Use `AsyncHTTPClient` instead.

    """

    def __init__(
        self,
        async_client_class: Optional[Type["AsyncHTTPClient"]] = None,
        **kwargs: Any
    ) -> None:
        """Create a blocking client backed by a private `.IOLoop`.

        :param async_client_class: `AsyncHTTPClient` implementation to wrap
            (defaults to the configured implementation).
        :param kwargs: forwarded to the async client's constructor.
        """
        # Initialize self._closed at the beginning of the constructor
        # so that an exception raised here doesn't lead to confusing
        # failures in __del__.
        self._closed = True
        self._io_loop = IOLoop(make_current=False)
        if async_client_class is None:
            async_client_class = AsyncHTTPClient

        # Create the client while our IOLoop is "current", without
        # clobbering the thread's real current IOLoop (if any).
        async def make_client() -> "AsyncHTTPClient":
            await gen.sleep(0)
            assert async_client_class is not None
            return async_client_class(**kwargs)

        self._async_client = self._io_loop.run_sync(make_client)
        self._closed = False

    def __del__(self) -> None:
        # Best-effort cleanup; close() is a no-op when already closed.
        self.close()

    def close(self) -> None:
        """Closes the HTTPClient, freeing any resources used."""
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def fetch(
        self, request: Union["HTTPRequest", str], **kwargs: Any
    ) -> "HTTPResponse":
        """Executes a request, returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        If an error occurs during the fetch, we raise an `HTTPError` unless
        the ``raise_error`` keyword argument is set to False.
        """
        # Run the async fetch to completion on this client's private IOLoop.
        response = self._io_loop.run_sync(
            functools.partial(self._async_client.fetch, request, **kwargs)
        )
        return response
class AsyncHTTPClient(Configurable):
"""An non-blocking HTTP client.
Example usage::
async def f():
http_client = AsyncHTTPClient()
try:
response = await http_client.fetch("http://www.google.com")
except Exception as e:
print("Error: %s" % e)
else:
print(response.body)
The constructor for this class is magic in several respects: It
actually creates an instance of an implementation-specific
subclass, and instances are reused as a kind of pseudo-singleton
(one per `.IOLoop`). The keyword argument ``force_instance=True``
can be used to suppress this singleton behavior. Unless
``force_instance=True`` is used, no arguments should be passed to
the `AsyncHTTPClient` constructor. The implementation subclass as
well as arguments to its constructor can be set with the static
method `configure()`
All `AsyncHTTPClient` implementations support a ``defaults``
keyword argument, which can be used to set default values for
`HTTPRequest` attributes. For example::
AsyncHTTPClient.configure(
None, defaults=dict(user_agent="MyUserAgent"))
# or with force_instance:
client = AsyncHTTPClient(force_instance=True,
defaults=dict(user_agent="MyUserAgent"))
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
_instance_cache = None # type: Dict[IOLoop, AsyncHTTPClient]
    @classmethod
    def configurable_base(cls) -> Type[Configurable]:
        # All implementation subclasses share AsyncHTTPClient's configuration.
        return AsyncHTTPClient
    @classmethod
    def configurable_default(cls) -> Type[Configurable]:
        # Imported here (not at module level) to avoid a circular import.
        from tornado.simple_httpclient import SimpleAsyncHTTPClient

        return SimpleAsyncHTTPClient
@classmethod
def _async_clients(cls) -> Dict[IOLoop, "AsyncHTTPClient"]:
attr_name = "_async_client_dict_" + cls.__name__
if not hasattr(cls, attr_name):
setattr(cls, attr_name, weakref.WeakKeyDictionary())
return getattr(cls, attr_name)
    def __new__(cls, force_instance: bool = False, **kwargs: Any) -> "AsyncHTTPClient":
        # Implements the per-IOLoop pseudo-singleton: unless force_instance is
        # True, reuse any client already cached for the current IOLoop.
        io_loop = IOLoop.current()
        if force_instance:
            instance_cache = None
        else:
            instance_cache = cls._async_clients()
        if instance_cache is not None and io_loop in instance_cache:
            return instance_cache[io_loop]
        instance = super(AsyncHTTPClient, cls).__new__(cls, **kwargs)  # type: ignore
        # Make sure the instance knows which cache to remove itself from.
        # It can't simply call _async_clients() because we may be in
        # __new__(AsyncHTTPClient) but instance.__class__ may be
        # SimpleAsyncHTTPClient.
        instance._instance_cache = instance_cache
        if instance_cache is not None:
            instance_cache[instance.io_loop] = instance
        return instance
    def initialize(self, defaults: Optional[Dict[str, Any]] = None) -> None:
        """Set up per-instance state (invoked by `.Configurable` on creation).

        :param defaults: overrides for `HTTPRequest` attribute defaults,
            merged over ``HTTPRequest._DEFAULTS``.
        """
        self.io_loop = IOLoop.current()
        self.defaults = dict(HTTPRequest._DEFAULTS)
        if defaults is not None:
            self.defaults.update(defaults)
        self._closed = False
    def close(self) -> None:
        """Destroys this HTTP client, freeing any file descriptors used.

        This method is **not needed in normal use** due to the way
        that `AsyncHTTPClient` objects are transparently reused.
        ``close()`` is generally only necessary when either the
        `.IOLoop` is also being closed, or the ``force_instance=True``
        argument was used when creating the `AsyncHTTPClient`.

        No other methods may be called on the `AsyncHTTPClient` after
        ``close()``.
        """
        if self._closed:
            # Idempotent: closing twice is a no-op.
            return
        self._closed = True
        if self._instance_cache is not None:
            cached_val = self._instance_cache.pop(self.io_loop, None)
            # If there's an object other than self in the instance
            # cache for our IOLoop, something has gotten mixed up. A
            # value of None appears to be possible when this is called
            # from a destructor (HTTPClient.__del__) as the weakref
            # gets cleared before the destructor runs.
            if cached_val is not None and cached_val is not self:
                raise RuntimeError("inconsistent AsyncHTTPClient cache")
    def fetch(
        self,
        request: Union[str, "HTTPRequest"],
        raise_error: bool = True,
        **kwargs: Any
    ) -> Awaitable["HTTPResponse"]:
        """Executes a request, asynchronously returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        This method returns a `.Future` whose result is an
        `HTTPResponse`. By default, the ``Future`` will raise an
        `HTTPError` if the request returned a non-200 response code
        (other errors may also be raised if the server could not be
        contacted). Instead, if ``raise_error`` is set to False, the
        response will always be returned regardless of the response
        code.

        If a ``callback`` is given, it will be invoked with the `HTTPResponse`.
        In the callback interface, `HTTPError` is not automatically raised.
        Instead, you must check the response's ``error`` attribute or
        call its `~HTTPResponse.rethrow` method.

        .. versionchanged:: 6.0

           The ``callback`` argument was removed. Use the returned
           `.Future` instead.

           The ``raise_error=False`` argument only affects the
           `HTTPError` raised when a non-200 response code is used,
           instead of suppressing all errors.
        """
        if self._closed:
            raise RuntimeError("fetch() called on closed AsyncHTTPClient")
        if not isinstance(request, HTTPRequest):
            request = HTTPRequest(url=request, **kwargs)
        else:
            if kwargs:
                raise ValueError(
                    "kwargs can't be used if request is an HTTPRequest object"
                )
        # We may modify this (to add Host, Accept-Encoding, etc),
        # so make sure we don't modify the caller's object. This is also
        # where normal dicts get converted to HTTPHeaders objects.
        request.headers = httputil.HTTPHeaders(request.headers)
        request_proxy = _RequestProxy(request, self.defaults)
        future = Future()  # type: Future[HTTPResponse]

        def handle_response(response: "HTTPResponse") -> None:
            # Resolve the caller's Future. Errors are surfaced unless the
            # caller opted out (raise_error=False) AND the error is merely
            # a non-2xx status code (see HTTPResponse._error_is_response_code).
            if response.error:
                if raise_error or not response._error_is_response_code:
                    future_set_exception_unless_cancelled(future, response.error)
                    return
            future_set_result_unless_cancelled(future, response)

        self.fetch_impl(cast(HTTPRequest, request_proxy), handle_response)
        return future
    def fetch_impl(
        self, request: "HTTPRequest", callback: Callable[["HTTPResponse"], None]
    ) -> None:
        """Subclass hook: execute ``request`` and invoke ``callback`` with
        the resulting `HTTPResponse` (for both success and failure)."""
        raise NotImplementedError()
    @classmethod
    def configure(
        cls, impl: "Union[None, str, Type[Configurable]]", **kwargs: Any
    ) -> None:
        """Configures the `AsyncHTTPClient` subclass to use.

        ``AsyncHTTPClient()`` actually creates an instance of a subclass.
        This method may be called with either a class object or the
        fully-qualified name of such a class (or ``None`` to use the default,
        ``SimpleAsyncHTTPClient``)

        If additional keyword arguments are given, they will be passed
        to the constructor of each subclass instance created. The
        keyword argument ``max_clients`` determines the maximum number
        of simultaneous `~AsyncHTTPClient.fetch()` operations that can
        execute in parallel on each `.IOLoop`. Additional arguments
        may be supported depending on the implementation class in use.

        Example::

           AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
        """
        # Delegates to Configurable; the registration is keyed on
        # configurable_base(), i.e. AsyncHTTPClient itself.
        super(AsyncHTTPClient, cls).configure(impl, **kwargs)
class HTTPRequest(object):
    """HTTP client request object."""

    # May hold a plain dict until AsyncHTTPClient.fetch() converts it to
    # HTTPHeaders; see the ``headers`` property below.
    _headers = None  # type: Union[Dict[str, str], httputil.HTTPHeaders]

    # Default values for HTTPRequest parameters.
    # Merged with the values on the request object by AsyncHTTPClient
    # implementations.
    _DEFAULTS = dict(
        connect_timeout=20.0,
        request_timeout=20.0,
        follow_redirects=True,
        max_redirects=5,
        decompress_response=True,
        proxy_password="",
        allow_nonstandard_methods=False,
        validate_cert=True,
    )

    def __init__(
        self,
        url: str,
        method: str = "GET",
        headers: Optional[Union[Dict[str, str], httputil.HTTPHeaders]] = None,
        body: Optional[Union[bytes, str]] = None,
        auth_username: Optional[str] = None,
        auth_password: Optional[str] = None,
        auth_mode: Optional[str] = None,
        connect_timeout: Optional[float] = None,
        request_timeout: Optional[float] = None,
        if_modified_since: Optional[Union[float, datetime.datetime]] = None,
        follow_redirects: Optional[bool] = None,
        max_redirects: Optional[int] = None,
        user_agent: Optional[str] = None,
        use_gzip: Optional[bool] = None,
        network_interface: Optional[str] = None,
        streaming_callback: Optional[Callable[[bytes], None]] = None,
        header_callback: Optional[Callable[[str], None]] = None,
        prepare_curl_callback: Optional[Callable[[Any], None]] = None,
        proxy_host: Optional[str] = None,
        proxy_port: Optional[int] = None,
        proxy_username: Optional[str] = None,
        proxy_password: Optional[str] = None,
        proxy_auth_mode: Optional[str] = None,
        allow_nonstandard_methods: Optional[bool] = None,
        validate_cert: Optional[bool] = None,
        ca_certs: Optional[str] = None,
        allow_ipv6: Optional[bool] = None,
        client_key: Optional[str] = None,
        client_cert: Optional[str] = None,
        body_producer: Optional[
            Callable[[Callable[[bytes], None]], "Future[None]"]
        ] = None,
        expect_100_continue: bool = False,
        decompress_response: Optional[bool] = None,
        ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
    ) -> None:
        r"""All parameters except ``url`` are optional.

        :arg str url: URL to fetch
        :arg str method: HTTP method, e.g. "GET" or "POST"
        :arg headers: Additional HTTP headers to pass on the request
        :type headers: `~tornado.httputil.HTTPHeaders` or `dict`
        :arg body: HTTP request body as a string (byte or unicode; if unicode
           the utf-8 encoding will be used)
        :arg body_producer: Callable used for lazy/asynchronous request bodies.
           It is called with one argument, a ``write`` function, and should
           return a `.Future`.  It should call the write function with new
           data as it becomes available.  The write function returns a
           `.Future` which can be used for flow control.
           Only one of ``body`` and ``body_producer`` may
           be specified.  ``body_producer`` is not supported on
           ``curl_httpclient``.  When using ``body_producer`` it is recommended
           to pass a ``Content-Length`` in the headers as otherwise chunked
           encoding will be used, and many servers do not support chunked
           encoding on requests.  New in Tornado 4.0
        :arg str auth_username: Username for HTTP authentication
        :arg str auth_password: Password for HTTP authentication
        :arg str auth_mode: Authentication mode; default is "basic".
           Allowed values are implementation-defined; ``curl_httpclient``
           supports "basic" and "digest"; ``simple_httpclient`` only supports
           "basic"
        :arg float connect_timeout: Timeout for initial connection in seconds,
           default 20 seconds
        :arg float request_timeout: Timeout for entire request in seconds,
           default 20 seconds
        :arg if_modified_since: Timestamp for ``If-Modified-Since`` header
        :type if_modified_since: `datetime` or `float`
        :arg bool follow_redirects: Should redirects be followed automatically
           or return the 3xx response? Default True.
        :arg int max_redirects: Limit for ``follow_redirects``, default 5.
        :arg str user_agent: String to send as ``User-Agent`` header
        :arg bool decompress_response: Request a compressed response from
           the server and decompress it after downloading.  Default is True.
           New in Tornado 4.0.
        :arg bool use_gzip: Deprecated alias for ``decompress_response``
           since Tornado 4.0.
        :arg str network_interface: Network interface or source IP to use for request.
           See ``curl_httpclient`` note below.
        :arg collections.abc.Callable streaming_callback: If set, ``streaming_callback`` will
           be run with each chunk of data as it is received, and
           ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
           the final response.
        :arg collections.abc.Callable header_callback: If set, ``header_callback`` will
           be run with each header line as it is received (including the
           first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
           containing only ``\r\n``.  All lines include the trailing newline
           characters).  ``HTTPResponse.headers`` will be empty in the final
           response.  This is most useful in conjunction with
           ``streaming_callback``, because it's the only way to get access to
           header data while the request is in progress.
        :arg collections.abc.Callable prepare_curl_callback: If set, will be called with
           a ``pycurl.Curl`` object to allow the application to make additional
           ``setopt`` calls.
        :arg str proxy_host: HTTP proxy hostname.  To use proxies,
           ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username``,
           ``proxy_pass`` and ``proxy_auth_mode`` are optional.  Proxies are
           currently only supported with ``curl_httpclient``.
        :arg int proxy_port: HTTP proxy port
        :arg str proxy_username: HTTP proxy username
        :arg str proxy_password: HTTP proxy password
        :arg str proxy_auth_mode: HTTP proxy Authentication mode;
           default is "basic". supports "basic" and "digest"
        :arg bool allow_nonstandard_methods: Allow unknown values for ``method``
           argument? Default is False.
        :arg bool validate_cert: For HTTPS requests, validate the server's
           certificate? Default is True.
        :arg str ca_certs: filename of CA certificates in PEM format,
           or None to use defaults.  See note below when used with
           ``curl_httpclient``.
        :arg str client_key: Filename for client SSL key, if any.  See
           note below when used with ``curl_httpclient``.
        :arg str client_cert: Filename for client SSL certificate, if any.
           See note below when used with ``curl_httpclient``.
        :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
           ``simple_httpclient`` (unsupported by ``curl_httpclient``).
           Overrides ``validate_cert``, ``ca_certs``, ``client_key``,
           and ``client_cert``.
        :arg bool allow_ipv6: Use IPv6 when available? Default is True.
        :arg bool expect_100_continue: If true, send the
           ``Expect: 100-continue`` header and wait for a continue response
           before sending the request body.  Only supported with
           ``simple_httpclient``.

        .. note::

            When using ``curl_httpclient`` certain options may be
            inherited by subsequent fetches because ``pycurl`` does
            not allow them to be cleanly reset.  This applies to the
            ``ca_certs``, ``client_key``, ``client_cert``, and
            ``network_interface`` arguments.  If you use these
            options, you should pass them on every request (you don't
            have to always use the same values, but it's not possible
            to mix requests that specify these options with ones that
            use the defaults).

        .. versionadded:: 3.1
           The ``auth_mode`` argument.

        .. versionadded:: 4.0
           The ``body_producer`` and ``expect_100_continue`` arguments.

        .. versionadded:: 4.2
           The ``ssl_options`` argument.

        .. versionadded:: 4.5
           The ``proxy_auth_mode`` argument.
        """
        # Note that some of these attributes go through property setters
        # defined below.
        self.headers = headers
        if if_modified_since:
            self.headers["If-Modified-Since"] = httputil.format_timestamp(
                if_modified_since
            )
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_username = proxy_username
        self.proxy_password = proxy_password
        self.proxy_auth_mode = proxy_auth_mode
        self.url = url
        self.method = method
        self.body = body
        self.body_producer = body_producer
        self.auth_username = auth_username
        self.auth_password = auth_password
        self.auth_mode = auth_mode
        self.connect_timeout = connect_timeout
        self.request_timeout = request_timeout
        self.follow_redirects = follow_redirects
        self.max_redirects = max_redirects
        self.user_agent = user_agent
        if decompress_response is not None:
            self.decompress_response = decompress_response  # type: Optional[bool]
        else:
            # Fall back to the deprecated ``use_gzip`` alias.
            self.decompress_response = use_gzip
        self.network_interface = network_interface
        self.streaming_callback = streaming_callback
        self.header_callback = header_callback
        self.prepare_curl_callback = prepare_curl_callback
        self.allow_nonstandard_methods = allow_nonstandard_methods
        self.validate_cert = validate_cert
        self.ca_certs = ca_certs
        self.allow_ipv6 = allow_ipv6
        self.client_key = client_key
        self.client_cert = client_cert
        self.ssl_options = ssl_options
        self.expect_100_continue = expect_100_continue
        self.start_time = time.time()

    @property
    def headers(self) -> httputil.HTTPHeaders:
        # TODO: headers may actually be a plain dict until fairly late in
        # the process (AsyncHTTPClient.fetch), but practically speaking,
        # whenever the property is used they're already HTTPHeaders.
        return self._headers  # type: ignore

    @headers.setter
    def headers(
        self, value: Optional[Union[Dict[str, str], httputil.HTTPHeaders]]
    ) -> None:
        if value is None:
            self._headers = httputil.HTTPHeaders()
        else:
            self._headers = value  # type: ignore

    @property
    def body(self) -> bytes:
        return self._body

    @body.setter
    def body(self, value: Union[bytes, str]) -> None:
        # The body is always stored as utf-8-encoded bytes.
        self._body = utf8(value)
class HTTPResponse(object):
    """HTTP Response object.

    Attributes:

    * ``request``: HTTPRequest object

    * ``code``: numeric HTTP status code, e.g. 200 or 404

    * ``reason``: human-readable reason phrase describing the status code

    * ``headers``: `tornado.httputil.HTTPHeaders` object

    * ``effective_url``: final location of the resource after following any
      redirects

    * ``buffer``: ``cStringIO`` object for response body

    * ``body``: response body as bytes (created on demand from ``self.buffer``)

    * ``error``: Exception object, if any

    * ``request_time``: seconds from request start to finish. Includes all
      network operations from DNS resolution to receiving the last byte of
      data. Does not include time spent in the queue (due to the
      ``max_clients`` option). If redirects were followed, only includes
      the final request.

    * ``start_time``: Time at which the HTTP operation started, based on
      `time.time` (not the monotonic clock used by `.IOLoop.time`). May
      be ``None`` if the request timed out while in the queue.

    * ``time_info``: dictionary of diagnostic timing information from the
      request. Available data are subject to change, but currently uses timings
      available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,
      plus ``queue``, which is the delay (if any) introduced by waiting for
      a slot under `AsyncHTTPClient`'s ``max_clients`` setting.

    .. versionadded:: 5.1
       Added the ``start_time`` attribute.

    .. versionchanged:: 5.1
       The ``request_time`` attribute previously included time spent in the queue
       for ``simple_httpclient``, but not in ``curl_httpclient``. Now queueing time
       is excluded in both implementations. ``request_time`` is now more accurate for
       ``curl_httpclient`` because it uses a monotonic clock when available.
    """

    # I'm not sure why these don't get type-inferred from the references in __init__.
    error = None  # type: Optional[BaseException]
    _error_is_response_code = False
    request = None  # type: HTTPRequest

    def __init__(
        self,
        request: HTTPRequest,
        code: int,
        headers: Optional[httputil.HTTPHeaders] = None,
        buffer: Optional[BytesIO] = None,
        effective_url: Optional[str] = None,
        error: Optional[BaseException] = None,
        request_time: Optional[float] = None,
        time_info: Optional[Dict[str, float]] = None,
        reason: Optional[str] = None,
        start_time: Optional[float] = None,
    ) -> None:
        if isinstance(request, _RequestProxy):
            # Unwrap the defaults proxy so .request is the caller's object.
            self.request = request.request
        else:
            self.request = request
        self.code = code
        self.reason = reason or httputil.responses.get(code, "Unknown")
        if headers is not None:
            self.headers = headers
        else:
            self.headers = httputil.HTTPHeaders()
        self.buffer = buffer
        self._body = None  # type: Optional[bytes]
        if effective_url is None:
            self.effective_url = request.url
        else:
            self.effective_url = effective_url
        self._error_is_response_code = False
        if error is None:
            if self.code < 200 or self.code >= 300:
                # Synthesize an HTTPError for non-2xx codes so that
                # fetch(raise_error=True) has something to raise; the flag
                # lets raise_error=False suppress exactly this case.
                self._error_is_response_code = True
                self.error = HTTPError(self.code, message=self.reason, response=self)
            else:
                self.error = None
        else:
            self.error = error
        self.start_time = start_time
        self.request_time = request_time
        self.time_info = time_info or {}

    @property
    def body(self) -> bytes:
        # Lazily materialized from ``buffer`` on first access, then cached.
        if self.buffer is None:
            return b""
        elif self._body is None:
            self._body = self.buffer.getvalue()
        return self._body

    def rethrow(self) -> None:
        """If there was an error on the request, raise an `HTTPError`."""
        if self.error:
            raise self.error

    def __repr__(self) -> str:
        args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items()))
        return "%s(%s)" % (self.__class__.__name__, args)
class HTTPClientError(Exception):
    """Exception thrown for an unsuccessful HTTP request.

    Attributes:

    * ``code`` - HTTP error integer error code, e.g. 404. Error code 599 is
      used when no HTTP response was received, e.g. for a timeout.

    * ``response`` - `HTTPResponse` object, if any.

    Note that if ``follow_redirects`` is False, redirects become HTTPErrors,
    and you can look at ``error.response.headers['Location']`` to see the
    destination of the redirect.

    .. versionchanged:: 5.1

       Renamed from ``HTTPError`` to ``HTTPClientError`` to avoid collisions with
       `tornado.web.HTTPError`. The name ``tornado.httpclient.HTTPError`` remains
       as an alias.
    """

    def __init__(
        self, code: int, message: str = None, response: HTTPResponse = None
    ) -> None:
        # Fall back to the standard reason phrase when no message is given.
        resolved = message or httputil.responses.get(code, "Unknown")
        self.code = code
        self.message = resolved
        self.response = response
        super().__init__(code, message, response)

    def __str__(self) -> str:
        return "HTTP %d: %s" % (self.code, self.message)

    # There is a cyclic reference between self and self.response,
    # which breaks the default __repr__ implementation.
    # (especially on pypy, which doesn't have the same recursion
    # detection as cpython).
    __repr__ = __str__
HTTPError = HTTPClientError
class _RequestProxy(object):
    """Combines an object with a dictionary of defaults.

    Used internally by AsyncHTTPClient implementations.
    """

    def __init__(
        self, request: HTTPRequest, defaults: Optional[Dict[str, Any]]
    ) -> None:
        self.request = request
        self.defaults = defaults

    def __getattr__(self, name: str) -> Any:
        # Prefer an explicitly-set (non-None) value on the request; fall
        # back to the defaults dict, then to None.
        value = getattr(self.request, name)
        if value is not None:
            return value
        defaults = self.defaults
        return defaults.get(name, None) if defaults is not None else None
def main() -> None:
    """Command-line utility: fetch each URL argument and print the response.

    Flags (via tornado.options): ``--print_headers``, ``--print_body``,
    ``--follow_redirects``, ``--validate_cert``, ``--proxy_host``,
    ``--proxy_port``.
    """
    from tornado.options import define, options, parse_command_line

    define("print_headers", type=bool, default=False)
    define("print_body", type=bool, default=True)
    define("follow_redirects", type=bool, default=True)
    define("validate_cert", type=bool, default=True)
    define("proxy_host", type=str)
    define("proxy_port", type=int)
    args = parse_command_line()
    client = HTTPClient()
    for arg in args:
        try:
            response = client.fetch(
                arg,
                follow_redirects=options.follow_redirects,
                validate_cert=options.validate_cert,
                proxy_host=options.proxy_host,
                proxy_port=options.proxy_port,
            )
        except HTTPError as e:
            # Still print error responses when a body is attached.
            if e.response is not None:
                response = e.response
            else:
                raise
        if options.print_headers:
            print(response.headers)
        if options.print_body:
            print(native_str(response.body))
    client.close()


if __name__ == "__main__":
    main()
| |
import math
import numpy as np
import os
from osgeo import gdal
from osgeo import osr
from django.conf import settings
from django.contrib.gis.geos import Polygon, MultiPolygon, MultiLineString, LineString, Point
from faker import Factory
from noise import snoise2
from scipy.ndimage.filters import median_filter
from shapely.geometry import Polygon as Poly
from shapely.ops import cascaded_union
class ModelExporter:
    """Persists a generated map object into Django models.

    The four ``*_model`` constructor arguments are Django model classes
    (biome, river, region, city). ``max_lat``/``max_lng`` give the target
    geographic extent; map coordinates in [0, 1] are mapped linearly onto
    it, centered on (0, 0).
    """

    def __init__(self, biome_model, river_model, region_model, city_model, max_lat, max_lng):
        self.biome_model = biome_model
        self.city_model = city_model
        self.region_model = region_model
        self.river_model = river_model
        self.max_lat = max_lat
        self.max_lng = max_lng

    # Pre-save hooks: subclasses may override these to populate extra model
    # fields just before each object is validated and saved.

    def biome_pre_save(self, obj, center, map_obj):
        pass

    def city_pre_save(self, obj, city, map_obj):
        pass

    def region_pre_save(self, obj, region, map_obj):
        pass

    def river_pre_save(self, obj, edge, map_obj):
        pass

    # Cleanup hooks. NOTE(review): these delete ALL rows of each model,
    # not only rows belonging to ``map_obj``.

    def cleanup_biome(self, map_obj):
        self.biome_model.objects.all().delete()

    def cleanup_city(self, map_obj):
        self.city_model.objects.all().delete()

    def cleanup_region(self, map_obj):
        self.region_model.objects.all().delete()

    def cleanup_river(self, map_obj):
        self.river_model.objects.all().delete()

    def export(self, map_obj):
        """Export regions, biomes, rivers and cities of ``map_obj`` to the DB."""
        print('Export data to DB')
        fake = Factory.create()
        # Export regions
        print('Save regions')
        self.cleanup_region(map_obj)
        # new_objects = []
        for region in map_obj.regions:
            obj = self.region_model()
            obj.name = fake.city()
            # Dissolve the region's center polygons into a single outline.
            polygons = [center.shapely_object for center in region.centers]
            region_poly = cascaded_union(polygons)
            coords = [self.point_to_lnglat(point) for point in region_poly.exterior.coords]
            obj.geom = MultiPolygon([Polygon(coords)])
            self.region_pre_save(obj, region, map_obj)
            obj.full_clean()
            obj.save()
            region.model = obj  # saved instance is needed below for M2M links
            # new_objects.append(obj)
        # self.region_model.objects.bulk_create(new_objects)
        # Save region neighbors
        print('Save regions neighbors')
        # NOTE(review): nothing is ever appended to ``checked`` here (unlike
        # the biome loop below), so every pair is added from both sides.
        # Harmless for a symmetrical M2M, so behavior is kept as-is.
        checked = []
        for region in map_obj.regions:
            for neighbour in region.neighboir_regions:  # (sic) attribute name from the map object
                if neighbour not in checked:
                    region.model.neighbors.add(neighbour.model)
        # Export biomes
        print('Save biomes')
        self.cleanup_biome(map_obj)
        new_objects = []
        for center in map_obj.centers:
            obj = self.biome_model()
            center.model = obj
            obj.biome = center.biome
            obj.water = center.water
            obj.coast = center.coast
            obj.border = center.border
            obj.ocean = center.ocean
            obj.elevation = center.elevation
            obj.moisture = center.moisture
            obj.center = Point(*self.point_to_lnglat(center.point))
            obj.river = any(edge.river for edge in center.borders)
            if not center.water:
                obj.region = center.region.model
            coords = []
            for corner in center.corners:
                coords.append(self.point_to_lnglat(corner.point))
            # Sort coordinates. Should be sorted already, but lets check once more.
            coords.sort(key=lambda p: math.atan2(p[1] - obj.center.y, p[0] - obj.center.x))
            coords.append(coords[0])  # close the polygon ring
            obj.geom = MultiPolygon([Polygon(coords)])
            self.biome_pre_save(obj, center, map_obj)
            obj.full_clean()
            obj.save()
            new_objects.append(obj)
        # FIXME: Use bulk_create and change neighbors saving
        # self.model.objects.bulk_create(new_objects)
        # save neighbors
        print('Save biomes neighbors')
        checked = []
        for center in map_obj.centers:
            checked.append(center)
            for neighbour in center.neighbors:
                if neighbour not in checked:
                    center.model.neighbors.add(neighbour.model)
        # Export rivers
        print('Save rivers')
        self.cleanup_river(map_obj)
        new_objects = []
        for edge in map_obj.edges:
            if edge.river:
                obj = self.river_model()
                obj.width = edge.river  # edge.river doubles as the river width
                p1 = self.point_to_lnglat(edge.corners[0].point)
                p2 = self.point_to_lnglat(edge.corners[1].point)
                obj.geom = MultiLineString(LineString(p1, p2))
                self.river_pre_save(obj, edge, map_obj)
                obj.full_clean()
                new_objects.append(obj)
        self.river_model.objects.bulk_create(new_objects)
        # Export cities
        print('Save cities')
        self.cleanup_city(map_obj)
        new_objects = []
        for region in map_obj.regions:
            for center in region.centers:
                obj = self.city_model()
                obj.biome = center.model
                obj.capital = (center == region.capital)
                obj.name = fake.city()
                obj.region = region.model
                obj.coords = Point(*self.point_to_lnglat(center.point))
                # Bug fix: this previously called region_pre_save, so the
                # city_pre_save hook was never invoked for city objects.
                self.city_pre_save(obj, center, map_obj)
                obj.full_clean()
                new_objects.append(obj)
        self.city_model.objects.bulk_create(new_objects)

    def point_to_lnglat(self, point):
        """Map a point in map coordinates ([0, 1] x [0, 1]) to (lng, lat)."""
        return (
            self.max_lng * point[0] - self.max_lng / 2,
            self.max_lat * point[1] - self.max_lat / 2
        )
class GeoTiffExporter(object):
    """Renders a map object's elevation data into a hillshaded GeoTIFF."""

    def __init__(self, max_lat, max_lng, width=1000, hill_noise=True, dst_filename=None):
        # max_lat/max_lng define the geographic extent, centered on (0, 0).
        self.max_lat = max_lat
        self.max_lng = max_lng
        if not dst_filename:
            self.dst_filename = os.path.join(settings.BASE_DIR, 'map.tif')
        else:
            self.dst_filename = dst_filename
        self.top_left_point = (-(max_lng / 2), max_lat / 2)
        self.bot_right_point = (max_lng / 2, -(max_lat / 2))
        self.max_height = 500  # elevation will be scaled to this value
        self.width = width  # output raster width in pixels
        self.hill_noise = hill_noise

    # @profile # 5624515 function calls in 9.509 seconds
    def export(self, map_obj):
        """Rasterize ``map_obj`` elevations and write a hillshaded GeoTIFF."""
        # http://www.gdal.org/gdal_tutorial.html
        # http://blambi.blogspot.com/2010/05/making-geo-referenced-images-in-python.html
        in_srs = self.get_in_projection()
        out_srs = self.get_out_projection()
        coord_transform = osr.CoordinateTransformation(in_srs, out_srs)
        top_left_lng_m, top_left_lat_m, _ = coord_transform.TransformPoint(*self.top_left_point)
        bot_right_lng_m, bot_right_lat_m, _ = coord_transform.TransformPoint(*self.bot_right_point)
        # image size
        x_pixels = self.width
        pixel_size = abs(top_left_lng_m - bot_right_lng_m) / x_pixels
        y_pixels = int(abs(bot_right_lat_m - top_left_lat_m) / pixel_size) + 1
        x_pixels += 1
        # pixel/coords transform and inverse transform
        geo = [top_left_lng_m, pixel_size, 0, top_left_lat_m, 0, -pixel_size]
        # NOTE(review): indexing [1] matches the GDAL 1.x API where
        # InvGeoTransform returns (success, inv_gt) — confirm GDAL version.
        inv_geo = gdal.InvGeoTransform(geo)[1]
        image_data = self.get_image_data(map_obj, (y_pixels, x_pixels), inv_geo, coord_transform)
        # Smooth rasterization artifacts between triangles.
        image_data = median_filter(image_data, (6, 6))
        # image_data = gaussian_filter(image_data, sigma=1)
        if self.hill_noise:
            self.add_noise(image_data, map_obj.seed)
        image_data *= self.max_height
        image_data = self.add_hillshade(image_data, 225, 45)
        # create image
        dataset = gdal.GetDriverByName('GTiff').Create(
            self.dst_filename,
            x_pixels,
            y_pixels,
            1,  # bands count
            gdal.GDT_Byte)
        dataset.SetGeoTransform(geo)
        dataset.SetProjection(out_srs.ExportToWkt())
        dataset.GetRasterBand(1).WriteArray(image_data)
        dataset.FlushCache()  # flush the band to disk

    def get_image_data(self, map_obj, size, inv_geo, coord_transform):
        """Return a float32 elevation raster of shape ``size`` (rows, cols).

        Results are cached on disk, keyed by map seed, point count and
        output width.
        """
        cache_file_name = '%s_%s_%s.npy' % (map_obj.seed, len(map_obj.points), self.width)
        cache_file_path = os.path.join(settings.HEIGHT_CACHE_DIR, cache_file_name)
        try:
            return np.load(cache_file_path)
        except IOError:
            pass  # cache miss: rasterize from scratch below
        raster = np.zeros(size, dtype=np.float32)
        # Sample at half-pixel resolution so no output pixel is skipped.
        step = 0.5 / size[0]
        count = len(map_obj.centers)
        completed = 0
        for center in map_obj.centers:
            completed += 1
            if completed % 100 == 0:
                print('%s of %s' % (completed, count))
            if center.water:
                continue
            v1 = np.array([center.point[0], center.point[1], center.elevation])
            # Each border edge forms a triangle (center, corner1, corner2);
            # interpolate elevation linearly over that triangle.
            for edge in center.borders:
                c1 = edge.corners[0]
                c2 = edge.corners[1]
                cp1 = c1.point
                cp2 = c2.point
                # get the equation of a plane from three points
                v2 = np.array([cp1[0], cp1[1], c1.elevation])
                v3 = np.array([cp2[0], cp2[1], c2.elevation])
                normal = np.cross(v2 - v1, v3 - v1)
                a, b, c = normal
                d = np.dot(normal, v3)
                # calculate elevation for all points in polygon
                poly = Poly([center.point, cp1, cp2])
                minx, miny, maxx, maxy = poly.bounds
                # TODO: requires some optimization, too many checks here
                for x in np.arange(minx, maxx, step):
                    for y in np.arange(miny, maxy, step):
                        if in_triange((x, y), v1, cp1, cp2):
                            # calculate elevation and convert to pixel value
                            z = (a * x + b * y - d) / -c
                            # get pixel coordinates from our coordinates(0-1)
                            img_x, img_y = self.point_to_pixel((x, y), inv_geo, coord_transform)
                            raster[img_y][img_x] = z
        np.save(cache_file_path, raster)
        return raster

    def get_in_projection(self):
        """
        We save our polygons in this projection.
        """
        # EPSG:4326 — WGS84 lng/lat degrees.
        proj = osr.SpatialReference()
        proj.ImportFromEPSG(4326)
        return proj

    def get_out_projection(self):
        """
        Output projection is projection of our map tiles.
        """
        # EPSG:3857 — Web Mercator, in meters.
        proj = osr.SpatialReference()
        proj.ImportFromEPSG(3857)
        return proj

    def get_pixel(self, lng, lat, inv_geo, transform):
        """
        Return pixel coordinates from lng/lat
        """
        gx, gy, _ = transform.TransformPoint(lng, lat)
        gx, gy = gdal.ApplyGeoTransform(inv_geo, gx, gy)
        return int(gx), int(gy)

    def point_to_lnglat(self, point):
        """
        Convert point in our coordinates(0-1) to lng/lat
        """
        return (
            self.max_lng * point[0] - self.max_lng / 2,
            self.max_lat * point[1] - self.max_lat / 2
        )

    def point_to_pixel(self, point, inv_geo, transform):
        """
        Convert point in our coordinates(0-1) to pixel coordinates
        """
        lng, lat = self.point_to_lnglat(point)
        return self.get_pixel(lng, lat, inv_geo, transform)

    def add_hillshade(self, image_data, azimuth, angle_altitude):
        """
        From here http://geoexamples.blogspot.com/2014/03/shaded-relief-images-using-gdal-python.html
        """
        # Shade each cell by the angle between its surface normal and the
        # light direction; output is scaled into the 0-255 byte range.
        x, y = np.gradient(image_data)
        slope = np.pi / 2. - np.arctan(np.sqrt(x * x + y * y))
        aspect = np.arctan2(-x, y)
        azimuthrad = azimuth * np.pi / 180.
        altituderad = angle_altitude*np.pi / 180.
        shaded = np.sin(altituderad) * np.sin(slope) + np.cos(altituderad) * np.cos(slope) \
            * np.cos(azimuthrad - aspect)
        return 255 * (shaded + 1) / 2

    def add_noise(self, image_data, seed):
        """Perturb land pixels (elevation > 0) in place with simplex noise."""
        for y in range(image_data.shape[0]):
            for x in range(image_data.shape[1]):
                # large scale gives more frequent noise
                if image_data[y][x] > 0:
                    scale = 0.03
                    # Noise amplitude grows with elevation.
                    level = 0.004 + 0.004 * image_data[y][x]
                    noise = snoise2(x * scale, y * scale, octaves=2, base=seed) * level
                    image_data[y][x] = image_data[y][x] + noise
                    # Clamp so noise never pushes land below sea level.
                    if image_data[y][x] < 0:
                        image_data[y][x] = 0
def in_triange(pt, v1, v2, v3):
    """Return True if 2-D point ``pt`` lies within triangle ``v1``-``v2``-``v3``.

    Only the first two components of each vertex are used, so 3-element
    vertices (x, y, elevation) are accepted. Works for either winding
    order: the point is inside when all three edge cross-products share
    the same sign. (Function name typo is preserved for existing callers.)
    """
    def edge_sign(p, a, b):
        # Sign of cross product (p - b) x (a - b): which side of edge b->a
        # the point p falls on.
        return ((p[0] - b[0]) * (a[1] - b[1]) - (a[0] - b[0]) * (p[1] - b[1])) <= 0

    s1 = edge_sign(pt, v1, v2)
    s2 = edge_sign(pt, v2, v3)
    s3 = edge_sign(pt, v3, v1)
    return s1 == s2 and s2 == s3
| |
'''
Build a neural machine translation model with soft attention
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import cPickle as pkl
import ipdb
import numpy
import copy
import os
import warnings
import sys
import time
import logging
from collections import OrderedDict
from data_iterator import TextIterator
profile = False
# push parameters to Theano shared variables
def zipp(params, tparams):
    """Copy each numpy value in ``params`` into the matching Theano shared
    variable in ``tparams`` (in place)."""
    for name, value in params.iteritems():
        tparams[name].set_value(value)
# pull parameters from Theano shared variables
def unzip(zipped):
    """Snapshot shared variables into a fresh OrderedDict of numpy values."""
    return OrderedDict(
        (name, shared.get_value()) for name, shared in zipped.iteritems()
    )
# get the list of parameters: Note that tparams must be OrderedDict
def itemlist(tparams):
    """Return the parameter values of ``tparams`` in insertion order."""
    return [value for _, value in tparams.iteritems()]
# dropout
def dropout_layer(state_before, use_noise, trng):
    """Symbolic dropout: while ``use_noise`` is on, multiply by a Bernoulli
    (p=0.5) mask drawn from ``trng``; otherwise scale by the keep
    probability 0.5 (test-time expectation)."""
    mask = trng.binomial(state_before.shape, p=0.5, n=1,
                         dtype=state_before.dtype)
    return tensor.switch(use_noise, state_before * mask, state_before * 0.5)
# make prefix-appended name
def _p(pp, name):
return '%s_%s' % (pp, name)
# initialize Theano shared variables according to the initial parameters
def init_tparams(params):
    # Wrap every numpy parameter in a named Theano shared variable,
    # preserving order (params is an OrderedDict).
    tparams = OrderedDict()
    for kk, pp in params.iteritems():
        tparams[kk] = theano.shared(params[kk], name=kk)
    return tparams
# load parameters
def load_params(path, params):
    """Overwrite entries of ``params`` with arrays from the archive at
    ``path`` (``.npz``/``.npy``).

    Keys present in ``params`` but missing from the archive are left
    untouched (with a warning). Returns ``params``, mutated in place.
    """
    archive = numpy.load(path)
    for name, _ in params.iteritems():
        if name not in archive:
            warnings.warn('%s is not in the archive' % name)
            continue
        params[name] = archive[name]
    return params
# layers: 'name': ('parameter initializer', 'feedforward')
# Registry mapping a layer type to the names of its init/apply functions;
# get_layer() resolves the names to the actual functions via eval().
layers = {'ff': ('param_init_fflayer', 'fflayer'),
          'gru': ('param_init_gru', 'gru_layer'),
          'gru_cond': ('param_init_gru_cond', 'gru_cond_layer'),
          'funcf_layer': ('param_init_funcf_layer', 'funcf_layer'),
          }
def get_layer(name):
    """Resolve a layer type to its ``(param_init_fn, apply_fn)`` pair.

    The ``layers`` registry stores function *names*; they are resolved
    here with eval() against this module's globals (trusted input only).
    """
    init_name, apply_name = layers[name]
    return (eval(init_name), eval(apply_name))
# some utilities
def ortho_weight(ndim):
    """Random (ndim, ndim) orthogonal matrix as float32, obtained from the
    SVD of a standard Gaussian matrix."""
    gaussian = numpy.random.randn(ndim, ndim)
    basis, _, _ = numpy.linalg.svd(gaussian)
    return basis.astype('float32')
def norm_weight(nin, nout=None, scale=0.01, ortho=True):
    """Random float32 weight matrix of shape (nin, nout).

    Square matrices are orthogonal when ``ortho`` is True; otherwise the
    entries are ``scale``-scaled standard Gaussians. ``nout`` defaults
    to ``nin``.
    """
    if nout is None:
        nout = nin
    if ortho and nin == nout:
        return ortho_weight(nin).astype('float32')
    return (scale * numpy.random.randn(nin, nout)).astype('float32')
def relu(x):
    # Rectified linear activation (symbolic, via theano).
    return tensor.nnet.relu(x)
def tanh(x):
    # Hyperbolic tangent activation (symbolic, via theano).
    return tensor.tanh(x)
def linear(x):
    # Identity activation (no nonlinearity).
    return x
def concatenate(tensor_list, axis=0):
    """
    Alternative implementation of `theano.tensor.concatenate`.
    This function does exactly the same thing, but contrary to Theano's own
    implementation, the gradient is implemented on the GPU.
    Backpropagating through `theano.tensor.concatenate` yields slowdowns
    because the inverse operation (splitting) needs to be done on the CPU.
    This implementation does not have that problem.

    :usage:
        >>> x, y = theano.tensor.matrices('x', 'y')
        >>> c = concatenate([x, y], axis=1)

    :parameters:
        - tensor_list : list
            list of Theano tensor expressions that should be concatenated.

        - axis : int
            the tensors will be joined along this axis.

    :returns:
        - out : tensor
            the concatenated tensor expression.
    """
    # Output shape: identical to the inputs except along ``axis``, where
    # the input sizes are summed.
    concat_size = sum(tt.shape[axis] for tt in tensor_list)

    output_shape = ()
    for k in range(axis):
        output_shape += (tensor_list[0].shape[k],)
    output_shape += (concat_size,)
    for k in range(axis + 1, tensor_list[0].ndim):
        output_shape += (slice(None),) if False else (tensor_list[0].shape[k],)

    out = tensor.zeros(output_shape)
    offset = 0
    # Write each input into its own slice of the output along ``axis``.
    for tt in tensor_list:
        indices = ()
        for k in range(axis):
            indices += (slice(None),)
        indices += (slice(offset, offset + tt.shape[axis]),)
        for k in range(axis + 1, tensor_list[0].ndim):
            indices += (slice(None),)

        out = tensor.set_subtensor(out[indices], tt)
        offset += tt.shape[axis]

    return out
# batch preparation
def prepare_data(seqs_x, seqs_y, label, maxlen=None, n_words_src=30000,
                 n_words=30000):
    """Pad a minibatch of sentence pairs into index and mask arrays.

    Each sequence is wrapped with BOS (index 1) at position 0 and EOS
    (index 2) immediately after its last token; the masks cover
    length + 2 positions.  When `maxlen` is given, pairs where either
    side has length >= maxlen are dropped; if nothing survives, five
    `None`s are returned.  `n_words_src` / `n_words` are accepted for
    interface compatibility but are not used here.

    Returns (x, x_mask, y, y_mask, flabel) where x/y are int64 arrays of
    shape (max_len + 2, n_samples), the masks are float32 of the same
    shape, and flabel is an int64 vector of class labels.
    """
    x_lens = [len(s) for s in seqs_x]
    y_lens = [len(s) for s in seqs_y]

    if maxlen is not None:
        kept = [(lx, sx, ly, sy, lab)
                for lx, sx, ly, sy, lab
                in zip(x_lens, seqs_x, y_lens, seqs_y, label)
                if lx < maxlen and ly < maxlen]
        if kept:
            x_lens, seqs_x, y_lens, seqs_y, label = \
                [list(col) for col in zip(*kept)]
        else:
            x_lens, seqs_x, y_lens, seqs_y, label = [], [], [], [], []

    if not x_lens or not y_lens:
        return None, None, None, None, None

    n_samples = len(seqs_x)
    max_x = numpy.max(x_lens) + 2  # room for BOS and EOS
    max_y = numpy.max(y_lens) + 2

    x = numpy.zeros((max_x, n_samples), dtype='int64')
    y = numpy.zeros((max_y, n_samples), dtype='int64')
    x_mask = numpy.zeros((max_x, n_samples), dtype='float32')
    y_mask = numpy.zeros((max_y, n_samples), dtype='float32')
    flabel = numpy.asarray(label, dtype='int64')

    for col, (s_x, s_y) in enumerate(zip(seqs_x, seqs_y)):
        lx, ly = x_lens[col], y_lens[col]
        x[0, col] = 1
        x[1:lx + 1, col] = s_x
        x[lx + 1, col] = 2
        x_mask[:lx + 2, col] = 1.
        y[0, col] = 1
        y[1:ly + 1, col] = s_y
        y[ly + 1, col] = 2
        y_mask[:ly + 2, col] = 1.
    return x, x_mask, y, y_mask, flabel
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None,
                       ortho=True):
    """Allocate weights for a feedforward layer.

    Adds `<prefix>_W` (nin x nout) and `<prefix>_b` (nout,) to `params`
    and returns the updated dict.  Missing sizes default to
    options['dim_proj'].
    """
    nin = options['dim_proj'] if nin is None else nin
    nout = options['dim_proj'] if nout is None else nout
    params[_p(prefix, 'W')] = norm_weight(nin, nout, scale=0.01, ortho=ortho)
    params[_p(prefix, 'b')] = numpy.zeros((nout,)).astype('float32')
    return params
def fflayer(tparams, state_below, options, prefix='rconv',
            activ='lambda x: tensor.tanh(x)', **kwargs):
    """Apply a feedforward layer: activ(state_below . W + b).

    `activ` is a Python expression string (evaluated with eval) naming a
    callable, e.g. 'relu' or 'lambda x: tensor.tanh(x)'.  Weights are
    looked up under `prefix` in `tparams`.
    """
    return eval(activ)(
        tensor.dot(state_below, tparams[_p(prefix, 'W')]) +
        tparams[_p(prefix, 'b')])
# functionF layer
def param_init_funcf_layer(options, params, prefix='funcF', nin=None, nout=None,
                           ortho=True):
    """Allocate weights for the two-layer 'function F' MLP.

    Adds `<prefix>_W1`/`_b1` (nin -> nout) and `<prefix>_W2`/`_b2`
    (nout -> nout) to `params`.  Defaults: nin = options['dim_word'],
    nout = options['dim_proj'].
    """
    nin = options['dim_word'] if nin is None else nin
    nout = options['dim_proj'] if nout is None else nout
    # first layer maps nin -> nout, second layer nout -> nout
    for d_in, d_out, suffix in [(nin, nout, '1'), (nout, nout, '2')]:
        params[_p(prefix, 'W' + suffix)] = norm_weight(d_in, d_out,
                                                       scale=0.01, ortho=ortho)
        params[_p(prefix, 'b' + suffix)] = numpy.zeros((d_out,)).astype('float32')
    return params
def funcf_layer(tparams, state_below, options, prefix='funcF',
                activ='lambda x: tensor.tanh(x)', **kwargs):
    """Apply the two-layer 'function F' MLP.

    First affine transform (W1, b1) is linear; the second (W2, b2) is
    followed by `activ`, a Python expression string resolved with eval.
    """
    emb_proj = (tensor.dot(state_below, tparams[_p(prefix, 'W1')]) +
                tparams[_p(prefix, 'b1')])
    return eval(activ)(
        tensor.dot(emb_proj, tparams[_p(prefix, 'W2')]) +
        tparams[_p(prefix, 'b2')])
# GRU layer
def param_init_gru(options, params, prefix='gru', nin=None, dim=None):
    """Allocate GRU parameters under `prefix`.

    Adds to `params`: W/b (input -> reset & update gates, stacked along
    the last axis), U (hidden -> gates), Wx/bx (input -> candidate
    state) and Ux (hidden -> candidate state).  Missing sizes default to
    options['dim_proj'].
    """
    nin = options['dim_proj'] if nin is None else nin
    dim = options['dim_proj'] if dim is None else dim
    # embedding to gates transformation weights, biases
    params[_p(prefix, 'W')] = numpy.concatenate(
        [norm_weight(nin, dim), norm_weight(nin, dim)], axis=1)
    params[_p(prefix, 'b')] = numpy.zeros((2 * dim,)).astype('float32')
    # recurrent transformation weights for gates
    params[_p(prefix, 'U')] = numpy.concatenate(
        [ortho_weight(dim), ortho_weight(dim)], axis=1)
    # embedding to hidden state proposal weights, biases
    params[_p(prefix, 'Wx')] = norm_weight(nin, dim)
    params[_p(prefix, 'bx')] = numpy.zeros((dim,)).astype('float32')
    # recurrent transformation weights for hidden state proposal
    params[_p(prefix, 'Ux')] = ortho_weight(dim)
    return params
def gru_layer(tparams, state_below, options, prefix='gru', mask=None,
              **kwargs):
    """Run a GRU over `state_below` (time x batch x emb, or 2-d for a
    single step per sample).

    Returns a one-element list whose item is the sequence of hidden
    states produced by theano.scan.  `mask` (time x batch) freezes the
    state where it is 0 (padded positions).
    """
    nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1

    # hidden size, recovered from the recurrent proposal weight
    dim = tparams[_p(prefix, 'Ux')].shape[1]

    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)

    # utility function to slice a tensor
    def _slice(_x, n, dim):
        if _x.ndim == 3:
            return _x[:, :, n * dim:(n + 1) * dim]
        return _x[:, n * dim:(n + 1) * dim]

    # state_below is the input word embeddings
    # input to the gates, concatenated
    state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + \
        tparams[_p(prefix, 'b')]
    # input to compute the hidden state proposal
    state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + \
        tparams[_p(prefix, 'bx')]

    # step function to be used by scan
    # arguments    | sequences |outputs-info| non-seqs
    def _step_slice(m_, x_, xx_, h_, U, Ux):
        preact = tensor.dot(h_, U)
        preact += x_
        # reset and update gates
        r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
        u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
        # compute the hidden state proposal
        preactx = tensor.dot(h_, Ux)
        preactx = preactx * r
        preactx = preactx + xx_
        # hidden state proposal
        h = tensor.tanh(preactx)
        # leaky integrate and obtain next hidden state
        h = u * h_ + (1. - u) * h
        # keep the previous state wherever the mask is 0
        h = m_[:, None] * h + (1. - m_)[:, None] * h_
        return h

    # prepare scan arguments
    seqs = [mask, state_below_, state_belowx]
    init_states = [tensor.alloc(0., n_samples, dim)]
    _step = _step_slice
    shared_vars = [tparams[_p(prefix, 'U')],
                   tparams[_p(prefix, 'Ux')]]

    rval, updates = theano.scan(_step,
                                sequences=seqs,
                                outputs_info=init_states,
                                non_sequences=shared_vars,
                                name=_p(prefix, '_layers'),
                                n_steps=nsteps,
                                profile=profile,
                                strict=True)
    rval = [rval]
    return rval
# Conditional GRU layer with Attention
def param_init_gru_cond(options, params, prefix='gru_cond',
                        nin=None, dim=None, dimctx=None,
                        nin_nonlin=None, dim_nonlin=None):
    """Allocate parameters for the conditional GRU with attention.

    Two stacked GRU transitions (plain keys, then the *_nl "non-linear"
    second transition fed by the attention context) plus the attention
    parameters.  Missing sizes default to options['dim'].
    """
    if nin is None:
        nin = options['dim']
    if dim is None:
        dim = options['dim']
    if dimctx is None:
        dimctx = options['dim']
    if nin_nonlin is None:
        nin_nonlin = nin
    if dim_nonlin is None:
        dim_nonlin = dim

    # first GRU transition: input and recurrent weights for the gates
    W = numpy.concatenate([norm_weight(nin, dim),
                           norm_weight(nin, dim)], axis=1)
    params[_p(prefix, 'W')] = W
    params[_p(prefix, 'b')] = numpy.zeros((2 * dim,)).astype('float32')
    U = numpy.concatenate([ortho_weight(dim_nonlin),
                           ortho_weight(dim_nonlin)], axis=1)
    params[_p(prefix, 'U')] = U

    # first transition: candidate-state weights
    Wx = norm_weight(nin_nonlin, dim_nonlin)
    params[_p(prefix, 'Wx')] = Wx
    Ux = ortho_weight(dim_nonlin)
    params[_p(prefix, 'Ux')] = Ux
    params[_p(prefix, 'bx')] = numpy.zeros((dim_nonlin,)).astype('float32')

    # second (context-conditioned) transition
    U_nl = numpy.concatenate([ortho_weight(dim_nonlin),
                              ortho_weight(dim_nonlin)], axis=1)
    params[_p(prefix, 'U_nl')] = U_nl
    params[_p(prefix, 'b_nl')] = numpy.zeros((2 * dim_nonlin,)).astype('float32')
    Ux_nl = ortho_weight(dim_nonlin)
    params[_p(prefix, 'Ux_nl')] = Ux_nl
    params[_p(prefix, 'bx_nl')] = numpy.zeros((dim_nonlin,)).astype('float32')

    # context to LSTM
    Wc = norm_weight(dimctx, dim * 2)
    params[_p(prefix, 'Wc')] = Wc
    Wcx = norm_weight(dimctx, dim)
    params[_p(prefix, 'Wcx')] = Wcx

    # attention: combined -> hidden
    W_comb_att = norm_weight(dim, dimctx)
    params[_p(prefix, 'W_comb_att')] = W_comb_att
    # attention: context -> hidden
    Wc_att = norm_weight(dimctx)
    params[_p(prefix, 'Wc_att')] = Wc_att
    # attention: hidden bias
    b_att = numpy.zeros((dimctx,)).astype('float32')
    params[_p(prefix, 'b_att')] = b_att
    # attention:
    U_att = norm_weight(dimctx, 1)
    params[_p(prefix, 'U_att')] = U_att
    c_att = numpy.zeros((1,)).astype('float32')
    # NOTE: stored under the key 'c_tt' (not 'c_att'); gru_cond_layer
    # looks it up by that key, so do not "fix" the spelling — it would
    # break loading of previously saved models.
    params[_p(prefix, 'c_tt')] = c_att
    return params
def gru_cond_layer(tparams, state_below, options, prefix='gru',
                   mask=None, context=None, one_step=False,
                   init_memory=None, init_state=None,
                   context_mask=None,
                   **kwargs):
    """Conditional GRU decoder layer with attention.

    Per step: a first GRU transition from the previous state, then an
    attention read over `context` (#annotation x #sample x dimctx),
    then a second GRU transition conditioned on the attention context.
    Returns [hidden states, attention contexts, alignment weights];
    with `one_step=True` a single step is computed symbolically instead
    of a scan (used by the sampler).  `init_memory` is unused (kept for
    interface compatibility with LSTM-style layers).
    """
    assert context, 'Context must be provided'

    if one_step:
        assert init_state, 'previous state must be provided'

    nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1

    # mask
    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)

    dim = tparams[_p(prefix, 'Wcx')].shape[1]

    # initial/previous state
    if init_state is None:
        init_state = tensor.alloc(0., n_samples, dim)

    # projected context
    assert context.ndim == 3, \
        'Context must be 3-d: #annotation x #sample x dim'
    pctx_ = tensor.dot(context, tparams[_p(prefix, 'Wc_att')]) + \
        tparams[_p(prefix, 'b_att')]

    def _slice(_x, n, dim):
        if _x.ndim == 3:
            return _x[:, :, n * dim:(n + 1) * dim]
        return _x[:, n * dim:(n + 1) * dim]

    # projected x
    state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + \
        tparams[_p(prefix, 'bx')]
    state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + \
        tparams[_p(prefix, 'b')]

    def _step_slice(m_, x_, xx_, h_, ctx_, alpha_, pctx_, cc_,
                    U, Wc, W_comb_att, U_att, c_tt, Ux, Wcx,
                    U_nl, Ux_nl, b_nl, bx_nl):
        # --- first GRU transition (input-driven) ---
        preact1 = tensor.dot(h_, U)
        preact1 += x_
        preact1 = tensor.nnet.sigmoid(preact1)

        r1 = _slice(preact1, 0, dim)
        u1 = _slice(preact1, 1, dim)

        preactx1 = tensor.dot(h_, Ux)
        preactx1 *= r1
        preactx1 += xx_

        h1 = tensor.tanh(preactx1)

        h1 = u1 * h_ + (1. - u1) * h1
        h1 = m_[:, None] * h1 + (1. - m_)[:, None] * h_

        # attention
        pstate_ = tensor.dot(h1, W_comb_att)
        pctx__ = pctx_ + pstate_[None, :, :]
        # pctx__ += xc_
        pctx__ = tensor.tanh(pctx__)
        alpha = tensor.dot(pctx__, U_att) + c_tt
        alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
        alpha = tensor.exp(alpha)
        if context_mask:
            # zero out attention on padded source positions
            alpha = alpha * context_mask
        alpha = alpha / alpha.sum(0, keepdims=True)
        ctx_ = (cc_ * alpha[:, :, None]).sum(0)  # current context

        # --- second GRU transition (context-driven) ---
        preact2 = tensor.dot(h1, U_nl) + b_nl
        preact2 += tensor.dot(ctx_, Wc)
        preact2 = tensor.nnet.sigmoid(preact2)

        r2 = _slice(preact2, 0, dim)
        u2 = _slice(preact2, 1, dim)

        preactx2 = tensor.dot(h1, Ux_nl) + bx_nl
        preactx2 *= r2
        preactx2 += tensor.dot(ctx_, Wcx)

        h2 = tensor.tanh(preactx2)

        h2 = u2 * h1 + (1. - u2) * h2
        h2 = m_[:, None] * h2 + (1. - m_)[:, None] * h1

        return h2, ctx_, alpha.T  # pstate_, preact, preactx, r, u

    seqs = [mask, state_below_, state_belowx]
    # seqs = [mask, state_below_, state_belowx, state_belowc]
    _step = _step_slice

    shared_vars = [tparams[_p(prefix, 'U')],
                   tparams[_p(prefix, 'Wc')],
                   tparams[_p(prefix, 'W_comb_att')],
                   tparams[_p(prefix, 'U_att')],
                   tparams[_p(prefix, 'c_tt')],
                   tparams[_p(prefix, 'Ux')],
                   tparams[_p(prefix, 'Wcx')],
                   tparams[_p(prefix, 'U_nl')],
                   tparams[_p(prefix, 'Ux_nl')],
                   tparams[_p(prefix, 'b_nl')],
                   tparams[_p(prefix, 'bx_nl')]]

    if one_step:
        rval = _step(*(seqs + [init_state, None, None, pctx_, context] +
                       shared_vars))
    else:
        rval, updates = theano.scan(_step,
                                    sequences=seqs,
                                    outputs_info=[init_state,
                                                  tensor.alloc(0., n_samples,
                                                               context.shape[2]),
                                                  tensor.alloc(0., n_samples,
                                                               context.shape[0])],
                                    non_sequences=[pctx_, context] + shared_vars,
                                    name=_p(prefix, '_layers'),
                                    n_steps=nsteps,
                                    profile=profile,
                                    strict=True)
    return rval
'''
# initialize all parameters
def init_params(options):
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])
# encoder: bidirectional RNN
params = get_layer(options['encoder'])[0](options, params,
prefix='encoder',
nin=options['dim_word'],
dim=options['dim'])
params = get_layer(options['encoder'])[0](options, params,
prefix='encoder_r',
nin=options['dim_word'],
dim=options['dim'])
ctxdim = 2 * options['dim']
# init_state, init_cell
params = get_layer('ff')[0](options, params, prefix='ff_state',
nin=ctxdim, nout=options['dim'])
# decoder
params = get_layer(options['decoder'])[0](options, params,
prefix='decoder',
nin=options['dim_word'],
dim=options['dim'],
dimctx=ctxdim)
# readout
params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm',
nin=options['dim'], nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_prev',
nin=options['dim_word'],
nout=options['dim_word'], ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx',
nin=ctxdim, nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit',
nin=options['dim_word'],
nout=options['n_words'])
return params
'''
def init_params(options):
    """Build the OrderedDict of numpy parameter arrays for build_dam.

    Uses the pre-trained embedding matrix from options['allembs'] as
    'Wemb', a comparison MLP ('funcG'), and a two-layer readout ending
    in options['class_num'] logits.
    """
    params = OrderedDict()
    # embedding
    #params['Wemb'] = norm_weight(options['dict_size'], options['dim_word'])
    params['Wemb'] = options['allembs']
    # params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])
    # funcf
    #params = get_layer('funcf_layer')[0](options, params,
    #                                     prefix='funcf',
    #                                     nin=options['dim_word'],
    #                                     nout=options['dim'])
    # funcG
    #params = get_layer('funcf_layer')[0](options, params,
    #                                     prefix='funcG',
    #                                     nin=options['dim_word'] * 2,
    #                                     nout=options['dim'])
    # comparison layer: takes [embedding ; aligned phrase] pairs
    params = get_layer('ff')[0](options, params, prefix='funcG',
                                nin=options['dim'] * 2, nout=options['dim'],
                                ortho=False)
    # readout
    params = get_layer('ff')[0](options, params, prefix='ff_logit',
                                nin=options['dim'] * 2, nout=options['dim'],
                                ortho=False)
    params = get_layer('ff')[0](options, params, prefix='ff_logit_linear',
                                nin=options['dim'], nout=options['class_num'],
                                ortho=False)
    return params
def build_dam(tparams, options):
    """Build the training graph for the attention-based sentence-pair
    classifier (decomposable-attention style).

    Returns (trng, use_noise, x, x_mask, y, y_mask, label,
    predict_label, cost): symbolic inputs for premise/hypothesis index
    matrices and masks, the gold label vector, the argmax prediction,
    and the per-sample cross-entropy cost.
    """
    trng = RandomStreams(1234)
    use_noise = theano.shared(numpy.float32(0.))

    # description string: #words x #samples
    x = tensor.matrix('x', dtype='int64')
    x_mask = tensor.matrix('x_mask', dtype='float32')
    y = tensor.matrix('y', dtype='int64')
    y_mask = tensor.matrix('y_mask', dtype='float32')
    #all_embs = tensor.matrix('emb', dtype='float32')
    label = tensor.vector('label', dtype='int64')

    n_timesteps_h = x.shape[0]
    n_timesteps_t = y.shape[0]
    n_samples = x.shape[1]

    # embed both sentences: (time, sample, dim_word)
    emb_h = tparams['Wemb'][x.flatten()]
    emb_h = emb_h.reshape([n_timesteps_h, n_samples, options['dim_word']])
    if options['use_dropout']:
        emb_h = dropout_layer(emb_h, use_noise, trng)
    emb_t = tparams['Wemb'][y.flatten()]
    emb_t = emb_t.reshape([n_timesteps_t, n_samples, options['dim_word']])
    if options['use_dropout']:
        emb_t = dropout_layer(emb_t, use_noise, trng)
    #proj_h = get_layer('funcf_layer')[1](tparams, emb_h, options,
    #                                     prefix='funcf')
    #proj_t = get_layer('funcf_layer')[1](tparams, emb_t, options,
    #                                     prefix='funcf')

    # pairwise word-similarity scores per sample, then softmax in both
    # directions (max-subtracted for numerical stability); after the
    # dimshuffles the weights are (len_h, len_t, n_samples)
    weight_matrix = tensor.batched_dot(emb_h.dimshuffle(1, 0, 2), emb_t.dimshuffle(1, 2, 0))
    weight_matrix_1 = tensor.exp(weight_matrix - weight_matrix.max(1, keepdims=True)).dimshuffle(1, 2, 0)
    weight_matrix_2 = tensor.exp(weight_matrix - weight_matrix.max(2, keepdims=True)).dimshuffle(1, 2, 0)
    # mask out padded positions before normalizing
    alpha_weight = weight_matrix_1 * x_mask.dimshuffle(0, 'x', 1) / weight_matrix_1.sum(0, keepdims=True)
    beta_weight = weight_matrix_2 * y_mask.dimshuffle('x', 0, 1) / weight_matrix_2.sum(1, keepdims=True)
    # soft alignments: alpha aligns premise words to each hypothesis
    # position, beta the reverse — assumes emb_* are (time, sample, dim);
    # TODO(review): confirm axis semantics against the DAM paper
    alpha = (emb_h.dimshuffle(0, 'x', 1, 2) * alpha_weight.dimshuffle(0, 1, 2, 'x')).sum(0)
    beta = (emb_t.dimshuffle('x', 0, 1, 2) * beta_weight.dimshuffle(0, 1, 2, 'x')).sum(1)

    # compare: each word paired with its aligned phrase
    v1 = concatenate([emb_h, beta], axis=2)
    v2 = concatenate([emb_t, alpha], axis=2)
    proj_v1 = get_layer('ff')[1](tparams, v1, options, prefix='funcG', activ='relu')
    proj_v2 = get_layer('ff')[1](tparams, v2, options, prefix='funcG', activ='relu')

    # aggregate: masked sum over time, then classify
    logit1 = (proj_v1 * x_mask[:, :, None]).sum(0)
    logit2 = (proj_v2 * y_mask[:, :, None]).sum(0)
    logit = concatenate([logit1, logit2], axis=1)
    if options['use_dropout']:
        logit = dropout_layer(logit, use_noise, trng)
    logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit', activ='tanh')
    if options['use_dropout']:
        logit = dropout_layer(logit, use_noise, trng)
    logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit_linear', activ='linear')

    probs = tensor.nnet.softmax(logit)
    predict_label = probs.argmax(axis=1)
    #cost = -tensor.log(probs)[tensor.arange(label.shape[0]), label]
    cost = tensor.nnet.categorical_crossentropy(probs, label)
    return trng, use_noise, x, x_mask, y, y_mask, label, predict_label, cost
# build a training model
def build_model(tparams, options):
    """Build the full NMT training graph: bidirectional GRU encoder,
    attention decoder, and per-sentence negative log-likelihood cost.

    Returns (trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost);
    opt_ret['dec_alphas'] holds the attention alignment matrix.
    """
    opt_ret = dict()

    trng = RandomStreams(1234)
    use_noise = theano.shared(numpy.float32(0.))

    # description string: #words x #samples
    x = tensor.matrix('x', dtype='int64')
    x_mask = tensor.matrix('x_mask', dtype='float32')
    y = tensor.matrix('y', dtype='int64')
    y_mask = tensor.matrix('y_mask', dtype='float32')

    # for the backward rnn, we just need to invert x and x_mask
    xr = x[::-1]
    xr_mask = x_mask[::-1]

    n_timesteps = x.shape[0]
    n_timesteps_trg = y.shape[0]
    n_samples = x.shape[1]

    # word embedding for forward rnn (source)
    emb = tparams['Wemb'][x.flatten()]
    emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
    proj = get_layer(options['encoder'])[1](tparams, emb, options,
                                            prefix='encoder',
                                            mask=x_mask)
    # word embedding for backward rnn (source)
    embr = tparams['Wemb'][xr.flatten()]
    embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
    projr = get_layer(options['encoder'])[1](tparams, embr, options,
                                             prefix='encoder_r',
                                             mask=xr_mask)

    # context will be the concatenation of forward and backward rnns
    ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim - 1)

    # mean of the context (across time) will be used to initialize decoder rnn
    ctx_mean = (ctx * x_mask[:, :, None]).sum(0) / x_mask.sum(0)[:, None]

    # or you can use the last state of forward + backward encoder rnns
    # ctx_mean = concatenate([proj[0][-1], projr[0][-1]], axis=proj[0].ndim-2)

    # initial decoder state
    init_state = get_layer('ff')[1](tparams, ctx_mean, options,
                                    prefix='ff_state', activ='tanh')

    # word embedding (target), we will shift the target sequence one time step
    # to the right. This is done because of the bi-gram connections in the
    # readout and decoder rnn. The first target will be all zeros and we will
    # not condition on the last output.
    emb = tparams['Wemb_dec'][y.flatten()]
    emb = emb.reshape([n_timesteps_trg, n_samples, options['dim_word']])
    emb_shifted = tensor.zeros_like(emb)
    emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
    emb = emb_shifted

    # decoder - pass through the decoder conditional gru with attention
    proj = get_layer(options['decoder'])[1](tparams, emb, options,
                                            prefix='decoder',
                                            mask=y_mask, context=ctx,
                                            context_mask=x_mask,
                                            one_step=False,
                                            init_state=init_state)
    # hidden states of the decoder gru
    proj_h = proj[0]
    # weighted averages of context, generated by attention module
    ctxs = proj[1]
    # weights (alignment matrix)
    opt_ret['dec_alphas'] = proj[2]

    # compute word probabilities
    logit_lstm = get_layer('ff')[1](tparams, proj_h, options,
                                    prefix='ff_logit_lstm', activ='linear')
    logit_prev = get_layer('ff')[1](tparams, emb, options,
                                    prefix='ff_logit_prev', activ='linear')
    logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
                                   prefix='ff_logit_ctx', activ='linear')
    logit = tensor.tanh(logit_lstm + logit_prev + logit_ctx)
    if options['use_dropout']:
        logit = dropout_layer(logit, use_noise, trng)
    logit = get_layer('ff')[1](tparams, logit, options,
                               prefix='ff_logit', activ='linear')
    logit_shp = logit.shape
    probs = tensor.nnet.softmax(logit.reshape([logit_shp[0] * logit_shp[1],
                                               logit_shp[2]]))

    # cost: negative log-probability of each gold target word, masked
    # and summed over time per sentence
    y_flat = y.flatten()
    y_flat_idx = tensor.arange(y_flat.shape[0]) * options['n_words'] + y_flat
    cost = -tensor.log(probs.flatten()[y_flat_idx])
    cost = cost.reshape([y.shape[0], y.shape[1]])
    cost = (cost * y_mask).sum(0)

    return trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost
# build a sampler
def build_sampler(tparams, options, trng, use_noise):
    """Compile the two sampling functions used at decode time.

    f_init(x) encodes the source and returns (initial decoder state,
    encoder context); f_next(y, ctx, init_state) performs one decoder
    step and returns (next-word probabilities, a sampled word, the next
    decoder state).
    """
    x = tensor.matrix('x', dtype='int64')
    xr = x[::-1]
    n_timesteps = x.shape[0]
    n_samples = x.shape[1]

    # word embedding (source), forward and backward
    emb = tparams['Wemb'][x.flatten()]
    emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
    embr = tparams['Wemb'][xr.flatten()]
    embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])

    # encoder
    proj = get_layer(options['encoder'])[1](tparams, emb, options,
                                            prefix='encoder')
    projr = get_layer(options['encoder'])[1](tparams, embr, options,
                                             prefix='encoder_r')

    # concatenate forward and backward rnn hidden states
    ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim - 1)

    # get the input for decoder rnn initializer mlp
    ctx_mean = ctx.mean(0)
    # ctx_mean = concatenate([proj[0][-1],projr[0][-1]], axis=proj[0].ndim-2)
    init_state = get_layer('ff')[1](tparams, ctx_mean, options,
                                    prefix='ff_state', activ='tanh')

    print 'Building f_init...',
    outs = [init_state, ctx]
    f_init = theano.function([x], outs, name='f_init', profile=profile)
    print 'Done'

    # x: 1 x 1
    y = tensor.vector('y_sampler', dtype='int64')
    init_state = tensor.matrix('init_state', dtype='float32')

    # if it's the first word, emb should be all zero and it is indicated by -1
    emb = tensor.switch(y[:, None] < 0,
                        tensor.alloc(0., 1, tparams['Wemb_dec'].shape[1]),
                        tparams['Wemb_dec'][y])

    # apply one step of conditional gru with attention
    proj = get_layer(options['decoder'])[1](tparams, emb, options,
                                            prefix='decoder',
                                            mask=None, context=ctx,
                                            one_step=True,
                                            init_state=init_state)
    # get the next hidden state
    next_state = proj[0]
    # get the weighted averages of context for this target word y
    ctxs = proj[1]

    logit_lstm = get_layer('ff')[1](tparams, next_state, options,
                                    prefix='ff_logit_lstm', activ='linear')
    logit_prev = get_layer('ff')[1](tparams, emb, options,
                                    prefix='ff_logit_prev', activ='linear')
    logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
                                   prefix='ff_logit_ctx', activ='linear')
    logit = tensor.tanh(logit_lstm + logit_prev + logit_ctx)
    if options['use_dropout']:
        logit = dropout_layer(logit, use_noise, trng)
    logit = get_layer('ff')[1](tparams, logit, options,
                               prefix='ff_logit', activ='linear')

    # compute the softmax probability
    next_probs = tensor.nnet.softmax(logit)

    # sample from softmax distribution to get the sample
    next_sample = trng.multinomial(pvals=next_probs).argmax(1)

    # compile a function to do the whole thing above, next word probability,
    # sampled word for the next target, next hidden state to be used
    print 'Building f_next..',
    inps = [y, ctx, init_state]
    outs = [next_probs, next_sample, next_state]
    f_next = theano.function(inps, outs, name='f_next', profile=profile)
    print 'Done'

    return f_init, f_next
# generate sample, either with stochastic sampling or beam search. Note that,
# this function iteratively calls f_init and f_next functions.
def gen_sample(tparams, f_init, f_next, x, options, trng=None, k=1, maxlen=30,
               stochastic=True, argmax=False):
    """Generate a target sequence for source `x` with f_init/f_next.

    With `stochastic=True` (k must be 1) one sequence is sampled (or
    argmax-decoded) greedily; otherwise a beam search of width `k` is
    run.  Returns (sample, sample_score): a list of word-index lists and
    their negative log-probabilities (a single list and scalar in the
    stochastic case).  EOS is word index 0.
    """
    # k is the beam size we have
    if k > 1:
        assert not stochastic, \
            'Beam search does not support stochastic sampling'

    sample = []
    sample_score = []
    if stochastic:
        sample_score = 0

    live_k = 1
    dead_k = 0

    hyp_samples = [[]] * live_k
    hyp_scores = numpy.zeros(live_k).astype('float32')
    hyp_states = []

    # get initial state of decoder rnn and encoder context
    ret = f_init(x)
    next_state, ctx0 = ret[0], ret[1]
    next_w = -1 * numpy.ones((1,)).astype('int64')  # bos indicator

    for ii in xrange(maxlen):
        # replicate the context for each live hypothesis
        ctx = numpy.tile(ctx0, [live_k, 1])
        inps = [next_w, ctx, next_state]
        ret = f_next(*inps)
        next_p, next_w, next_state = ret[0], ret[1], ret[2]

        if stochastic:
            if argmax:
                nw = next_p[0].argmax()
            else:
                nw = next_w[0]
            sample.append(nw)
            sample_score -= numpy.log(next_p[0, nw])
            if nw == 0:
                break
        else:
            # extend every live hypothesis by every word; keep the best
            # (k - dead_k) by total score
            cand_scores = hyp_scores[:, None] - numpy.log(next_p)
            cand_flat = cand_scores.flatten()
            ranks_flat = cand_flat.argsort()[:(k - dead_k)]

            voc_size = next_p.shape[1]
            # integer division (Python 2 semantics) recovers the
            # hypothesis index; modulo recovers the word index
            trans_indices = ranks_flat / voc_size
            word_indices = ranks_flat % voc_size
            costs = cand_flat[ranks_flat]

            new_hyp_samples = []
            new_hyp_scores = numpy.zeros(k - dead_k).astype('float32')
            new_hyp_states = []

            for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
                new_hyp_samples.append(hyp_samples[ti] + [wi])
                new_hyp_scores[idx] = copy.copy(costs[idx])
                new_hyp_states.append(copy.copy(next_state[ti]))

            # check the finished samples
            new_live_k = 0
            hyp_samples = []
            hyp_scores = []
            hyp_states = []

            for idx in xrange(len(new_hyp_samples)):
                if new_hyp_samples[idx][-1] == 0:
                    # hypothesis ended with EOS: move it to the results
                    sample.append(new_hyp_samples[idx])
                    sample_score.append(new_hyp_scores[idx])
                    dead_k += 1
                else:
                    new_live_k += 1
                    hyp_samples.append(new_hyp_samples[idx])
                    hyp_scores.append(new_hyp_scores[idx])
                    hyp_states.append(new_hyp_states[idx])
            hyp_scores = numpy.array(hyp_scores)
            live_k = new_live_k

            if new_live_k < 1:
                break
            if dead_k >= k:
                break

            next_w = numpy.array([w[-1] for w in hyp_samples])
            next_state = numpy.array(hyp_states)

    if not stochastic:
        # dump every remaining one
        if live_k > 0:
            for idx in xrange(live_k):
                sample.append(hyp_samples[idx])
                sample_score.append(hyp_scores[idx])

    return sample, sample_score
# calculate the log probablities on a given corpus using translation model
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=False):
    """Evaluate the model on `iterator`.

    Runs `f_log_probs` on each prepared minibatch, collecting the
    per-sample costs and counting correct label predictions.  Returns
    (array of per-sample costs, accuracy).  Drops into ipdb if the mean
    cost becomes NaN.
    """
    probs = []
    n_done = 0
    correct_num = 0
    all_num = 0.  # float so the final division is exact under Python 2

    for x, y, label in iterator:
        n_done += len(x)
        all_num += len(label)
        x, x_mask, y, y_mask, label = prepare_data(x, y, label,
                                                   n_words_src=options['n_words_src'],
                                                   n_words=options['n_words'])
        pprobs, predict_label = f_log_probs(x, x_mask, y, y_mask, label)
        for pp in pprobs:
            probs.append(pp)

        # debugging hook: stop as soon as the loss diverges
        if numpy.isnan(numpy.mean(probs)):
            ipdb.set_trace()

        if verbose:
            print >> sys.stderr, '%d samples computed' % (n_done)
        correct_num += (label == predict_label).sum()
    print 'correct ', correct_num, 'all ', all_num
    return numpy.array(probs), correct_num/all_num
# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
def adam(lr, tparams, grads, inp, cost, beta1=0.9, beta2=0.999, e=1e-8):
    """Adam optimizer (Kingma & Ba, 2014).

    Returns (f_grad_shared, f_update): f_grad_shared computes the cost
    and stores the gradients in shared variables; f_update applies one
    Adam step at learning rate `lr` (and returns the flattened update
    pairs, mainly for debugging).
    """
    gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
               for k, p in tparams.iteritems()]
    gsup = [(gs, g) for gs, g in zip(gshared, grads)]

    f_grad_shared = theano.function(inp, cost, updates=gsup, profile=profile)

    updates = []

    t_prev = theano.shared(numpy.float32(0.))
    t = t_prev + 1.
    # bias-corrected effective step size
    lr_t = lr * tensor.sqrt(1. - beta2 ** t) / (1. - beta1 ** t)

    for p, g in zip(tparams.values(), gshared):
        # first and second moment estimates per parameter
        m = theano.shared(p.get_value() * 0., p.name + '_mean')
        v = theano.shared(p.get_value() * 0., p.name + '_variance')
        m_t = beta1 * m + (1. - beta1) * g
        v_t = beta2 * v + (1. - beta2) * g ** 2
        step = lr_t * m_t / (tensor.sqrt(v_t) + e)
        p_t = p - step
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p, p_t))
    # NOTE(review): placed outside the loop — a duplicate update of
    # t_prev would be rejected by theano.function; confirm against the
    # original (indentation was lost in this copy of the source).
    updates.append((t_prev, t))

    upreturn = [item for sublist in updates for item in sublist]
    f_update = theano.function([lr], upreturn, updates=updates,
                               on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
def adadelta(lr, tparams, grads, inp, cost):
    """Adadelta optimizer (Zeiler, 2012) with decay 0.95.

    Returns (f_grad_shared, f_update); the `lr` argument of f_update is
    accepted but unused (Adadelta scales its own steps).
    """
    print 'adadelta'
    # shared storage for raw gradients and the two running averages
    zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
                                  name='%s_grad' % k)
                    for k, p in tparams.iteritems()]
    running_up2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                 name='%s_rup2' % k)
                   for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                    name='%s_rgrad2' % k)
                      for k, p in tparams.iteritems()]

    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
             for rg2, g in zip(running_grads2, grads)]

    f_grad_shared = theano.function(inp, cost, updates=zgup + rg2up,
                                    profile=profile)

    # step = -RMS(previous updates) / RMS(gradients) * gradient
    updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
             for zg, ru2, rg2 in zip(zipped_grads, running_up2,
                                     running_grads2)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
             for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]

    f_update = theano.function([lr], [], updates=ru2up + param_up,
                               on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, inp, cost):
    """RMSProp with momentum (Graves-style, fixed 1e-4 base step).

    Returns (f_grad_shared, f_update); the `lr` argument of f_update is
    accepted but unused (the step size is hard-coded below).
    """
    # shared storage for gradients and their running first/second moments
    zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
                                  name='%s_grad' % k)
                    for k, p in tparams.iteritems()]
    running_grads = [theano.shared(p.get_value() * numpy.float32(0.),
                                   name='%s_rgrad' % k)
                     for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                    name='%s_rgrad2' % k)
                      for k, p in tparams.iteritems()]

    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
             for rg2, g in zip(running_grads2, grads)]

    f_grad_shared = theano.function(inp, cost, updates=zgup + rgup + rg2up,
                                    profile=profile)

    # momentum direction, normalized by the running gradient variance
    updir = [theano.shared(p.get_value() * numpy.float32(0.),
                           name='%s_updir' % k)
             for k, p in tparams.iteritems()]
    updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
                 for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
                                            running_grads2)]
    param_up = [(p, p + udn[1])
                for p, udn in zip(itemlist(tparams), updir_new)]
    f_update = theano.function([lr], [], updates=updir_new + param_up,
                               on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
def sgd(lr, tparams, grads, inp, cost):
    """Plain stochastic gradient descent.

    f_grad_shared computes the cost and copies the gradients into shared
    variables; f_update applies p <- p - lr * g for every parameter.
    """
    gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
               for k, p in tparams.iteritems()]

    f_grad_shared = theano.function(
        inp, cost,
        updates=[(gs, g) for gs, g in zip(gshared, grads)],
        profile=profile)

    descent_steps = [(p, p - lr * g)
                     for p, g in zip(itemlist(tparams), gshared)]
    f_update = theano.function([lr], [], updates=descent_steps,
                               profile=profile)

    return f_grad_shared, f_update
def train(dim_word=100, # word vector dimensionality
dim=1000, # the number of LSTM units
class_num=3,
encoder='gru',
decoder='gru_cond',
patience=100000, # early stopping patience
max_epochs=5000,
finish_after=10000000000, # finish after this many updates
dispFreq=100,
decay_c=0., # L2 regularization penalty
alpha_c=0., # alignment regularization
clip_c=-1., # gradient clipping threshold
lrate=0.01, # learning rate
n_words_src=100000, # source vocabulary size
n_words=100000, # target vocabulary size
maxlen=100, # maximum length of the description
optimizer='rmsprop',
batch_size=16,
valid_batch_size=16,
saveto='model.npz',
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
sampleFreq=100, # generate some samples after every sampleFreq
train_datasets=[
'../data/train_h.tok',
'../data/train_t.tok',
'../data/train_label.tok'],
valid_datasets=[
'../data/dev_h.tok',
'../data/dev_t.tok',
'../data/dev_label.tok'],
test_datasets=[
'../data/test_h.tok',
'../data/test_t.tok',
'../data/test_label.tok'],
dictionaries=[
'../data/snli_dict.pkl'],
embedings=[
'../data/snli_emb_300.pkl'],
use_dropout=False,
reload_=False,
overwrite=False):
# Model options
model_options = locals().copy()
log = logging.getLogger(os.path.basename(__file__).split('.')[0])
# load dictionaries and invert them
worddicts = [None] * len(dictionaries)
worddicts_r = [None] * len(dictionaries)
for ii, dd in enumerate(dictionaries):
with open(dd, 'rb') as f:
worddicts[ii] = pkl.load(f)
worddicts_r[ii] = dict()
for kk, vv in worddicts[ii].iteritems():
worddicts_r[ii][vv] = kk
print 'Loading embedings ...'
with open(embedings[0], 'rb') as f:
pretrained_embs = pkl.load(f)
#pretrained_embs = theano.shared(pretrained_embs, name='pretrained_embs')
print 'Done'
model_options['allembs'] = pretrained_embs
# reload options
if reload_ and os.path.exists(saveto):
print 'Reloading model options'
with open('%s.pkl' % saveto, 'rb') as f:
model_options = pkl.load(f)
print 'Loading data'
train = TextIterator(train_datasets[0], train_datasets[1],
train_datasets[2],
dictionaries[0],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=batch_size,
maxlen=maxlen)
valid = TextIterator(valid_datasets[0], valid_datasets[1],
valid_datasets[2],
dictionaries[0],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=valid_batch_size,
maxlen=maxlen)
test = TextIterator(test_datasets[0], test_datasets[1],
test_datasets[2],
dictionaries[0],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=valid_batch_size,
maxlen=maxlen)
print 'Building model'
params = init_params(model_options)
# reload parameters
if reload_ and os.path.exists(saveto):
print 'Reloading model parameters'
params = load_params(saveto, params)
tparams = init_tparams(params)
trng, use_noise, \
x, x_mask, y, y_mask, label, predict_label, \
cost = \
build_dam(tparams, model_options)
inps = [x, x_mask, y, y_mask, label]
# print 'Building sampler'
# f_init, f_next = build_sampler(tparams, model_options, trng, use_noise)
# before any regularizer
print 'Building f_log_probs...',
f_log_probs = theano.function(inps, [cost, predict_label], profile=profile)
print 'Done'
cost = cost.mean()
# apply L2 regularization on weights
# if decay_c > 0.:
# decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
# weight_decay = 0.
# for kk, vv in tparams.iteritems():
# weight_decay += (vv ** 2).sum()
# weight_decay *= decay_c
# cost += weight_decay
## regularize the alpha weights
#if alpha_c > 0. and not model_options['decoder'].endswith('simple'):
# alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
# alpha_reg = alpha_c * (
# (tensor.cast(y_mask.sum(0) // x_mask.sum(0), 'float32')[:, None] -
# opt_ret['dec_alphas'].sum(0)) ** 2).sum(1).mean()
# cost += alpha_reg
# after all regularizers - compile the computational graph for cost
print 'Building f_cost...',
f_cost = theano.function(inps, cost, profile=profile)
print 'Done'
print 'Computing gradient...',
grads = tensor.grad(cost, wrt=itemlist(tparams))
print 'Done'
# apply gradient clipping here
if clip_c > 0.:
g2 = 0.
for g in grads:
g2 += (g ** 2).sum()
new_grads = []
for g in grads:
new_grads.append(tensor.switch(g2 > (clip_c ** 2),
g / tensor.sqrt(g2) * clip_c,
g))
grads = new_grads
# compile the optimizer, the actual computational graph is compiled here
lr = tensor.scalar(name='lr')
print 'Building optimizers...',
f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
print 'Done'
print 'Optimization'
best_p = None
bad_counter = 0
bad_counter_acc = 0
uidx = 0
estop = False
history_errs = []
history_accs = []
epoch_accs = []
# reload history
if reload_ and os.path.exists(saveto):
rmodel = numpy.load(saveto)
history_errs = list(rmodel['history_errs'])
if 'uidx' in rmodel:
uidx = rmodel['uidx']
if validFreq == -1:
validFreq = len(train[0]) / batch_size
if saveFreq == -1:
saveFreq = len(train[0]) / batch_size
#if sampleFreq == -1:
# sampleFreq = len(train[0]) / batch_size
for eidx in xrange(max_epochs):
n_samples = 0
for x, y, label in train:
n_samples += len(x)
uidx += 1
use_noise.set_value(1.)
try:
x, x_mask, y, y_mask, label = prepare_data(x, y, label, maxlen=maxlen,
n_words_src=n_words_src,
n_words=n_words)
except ValueError:
print prepare_data(x, y, label, maxlen=maxlen)
raise
if x is None:
print 'Minibatch with zero sample under length ', maxlen
uidx -= 1
continue
ud_start = time.time()
# compute cost, grads and copy grads to shared variables
cost = f_grad_shared(x, x_mask, y, y_mask, label)
# do the update on parameters
#print 'Befor:'
#print tparams['ff_logit_W'].get_value()
f_update(lrate)
#print 'After:'
#print tparams['ff_logit_W'].get_value()
#update = f_update(lrate)
#print update
ud = time.time() - ud_start
# check for bad numbers, usually we remove non-finite elements
# and continue training - but not done here
if numpy.isnan(cost) or numpy.isinf(cost):
print 'NaN detected'
return 1., 1., 1.
# verbose
if numpy.mod(uidx, dispFreq) == 0:
log.info('Epoch: %d Update: %d Cost: %f UD: %f'%(eidx, uidx, cost, ud))
# save the best model so far, in addition, save the latest model
# into a separate file with the iteration number for external eval
if numpy.mod(uidx, saveFreq) == 0:
print 'Saving the best model...',
if best_p is not None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, uidx=uidx, **params)
pkl.dump(model_options, open('%s.pkl' % saveto, 'wb'))
print 'Done'
# save with uidx
if not overwrite:
print 'Saving the model at iteration {}...'.format(uidx),
saveto_uidx = '{}.iter{}.npz'.format(
os.path.splitext(saveto)[0], uidx)
numpy.savez(saveto_uidx, history_errs=history_errs,
uidx=uidx, **unzip(tparams))
print 'Done'
# validate model on validation set and early stop if necessary
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
#print 'Here:'
#print tparams['ff_logit_W'].get_value()
#print unzip(tparams)
valid_errs, valid_acc = pred_probs(f_log_probs, prepare_data,
model_options, valid)
valid_err = valid_errs.mean()
history_errs.append(valid_err)
test_errs, test_acc = pred_probs(f_log_probs, prepare_data,
model_options, test)
test_err = test_errs.mean()
history_accs.append(test_acc)
if uidx == 0 or valid_err <= numpy.array(history_errs).min():
best_p = unzip(tparams)
bad_counter = 0
if len(history_errs) > patience and valid_err >= \
numpy.array(history_errs)[:-patience].min():
bad_counter += 1
if bad_counter > patience:
print 'loss Early Stop!'
#estop = True
#break
if numpy.isnan(valid_err):
ipdb.set_trace()
log.info('Epoch: %d Update: %d ValidAcc: %f TestAcc: %f' % (eidx, uidx, valid_acc, test_acc))
# finish after this many updates
if uidx >= finish_after:
print 'Finishing after %d iterations!' % uidx
estop = True
break
print 'Seen %d samples' % n_samples
#if len(history_accs) > 0:
# epoch_accs.append(history_accs[-1])
#if len(epoch_accs) > 1 and epoch_accs[-1] <= numpy.array(epoch_accs)[:-1].max():
# bad_counter_acc += 1
# if bad_counter_acc > 2:
# print 'acc Early Stop Acc!'
# #estop = True
# break
if estop:
break
if best_p is not None:
zipp(best_p, tparams)
use_noise.set_value(0.)
valid_err, test_acc = pred_probs(f_log_probs, prepare_data,
model_options, test)
print 'test acc', test_acc
params = copy.copy(best_p)
numpy.savez(saveto, zipped_params=best_p,
history_errs=history_errs,
uidx=uidx,
**params)
return test_acc
# Script entry point: training is driven by calling train() externally;
# importing this module runs nothing.
if __name__ == '__main__':
    pass
| |
import logging
import os
import uuid
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.db import models
from django.db.models import ManyToManyField
from django.utils.encoding import iri_to_uri
from django.utils.http import urlquote
from django.template.loader import get_template
from product_details import product_details
from pytz import common_timezones
from sorl.thumbnail import ImageField, get_thumbnail
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from mozillians.common import utils
from mozillians.common.templatetags.helpers import absolutify, gravatar
from mozillians.common.templatetags.helpers import offset_of_timezone
from mozillians.common.urlresolvers import reverse
from mozillians.groups.models import (Group, GroupAlias, GroupMembership,
Skill, SkillAlias)
from mozillians.phonebook.validators import (validate_email, validate_twitter,
validate_website, validate_username_not_url,
validate_phone_number, validate_linkedin)
from mozillians.users import get_languages_for_locale
from mozillians.users.managers import (EMPLOYEES,
MOZILLIANS, PRIVACY_CHOICES, PRIVILEGED,
PUBLIC, PUBLIC_INDEXABLE_FIELDS,
UserProfileManager, UserProfileQuerySet)
# Country names keyed by ISO code, localized to en-US.
COUNTRIES = product_details.get_regions('en-US')
# Pixel dimensions used when processing uploaded avatars.
AVATAR_SIZE = (300, 300)

logger = logging.getLogger(__name__)
def _calculate_photo_filename(instance, filename):
    """Generate a unique filename for uploaded photo.

    The original filename is discarded; every upload gets a fresh
    UUID-based name (with hyphens) under settings.USER_AVATAR_DIR.
    """
    unique_name = '{0}.jpg'.format(uuid.uuid4())
    return os.path.join(settings.USER_AVATAR_DIR, unique_name)
class PrivacyField(models.PositiveSmallIntegerField):
    """PositiveSmallIntegerField preconfigured with the standard
    mozillians privacy default and choices; callers may override both."""

    def __init__(self, *args, **kwargs):
        # Same merge semantics as building a defaults dict and
        # update()-ing it with kwargs: explicit kwargs win.
        kwargs.setdefault('default', MOZILLIANS)
        kwargs.setdefault('choices', PRIVACY_CHOICES)
        super(PrivacyField, self).__init__(*args, **kwargs)
class UserProfilePrivacyModel(models.Model):
    """Abstract base holding one privacy_* field per privacy-controlled
    profile field, plus helpers to enumerate those controlled fields."""

    # Privacy clearance of the *viewer* of this instance; set per-instance
    # (see UserProfile.set_instance_privacy_level). None disables filtering.
    _privacy_level = None

    privacy_photo = PrivacyField()
    privacy_full_name = PrivacyField()
    privacy_full_name_local = PrivacyField()
    privacy_ircname = PrivacyField()
    privacy_email = PrivacyField()
    privacy_bio = PrivacyField()
    privacy_geo_city = PrivacyField()
    privacy_geo_region = PrivacyField()
    privacy_geo_country = PrivacyField()
    privacy_city = PrivacyField()
    privacy_region = PrivacyField()
    privacy_country = PrivacyField()
    privacy_groups = PrivacyField()
    privacy_skills = PrivacyField()
    privacy_languages = PrivacyField()
    privacy_date_mozillian = PrivacyField()
    privacy_timezone = PrivacyField()
    # T-shirt size is only ever exposed to privileged viewers.
    privacy_tshirt = PrivacyField(choices=((PRIVILEGED, _lazy(u'Privileged')),),
                                  default=PRIVILEGED)
    privacy_title = PrivacyField()
    privacy_story_link = PrivacyField()

    # Class-level cache populated lazily by privacy_fields().
    CACHED_PRIVACY_FIELDS = None

    class Meta:
        abstract = True

    @classmethod
    def clear_privacy_fields_cache(cls):
        """
        Clear any caching of the privacy fields.
        (This is only used in testing.)
        """
        cls.CACHED_PRIVACY_FIELDS = None

    @classmethod
    def privacy_fields(cls):
        """
        Return a dictionary whose keys are the names of the fields in this
        model that are privacy-controlled, and whose values are the default
        values to use for those fields when the user is not privileged to
        view their actual value.

        Note: should be only used through UserProfile class. We should
        fix this.
        """
        # Cache on the class object
        if cls.CACHED_PRIVACY_FIELDS is None:
            privacy_fields = {}
            field_names = cls._meta.get_all_field_names()
            for name in field_names:
                if name.startswith('privacy_') or not 'privacy_%s' % name in field_names:
                    # skip privacy fields and uncontrolled fields
                    continue
                field = cls._meta.get_field(name)
                # Okay, this is a field that is privacy-controlled
                # Figure out a good default value for it (to show to users
                # who aren't privileged to see the actual value)
                if isinstance(field, ManyToManyField):
                    # Privacy-safe default for a M2M is an empty queryset.
                    default = field.related.model.objects.none()
                else:
                    default = field.get_default()
                privacy_fields[name] = default

            # HACK: There's not really an email field on UserProfile,
            # but it's faked with a property
            privacy_fields['email'] = u''

            cls.CACHED_PRIVACY_FIELDS = privacy_fields
        return cls.CACHED_PRIVACY_FIELDS
class UserProfile(UserProfilePrivacyModel):
    """A mozillians.org user profile, with privacy-aware attribute access."""

    REFERRAL_SOURCE_CHOICES = (
        ('direct', 'Mozillians'),
        ('contribute', 'Get Involved'),
    )

    objects = UserProfileManager.from_queryset(UserProfileQuerySet)()

    user = models.OneToOneField(User)
    full_name = models.CharField(max_length=255, default='', blank=False,
                                 verbose_name=_lazy(u'Full Name'))
    full_name_local = models.CharField(max_length=255, blank=True, default='',
                                       verbose_name=_lazy(u'Name in local language'))
    is_vouched = models.BooleanField(
        default=False,
        help_text='You can edit vouched status by editing invidual vouches')
    can_vouch = models.BooleanField(
        default=False,
        help_text='You can edit can_vouch status by editing invidual vouches')
    last_updated = models.DateTimeField(auto_now=True)
    groups = models.ManyToManyField(Group, blank=True, related_name='members',
                                    through=GroupMembership)
    skills = models.ManyToManyField(Skill, blank=True, related_name='members')
    bio = models.TextField(verbose_name=_lazy(u'Bio'), default='', blank=True)
    photo = ImageField(default='', blank=True, upload_to=_calculate_photo_filename)
    ircname = models.CharField(max_length=63, verbose_name=_lazy(u'IRC Nickname'),
                               default='', blank=True)
    # validated geo data (validated that it's valid geo data, not that the
    # mozillian is there :-) )
    geo_country = models.ForeignKey('geo.Country', blank=True, null=True,
                                    on_delete=models.SET_NULL)
    geo_region = models.ForeignKey('geo.Region', blank=True, null=True, on_delete=models.SET_NULL)
    geo_city = models.ForeignKey('geo.City', blank=True, null=True, on_delete=models.SET_NULL)
    lat = models.FloatField(_lazy(u'Latitude'), blank=True, null=True)
    lng = models.FloatField(_lazy(u'Longitude'), blank=True, null=True)
    # django-cities-light fields
    city = models.ForeignKey('cities_light.City', blank=True, null=True,
                             on_delete=models.SET_NULL)
    region = models.ForeignKey('cities_light.Region', blank=True, null=True,
                               on_delete=models.SET_NULL)
    country = models.ForeignKey('cities_light.Country', blank=True, null=True,
                                on_delete=models.SET_NULL)
    basket_token = models.CharField(max_length=1024, default='', blank=True)
    date_mozillian = models.DateField('When was involved with Mozilla',
                                      null=True, blank=True, default=None)
    timezone = models.CharField(max_length=100, blank=True, default='',
                                choices=zip(common_timezones, common_timezones))
    tshirt = models.IntegerField(
        _lazy(u'T-Shirt'), blank=True, null=True, default=None,
        choices=(
            (1, _lazy(u'Fitted Small')), (2, _lazy(u'Fitted Medium')),
            (3, _lazy(u'Fitted Large')), (4, _lazy(u'Fitted X-Large')),
            (5, _lazy(u'Fitted XX-Large')), (6, _lazy(u'Fitted XXX-Large')),
            (7, _lazy(u'Straight-cut Small')), (8, _lazy(u'Straight-cut Medium')),
            (9, _lazy(u'Straight-cut Large')), (10, _lazy(u'Straight-cut X-Large')),
            (11, _lazy(u'Straight-cut XX-Large')), (12, _lazy(u'Straight-cut XXX-Large'))
        ))
    title = models.CharField(_lazy(u'What do you do for Mozilla?'),
                             max_length=70, blank=True, default='')
    story_link = models.URLField(
        _lazy(u'Link to your contribution story'),
        help_text=_lazy(u'If you have created something public that '
                        u'tells the story of how you came to be a '
                        u'Mozillian, specify that link here.'),
        max_length=1024, blank=True, default='')
    referral_source = models.CharField(max_length=32,
                                       choices=REFERRAL_SOURCE_CHOICES,
                                       default='direct')

    def __unicode__(self):
        """Return this user's name when their profile is called."""
        return self.display_name

    def get_absolute_url(self):
        return reverse('phonebook:profile_view', args=[self.user.username])

    class Meta:
        db_table = 'profile'
        ordering = ['full_name']

    def __getattribute__(self, attrname):
        """Special privacy aware __getattribute__ method.

        This method returns the real value of the attribute of object,
        if the privacy_level of the attribute is at least as large as
        the _privacy_level attribute.

        Otherwise it returns a default privacy respecting value for
        the attribute, as defined in the privacy_fields dictionary.

        special_functions provides methods that privacy safe their
        respective properties, where the privacy modifications are
        more complex.
        """
        # _getattr bypasses this override so we can read real values.
        _getattr = (lambda x: super(UserProfile, self).__getattribute__(x))
        privacy_fields = UserProfile.privacy_fields()
        privacy_level = _getattr('_privacy_level')

        special_functions = {
            'accounts': '_accounts',
            'alternate_emails': '_alternate_emails',
            'email': '_primary_email',
            'is_public_indexable': '_is_public_indexable',
            'languages': '_languages',
            'vouches_made': '_vouches_made',
            'vouches_received': '_vouches_received',
            'vouched_by': '_vouched_by',
            'websites': '_websites'
        }

        if attrname in special_functions:
            return _getattr(special_functions[attrname])

        if not privacy_level or attrname not in privacy_fields:
            return _getattr(attrname)

        field_privacy = _getattr('privacy_%s' % attrname)
        if field_privacy < privacy_level:
            # Viewer is not privileged enough: hand back the safe default.
            return privacy_fields.get(attrname)

        return _getattr(attrname)

    def _filter_accounts_privacy(self, accounts):
        # Restrict an ExternalAccount queryset to entries visible at the
        # instance's current privacy level (no-op when level unset).
        if self._privacy_level:
            return accounts.filter(privacy__gte=self._privacy_level)
        return accounts

    @property
    def _accounts(self):
        """Privacy-aware external accounts, excluding websites and emails."""
        _getattr = (lambda x: super(UserProfile, self).__getattribute__(x))
        excluded_types = [ExternalAccount.TYPE_WEBSITE, ExternalAccount.TYPE_EMAIL]
        accounts = _getattr('externalaccount_set').exclude(type__in=excluded_types)
        return self._filter_accounts_privacy(accounts)

    @property
    def _alternate_emails(self):
        """Privacy-aware alternate email external accounts."""
        _getattr = (lambda x: super(UserProfile, self).__getattribute__(x))
        accounts = _getattr('externalaccount_set').filter(type=ExternalAccount.TYPE_EMAIL)
        return self._filter_accounts_privacy(accounts)

    @property
    def _is_public_indexable(self):
        # True when at least one indexable field is both set and PUBLIC.
        for field in PUBLIC_INDEXABLE_FIELDS:
            if getattr(self, field, None) and getattr(self, 'privacy_%s' % field, None) == PUBLIC:
                return True
        return False

    @property
    def _languages(self):
        """Privacy-aware language set."""
        _getattr = (lambda x: super(UserProfile, self).__getattribute__(x))
        if self._privacy_level > _getattr('privacy_languages'):
            return _getattr('language_set').none()
        return _getattr('language_set').all()

    @property
    def _primary_email(self):
        """Privacy-aware primary email (faked field; see privacy_fields)."""
        _getattr = (lambda x: super(UserProfile, self).__getattribute__(x))
        privacy_fields = UserProfile.privacy_fields()

        if self._privacy_level and _getattr('privacy_email') < self._privacy_level:
            # Not privileged to see the real address: return the safe default.
            email = privacy_fields['email']
            return email
        return _getattr('user').email

    @property
    def _vouched_by(self):
        """Earliest voucher of this profile, if visible at the current
        privacy level; otherwise None."""
        privacy_level = self._privacy_level
        voucher = (UserProfile.objects.filter(vouches_made__vouchee=self)
                   .order_by('vouches_made__date'))

        if voucher.exists():
            voucher = voucher[0]
            if privacy_level:
                voucher.set_instance_privacy_level(privacy_level)
                # Voucher is visible if any of their privacy-controlled
                # fields clears the viewer's level.
                for field in UserProfile.privacy_fields():
                    if getattr(voucher, 'privacy_%s' % field) >= privacy_level:
                        return voucher
                return None
            return voucher

        return None

    def _vouches(self, type):
        """Return vouches of relation `type` ('vouches_made'/'vouches_received')
        whose vouchee is visible at the instance's privacy level."""
        _getattr = (lambda x: super(UserProfile, self).__getattribute__(x))

        vouch_ids = []
        for vouch in _getattr(type).all():
            vouch.vouchee.set_instance_privacy_level(self._privacy_level)
            for field in UserProfile.privacy_fields():
                if getattr(vouch.vouchee, 'privacy_%s' % field, 0) >= self._privacy_level:
                    vouch_ids.append(vouch.id)
        vouches = _getattr(type).filter(pk__in=vouch_ids)
        return vouches

    @property
    def _vouches_made(self):
        _getattr = (lambda x: super(UserProfile, self).__getattribute__(x))
        if self._privacy_level:
            return self._vouches('vouches_made')
        return _getattr('vouches_made')

    @property
    def _vouches_received(self):
        _getattr = (lambda x: super(UserProfile, self).__getattribute__(x))
        if self._privacy_level:
            return self._vouches('vouches_received')
        return _getattr('vouches_received')

    @property
    def _websites(self):
        """Privacy-aware website external accounts."""
        _getattr = (lambda x: super(UserProfile, self).__getattribute__(x))
        accounts = _getattr('externalaccount_set').filter(type=ExternalAccount.TYPE_WEBSITE)
        return self._filter_accounts_privacy(accounts)

    @property
    def display_name(self):
        return self.full_name

    @property
    def privacy_level(self):
        """Return user privacy clearance."""
        if (self.user.groups.filter(name='Managers').exists() or self.user.is_superuser):
            return PRIVILEGED
        if self.groups.filter(name='staff').exists():
            return EMPLOYEES
        if self.is_vouched:
            return MOZILLIANS
        return PUBLIC

    @property
    def is_complete(self):
        """Tests if a user has all the information needed to move on
        past the original registration view.
        """
        return self.display_name.strip() != ''

    @property
    def is_public(self):
        """Return True is any of the privacy protected fields is PUBLIC."""
        # TODO needs update
        for field in type(self).privacy_fields():
            if getattr(self, 'privacy_%s' % field, None) == PUBLIC:
                return True
        return False

    @property
    def is_manager(self):
        return self.user.is_superuser or self.user.groups.filter(name='Managers').exists()

    @property
    def date_vouched(self):
        """ Return the date of the first vouch, if available."""
        vouches = self.vouches_received.all().order_by('date')[:1]
        if vouches:
            return vouches[0].date
        return None

    def set_instance_privacy_level(self, level):
        """Sets privacy level of instance."""
        self._privacy_level = level

    def set_privacy_level(self, level, save=True):
        """Sets all privacy enabled fields to 'level'."""
        for field in type(self).privacy_fields():
            setattr(self, 'privacy_%s' % field, level)
        if save:
            self.save()

    def set_membership(self, model, membership_list):
        """Alters membership to Groups and Skills."""
        if model is Group:
            m2mfield = self.groups
            alias_model = GroupAlias
        elif model is Skill:
            m2mfield = self.skills
            alias_model = SkillAlias

        # Remove any visible groups that weren't supplied in this list.
        if model is Group:
            (GroupMembership.objects.filter(userprofile=self, group__visible=True)
             .exclude(group__name__in=membership_list).delete())
        else:
            m2mfield.remove(*[g for g in m2mfield.all()
                              if g.name not in membership_list and g.is_visible])

        # Add/create the rest of the groups
        groups_to_add = []
        for g in membership_list:
            if alias_model.objects.filter(name=g).exists():
                group = alias_model.objects.get(name=g).alias
            else:
                group = model.objects.create(name=g)

            if group.is_visible:
                groups_to_add.append(group)

        if model is Group:
            for group in groups_to_add:
                group.add_member(self)
        else:
            m2mfield.add(*groups_to_add)

    def get_photo_thumbnail(self, geometry='160x160', **kwargs):
        """Return a sorl thumbnail of the photo (default avatar if unset)."""
        if 'crop' not in kwargs:
            kwargs['crop'] = 'center'
        if self.photo:
            return get_thumbnail(self.photo, geometry, **kwargs)
        return get_thumbnail(settings.DEFAULT_AVATAR_PATH, geometry, **kwargs)

    def get_photo_url(self, geometry='160x160', **kwargs):
        """Return photo url.

        If privacy allows and no photo set, return gravatar link.
        If privacy allows and photo set return local photo link.
        If privacy doesn't allow return default local link.
        """
        privacy_level = getattr(self, '_privacy_level', MOZILLIANS)
        if (not self.photo and self.privacy_photo >= privacy_level):
            return gravatar(self.user.email, size=geometry)
        return absolutify(self.get_photo_thumbnail(geometry, **kwargs).url)

    def is_vouchable(self, voucher):
        """Check whether self can receive a vouch from voucher."""
        # If there's a voucher, they must be able to vouch.
        if voucher and not voucher.can_vouch:
            return False

        # Maximum VOUCH_COUNT_LIMIT vouches per account, no matter what.
        if self.vouches_received.all().count() >= settings.VOUCH_COUNT_LIMIT:
            return False

        # If you've already vouched this account, you cannot do it again
        vouch_query = self.vouches_received.filter(voucher=voucher)
        if voucher and vouch_query.exists():
            return False

        return True

    def vouch(self, vouched_by, description='', autovouch=False):
        """Create a vouch for this profile and email the vouchee.

        Returns the new Vouch, or None when is_vouchable() rejects it.
        """
        if not self.is_vouchable(vouched_by):
            return

        vouch = self.vouches_received.create(
            voucher=vouched_by,
            date=datetime.now(),
            description=description,
            autovouch=autovouch
        )

        self._email_now_vouched(vouched_by, description)
        return vouch

    def auto_vouch(self):
        """Auto vouch mozilla.com users."""
        emails = [acc.identifier for acc in
                  ExternalAccount.objects.filter(user=self, type=ExternalAccount.TYPE_EMAIL)]
        emails.append(self.user.email)
        email_exists = any([email for email in emails
                            if email.split('@')[1] in settings.AUTO_VOUCH_DOMAINS])
        # Only create the automatic vouch once per profile.
        if email_exists and not self.vouches_received.filter(
                description=settings.AUTO_VOUCH_REASON, autovouch=True).exists():
            self.vouch(None, settings.AUTO_VOUCH_REASON, autovouch=True)

    def _email_now_vouched(self, vouched_by, description=''):
        """Email this user, letting them know they are now vouched."""
        name = None
        voucher_profile_link = None
        vouchee_profile_link = utils.absolutify(self.get_absolute_url())
        if vouched_by:
            name = vouched_by.full_name
            voucher_profile_link = utils.absolutify(vouched_by.get_absolute_url())

        number_of_vouches = self.vouches_received.all().count()
        template = get_template('phonebook/emails/vouch_confirmation_email.txt')
        message = template.render({
            'voucher_name': name,
            'voucher_profile_url': voucher_profile_link,
            'vouchee_profile_url': vouchee_profile_link,
            'vouch_description': description,
            'functional_areas_url': utils.absolutify(reverse('groups:index_functional_areas')),
            'groups_url': utils.absolutify(reverse('groups:index_groups')),
            'first_vouch': number_of_vouches == 1,
            'can_vouch_threshold': number_of_vouches == settings.CAN_VOUCH_THRESHOLD,
        })
        subject = _(u'You have been vouched on Mozillians.org')
        # BUGFIX: undo the HTML escaping of quotes that template rendering
        # applies, so the plain-text email shows literal quote characters.
        # (The source had the entity names '&quot;'/'&#39;' decoded in place,
        # which turned this line into a no-op replace followed by an
        # unterminated ''' string literal -- a SyntaxError.)
        filtered_message = message.replace('&quot;', '"').replace('&#39;', "'")
        send_mail(subject, filtered_message, settings.FROM_NOREPLY,
                  [self.user.email])

    def get_annotated_groups(self):
        """
        Return a list of all the visible groups the user is a member of or pending
        membership. The groups pending membership will have a .pending attribute
        set to True, others will have it set False.
        """
        groups = []
        # Query this way so we only get the groups that the privacy controls allow the
        # current user to see. We have to force evaluation of this query first, otherwise
        # Django combines the whole thing into one query and loses the privacy control.
        groups_manager = self.groups
        # checks to avoid AttributeError exception b/c self.groups may returns
        # EmptyQuerySet instead of the default manager due to privacy controls
        if hasattr(groups_manager, 'visible'):
            user_group_ids = list(groups_manager.visible().values_list('id', flat=True))
        else:
            user_group_ids = []

        for membership in self.groupmembership_set.filter(group_id__in=user_group_ids):
            group = membership.group
            group.pending = (membership.status == GroupMembership.PENDING)
            group.pending_terms = (membership.status == GroupMembership.PENDING_TERMS)
            groups.append(group)

        return groups

    def timezone_offset(self):
        """
        Return minutes the user's timezone is offset from UTC. E.g. if user is
        4 hours behind UTC, returns -240.
        If user has not set a timezone, returns None (not 0).
        """
        if self.timezone:
            return offset_of_timezone(self.timezone)

    def save(self, *args, **kwargs):
        # Reset viewer privacy level so stale filtering never persists a save.
        self._privacy_level = None
        autovouch = kwargs.pop('autovouch', True)

        super(UserProfile, self).save(*args, **kwargs)
        # Auto_vouch follows the first save, because you can't
        # create foreign keys without a database id.
        if autovouch:
            self.auto_vouch()
class Vouch(models.Model):
    """A single vouch: `voucher` vouched for `vouchee`."""

    vouchee = models.ForeignKey(UserProfile, related_name='vouches_received')
    # voucher may be NULL (auto_vouch passes None); deleting the voucher
    # keeps the vouch row (SET_NULL).
    voucher = models.ForeignKey(UserProfile, related_name='vouches_made',
                                null=True, default=None, blank=True,
                                on_delete=models.SET_NULL)
    description = models.TextField(max_length=500, verbose_name=_lazy(u'Reason for Vouching'),
                                   default='')
    # True when created by UserProfile.auto_vouch().
    autovouch = models.BooleanField(default=False)
    date = models.DateTimeField()

    class Meta:
        verbose_name_plural = 'vouches'
        unique_together = ('vouchee', 'voucher')
        ordering = ['-date']

    def __unicode__(self):
        return u'{0} vouched by {1}'.format(self.vouchee, self.voucher)
class AbuseReport(models.Model):
    """A report that a profile is spam or contains inappropriate content."""

    TYPE_SPAM = 'spam'
    TYPE_INAPPROPRIATE = 'inappropriate'
    REPORT_TYPES = (
        (TYPE_SPAM, 'Spam profile'),
        (TYPE_INAPPROPRIATE, 'Inappropriate content')
    )

    # reporter is nullable -- presumably for reports not filed by a
    # logged-in user (e.g. automated checks); confirm against callers.
    reporter = models.ForeignKey(UserProfile, related_name='abuses_reported', null=True)
    profile = models.ForeignKey(UserProfile, related_name='abuses')
    type = models.CharField(choices=REPORT_TYPES, max_length=30, blank=False, default='')
    # NOTE(review): name suggests the report originated from an Akismet
    # spam check -- verify with the code that creates these.
    is_akismet = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
class UsernameBlacklist(models.Model):
    """A username (or username regex, when is_regex) that may not be used."""

    value = models.CharField(max_length=30, unique=True)
    # When True, `value` is interpreted as a regular expression.
    is_regex = models.BooleanField(default=False)

    def __unicode__(self):
        return self.value

    class Meta:
        ordering = ['value']
class ExternalAccount(models.Model):
    """An account a user holds on an external service (GitHub, IRC-adjacent
    messengers, phone numbers, ...), with its own privacy setting."""

    # Constants for type field values.
    TYPE_AMO = 'AMO'
    TYPE_BMO = 'BMO'
    TYPE_EMAIL = 'EMAIL'
    TYPE_GITHUB = 'GITHUB'
    TYPE_MDN = 'MDN'
    TYPE_SUMO = 'SUMO'
    TYPE_FACEBOOK = 'FACEBOOK'
    TYPE_TWITTER = 'TWITTER'
    TYPE_AIM = 'AIM'
    TYPE_GTALK = 'GTALK'
    TYPE_SKYPE = 'SKYPE'
    TYPE_YAHOO = 'YAHOO'
    TYPE_WEBSITE = 'WEBSITE'
    TYPE_BITBUCKET = 'BITBUCKET'
    TYPE_SLIDESHARE = 'SLIDESHARE'
    TYPE_WEBMAKER = 'WEBMAKER'
    TYPE_MOWIKI = 'MOZILLAWIKI'
    TYPE_REMO = 'REMO'
    TYPE_LINKEDIN = 'LINKEDIN'
    TYPE_JABBER = 'JABBER'
    TYPE_DISCOURSE = 'DISCOURSE'
    TYPE_LANYRD = 'LANYRD'
    TYPE_LANDLINE = 'Phone (Landline)'
    TYPE_MOBILE = 'Phone (Mobile)'
    TYPE_MOVERBATIM = 'MOZILLAVERBATIM'
    TYPE_MOLOCAMOTION = 'MOZILLALOCAMOTION'
    TYPE_MOLOCATION = 'MOZILLALOCATION'
    TYPE_MOPONTOON = 'MOZILLAPONTOON'
    TYPE_TRANSIFEX = 'TRANSIFEX'
    TYPE_TELEGRAM = 'TELEGRAM'

    # Account type field documentation:
    #
    # name: The name of the service that this account belongs to. What
    #       users see
    # url: If the service features profile pages for its users, then
    #      this field should be a link to that profile page. User's
    #      identifier should be replaced by the special string
    #      {identifier}.
    # validator: Points to a function which will clean and validate
    #            user's entry. Function should return the cleaned
    #            data.
    ACCOUNT_TYPES = {
        TYPE_AMO: {'name': 'Mozilla Add-ons',
                   'url': 'https://addons.mozilla.org/user/{identifier}/',
                   'validator': validate_username_not_url},
        TYPE_BMO: {'name': 'Bugzilla (BMO)',
                   'url': 'https://bugzilla.mozilla.org/user_profile?login={identifier}',
                   'validator': validate_username_not_url},
        TYPE_EMAIL: {'name': 'Alternate email address',
                     'url': '',
                     'validator': validate_email},
        TYPE_GITHUB: {'name': 'GitHub',
                      'url': 'https://github.com/{identifier}',
                      'validator': validate_username_not_url},
        TYPE_BITBUCKET: {'name': 'Bitbucket',
                         'url': 'https://bitbucket.org/{identifier}',
                         'validator': validate_username_not_url},
        TYPE_MDN: {'name': 'MDN',
                   'url': 'https://developer.mozilla.org/profiles/{identifier}',
                   'validator': validate_username_not_url},
        TYPE_MOLOCATION: {'name': 'Mozilla Location Service',
                          'url': 'https://location.services.mozilla.com/leaders#{identifier}',
                          'validator': validate_username_not_url},
        TYPE_SUMO: {'name': 'Mozilla Support',
                    'url': 'https://support.mozilla.org/user/{identifier}',
                    'validator': validate_username_not_url},
        TYPE_FACEBOOK: {'name': 'Facebook',
                        'url': 'https://www.facebook.com/{identifier}',
                        'validator': validate_username_not_url},
        TYPE_TWITTER: {'name': 'Twitter',
                       'url': 'https://twitter.com/{identifier}',
                       'validator': validate_twitter},
        TYPE_AIM: {'name': 'AIM', 'url': ''},
        TYPE_GTALK: {'name': 'Google+ Hangouts',
                     'url': '',
                     'validator': validate_email},
        TYPE_SKYPE: {'name': 'Skype', 'url': ''},
        TYPE_SLIDESHARE: {'name': 'SlideShare',
                          'url': 'http://www.slideshare.net/{identifier}',
                          'validator': validate_username_not_url},
        TYPE_YAHOO: {'name': 'Yahoo! Messenger', 'url': ''},
        TYPE_WEBSITE: {'name': 'Website URL',
                       'url': '',
                       'validator': validate_website},
        TYPE_WEBMAKER: {'name': 'Mozilla Webmaker',
                        'url': 'https://{identifier}.makes.org',
                        'validator': validate_username_not_url},
        TYPE_MOWIKI: {'name': 'Mozilla Wiki', 'url': 'https://wiki.mozilla.org/User:{identifier}',
                      'validator': validate_username_not_url},
        TYPE_REMO: {'name': 'Mozilla Reps', 'url': 'https://reps.mozilla.org/u/{identifier}/',
                    'validator': validate_username_not_url},
        TYPE_LINKEDIN: {'name': 'LinkedIn',
                        'url': 'https://www.linkedin.com/in/{identifier}/',
                        'validator': validate_linkedin},
        TYPE_JABBER: {'name': 'XMPP/Jabber',
                      'url': '',
                      'validator': validate_email},
        TYPE_DISCOURSE: {'name': 'Mozilla Discourse',
                         'url': 'https://discourse.mozilla-community.org/users/{identifier}',
                         'validator': validate_username_not_url},
        TYPE_LANYRD: {'name': 'Lanyrd',
                      'url': 'http://lanyrd.com/profile/{identifier}/',
                      'validator': validate_username_not_url},
        TYPE_LANDLINE: {'name': 'Phone (Landline)',
                        'url': '',
                        'validator': validate_phone_number},
        TYPE_MOBILE: {'name': 'Phone (Mobile)',
                      'url': '',
                      'validator': validate_phone_number},
        TYPE_MOVERBATIM: {'name': 'Mozilla Verbatim',
                          'url': 'https://localize.mozilla.org/accounts/{identifier}/',
                          'validator': validate_username_not_url},
        TYPE_MOLOCAMOTION: {'name': 'Mozilla Locamotion',
                            'url': 'http://mozilla.locamotion.org/user/{identifier}/',
                            'validator': validate_username_not_url},
        TYPE_MOPONTOON: {'name': 'Mozilla Pontoon',
                         'url': 'https://pontoon.mozilla.org/contributor/{identifier}/',
                         'validator': validate_email},
        TYPE_TRANSIFEX: {'name': 'Transifex',
                         'url': 'https://www.transifex.com/accounts/profile/{identifier}/',
                         'validator': validate_username_not_url},
        TYPE_TELEGRAM: {'name': 'Telegram',
                        'url': 'https://telegram.me/{identifier}',
                        'validator': validate_username_not_url},
    }

    user = models.ForeignKey(UserProfile)
    identifier = models.CharField(max_length=255, verbose_name=_lazy(u'Account Username'))
    # Choices are every ACCOUNT_TYPES entry except EMAIL, sorted by
    # display name (alternate emails are handled separately in the UI).
    type = models.CharField(max_length=30,
                            choices=sorted([(k, v['name']) for (k, v) in ACCOUNT_TYPES.iteritems()
                                            if k != TYPE_EMAIL], key=lambda x: x[1]),
                            verbose_name=_lazy(u'Account Type'))
    privacy = models.PositiveIntegerField(default=MOZILLIANS, choices=PRIVACY_CHOICES)

    class Meta:
        ordering = ['type']
        unique_together = ('identifier', 'type', 'user')

    def get_identifier_url(self):
        """Return the profile URL for this account, with the identifier
        URL-quoted into the service's URL template."""
        url = self.ACCOUNT_TYPES[self.type]['url'].format(identifier=urlquote(self.identifier))
        # LinkedIn identifiers may already be full profile URLs; use as-is.
        if self.type == 'LINKEDIN' and '://' in self.identifier:
            return self.identifier

        return iri_to_uri(url)

    def unique_error_message(self, model_class, unique_check):
        # Friendlier message for the (identifier, type, user) constraint.
        if model_class == type(self) and unique_check == ('identifier', 'type', 'user'):
            return _('You already have an account with this name and type.')
        else:
            return super(ExternalAccount, self).unique_error_message(model_class, unique_check)

    def __unicode__(self):
        return self.type
class Language(models.Model):
    """A language a user speaks, stored as a locale code per profile."""

    code = models.CharField(max_length=63, choices=get_languages_for_locale('en'))
    userprofile = models.ForeignKey(UserProfile)

    class Meta:
        ordering = ['code']
        unique_together = ('code', 'userprofile')

    def __unicode__(self):
        return self.code

    def get_english(self):
        # English display name via Django's get_FOO_display().
        return self.get_code_display()

    def get_native(self):
        # Lazily look up and cache (on the instance) the language's name
        # as written in that language itself.
        if not getattr(self, '_native', None):
            languages = get_languages_for_locale(self.code)
            for code, language in languages:
                if code == self.code:
                    self._native = language
                    break
        return self._native

    def unique_error_message(self, model_class, unique_check):
        # Friendlier message for the (code, userprofile) constraint.
        if (model_class == type(self) and unique_check == ('code', 'userprofile')):
            return _('This language has already been selected.')
        return super(Language, self).unique_error_message(model_class, unique_check)
| |
# ripped from https://www.hex-rays.com/products/ida/support/freefiles/vb.idc
# and http://vrt-blog.snort.org/2014/08/discovering-dynamically-loaded-api-in.html
import ptypes,ndk
from ptypes import *
# Primitive aliases over ptypes/ndk types, named after the Windows/VB
# runtime's own terminology.
class Str(pstr.string): pass
class Byte(pint.uint8_t): pass
class Word(pint.uint16_t): pass
class Dword(pint.uint32_t): pass
class UUID(ndk.GUID): pass
class PVOID(ndk.PVOID): pass
class CLSID(ndk.GUID): pass
class GUID(ndk.GUID): pass
class BSTR(pstruct.type):
_fields_ = [
(Dword, 'length'),
(lambda s: A(Str, s['length'].li.int()), 'string'),
]
class Empty(ptype.undefined): pass
A = dyn.array
P = dyn.pointer
C = dyn.clone
OD = lambda t: dyn.rpointer(t, lambda s: s.p, Dword)
OW = lambda t: dyn.rpointer(t, lambda s: s.p, Word)
###
class DesignerInfo(pstruct.type):
    """Registration data for an Addin/Designer referenced by the project."""
    _fields_ = [
        (UUID, "uuidDesigner"),           # CLSID of the Addin/Designer
        (Dword, "cbStructSize"),          # Total size of the next fields.
        (BSTR, "bstrAddinRegKey"),        # Registry key of the Addin
        (BSTR, "bstrAddinName"),          # Friendly name of the Addin
        (BSTR, "bstrAddinDescription"),   # Description of Addin
        (Dword, "dwLoadBehaviour"),       # Load behaviour of the Addin
        (BSTR, "bstrSatelliteDll"),       # Satellite DLL, if specified
        (BSTR, "bstrAdditionalRegKey"),   # Extra registry key, if specified
        (Dword, "dwCommandLineSafe"),     # Specifies a GUI-less Addin if 1.
    ]
class COMRegistrationInfo(pstruct.type):
    """Per-object COM registration record; records chain via bNextObject."""
    _fields_ = [
        # FIX: the original lambda called an undefined name ``O``; ``OD`` is
        # the Dword-relative-offset combinator used for every other offset
        # field here.  The lambda defers the self-reference until the class
        # name is bound.
        (lambda s: OD(COMRegistrationInfo), "bNextObject"),  # Offset to next COM registration record
        (OD(Str), "bObjectName"),          # Offset to Object Name
        (OD(Str), "bObjectDescription"),   # Offset to Object Description
        (Dword, "dwInstancing"),           # Instancing Mode
        (Dword, "dwObjectId"),             # Current Object ID in the Project
        (UUID, "uuidObject"),              # CLSID of Object
        (Dword, "fIsInterface"),           # Specifies if the next CLSID is valid
        (OD(CLSID), "bUuidObjectIFace"),   # Offset to CLSID of Object Interface
        (OD(CLSID), "bUuidEventsIFace"),   # Offset to CLSID of Events Interface
        (Dword, "fHasEvents"),             # Specifies if the CLSID above is valid
        (Dword, "dwMiscStatus"),           # OLEMISC Flags (see MSDN docs)
        (Byte, "fClassType"),              # Class Type
        (Byte, "fObjectType"),             # Flag identifying the Object Type
        (Word, "wToolboxBitmap32"),        # Control Bitmap ID in Toolbox
        (Word, "wDefaultIcon"),            # Minimized Icon of Control Window
        (Word, "fIsDesigner"),             # Specifies whether this is a Designer
        (OD(DesignerInfo), "bDesignerData"),  # Offset to Designer Data
    ]
class COMRegistrationData(pstruct.type):
    """Project-level COM/typelib registration information."""
    _fields_ = [
        # NOTE(review): the field name suggests an offset, but this template
        # declares an absolute pointer -- confirm against a sample binary.
        (P(COMRegistrationInfo), "bRegInfo"),   # COM interfaces info
        (OD(Str), "bSZProjectName"),            # Offset to Project/Typelib Name
        (OD(Str), "bSZHelpDirectory"),          # Offset to Help Directory
        (OD(Str), "bSZProjectDescription"),     # Offset to Project Description
        (UUID, "uuidProjectClsId"),             # CLSID of Project/Typelib
        (Dword, "dwTlbLcid"),                   # LCID of Type Library
        (Word, "wUnknown"),                     # Might be something. Must check
        (Word, "wTlbVerMajor"),                 # Typelib Major Version
        (Word, "wTlbVerMinor"),                 # Typelib Minor Version
    ]
class ObjectTable(pstruct.type):
    """VB6 object table: inventory of all objects compiled into the project."""
    _fields_ = [
        (PVOID, "lpHeapLink"),       # Unused after compilation, always 0.
        (PVOID, "lpExecProj"),       # Pointer to VB Project Exec COM Object.
        # Array of private descriptors; the count is read from this table's
        # dwCompiledObjects field, reached back through the enclosing pointer.
        (P(lambda s: A(P(PrivateObjectDescriptor), s.getparent(ptype.pointer_t).p['dwCompiledObjects'].li.int())), "lpProjectInfo2"),  # Secondary Project Information.
        (Dword, "dwReserved"),       # Always set to -1 after compiling. Unused
        (Dword, "dwNull"),           # Not used in compiled mode.
        (PVOID, "lpProjectObject"),  # Pointer to in-memory Project Data.
        (UUID, "uuidObject"),        # GUID of the Object Table.
        (Word, "fCompileState"),     # Internal flag used during compilation.
        (Word, "dwTotalObjects"),    # Total objects present in Project.
        (Word, "dwCompiledObjects"), # Equal to above after compiling.
        (Word, "dwObjectsInUse"),    # Usually equal to above after compile.
        # Public descriptors, sized by dwTotalObjects above.
        (lambda s: P(A(PublicObjectDescriptor, s['dwTotalObjects'].li.int())), "lpObjectArray"),  # Pointer to Object Descriptors
        (Dword, "fIdeFlag"),         # Flag/Pointer used in IDE only.
        (PVOID, "lpIdeData"),        # Flag/Pointer used in IDE only.
        (PVOID, "lpIdeData2"),       # Flag/Pointer used in IDE only.
        (P(Str), "lpszProjectName"), # Pointer to Project Name.
        (Dword, "dwLcid"),           # LCID of Project.
        (Dword, "dwLcid2"),          # Alternate LCID of Project.
        (PVOID, "lpIdeData3"),       # Flag/Pointer used in IDE only.
        (Dword, "dwIdentifier"),     # Template Version of Structure.
    ]
class ProjectInformation(pstruct.type):
    """Top-level VB6 project data, referenced from the Header."""
    _fields_ = [
        (Dword, "dwVersion"),        # 5.00 in Hex (0x1F4). Version.
        (P(ObjectTable), "lpObjectTable"),  # Pointer to the Object Table
        (Dword, "dwNull"),           # Unused value after compilation.
        (PVOID, "lpCodeStart"),      # Points to start of code. Unused.
        (PVOID, "lpCodeEnd"),        # Points to end of code. Unused.
        (Dword, "dwDataSize"),       # Size of VB Object Structures. Unused.
        (PVOID, "lpThreadSpace"),    # Pointer to Pointer to Thread Object.
        (PVOID, "lpVbaSeh"),         # Pointer to VBA Exception Handler
        (PVOID, "lpNativeCode"),     # Pointer to .DATA section.
        (dyn.clone(Str, length=0x210), "szPathInformation"),  # Contains Path and ID string. < SP6
        # External table entries; count lives in dwExternalCount, reached back
        # through the enclosing pointer.
        (P(lambda s: A(Dword, s.getparent(ptype.pointer_t).p['dwExternalCount'].li.int())), "lpExternalTable"),  # Pointer to External Table.
        (Dword, "dwExternalCount"),  # Objects in the External Table.
    ]
class PrivateObjectDescriptor(pstruct.type):
    """Descriptor for a private VB6 object (IDE-heavy; mostly dead after compile)."""
    _fields_ = [
        (PVOID, "lpHeapLink"),       # Unused after compilation, always 0.
        # Lambda defers the forward reference to ObjectInformation below.
        (P(lambda s: ObjectInformation), "lpObjectInfo"),  # Pointer to the Object Info for this Object.
        (Dword, "dwReserved"),       # Always set to -1 after compiling.
        (Dword, "dwIdeData"),        # [3] Not valid after compilation.
        (Dword, "v_10"),             # Unknown / unnamed in the reference layout.
        (Dword, "v_14"),             # Unknown / unnamed in the reference layout.
        (PVOID, "lpObjectList"),     # Points to the Parent Structure (Array)
        (Dword, "dwIdeData2"),       # Not valid after compilation.
        (PVOID, "lpObjectList2"),    # [3] Points to the Parent Structure (Array).
        (Dword, "v_24"),             # Unknown / unnamed in the reference layout.
        (Dword, "v_28"),             # Unknown / unnamed in the reference layout.
        (Dword, "dwIdeData3"),       # [3] Not valid after compilation.
        (Dword, "v_30"),             # Unknown / unnamed in the reference layout.
        (Dword, "v_34"),             # Unknown / unnamed in the reference layout.
        (Dword, "dwObjectType"),     # Type of the Object described.
        (Dword, "dwIdentifier"),     # Template Version of Structure.
    ]
class PublicObjectDescriptor(pstruct.type):
    """Descriptor for a public VB6 object (one entry of ObjectTable.lpObjectArray)."""
    _fields_ = [
        (P(lambda s: ObjectInformation), "lpObjectInfo"),  # Pointer to the Object Info for this Object.
        (Dword, "dwReserved"),       # Always set to -1 after compiling.
        (PVOID, "lpPublicBytes"),    # Pointer to Public Variable Size integers.
        (PVOID, "lpStaticBytes"),    # Pointer to Static Variable Size integers.
        (PVOID, "lpModulePublic"),   # Pointer to Public Variables in DATA section
        (PVOID, "lpModuleStatic"),   # Pointer to Static Variables in DATA section
        (P(Str), "lpszObjectName"),  # Name of the Object.
        (Dword, "dwMethodCount"),    # Number of Methods in Object.
        # FIX: this entry originally read ``P(A(P(STR), s['dwMethodCount'].li.int)())``
        # -- ``STR`` is undefined (the string class is ``Str``) and the call
        # parentheses were misplaced so ``.int`` was never invoked, yielding a
        # bound method instead of the element count.
        (lambda s: P(A(P(Str), s['dwMethodCount'].li.int())), "lpMethodNames"),  # If present, pointer to Method names array.
        (OD(Dword), "bStaticVars"),  # Offset to where to copy Static Variables.
        (Dword, "fObjectType"),      # Flags defining the Object Type.
        (Dword, "dwNull"),           # Not valid after compilation.
    ]
class ObjectInformation(pstruct.type):
    """Runtime information block shared by public and private object descriptors."""
    _fields_ = [
        (Word, "wRefCount"),         # Always 1 after compilation.
        (Word, "wObjectIndex"),      # Index of this Object.
        (P(ObjectTable), "lpObjectTable"),  # Pointer to the Object Table
        (Dword, "lpIdeData"),        # Zero after compilation. Used in IDE only.
        (P(PrivateObjectDescriptor), "lpPrivateObject"),  # Pointer to Private Object Descriptor.
        (Dword, "dwReserved"),       # Always -1 after compilation.
        (Dword, "dwNull"),           # Unused.
        (P(PublicObjectDescriptor), "lpObject"),  # Back-Pointer to Public Object Descriptor.
        (P(ProjectInformation), "lpProjectData"),  # Pointer to in-memory Project Object.
        (Word, "wMethodCount"),      # Number of Methods
        (Word, "wMethodCount2"),     # Zeroed out after compilation. IDE only.
        # Method pointer array, sized by wMethodCount above.
        (lambda s: P(A(PVOID, s['wMethodCount'].li.int())), "lpMethods"),  # Pointer to Array of Methods.
        (Word, "wConstants"),        # Number of Constants in Constant Pool.
        (Word, "wMaxConstants"),     # Constants to allocate in Constant Pool.
        (PVOID, "lpIdeData2"),       # Valid in IDE only.
        (PVOID, "lpIdeData3"),       # Valid in IDE only.
        (PVOID, "lpConstants"),      # Pointer to Constants Pool.
    ]
class OptionalObjectInformation(pstruct.type):
    """Optional extension of ObjectInformation: GUIDs, controls, and events."""
    _fields_ = [
        (Dword, "dwObjectGuids"),    # How many GUIDs to Register. 2 = Designer
        (lambda s: P(A(GUID, s['dwObjectGuids'].li.int())), "lpObjectGuid"),  # Unique GUID of the Object *VERIFY*
        (Dword, "dwNull"),           # Unused.
        # NOTE(review): declared as a plain Dword although the name and the
        # reference comment describe a pointer -- confirm before dereferencing.
        (Dword, "lpuuidObjectTypes"),  # Pointer to Array of Object Interface GUIDs
        (Dword, "dwObjectTypeGuids"),  # How many GUIDs in the Array above.
        (P(lambda s: A(ControlInformation, s.p.p['dwControlCount'].li.int())), "lpControls2"),  # Usually the same as lpControls.
        (Dword, "dwNull2"),          # Unused.
        (P(lambda s: A(GUID, s.p.p['dwObjectGuids'].li.int())), "lpObjectGuid2"),  # Pointer to Array of Object GUIDs.
        (Dword, "dwControlCount"),   # Number of Controls in array below.
        (lambda s: P(A(ControlInformation, s['dwControlCount'].li.int())), "lpControls"),  # Pointer to Controls Array.
        (Word, "wEventCount"),       # Number of Events in Event Array.
        (Word, "wPCodeCount"),       # Number of P-Codes used by this Object.
        (OW(PVOID), "bWInitializeEvent"),  # Offset to Initialize Event from Event Table.
        (OW(PVOID), "bWTerminateEvent"),   # Offset to Terminate Event in Event Table.
        # NOTE(review): dyn.pointer's second parameter is the pointer integer
        # type, not an element count; the wEventCount argument here looks
        # wrong (EventHandlerTable already sizes its Events array from this
        # structure's wEventCount).  Left untouched pending verification.
        (lambda s: P(EventHandlerTable, s['wEventCount'].li.int()), "lpEvents"),  # Pointer to Events Array.
        (PVOID, "lpBasicClassObject"),  # Pointer to in-memory Class Objects.
        (Dword, "dwNull3"),          # Unused.
        (PVOID, "lpIdeData"),        # Only valid in IDE.
    ]
class EventHandlerTable(pstruct.type):
    """Event sink header followed by the per-control event handler pointers."""
    _fields_ = [
        (Dword, "dwNull"),           # Always Null.
        (PVOID, "lpControlType"),    # Pointer to control type.
        (PVOID, "lpObjectInfo"),     # Pointer to object info.
        (Dword, "lpQuery"),          # Jump to EVENT_SINK_QueryInterface.
        (Dword, "lpAddRef"),         # Jump to EVENT_SINK_AddRef.
        (Dword, "lpRelease"),        # Jump to EVENT_SINK_Release.
        # Handler array, sized by the owning OptionalObjectInformation's
        # wEventCount field.
        (lambda s: A(EventHandlerType, s.getparent(OptionalObjectInformation)['wEventCount'].li.int()), 'Events'),
    ]
class EventHandlerType(ptype.pointer_t):
    """Pointer to an event handler; the pointee enumerates the button events."""
    # NOTE(review): ptypes' ``pint.enum._values_`` conventionally holds
    # (name, value) pairs; these entries are (value, name).  Confirm against
    # the ptypes version in use before relying on the enum names.
    class _object_(Dword, pint.enum):
        _values_ = [
            (0, 'lpButton_Click'),            # Ptr to Button Click event code
            (1, 'lpButton_DragDrop'),         # Ptr to Button DragDrop event code
            (2, 'lpButton_DragOver'),         # Ptr to Button DragOver event code
            (3, 'lpButton_GotFocus'),         # Ptr to Button GotFocus event code
            (4, 'lpButton_KeyDown'),          # Ptr to Button KeyDown event code
            (5, 'lpButton_KeyPress'),         # Ptr to Button KeyPress event code
            (6, 'lpButton_KeyUp'),            # Ptr to Button KeyUp event code
            (7, 'lpButton_LostFocus'),        # Ptr to Button LostFocus event code
            (8, 'lpButton_MouseDown'),        # Ptr to Button MouseDown event code
            (9, 'lpButton_MouseMove'),        # Ptr to Button MouseMove event code
            (10, 'lpButton_MouseUp'),         # Ptr to Button MouseUp event code
            (11, 'lpButton_OLEDragOver'),     # Ptr to Button OLEDragOver event code
            (12, 'lpButton_OLEDragDrop'),     # Ptr to Button OLEDragDrop event code
            (13, 'lpButton_OLEGiveFeedback'), # Ptr to Button OLEGiveFeedback event code
            (14, 'lpButton_OLEStartDrag'),    # Ptr to Button OLEStartDrag event code
            (15, 'lpButton_OLESetData'),      # Ptr to Button OLESetData event code
            (16, 'lpButton_OLECompleteDrag'), # Ptr to Button OLECompleteDrag event code
        ]
class Header(pstruct.type):
    """VB5/VB6 executable header (the structure the 'VB5!' signature starts)."""
    _fields_ = [
        # NOTE(review): szVbMagic is the inline "VB5!" signature in the
        # reference layout; this template declares a pointer to it -- confirm
        # against a sample binary.
        (P(Str), 'szVbMagic'),           # "VB5!" String
        (Word, 'wRuntimeBuild'),         # Build of the VB6 Runtime
        (C(Str, length=0xe), 'szLangDll'),     # Language Extension DLL
        (C(Str, length=0xe), 'szSecLangDll'),  # 2nd Language Extension DLL
        (Word, 'wRuntimeRevision'),      # Internal Runtime Revision
        (Dword, 'dwLCID'),               # LCID of Language DLL
        (Dword, 'dwSecLCID'),            # LCID of 2nd Language DLL
        (PVOID, 'lpSubMain'),            # Pointer to Sub Main Code
        (P(ProjectInformation), 'lpProjectData'),  # Pointer to Project Data
        (Dword, 'fMdlIntCtls'),          # VB Control Flags for IDs < 32
        (Dword, 'fMdlIntCtls2'),         # VB Control Flags for IDs > 32
        (Dword, 'dwThreadFlags'),        # Threading Mode
        (Dword, 'dwThreadCount'),        # Threads to support in pool
        (Word, 'wFormCount'),            # Number of forms present
        (Word, 'wExternalCount'),        # Number of external controls
        (Dword, 'dwThunkCount'),         # Number of thunks to create
        (PVOID, 'lpGuiTable'),           # Pointer to GUI Table
        (PVOID, 'lpExternalTable'),      # Pointer to External Table
        (P(COMRegistrationData), 'lpComRegisterData'),  # Pointer to COM Information
        (OD(Str), 'bSZProjectDescription'),  # Offset to Project Description
        (OD(Str), 'bSZProjectExeName'),      # Offset to Project EXE Name
        (OD(Str), 'bSZProjectHelpFile'),     # Offset to Project Help File
        (OD(Str), 'bSZProjectName'),         # Offset to Project Name
    ]
###
class DynamicHandles(pstruct.type):
    """Module handle and resolved address used by DllFunctionCall thunks."""
    _fields_ = [
        (ndk.DWORD, 'dwUnknown'),    # Purpose not documented in the references.
        (ndk.HANDLE, 'hModule'),     # Module handle of the loaded DLL.
        (ndk.PVOID, 'fnAddress'),    # Resolved address of the exported function.
    ]
class DllFunctionCallStruct(pstruct.type):
    """Argument block for VB6's DllFunctionCall dynamic-API resolution."""
    _fields_ = [
        (dyn.pointer(ndk.STRING), 'lpDllName'),     # Name of the DLL to load.
        (dyn.pointer(ndk.STRING), 'lpExportName'),  # Name of the export to resolve.
        (ndk.CHAR, 'sizeOfExportName'),             # Length of the export name.
        (DynamicHandles, 'sHandleData'),            # Filled in once resolved.
    ]
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import socket
import json
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.jenkins_hook import JenkinsHook
import jenkins
from jenkins import JenkinsException
from requests import Request
from six.moves.urllib.error import HTTPError, URLError
# ``basestring`` only exists on Python 2; alias it to ``str`` on Python 3 so
# the isinstance checks below work on both interpreter versions.
try:
    basestring
except NameError:
    basestring = str  # For python3 compatibility
def jenkins_request_with_headers(jenkins_server, req):
    """
    Execute ``req`` against ``jenkins_server``, returning body and headers.

    python-jenkins' ``jenkins_request`` only exposes the body; we also need
    the response headers (to read the queue location from them), so this is
    that method's error handling with just the return call changed.

    :param jenkins_server: The server to query
    :param req: The request to execute
    :return: Dict containing the response body (key body)
        and the headers coming along (headers)
    """
    try:
        response = jenkins_server.jenkins_request(req)
        if response.content is None:
            raise jenkins.EmptyResponseException(
                "Error communicating with server[%s]: "
                "empty response" % jenkins_server.server)
        return {'body': response.content.decode('utf-8'),
                'headers': response.headers}
    except HTTPError as http_err:
        # Jenkins's funky authentication means its nigh impossible to
        # distinguish errors.
        if http_err.code == 404:
            raise jenkins.NotFoundException('Requested item could not be found')
        if http_err.code in (401, 403, 500):
            # six.moves.urllib.error.HTTPError provides a 'reason' attribute
            # on every python version except 2.6; HTTPError.msg carries the
            # same information, so fall back to that.
            raise JenkinsException(
                'Error in request. ' +
                'Possibly authentication failed [%s]: %s' % (
                    http_err.code, http_err.msg)
            )
        raise
    except socket.timeout as sock_err:
        raise jenkins.TimeoutException('Error in request: %s' % sock_err)
    except URLError as url_err:
        # python 2.6 wraps a socket timeout inside URLError; re-raise the
        # same exception type in that case for compatibility.
        if str(url_err.reason) == "timed out":
            raise jenkins.TimeoutException('Error in request: %s' % url_err.reason)
        raise JenkinsException('Error in request: %s' % url_err.reason)
class JenkinsJobTriggerOperator(BaseOperator):
    """
    Trigger a Jenkins Job and monitor its execution.

    This operator depends on the python-jenkins library,
    version >= 0.4.15 to communicate with the jenkins server.
    You'll also need to configure a Jenkins connection in the connections screen.

    :param jenkins_connection_id: The jenkins connection to use for this job
    :type jenkins_connection_id: str
    :param job_name: The name of the job to trigger
    :type job_name: str
    :param parameters: The parameters block to provide to jenkins. (templated)
    :type parameters: str
    :param sleep_time: How long will the operator sleep between each status
        request for the job (min 1, default 10)
    :type sleep_time: int
    :param max_try_before_job_appears: The maximum number of requests to make
        while waiting for the job to appear on the jenkins server (default 10)
    :type max_try_before_job_appears: int
    """
    template_fields = ('parameters',)
    template_ext = ('.json',)
    ui_color = '#f9ec86'

    @apply_defaults
    def __init__(self,
                 jenkins_connection_id,
                 job_name,
                 parameters="",
                 sleep_time=10,
                 max_try_before_job_appears=10,
                 *args,
                 **kwargs):
        super(JenkinsJobTriggerOperator, self).__init__(*args, **kwargs)
        self.job_name = job_name
        self.parameters = parameters
        # Poll at most once per second so we don't hammer the Jenkins server.
        if sleep_time < 1:
            sleep_time = 1
        self.sleep_time = sleep_time
        self.jenkins_connection_id = jenkins_connection_id
        self.max_try_before_job_appears = max_try_before_job_appears

    def build_job(self, jenkins_server):
        """
        Make an API call to Jenkins to trigger a build for 'job_name'.

        Returns a dict with 2 keys: body and headers. headers contains also a
        dict-like object which can be queried to get the location to poll in
        the queue.

        :param jenkins_server: The jenkins server where the job should be triggered
        :return: Dict containing the response body (key body)
            and the headers coming along (headers)
        """
        # Warning if the parameter is too long, the URL can be longer than
        # the maximum allowed size
        if self.parameters and isinstance(self.parameters, basestring):
            import ast
            # Templated parameters arrive as the string form of a Python
            # literal; convert them back to a dict/list for python-jenkins.
            self.parameters = ast.literal_eval(self.parameters)

        if not self.parameters:
            # We need a None to call the non parametrized jenkins api end point
            self.parameters = None

        request = Request(jenkins_server.build_job_url(self.job_name,
                                                       self.parameters, None))
        return jenkins_request_with_headers(jenkins_server, request)

    def poll_job_in_queue(self, location, jenkins_server):
        """
        Poll the jenkins queue until the job is executed.

        When we trigger a job through an API call, the job is first put in
        the queue without having a build number assigned. Thus we have to
        wait until the job exits the queue to know its build number. To do
        so, we add /api/json (or /api/xml) to the location returned by the
        build_job call and poll this file. When an 'executable' block appears
        in the json, the job execution has started and the field 'number'
        contains the build number.

        :param location: Location to poll, returned in the header of the build_job call
        :param jenkins_server: The jenkins server to poll
        :return: The build_number corresponding to the triggered job
        :raises AirflowException: if the job never leaves the queue within
            ``max_try_before_job_appears`` polls
        """
        try_count = 0
        location = location + '/api/json'
        # TODO Use get_queue_info instead
        # once it will be available in python-jenkins (v > 0.4.15)
        self.log.info('Polling jenkins queue at the url %s', location)
        while try_count < self.max_try_before_job_appears:
            location_answer = jenkins_request_with_headers(jenkins_server,
                                                           Request(location))
            if location_answer is not None:
                json_response = json.loads(location_answer['body'])
                if 'executable' in json_response:
                    build_number = json_response['executable']['number']
                    self.log.info('Job executed on Jenkins side with the build number %s',
                                  build_number)
                    return build_number
            try_count += 1
            time.sleep(self.sleep_time)
        # BUG FIX: the count used to be passed as a separate argument, which
        # AirflowException never interpolates; format the message explicitly.
        raise AirflowException("The job hasn't been executed after polling "
                               "the queue %d times" % self.max_try_before_job_appears)

    def get_hook(self):
        """Instantiate the Jenkins hook for the configured connection."""
        return JenkinsHook(self.jenkins_connection_id)

    def execute(self, context):
        """Trigger the job, wait for it to leave the queue, then poll it to completion."""
        if not self.jenkins_connection_id:
            self.log.error(
                'Please specify the jenkins connection id to use.'
                ' You must create a Jenkins connection before'
                ' being able to use this operator')
            raise AirflowException('The jenkins_connection_id parameter is missing,'
                                   ' impossible to trigger the job')

        if not self.job_name:
            self.log.error("Please specify the job name to use in the job_name parameter")
            raise AirflowException('The job_name parameter is missing,'
                                   ' impossible to trigger the job')

        self.log.info(
            'Triggering the job %s on the jenkins : %s with the parameters : %s',
            self.job_name, self.jenkins_connection_id, self.parameters)
        jenkins_server = self.get_hook().get_jenkins_server()
        jenkins_response = self.build_job(jenkins_server)
        build_number = self.poll_job_in_queue(
            jenkins_response['headers']['Location'], jenkins_server)

        time.sleep(self.sleep_time)
        keep_polling_job = True
        build_info = None
        while keep_polling_job:
            try:
                build_info = jenkins_server.get_build_info(name=self.job_name,
                                                           number=build_number)
                if build_info['result'] is not None:
                    keep_polling_job = False
                    # Check if job had errors.
                    if build_info['result'] != 'SUCCESS':
                        raise AirflowException(
                            'Jenkins job failed, final state : %s.'
                            ' Find more information on job url : %s'
                            % (build_info['result'], build_info['url']))
                else:
                    self.log.info('Waiting for job to complete : %s , build %s',
                                  self.job_name, build_number)
                    time.sleep(self.sleep_time)
            except jenkins.NotFoundException as err:
                # NOTE(review): NotFoundException may not expose ``resp``;
                # confirm against the python-jenkins version in use.
                raise AirflowException(
                    'Jenkins job status check failed. Final error was: %s'
                    % err.resp.status)
            except jenkins.JenkinsException as err:
                # BUG FIX: the error used to be passed as a separate argument
                # that AirflowException never interpolated; format it in.
                raise AirflowException(
                    'Jenkins call failed with error : %s, if you have parameters '
                    'double check them, jenkins sends back '
                    'this exception for unknown parameters.'
                    ' You can also check logs for more details on this exception '
                    '(jenkins_url/log/rss)' % str(err))
        if build_info:
            # If we can we return the url of the job
            # for later use (like retrieving an artifact)
            return build_info['url']
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworksOperations(object):
"""VirtualNetworksOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Collaborators are injected by the generated service client; this class
    # is not meant to be instantiated directly (see the class docstring).
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
def _delete_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Issue the initial DELETE request that starts the long-running delete."""
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map auth/not-found/conflict statuses onto typed azure-core exceptions;
    # callers may extend the mapping through the ``error_map`` kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-04-01"
    accept = "application/json"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 202/204 are valid for an asynchronous delete; anything else is an error.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    virtual_network_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes the specified virtual network.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_name: The name of the virtual network.
    :type virtual_network_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Without a continuation token, fire the initial DELETE; with one, we
    # rebuild the poller from the saved state further below instead.
    if cont_token is None:
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            virtual_network_name=virtual_network_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial call; do not forward them
    # to the polling method.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete has no body; only a custom ``cls`` produces a result.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    virtual_network_name,  # type: str
    expand=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "_models.VirtualNetwork"
    """Gets the specified virtual network by resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_name: The name of the virtual network.
    :type virtual_network_name: str
    :param expand: Expands referenced resources.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VirtualNetwork, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_04_01.models.VirtualNetwork
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetwork"]
    # Map auth/not-found/conflict statuses onto typed azure-core exceptions;
    # callers may extend the mapping through the ``error_map`` kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-04-01"
    accept = "application/json"

    # Construct URL
    url = self.get.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('VirtualNetwork', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}  # type: ignore
def _create_or_update_initial(
    self,
    resource_group_name,  # type: str
    virtual_network_name,  # type: str
    parameters,  # type: "_models.VirtualNetwork"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.VirtualNetwork"
    """Issue the initial PUT request that starts the long-running create/update."""
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetwork"]
    # Map auth/not-found/conflict statuses onto typed azure-core exceptions;
    # callers may extend the mapping through the ``error_map`` kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-04-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'VirtualNetwork')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = updated, 201 = created; both carry the resource in the body.
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize('VirtualNetwork', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('VirtualNetwork', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    virtual_network_name,  # type: str
    parameters,  # type: "_models.VirtualNetwork"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.VirtualNetwork"]
    """Creates or updates a virtual network in the specified resource group.

    Long-running operation: issues the initial PUT, then returns an LROPoller
    that polls the Azure-AsyncOperation endpoint until a terminal state.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_name: The name of the virtual network.
    :type virtual_network_name: str
    :param parameters: Parameters supplied to the create or update virtual network operation.
    :type parameters: ~azure.mgmt.network.v2020_04_01.models.VirtualNetwork
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling
     object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations
     if no Retry-After header is present.
    :return: An instance of LROPoller that returns either VirtualNetwork or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_04_01.models.VirtualNetwork]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetwork"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: issue the initial PUT.  The cls lambda returns the
        # raw pipeline response unchanged so the poller can read LRO headers.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            virtual_network_name=virtual_network_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial call; must not be forwarded to the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response once polling reaches a terminal state.
        deserialized = self._deserialize('VirtualNetwork', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a poller from a previously saved continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}  # type: ignore
def update_tags(
    self,
    resource_group_name,  # type: str
    virtual_network_name,  # type: str
    parameters,  # type: "_models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.VirtualNetwork"
    """Updates a virtual network tags.

    Sends a single PATCH request carrying the tags payload and returns the
    updated virtual network.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_name: The name of the virtual network.
    :type virtual_network_name: str
    :param parameters: Parameters supplied to update virtual network tags.
    :type parameters: ~azure.mgmt.network.v2020_04_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VirtualNetwork, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_04_01.models.VirtualNetwork
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetwork"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-04-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Resolve the URL template with its path parameters.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    request_url = self._client.format_url(self.update_tags.metadata['url'], **path_args)  # type: ignore

    query_params = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the payload and issue the PATCH request through the pipeline.
    request = self._client.patch(
        request_url, query_params, headers,
        content=self._serialize.body(parameters, 'TagsObject'),
    )
    pipeline_resp = self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_resp.http_response
    if http_resp.status_code not in [200]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    result = self._deserialize('VirtualNetwork', pipeline_resp)
    if custom_cls:
        return custom_cls(pipeline_resp, result, {})
    return result
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}  # type: ignore
def list_all(
    self,
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.VirtualNetworkListResult"]
    """Gets all virtual networks in a subscription.

    Returns a lazy pager; pages are fetched on iteration.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.VirtualNetworkListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-04-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request: the first page uses the URL template,
        # subsequent pages follow the service-provided nextLink verbatim.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            url = self.list_all.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # nextLink already embeds all query parameters.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page; return (link to next page or None, item iterator).
        deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, mapping non-200 responses to typed errors.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks'}  # type: ignore
def list(
    self,
    resource_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.VirtualNetworkListResult"]
    """Gets all virtual networks in a resource group.

    Returns a lazy pager; pages are fetched on iteration.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.VirtualNetworkListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-04-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request: first page from the URL template,
        # later pages follow the service-provided nextLink verbatim.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # nextLink already embeds all query parameters.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page; return (link to next page or None, item iterator).
        deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, mapping non-200 responses to typed errors.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks'}  # type: ignore
def check_ip_address_availability(
    self,
    resource_group_name,  # type: str
    virtual_network_name,  # type: str
    ip_address,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.IPAddressAvailabilityResult"
    """Checks whether a private IP address is available for use.

    Issues a single GET against the CheckIPAddressAvailability endpoint.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_name: The name of the virtual network.
    :type virtual_network_name: str
    :param ip_address: The private IP address to be verified.
    :type ip_address: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: IPAddressAvailabilityResult, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_04_01.models.IPAddressAvailabilityResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["_models.IPAddressAvailabilityResult"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-04-01"
    accept = "application/json"

    # Resolve the URL template with its path parameters.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    request_url = self._client.format_url(self.check_ip_address_availability.metadata['url'], **path_args)  # type: ignore

    query_params = {
        'ipAddress': self._serialize.query("ip_address", ip_address, 'str'),
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }  # type: Dict[str, Any]
    headers = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Send the request and surface any non-200 response as a typed error.
    request = self._client.get(request_url, query_params, headers)
    pipeline_resp = self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_resp.http_response
    if http_resp.status_code not in [200]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    result = self._deserialize('IPAddressAvailabilityResult', pipeline_resp)
    if custom_cls:
        return custom_cls(pipeline_resp, result, {})
    return result
check_ip_address_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability'}  # type: ignore
def list_usage(
    self,
    resource_group_name,  # type: str
    virtual_network_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["_models.VirtualNetworkListUsageResult"]
    """Lists usage stats.

    Returns a lazy pager over per-subnet usage entries for the virtual network.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_network_name: The name of the virtual network.
    :type virtual_network_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either VirtualNetworkListUsageResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_04_01.models.VirtualNetworkListUsageResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualNetworkListUsageResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-04-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request: first page from the URL template,
        # later pages follow the service-provided nextLink verbatim.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            url = self.list_usage.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # nextLink already embeds all query parameters.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page; return (link to next page or None, item iterator).
        deserialized = self._deserialize('VirtualNetworkListUsageResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, mapping non-200 responses to typed errors.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_usage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/usages'}  # type: ignore
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Nicira, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from oslo.config import cfg
from nova import exception
from nova.network import model as network_model
from nova import test
from nova.tests import fakelibvirt
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import vif
CONF = cfg.CONF
class LibvirtVifTestCase(test.TestCase):
net_bridge = {
'cidr': '101.168.1.0/24',
'cidr_v6': '101:1db9::/64',
'gateway_v6': '101:1db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'br0',
'bridge_interface': 'eth0',
'vlan': 99,
'gateway': '101.168.1.1',
'broadcast': '101.168.1.255',
'dns1': '8.8.8.8',
'id': 'network-id-xxx-yyy-zzz'
}
net_bridge_quantum = {
'cidr': '101.168.1.0/24',
'cidr_v6': '101:1db9::/64',
'gateway_v6': '101:1db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge_interface': 'eth0',
'vlan': 99,
'gateway': '101.168.1.1',
'broadcast': '101.168.1.255',
'dns1': '8.8.8.8',
'id': 'network-id-xxx-yyy-zzz'
}
mapping_bridge = {
'mac': 'ca:fe:de:ad:be:ef',
'gateway_v6': net_bridge['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz',
'vif_devname': 'tap-xxx-yyy-zzz',
'vif_type': network_model.VIF_TYPE_BRIDGE,
}
mapping_bridge_quantum = {
'mac': 'ca:fe:de:ad:be:ef',
'gateway_v6': net_bridge['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz',
'vif_devname': 'tap-xxx-yyy-zzz',
}
net_ovs = {
'cidr': '101.168.1.0/24',
'cidr_v6': '101:1db9::/64',
'gateway_v6': '101:1db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'br0',
'vlan': 99,
'gateway': '101.168.1.1',
'broadcast': '101.168.1.255',
'dns1': '8.8.8.8',
'id': 'network-id-xxx-yyy-zzz'
}
mapping_ovs = {
'mac': 'ca:fe:de:ad:be:ef',
'gateway_v6': net_ovs['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz',
'vif_devname': 'tap-xxx-yyy-zzz',
'vif_type': network_model.VIF_TYPE_OVS,
'ovs_interfaceid': 'aaa-bbb-ccc',
}
mapping_ovs_legacy = {
'mac': 'ca:fe:de:ad:be:ef',
'gateway_v6': net_ovs['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz',
}
net_8021 = {
'cidr': '101.168.1.0/24',
'cidr_v6': '101:1db9::/64',
'gateway_v6': '101:1db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'interface': 'eth0',
'vlan': 99,
'gateway': '101.168.1.1',
'broadcast': '101.168.1.255',
'dns1': '8.8.8.8',
'id': 'network-id-xxx-yyy-zzz'
}
mapping_8021qbh = {
'mac': 'ca:fe:de:ad:be:ef',
'vif_uuid': 'vif-xxx-yyy-zzz',
'vif_devname': 'tap-xxx-yyy-zzz',
'vif_type': network_model.VIF_TYPE_802_QBH,
'qbh_params': network_model.VIF8021QbhParams(
profileid="xxx-yyy-zzz"),
}
mapping_8021qbg = {
'mac': 'ca:fe:de:ad:be:ef',
'vif_uuid': 'vif-xxx-yyy-zzz',
'vif_devname': 'tap-xxx-yyy-zzz',
'vif_type': network_model.VIF_TYPE_802_QBG,
'qbg_params': network_model.VIF8021QbgParams(
managerid="xxx-yyy-zzz",
typeid="aaa-bbb-ccc",
typeidversion="1",
instanceid="ddd-eee-fff")
}
mapping_none = {
'mac': 'ca:fe:de:ad:be:ef',
'gateway_v6': net_bridge['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz',
'vif_devname': 'tap-xxx-yyy-zzz',
}
instance = {
'name': 'instance-name',
'uuid': 'instance-uuid'
}
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
self.flags(allow_same_net_traffic=True)
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stubs.Set(utils, 'execute', fake_execute)
def _get_instance_xml(self, driver, net, mapping, image_meta=None):
conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
conf.name = "fake-name"
conf.uuid = "fake-uuid"
conf.memory = 100 * 1024
conf.vcpus = 4
nic = driver.get_config(self.instance, net, mapping, image_meta)
conf.add_device(nic)
return conf.to_xml()
def test_multiple_nics(self):
conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
conf.name = "fake-name"
conf.uuid = "fake-uuid"
conf.memory = 100 * 1024
conf.vcpus = 4
# Tests multiple nic configuration and that target_dev is
# set for each
nics = [{'net_type': 'bridge',
'mac_addr': '00:00:00:00:00:0b',
'source_dev': 'b_source_dev',
'target_dev': 'b_target_dev'},
{'net_type': 'ethernet',
'mac_addr': '00:00:00:00:00:0e',
'source_dev': 'e_source_dev',
'target_dev': 'e_target_dev'},
{'net_type': 'direct',
'mac_addr': '00:00:00:00:00:0d',
'source_dev': 'd_source_dev',
'target_dev': 'd_target_dev'}]
for nic in nics:
nic_conf = vconfig.LibvirtConfigGuestInterface()
nic_conf.net_type = nic['net_type']
nic_conf.target_dev = nic['target_dev']
nic_conf.mac_addr = nic['mac_addr']
nic_conf.source_dev = nic['source_dev']
conf.add_device(nic_conf)
xml = conf.to_xml()
doc = etree.fromstring(xml)
for nic in nics:
path = "./devices/interface/[@type='%s']" % nic['net_type']
node = doc.find(path)
self.assertEqual(nic['net_type'], node.get("type"))
self.assertEqual(nic['mac_addr'],
node.find("mac").get("address"))
self.assertEqual(nic['target_dev'],
node.find("target").get("dev"))
def test_model_novirtio(self):
self.flags(libvirt_use_virtio_for_bridges=False,
libvirt_type='kvm')
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
ret = node.findall("model")
self.assertEqual(len(ret), 0)
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
def test_model_kvm(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='kvm')
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
model = node.find("model").get("type")
self.assertEqual(model, "virtio")
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
def test_model_kvm_custom(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='kvm')
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
image_meta = {'properties': {'hw_vif_model': 'e1000'}}
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge,
image_meta)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
model = node.find("model").get("type")
self.assertEqual(model, "e1000")
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
def test_model_kvm_bogus(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='kvm')
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
image_meta = {'properties': {'hw_vif_model': 'acme'}}
self.assertRaises(exception.UnsupportedHardware,
self._get_instance_xml,
d,
self.net_bridge,
self.mapping_bridge,
image_meta)
def test_model_qemu(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='qemu')
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
model = node.find("model").get("type")
self.assertEqual(model, "virtio")
driver = node.find("driver").get("name")
self.assertEqual(driver, "qemu")
def test_model_xen(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='xen')
def get_connection():
return fakelibvirt.Connection("xen:///system",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
ret = node.findall("model")
self.assertEqual(len(ret), 0)
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
def test_generic_driver_none(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
self.assertRaises(exception.NovaException,
self._get_instance_xml,
d,
self.net_bridge,
self.mapping_none)
def _check_bridge_driver(self, d, net, mapping, br_want):
xml = self._get_instance_xml(d, net, mapping)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
ret = node.findall("filterref")
self.assertEqual(len(ret), 1)
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
self.assertEqual(br_name, br_want)
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping_bridge['mac'])
def test_bridge_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtBridgeDriver(get_connection)
self._check_bridge_driver(d,
self.net_bridge,
self.mapping_bridge,
self.net_bridge['bridge'])
def test_generic_driver_bridge(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
self._check_bridge_driver(d,
self.net_bridge,
self.mapping_bridge,
self.net_bridge['bridge'])
def test_quantum_bridge_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.QuantumLinuxBridgeVIFDriver(get_connection)
br_want = 'brq' + self.net_bridge_quantum['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_bridge_driver(d,
self.net_bridge_quantum,
self.mapping_bridge_quantum,
br_want)
def _check_ovs_ethernet_driver(self, d, net, mapping, dev_prefix):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, net, mapping)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
ret = node.findall("filterref")
self.assertEqual(len(ret), 0)
self.assertEqual(node.get("type"), "ethernet")
dev_name = node.find("target").get("dev")
self.assertTrue(dev_name.startswith(dev_prefix))
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping_ovs['mac'])
script = node.find("script").get("path")
self.assertEquals(script, "")
def test_ovs_ethernet_driver_legacy(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False,
9010)
d = vif.LibvirtOpenVswitchDriver(get_connection)
self._check_ovs_ethernet_driver(d,
self.net_ovs,
self.mapping_ovs_legacy,
"nic")
def test_ovs_ethernet_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False,
9010)
d = vif.LibvirtGenericVIFDriver(get_connection)
self._check_ovs_ethernet_driver(d,
self.net_ovs,
self.mapping_ovs,
"tap")
def _check_ovs_virtualport_driver(self, d, net, mapping, want_iface_id):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, net, mapping)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
ret = node.findall("filterref")
self.assertEqual(len(ret), 0)
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
self.assertEqual(br_name, "br0")
mac = node.find("mac").get("address")
self.assertEqual(mac, mapping['mac'])
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "openvswitch")
iface_id_found = False
for p_elem in vp.findall("parameters"):
iface_id = p_elem.get("interfaceid", None)
if iface_id:
self.assertEqual(iface_id, want_iface_id)
iface_id_found = True
self.assertTrue(iface_id_found)
def test_ovs_virtualport_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False,
9011)
d = vif.LibvirtOpenVswitchVirtualPortDriver(get_connection)
want_iface_id = 'vif-xxx-yyy-zzz'
self._check_ovs_virtualport_driver(d,
self.net_ovs,
self.mapping_ovs_legacy,
want_iface_id)
def test_generic_ovs_virtualport_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False,
9011)
d = vif.LibvirtGenericVIFDriver(get_connection)
want_iface_id = self.mapping_ovs['ovs_interfaceid']
self._check_ovs_virtualport_driver(d,
self.net_ovs,
self.mapping_ovs,
want_iface_id)
def _check_quantum_hybrid_driver(self, d, net, mapping, br_want):
self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
xml = self._get_instance_xml(d, net, mapping)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
ret = node.findall("filterref")
self.assertEqual(len(ret), 1)
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
self.assertEqual(br_name, br_want)
mac = node.find("mac").get("address")
self.assertEqual(mac, mapping['mac'])
def test_quantum_hybrid_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
br_want = "qbr" + self.mapping_ovs['vif_uuid']
br_want = br_want[:network_model.NIC_NAME_LEN]
d = vif.LibvirtHybridOVSBridgeDriver(get_connection)
self._check_quantum_hybrid_driver(d,
self.net_ovs,
self.mapping_ovs_legacy,
br_want)
def test_generic_hybrid_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
br_want = "qbr" + self.mapping_ovs['vif_uuid']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_quantum_hybrid_driver(d,
self.net_ovs,
self.mapping_ovs,
br_want)
def test_generic_8021qbh_driver(self):
def get_connection():
return fakelibvirt.Connection("qemu:///session",
False)
d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_8021,
self.mapping_8021qbh)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
self.assertEqual(node.get("type"), "direct")
br_name = node.find("source").get("dev")
self.assertEqual(br_name, "eth0")
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping_8021qbh['mac'])
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbh")
profile_id_found = False
for p_elem in vp.findall("parameters"):
wantparams = self.mapping_8021qbh['qbh_params']
profile_id = p_elem.get("profileid", None)
if profile_id:
self.assertEqual(profile_id,
wantparams['profileid'])
profile_id_found = True
self.assertTrue(profile_id_found)
def test_generic_8021qbg_driver(self):
    """Generic VIF driver emits an 802.1Qbg <virtualport> with all VEPA params."""

    def get_connection():
        return fakelibvirt.Connection("qemu:///session", False)

    driver = vif.LibvirtGenericVIFDriver(get_connection)
    xml = self._get_instance_xml(driver,
                                 self.net_8021,
                                 self.mapping_8021qbg)
    doc = etree.fromstring(xml)
    interfaces = doc.findall('./devices/interface')
    self.assertEqual(len(interfaces), 1)
    iface = interfaces[0]
    # A direct (macvtap) interface on eth0 with the mapping's MAC.
    self.assertEqual(iface.get("type"), "direct")
    self.assertEqual(iface.find("source").get("dev"), "eth0")
    self.assertEqual(iface.find("mac").get("address"),
                     self.mapping_8021qbg['mac'])
    vport = iface.find("virtualport")
    self.assertEqual(vport.get("type"), "802.1Qbg")
    # Each 802.1Qbg attribute must appear on some <parameters> element and
    # carry exactly the configured value.
    expected = self.mapping_8021qbg['qbg_params']
    attrs = ("managerid", "typeid", "typeidversion", "instanceid")
    found = dict.fromkeys(attrs, False)
    for params in vport.findall("parameters"):
        for attr in attrs:
            value = params.get(attr, None)
            if value:
                self.assertEqual(value, expected[attr])
                found[attr] = True
    for attr in attrs:
        self.assertTrue(found[attr])
| |
"""
Test reload for trained models.
"""
import os
import pytest
import unittest
import tempfile
import numpy as np
import deepchem as dc
import tensorflow as tf
import scipy
from flaky import flaky
from sklearn.ensemble import RandomForestClassifier
from deepchem.molnet.load_function.chembl25_datasets import CHEMBL25_TASKS
from deepchem.feat import create_char_to_idx
def test_sklearn_classifier_reload():
    """A trained SklearnModel reloads from disk and reproduces its predictions."""
    num_samples, num_features, num_tasks = 10, 3, 1
    # Deterministic dummy binary-classification dataset.
    np.random.seed(123)
    ids = np.arange(num_samples)
    X = np.random.rand(num_samples, num_features)
    y = np.random.randint(2, size=(num_samples, num_tasks))
    w = np.ones((num_samples, num_tasks))
    dataset = dc.data.NumpyDataset(X, y, w, ids)
    metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
    model_dir = tempfile.mkdtemp()
    model = dc.models.SklearnModel(RandomForestClassifier(), model_dir)
    # Train and persist the wrapped sklearn estimator.
    model.fit(dataset)
    model.save()
    # Reload into a fresh wrapper pointed at the same directory.
    reloaded_model = dc.models.SklearnModel(None, model_dir)
    reloaded_model.reload()
    # Original and reloaded predictions must agree exactly on new data.
    predset = dc.data.NumpyDataset(np.random.rand(num_samples, num_features))
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
    # The reloaded model still scores well on its training data.
    scores = reloaded_model.evaluate(dataset, [metric])
    assert scores[metric.name] > .9
def test_multitaskregressor_reload():
    """Test that MultitaskRegressor can be reloaded correctly.

    Trains a single-task regressor on random data, rebuilds an identically
    configured model pointing at the same model_dir, restores the weights,
    and checks that predictions match exactly.
    """
    n_samples = 10
    n_features = 3
    n_tasks = 1
    # Generate dummy dataset
    np.random.seed(123)
    ids = np.arange(n_samples)
    X = np.random.rand(n_samples, n_features)
    y = np.random.rand(n_samples, n_tasks)
    w = np.ones((n_samples, n_tasks))
    dataset = dc.data.NumpyDataset(X, y, w, ids)
    regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
    model_dir = tempfile.mkdtemp()
    model = dc.models.MultitaskRegressor(
        n_tasks,
        n_features,
        dropouts=[0.],
        weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
        batch_size=n_samples,
        learning_rate=0.003,
        model_dir=model_dir)
    # Fit trained model
    model.fit(dataset, nb_epoch=100)
    # Eval model on train
    scores = model.evaluate(dataset, [regression_metric])
    assert scores[regression_metric.name] < .1
    # Reload trained model: constructor args must match the original so the
    # restored checkpoint is architecture-compatible.
    reloaded_model = dc.models.MultitaskRegressor(
        n_tasks,
        n_features,
        dropouts=[0.],
        weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)],
        batch_size=n_samples,
        learning_rate=0.003,
        model_dir=model_dir)
    reloaded_model.restore()
    # Check predictions match on random sample
    Xpred = np.random.rand(n_samples, n_features)
    predset = dc.data.NumpyDataset(Xpred)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
    # Eval model on train
    scores = reloaded_model.evaluate(dataset, [regression_metric])
    assert scores[regression_metric.name] < 0.1
def test_multitaskclassification_reload():
    """Test that MultitaskClassifier can be reloaded correctly.

    Trains on an all-zero-label dummy dataset, restores an identically
    configured model from the same model_dir, and checks predictions match.
    """
    n_samples = 10
    n_features = 3
    n_tasks = 1
    n_classes = 2  # NOTE(review): unused; kept for symmetry with sibling tests.
    # Generate dummy dataset
    np.random.seed(123)
    ids = np.arange(n_samples)
    X = np.random.rand(n_samples, n_features)
    y = np.zeros((n_samples, n_tasks))
    w = np.ones((n_samples, n_tasks))
    dataset = dc.data.NumpyDataset(X, y, w, ids)
    classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
    model_dir = tempfile.mkdtemp()
    model = dc.models.MultitaskClassifier(
        n_tasks,
        n_features,
        dropouts=[0.],
        weight_init_stddevs=[.1],
        batch_size=n_samples,
        optimizer=dc.models.optimizers.Adam(
            learning_rate=0.0003, beta1=0.9, beta2=0.999),
        model_dir=model_dir)
    # Fit trained model
    model.fit(dataset, nb_epoch=100)
    # Reload trained model: constructor args must match the original so the
    # restored checkpoint is architecture-compatible.
    reloaded_model = dc.models.MultitaskClassifier(
        n_tasks,
        n_features,
        dropouts=[0.],
        weight_init_stddevs=[.1],
        batch_size=n_samples,
        optimizer=dc.models.optimizers.Adam(
            learning_rate=0.0003, beta1=0.9, beta2=0.999),
        model_dir=model_dir)
    reloaded_model.restore()
    # Check predictions match on random sample
    Xpred = np.random.rand(n_samples, n_features)
    predset = dc.data.NumpyDataset(Xpred)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
    # Eval model on train
    scores = reloaded_model.evaluate(dataset, [classification_metric])
    assert scores[classification_metric.name] > .9
def test_residual_classification_reload():
    """Test that a residual network can reload correctly.

    Uses a deep (10 x 20-unit) MultitaskClassifier with residual=True to
    exercise checkpoint restore on the residual architecture.
    """
    n_samples = 10
    n_features = 5
    n_tasks = 1
    n_classes = 2  # NOTE(review): unused; kept for symmetry with sibling tests.
    # Generate dummy dataset
    np.random.seed(123)
    ids = np.arange(n_samples)
    X = np.random.rand(n_samples, n_features)
    y = np.random.randint(2, size=(n_samples, n_tasks))
    w = np.ones((n_samples, n_tasks))
    dataset = dc.data.NumpyDataset(X, y, w, ids)
    classification_metric = dc.metrics.Metric(dc.metrics.accuracy_score)
    model_dir = tempfile.mkdtemp()
    model = dc.models.MultitaskClassifier(
        n_tasks,
        n_features,
        layer_sizes=[20] * 10,
        dropouts=0.0,
        batch_size=n_samples,
        residual=True,
        model_dir=model_dir)
    # Fit trained model
    model.fit(dataset, nb_epoch=500)
    # Eval model on train
    scores = model.evaluate(dataset, [classification_metric])
    assert scores[classification_metric.name] > .9
    # Reload trained model with identical architecture so the checkpoint fits.
    reloaded_model = dc.models.MultitaskClassifier(
        n_tasks,
        n_features,
        layer_sizes=[20] * 10,
        dropouts=0.0,
        batch_size=n_samples,
        residual=True,
        model_dir=model_dir)
    reloaded_model.restore()
    # Check predictions match on random sample
    Xpred = np.random.rand(n_samples, n_features)
    predset = dc.data.NumpyDataset(Xpred)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
    # Eval model on train
    scores = reloaded_model.evaluate(dataset, [classification_metric])
    assert scores[classification_metric.name] > .9
def test_robust_multitask_classification_reload():
    """Test that RobustMultitaskClassifier can be reloaded correctly.

    (The model is trained to fit a tiny all-zero-label dataset, then an
    identically configured model restores the checkpoint and must reproduce
    its predictions.)
    """
    n_tasks = 10
    n_samples = 10
    n_features = 3
    n_classes = 2  # NOTE(review): unused; kept for symmetry with sibling tests.
    # Generate dummy dataset
    np.random.seed(123)
    ids = np.arange(n_samples)
    X = np.random.rand(n_samples, n_features)
    y = np.zeros((n_samples, n_tasks))
    w = np.ones((n_samples, n_tasks))
    dataset = dc.data.NumpyDataset(X, y, w, ids)
    classification_metric = dc.metrics.Metric(
        dc.metrics.accuracy_score, task_averager=np.mean)
    model_dir = tempfile.mkdtemp()
    model = dc.models.RobustMultitaskClassifier(
        n_tasks,
        n_features,
        layer_sizes=[50],
        bypass_layer_sizes=[10],
        dropouts=[0.],
        learning_rate=0.003,
        weight_init_stddevs=[.1],
        batch_size=n_samples,
        model_dir=model_dir)
    # Fit trained model
    model.fit(dataset, nb_epoch=25)
    # Eval model on train
    scores = model.evaluate(dataset, [classification_metric])
    assert scores[classification_metric.name] > .9
    # Reloaded Trained Model
    reloaded_model = dc.models.RobustMultitaskClassifier(
        n_tasks,
        n_features,
        layer_sizes=[50],
        bypass_layer_sizes=[10],
        dropouts=[0.],
        learning_rate=0.003,
        weight_init_stddevs=[.1],
        batch_size=n_samples,
        model_dir=model_dir)
    reloaded_model.restore()
    # Check predictions match on random sample
    Xpred = np.random.rand(n_samples, n_features)
    predset = dc.data.NumpyDataset(Xpred)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
    # Eval model on train
    scores = reloaded_model.evaluate(dataset, [classification_metric])
    assert scores[classification_metric.name] > .9
def test_normalizing_flow_model_reload():
    """Test that NormalizingFlowModel can be reloaded correctly.

    Fits a 2-D masked-autoregressive flow for one epoch, restores it into a
    fresh model wrapper, and checks sampling shape and log-density agree.
    """
    # Local imports: tensorflow_probability is an optional dependency.
    from deepchem.models.normalizing_flows import NormalizingFlow, NormalizingFlowModel
    import tensorflow_probability as tfp
    tfd = tfp.distributions
    tfb = tfp.bijectors
    tfk = tf.keras  # NOTE(review): unused; candidate for removal.
    model_dir = tempfile.mkdtemp()
    Made = tfb.AutoregressiveNetwork(
        params=2, hidden_units=[512, 512], activation='relu', dtype='float64')
    flow_layers = [tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=Made)]
    # 2D multivariate Gaussian base distribution (loc/scale have 2 components).
    nf = NormalizingFlow(
        base_distribution=tfd.MultivariateNormalDiag(
            loc=np.zeros(2), scale_diag=np.ones(2)),
        flow_layers=flow_layers)
    nfm = NormalizingFlowModel(nf, model_dir=model_dir)
    # Train briefly against samples from a shifted Gaussian target.
    target_distribution = tfd.MultivariateNormalDiag(loc=np.array([1., 0.]))
    dataset = dc.data.NumpyDataset(X=target_distribution.sample(96))
    final = nfm.fit(dataset, nb_epoch=1)  # NOTE(review): return value unused.
    x = np.zeros(2)
    lp1 = nfm.flow.log_prob(x).numpy()
    assert nfm.flow.sample().numpy().shape == (2,)
    # Reload shares the same `nf` object; restore() loads the checkpoint.
    reloaded_model = NormalizingFlowModel(nf, model_dir=model_dir)
    reloaded_model.restore()
    # Check that reloaded model can sample from the distribution
    assert reloaded_model.flow.sample().numpy().shape == (2,)
    lp2 = reloaded_model.flow.log_prob(x).numpy()
    # Check that density estimation is same for reloaded model
    assert np.all(lp1 == lp2)
def test_robust_multitask_regressor_reload():
    """Test that RobustMultitaskRegressor can be reloaded correctly.

    Trains a 10-task regressor on random data, restores an identically
    configured model from the same model_dir, and checks predictions match.
    """
    n_tasks = 10
    n_samples = 10
    n_features = 3
    # Generate dummy dataset
    np.random.seed(123)
    ids = np.arange(n_samples)
    X = np.random.rand(n_samples, n_features)
    y = np.random.rand(n_samples, n_tasks)
    w = np.ones((n_samples, n_tasks))
    dataset = dc.data.NumpyDataset(X, y, w, ids)
    regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
    model_dir = tempfile.mkdtemp()
    model = dc.models.RobustMultitaskRegressor(
        n_tasks,
        n_features,
        layer_sizes=[50],
        bypass_layer_sizes=[10],
        dropouts=[0.],
        learning_rate=0.003,
        weight_init_stddevs=[.1],
        batch_size=n_samples,
        model_dir=model_dir)
    # Fit trained model
    model.fit(dataset, nb_epoch=100)
    # Eval model on train
    scores = model.evaluate(dataset, [regression_metric])
    assert scores[regression_metric.name] < .1
    # Reload trained model with identical hyperparameters.
    reloaded_model = dc.models.RobustMultitaskRegressor(
        n_tasks,
        n_features,
        layer_sizes=[50],
        bypass_layer_sizes=[10],
        dropouts=[0.],
        learning_rate=0.003,
        weight_init_stddevs=[.1],
        batch_size=n_samples,
        model_dir=model_dir)
    reloaded_model.restore()
    # Check predictions match on random sample
    Xpred = np.random.rand(n_samples, n_features)
    predset = dc.data.NumpyDataset(Xpred)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
    # Eval model on train
    scores = reloaded_model.evaluate(dataset, [regression_metric])
    assert scores[regression_metric.name] < 0.1
def test_IRV_multitask_classification_reload():
    """Test IRV classifier can be reloaded.

    The IRV model is trained on IRV-transformed features, so both the
    prediction inputs and the evaluation dataset for the reloaded model must
    go through the same IRVTransformer as the training data.
    """
    n_tasks = 5
    n_samples = 10
    n_features = 128
    n_classes = 2  # kept for symmetry with sibling tests
    # Generate dummy dataset of binary fingerprints.
    np.random.seed(123)
    ids = np.arange(n_samples)
    X = np.random.randint(2, size=(n_samples, n_features))
    y = np.ones((n_samples, n_tasks))
    w = np.ones((n_samples, n_tasks))
    dataset = dc.data.NumpyDataset(X, y, w, ids)
    IRV_transformer = dc.trans.IRVTransformer(5, n_tasks, dataset)
    dataset_trans = IRV_transformer.transform(dataset)
    classification_metric = dc.metrics.Metric(
        dc.metrics.accuracy_score, task_averager=np.mean)
    model_dir = tempfile.mkdtemp()
    model = dc.models.MultitaskIRVClassifier(
        n_tasks,
        K=5,
        learning_rate=0.01,
        batch_size=n_samples,
        model_dir=model_dir)
    # Fit trained model on the IRV-transformed features.
    model.fit(dataset_trans)
    # Eval model on train
    scores = model.evaluate(dataset_trans, [classification_metric])
    assert scores[classification_metric.name] > .9
    # Reload trained model with identical hyperparameters.
    reloaded_model = dc.models.MultitaskIRVClassifier(
        n_tasks,
        K=5,
        learning_rate=0.01,
        batch_size=n_samples,
        model_dir=model_dir)
    reloaded_model.restore()
    # Check predictions match on a random sample. The sample must be
    # IRV-transformed first: the raw 128-bit fingerprints are not the
    # representation the model was trained on.
    Xpred = np.random.randint(2, size=(n_samples, n_features))
    predset = dc.data.NumpyDataset(Xpred, np.ones((n_samples, n_tasks)))
    predset_trans = IRV_transformer.transform(predset)
    origpred = model.predict(predset_trans)
    reloadpred = reloaded_model.predict(predset_trans)
    assert np.all(origpred == reloadpred)
    # Eval reloaded model on the *transformed* training set (previously it
    # was evaluated on the untransformed `dataset`, the wrong input space).
    scores = reloaded_model.evaluate(dataset_trans, [classification_metric])
    assert scores[classification_metric.name] > .9
@flaky  # training to >0.9 accuracy in 400 epochs is not fully deterministic
def test_progressive_classification_reload():
    """Test that ProgressiveMultitaskClassifier can be reloaded correctly.

    Trains a progressive multitask classifier on random labels, restores an
    identically configured model, and checks that predictions match.
    """
    np.random.seed(123)
    n_tasks = 5
    n_samples = 10
    n_features = 6
    # Generate dummy dataset
    np.random.seed(123)
    ids = np.arange(n_samples)
    X = np.random.rand(n_samples, n_features)
    y = np.random.randint(2, size=(n_samples, n_tasks))
    w = np.ones((n_samples, n_tasks))
    dataset = dc.data.NumpyDataset(X, y, w, ids)
    classification_metric = dc.metrics.Metric(
        dc.metrics.accuracy_score, task_averager=np.mean)
    model_dir = tempfile.mkdtemp()
    model = dc.models.ProgressiveMultitaskClassifier(
        n_tasks,
        n_features,
        layer_sizes=[50],
        bypass_layer_sizes=[10],
        dropouts=[0.],
        learning_rate=0.001,
        weight_init_stddevs=[.1],
        alpha_init_stddevs=[.02],
        batch_size=n_samples,
        model_dir=model_dir)
    # Fit trained model
    model.fit(dataset, nb_epoch=400)
    # Eval model on train
    scores = model.evaluate(dataset, [classification_metric])
    assert scores[classification_metric.name] > .9
    # Reload Trained Model
    reloaded_model = dc.models.ProgressiveMultitaskClassifier(
        n_tasks,
        n_features,
        layer_sizes=[50],
        bypass_layer_sizes=[10],
        dropouts=[0.],
        learning_rate=0.001,
        weight_init_stddevs=[.1],
        alpha_init_stddevs=[.02],
        batch_size=n_samples,
        model_dir=model_dir)
    reloaded_model.restore()
    # Check predictions match on random sample
    Xpred = np.random.rand(n_samples, n_features)
    predset = dc.data.NumpyDataset(Xpred)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
    # Eval model on train
    scores = reloaded_model.evaluate(dataset, [classification_metric])
    assert scores[classification_metric.name] > .9
def test_progressivemultitaskregressor_reload():
    """Test that ProgressiveMultitaskRegressor can be reloaded correctly.

    Trains a single-task progressive regressor, restores an identically
    configured model from the same model_dir, and checks predictions match.
    """
    n_samples = 10
    n_features = 3
    n_tasks = 1
    # Generate dummy dataset
    np.random.seed(123)
    ids = np.arange(n_samples)
    X = np.random.rand(n_samples, n_features)
    y = np.random.rand(n_samples, n_tasks)
    w = np.ones((n_samples, n_tasks))
    dataset = dc.data.NumpyDataset(X, y, w, ids)
    regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
    model_dir = tempfile.mkdtemp()
    model = dc.models.ProgressiveMultitaskRegressor(
        n_tasks,
        n_features,
        layer_sizes=[50],
        bypass_layer_sizes=[10],
        dropouts=[0.],
        learning_rate=0.001,
        weight_init_stddevs=[.1],
        alpha_init_stddevs=[.02],
        batch_size=n_samples,
        model_dir=model_dir)
    # Fit trained model
    model.fit(dataset, nb_epoch=100)
    # Eval model on train
    scores = model.evaluate(dataset, [regression_metric])
    assert scores[regression_metric.name] < .1
    # Reload trained model with identical hyperparameters.
    reloaded_model = dc.models.ProgressiveMultitaskRegressor(
        n_tasks,
        n_features,
        layer_sizes=[50],
        bypass_layer_sizes=[10],
        dropouts=[0.],
        learning_rate=0.001,
        weight_init_stddevs=[.1],
        alpha_init_stddevs=[.02],
        batch_size=n_samples,
        model_dir=model_dir)
    reloaded_model.restore()
    # Check predictions match on random sample
    Xpred = np.random.rand(n_samples, n_features)
    predset = dc.data.NumpyDataset(Xpred)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
    # Eval model on train
    scores = reloaded_model.evaluate(dataset, [regression_metric])
    assert scores[regression_metric.name] < 0.1
def test_DAG_regression_reload():
    """Test DAG regressor reloads.

    Featurizes a handful of SMILES with ConvMolFeaturizer, applies the
    DAGTransformer (whose max_atoms must match the model's), trains, then
    restores an identically configured DAGModel and checks predictions.
    """
    np.random.seed(123)
    tf.random.set_seed(123)
    n_tasks = 1
    # Load mini log-solubility dataset.
    featurizer = dc.feat.ConvMolFeaturizer()
    tasks = ["outcome"]  # NOTE(review): unused; kept for symmetry with sibling tests.
    mols = ["CC", "CCO", "CC", "CCC", "CCCCO", "CO", "CC", "CCCCC", "CCC", "CCCO"]
    n_samples = len(mols)
    X = featurizer(mols)
    y = np.random.rand(n_samples, n_tasks)
    dataset = dc.data.NumpyDataset(X, y)
    regression_metric = dc.metrics.Metric(
        dc.metrics.pearson_r2_score, task_averager=np.mean)
    n_feat = 75
    batch_size = 10
    # max_atoms here must agree with the DAGModel's max_atoms below.
    transformer = dc.trans.DAGTransformer(max_atoms=50)
    dataset = transformer.transform(dataset)
    model_dir = tempfile.mkdtemp()
    model = dc.models.DAGModel(
        n_tasks,
        max_atoms=50,
        n_atom_feat=n_feat,
        batch_size=batch_size,
        learning_rate=0.001,
        use_queue=False,
        mode="regression",
        model_dir=model_dir)
    # Fit trained model
    model.fit(dataset, nb_epoch=100)
    # Eval model on train
    scores = model.evaluate(dataset, [regression_metric])
    assert scores[regression_metric.name] > .1
    # Reload trained model with identical configuration.
    reloaded_model = dc.models.DAGModel(
        n_tasks,
        max_atoms=50,
        n_atom_feat=n_feat,
        batch_size=batch_size,
        learning_rate=0.001,
        use_queue=False,
        mode="regression",
        model_dir=model_dir)
    reloaded_model.restore()
    # Check predictions match on random sample (also DAG-transformed).
    predmols = ["CCCC", "CCCCCO", "CCCCC"]
    Xpred = featurizer(predmols)
    predset = dc.data.NumpyDataset(Xpred)
    predset = transformer.transform(predset)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
    # Eval model on train
    scores = reloaded_model.evaluate(dataset, [regression_metric])
    assert scores[regression_metric.name] > .1
def test_weave_classification_reload():
    """Test weave model can be reloaded.

    Trains a small WeaveModel classifier, restores an identically configured
    model from the same model_dir, and checks that predictions match.
    """
    np.random.seed(123)
    tf.random.set_seed(123)
    n_tasks = 1
    # Load mini log-solubility dataset.
    featurizer = dc.feat.WeaveFeaturizer()
    mols = ["CC", "CCCCC", "CCCCC", "CCC", "COOO", "COO", "OO"]
    X = featurizer(mols)
    y = [1, 1, 1, 1, 0, 0, 0]
    dataset = dc.data.NumpyDataset(X, y)
    classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
    batch_size = 5
    model_dir = tempfile.mkdtemp()
    model = dc.models.WeaveModel(
        n_tasks,
        batch_size=batch_size,
        learning_rate=0.01,
        mode="classification",
        dropouts=0.0,
        model_dir=model_dir)
    # Fit trained model
    model.fit(dataset, nb_epoch=100)
    # Eval model on train
    scores = model.evaluate(dataset, [classification_metric])
    assert scores[classification_metric.name] > .6
    # Reload the trained model. Hyperparameters match the original exactly
    # (a previous version passed learning_rate=0.003 here; harmless after
    # restore(), but misleading).
    reloaded_model = dc.models.WeaveModel(
        n_tasks,
        batch_size=batch_size,
        learning_rate=0.01,
        mode="classification",
        dropouts=0.0,
        model_dir=model_dir)
    reloaded_model.restore()
    # Check predictions match on random sample (previously this block was
    # duplicated before and after the reload; once is enough).
    predmols = ["CCCC", "CCCCCO", "CCCCC"]
    Xpred = featurizer(predmols)
    predset = dc.data.NumpyDataset(Xpred)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
    # Eval model on train
    scores = reloaded_model.evaluate(dataset, [classification_metric])
    assert scores[classification_metric.name] > .6
def test_MPNN_regression_reload():
    """Test MPNN can reload datasets.

    Trains a message-passing neural network regressor on three molecules,
    restores an identically configured model, and checks predictions match.
    """
    np.random.seed(123)
    tf.random.set_seed(123)
    n_tasks = 1
    # Load mini log-solubility dataset.
    featurizer = dc.feat.WeaveFeaturizer()
    tasks = ["outcome"]  # NOTE(review): unused; kept for symmetry with sibling tests.
    mols = ["C", "CO", "CC"]
    n_samples = len(mols)
    X = featurizer(mols)
    y = np.random.rand(n_samples, n_tasks)
    dataset = dc.data.NumpyDataset(X, y)
    regression_metric = dc.metrics.Metric(
        dc.metrics.pearson_r2_score, task_averager=np.mean)
    n_atom_feat = 75
    n_pair_feat = 14
    batch_size = 10
    model_dir = tempfile.mkdtemp()
    model = dc.models.MPNNModel(
        n_tasks,
        n_atom_feat=n_atom_feat,
        n_pair_feat=n_pair_feat,
        T=2,
        M=3,
        batch_size=batch_size,
        learning_rate=0.001,
        use_queue=False,
        mode="regression",
        model_dir=model_dir)
    # Fit trained model
    model.fit(dataset, nb_epoch=50)
    # Eval model on train
    scores = model.evaluate(dataset, [regression_metric])
    assert scores[regression_metric.name] > .8
    # Reload trained model with identical configuration.
    reloaded_model = dc.models.MPNNModel(
        n_tasks,
        n_atom_feat=n_atom_feat,
        n_pair_feat=n_pair_feat,
        T=2,
        M=3,
        batch_size=batch_size,
        learning_rate=0.001,
        use_queue=False,
        mode="regression",
        model_dir=model_dir)
    reloaded_model.restore()
    # Eval model on train
    scores = reloaded_model.evaluate(dataset, [regression_metric])
    assert scores[regression_metric.name] > .8
    # Check predictions match on random sample
    predmols = ["CCCC", "CCCCCO", "CCCCC"]
    Xpred = featurizer(predmols)
    predset = dc.data.NumpyDataset(Xpred)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
def test_textCNN_classification_reload():
    """Test textCNN model reloadinng.

    Trains a character-level CNN on raw SMILES strings, restores an
    identically configured model, and checks weights, layer counts and
    predictions all match.
    """
    np.random.seed(123)
    tf.random.set_seed(123)
    n_tasks = 1
    featurizer = dc.feat.RawFeaturizer()
    tasks = ["outcome"]  # NOTE(review): unused; kept for symmetry with sibling tests.
    mols = ["C", "CO", "CC"]
    n_samples = len(mols)
    X = featurizer(mols)
    y = np.random.randint(2, size=(n_samples, n_tasks))
    dataset = dc.data.NumpyDataset(X, y, ids=mols)
    classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score)
    # char_dict/seq_length are derived from the dataset and must be reused
    # verbatim when rebuilding the model for restore.
    char_dict, length = dc.models.TextCNNModel.build_char_dict(dataset)
    batch_size = 3
    model_dir = tempfile.mkdtemp()
    model = dc.models.TextCNNModel(
        n_tasks,
        char_dict,
        seq_length=length,
        batch_size=batch_size,
        learning_rate=0.001,
        use_queue=False,
        mode="classification",
        model_dir=model_dir)
    # Fit trained model
    model.fit(dataset, nb_epoch=200)
    # Eval model on train
    scores = model.evaluate(dataset, [classification_metric])
    assert scores[classification_metric.name] > .8
    # Reload trained model
    reloaded_model = dc.models.TextCNNModel(
        n_tasks,
        char_dict,
        seq_length=length,
        batch_size=batch_size,
        learning_rate=0.001,
        use_queue=False,
        mode="classification",
        model_dir=model_dir)
    reloaded_model.restore()
    # Eval model on train
    scores = reloaded_model.evaluate(dataset, [classification_metric])
    assert scores[classification_metric.name] > .8
    # The restored Keras weights must be identical, tensor by tensor.
    assert len(reloaded_model.model.get_weights()) == len(
        model.model.get_weights())
    for (reloaded, orig) in zip(reloaded_model.model.get_weights(),
                                model.model.get_weights()):
        assert np.all(reloaded == orig)
    # Check predictions match on random sample
    predmols = ["CCCC", "CCCCCO", "CCCCC"]
    Xpred = featurizer(predmols)
    predset = dc.data.NumpyDataset(Xpred, ids=predmols)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
    assert len(model.model.layers) == len(reloaded_model.model.layers)
def test_1d_cnn_regression_reload():
    """Test that a 1D CNN can reload.

    Trains a one-dimensional CNN regressor on (samples, 10, features) data,
    restores an identically configured model, and checks predictions match.
    """
    n_samples = 10
    n_features = 3
    n_tasks = 1
    np.random.seed(123)
    # Input is rank-3: (n_samples, sequence length 10, n_features).
    X = np.random.rand(n_samples, 10, n_features)
    y = np.random.randint(2, size=(n_samples, n_tasks)).astype(np.float32)
    dataset = dc.data.NumpyDataset(X, y)
    regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error)
    model_dir = tempfile.mkdtemp()
    model = dc.models.CNN(
        n_tasks,
        n_features,
        dims=1,
        dropouts=0,
        kernel_size=3,
        mode='regression',
        learning_rate=0.003,
        model_dir=model_dir)
    # Fit trained model
    model.fit(dataset, nb_epoch=200)
    # Eval model on train
    scores = model.evaluate(dataset, [regression_metric])
    assert scores[regression_metric.name] < 0.1
    # Reload trained model with identical configuration.
    reloaded_model = dc.models.CNN(
        n_tasks,
        n_features,
        dims=1,
        dropouts=0,
        kernel_size=3,
        mode='regression',
        learning_rate=0.003,
        model_dir=model_dir)
    reloaded_model.restore()
    # Check predictions match on random sample
    Xpred = np.random.rand(n_samples, 10, n_features)
    predset = dc.data.NumpyDataset(Xpred)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
    # Eval model on train
    scores = reloaded_model.evaluate(dataset, [regression_metric])
    assert scores[regression_metric.name] < 0.1
def test_graphconvmodel_reload():
    """Test that GraphConvModel can be reloaded correctly.

    Trains a graph-convolution classifier on three molecules, restores an
    identically configured model, and checks that predictions match.
    """
    featurizer = dc.feat.ConvMolFeaturizer()
    tasks = ["outcome"]
    n_tasks = len(tasks)
    mols = ["C", "CO", "CC"]
    n_samples = len(mols)
    X = featurizer(mols)
    y = np.array([0, 1, 0])
    dataset = dc.data.NumpyDataset(X, y)
    classification_metric = dc.metrics.Metric(
        dc.metrics.roc_auc_score, np.mean, mode="classification")
    batch_size = 10
    model_dir = tempfile.mkdtemp()
    model = dc.models.GraphConvModel(
        len(tasks),
        batch_size=batch_size,
        batch_normalize=False,
        mode='classification',
        model_dir=model_dir)
    model.fit(dataset, nb_epoch=10)
    scores = model.evaluate(dataset, [classification_metric])
    assert scores[classification_metric.name] >= 0.6
    # Reload trained Model
    reloaded_model = dc.models.GraphConvModel(
        len(tasks),
        batch_size=batch_size,
        batch_normalize=False,
        mode='classification',
        model_dir=model_dir)
    reloaded_model.restore()
    # Check predictions match on random sample
    predmols = ["CCCC", "CCCCCO", "CCCCC"]
    Xpred = featurizer(predmols)
    predset = dc.data.NumpyDataset(Xpred)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
    # Eval model on train
    scores = reloaded_model.evaluate(dataset, [classification_metric])
    assert scores[classification_metric.name] > .6
def test_chemception_reload():
    """Test that chemception models can be saved and reloaded.

    Trains a ChemCeption image-based classifier briefly, restores an
    identically configured model, and checks that predictions match.
    """
    img_size = 80
    img_spec = "engd"
    res = 0.5
    n_tasks = 1
    featurizer = dc.feat.SmilesToImage(
        img_size=img_size, img_spec=img_spec, res=res)
    # Small dummy dataset: identical molecules, random binary labels.
    data_points = 10
    mols = ["CCCCCCCC"] * data_points
    X = featurizer(mols)
    y = np.random.randint(0, 2, size=(data_points, n_tasks))
    w = np.ones(shape=(data_points, n_tasks))
    dataset = dc.data.NumpyDataset(X, y, w, mols)
    # (Removed unused, misspelled `classsification_metric`: the test never
    # evaluates, it only compares predictions.)
    model_dir = tempfile.mkdtemp()
    model = dc.models.ChemCeption(
        n_tasks=n_tasks,
        img_spec="engd",
        model_dir=model_dir,
        mode="classification")
    model.fit(dataset, nb_epoch=3)
    # Reload Trained Model
    reloaded_model = dc.models.ChemCeption(
        n_tasks=n_tasks,
        img_spec="engd",
        model_dir=model_dir,
        mode="classification")
    reloaded_model.restore()
    # Check predictions match on random sample
    predmols = ["CCCC", "CCCCCO", "CCCCC"]
    Xpred = featurizer(predmols)
    predset = dc.data.NumpyDataset(Xpred)
    origpred = model.predict(predset)
    reloadpred = reloaded_model.predict(predset)
    assert np.all(origpred == reloadpred)
# TODO: This test is a little awkward. The Smiles2Vec model awkwardly depends on a dataset_file being available on disk. This needs to be cleaned up to match the standard model handling API.
def test_smiles2vec_reload():
    """Test that smiles2vec models can be saved and reloaded.

    Builds a char-to-index vocabulary from a small on-disk CSV, trains a
    Smiles2Vec classifier briefly, restores an identically configured model,
    and checks that predictions match.
    """
    dataset_file = os.path.join(os.path.dirname(__file__), "chembl_25_small.csv")
    max_len = 250
    pad_len = 10
    max_seq_len = 20
    char_to_idx = create_char_to_idx(
        dataset_file, max_len=max_len, smiles_field="smiles")
    feat = dc.feat.SmilesToSeq(
        char_to_idx=char_to_idx, max_len=max_len, pad_len=pad_len)
    n_tasks = 5
    data_points = 10
    loader = dc.data.CSVLoader(
        tasks=CHEMBL25_TASKS, smiles_field='smiles', featurizer=feat)
    dataset = loader.create_dataset(
        inputs=[dataset_file], shard_size=10000, data_dir=tempfile.mkdtemp())
    # Truncate to a small random-label dataset of the first `data_points`
    # rows and the first `max_seq_len` sequence positions.
    y = np.random.randint(0, 2, size=(data_points, n_tasks))
    w = np.ones(shape=(data_points, n_tasks))
    dataset = dc.data.NumpyDataset(dataset.X[:data_points, :max_seq_len], y, w,
                                   dataset.ids[:data_points])
    # (Removed unused, misspelled `classsification_metric`: the test never
    # evaluates, it only compares predictions.)
    model_dir = tempfile.mkdtemp()
    model = dc.models.Smiles2Vec(
        char_to_idx=char_to_idx,
        max_seq_len=max_seq_len,
        use_conv=True,
        n_tasks=n_tasks,
        model_dir=model_dir,
        mode="classification")
    model.fit(dataset, nb_epoch=3)
    # Reload Trained Model
    reloaded_model = dc.models.Smiles2Vec(
        char_to_idx=char_to_idx,
        max_seq_len=max_seq_len,
        use_conv=True,
        n_tasks=n_tasks,
        model_dir=model_dir,
        mode="classification")
    reloaded_model.restore()
    # Check predictions match on original dataset
    origpred = model.predict(dataset)
    reloadpred = reloaded_model.predict(dataset)
    assert np.all(origpred == reloadpred)
# TODO: We need a cleaner usage example for this
def test_DTNN_regression_reload():
    """Test DTNN can reload datasets.

    Loads coulomb-matrix data from example_DTNN.mat, trains a DTNN
    regressor, restores an identically configured model, and checks that
    predictions match.
    """
    np.random.seed(123)
    tf.random.set_seed(123)
    current_dir = os.path.dirname(os.path.abspath(__file__))
    input_file = os.path.join(current_dir, "example_DTNN.mat")
    dataset = scipy.io.loadmat(input_file)
    X = dataset['X']
    y = dataset['T']
    w = np.ones_like(y)
    dataset = dc.data.NumpyDataset(X, y, w, ids=None)
    # Task count comes from the loaded labels (a dead `n_tasks = 1`
    # assignment that was immediately overwritten has been removed, along
    # with an unused pearson_r2 metric).
    n_tasks = y.shape[1]
    model_dir = tempfile.mkdtemp()
    model = dc.models.DTNNModel(
        n_tasks,
        n_embedding=20,
        n_distance=100,
        learning_rate=1.0,
        model_dir=model_dir,
        mode="regression")
    # Fit trained model
    model.fit(dataset, nb_epoch=250)
    # Eval model on train: mean relative error against the true labels.
    pred = model.predict(dataset)
    mean_rel_error = np.mean(np.abs(1 - pred / y))
    assert mean_rel_error < 0.2
    # Reload trained model with identical configuration.
    reloaded_model = dc.models.DTNNModel(
        n_tasks,
        n_embedding=20,
        n_distance=100,
        learning_rate=1.0,
        model_dir=model_dir,
        mode="regression")
    reloaded_model.restore()
    # Check predictions match on random sample
    origpred = model.predict(dataset)
    reloadpred = reloaded_model.predict(dataset)
    assert np.all(origpred == reloadpred)
def generate_sequences(sequence_length, num_sequences):
    """Yield ``num_sequences`` (input, target) pairs of random digit lists.

    Each sequence has a random length in [1, sequence_length] and contains
    digits 0-9; the target equals the input (an autoencoding task for the
    SeqToSeq test below).
    """
    for _ in range(num_sequences):
        length = np.random.randint(1, sequence_length + 1)
        seq = [np.random.randint(10) for _ in range(length)]
        yield (seq, seq)
def test_seq2seq_reload():
    """Test reloading for seq2seq models.

    Trains SeqToSeq to (approximately) autoencode short digit sequences,
    restores an identically configured model, and checks that sequence
    predictions (beam widths 1 and 4) and embeddings all match.
    """
    sequence_length = 8
    tokens = list(range(10))
    model_dir = tempfile.mkdtemp()
    s = dc.models.SeqToSeq(
        tokens,
        tokens,
        sequence_length,
        encoder_layers=2,
        decoder_layers=2,
        embedding_dimension=150,
        learning_rate=0.01,
        dropout=0.1,
        model_dir=model_dir)
    # Train the model on random sequences. We aren't training long enough to
    # really make it reliable, but I want to keep this test fast, and it should
    # still be able to reproduce a reasonable fraction of input sequences.
    s.fit_sequences(generate_sequences(sequence_length, 25000))
    # Test it out.
    tests = [seq for seq, target in generate_sequences(sequence_length, 50)]
    pred1 = s.predict_from_sequences(tests, beam_width=1)
    pred4 = s.predict_from_sequences(tests, beam_width=4)
    # Reload with identical configuration so the checkpoint fits.
    reloaded_s = dc.models.SeqToSeq(
        tokens,
        tokens,
        sequence_length,
        encoder_layers=2,
        decoder_layers=2,
        embedding_dimension=150,
        learning_rate=0.01,
        dropout=0.1,
        model_dir=model_dir)
    reloaded_s.restore()
    # Sequence predictions must agree element-wise for both beam widths.
    reloaded_pred1 = reloaded_s.predict_from_sequences(tests, beam_width=1)
    assert len(pred1) == len(reloaded_pred1)
    for (p1, r1) in zip(pred1, reloaded_pred1):
        assert p1 == r1
    reloaded_pred4 = reloaded_s.predict_from_sequences(tests, beam_width=4)
    assert len(pred4) == len(reloaded_pred4)
    for (p4, r4) in zip(pred4, reloaded_pred4):
        assert p4 == r4
    # Embeddings, and predictions decoded from embeddings, must also match.
    embeddings = s.predict_embeddings(tests)
    pred1e = s.predict_from_embeddings(embeddings, beam_width=1)
    pred4e = s.predict_from_embeddings(embeddings, beam_width=4)
    reloaded_embeddings = reloaded_s.predict_embeddings(tests)
    reloaded_pred1e = reloaded_s.predict_from_embeddings(
        reloaded_embeddings, beam_width=1)
    reloaded_pred4e = reloaded_s.predict_from_embeddings(
        reloaded_embeddings, beam_width=4)
    assert np.all(embeddings == reloaded_embeddings)
    assert len(pred1e) == len(reloaded_pred1e)
    for (p1e, r1e) in zip(pred1e, reloaded_pred1e):
        assert p1e == r1e
    assert len(pred4e) == len(reloaded_pred4e)
    for (p4e, r4e) in zip(pred4e, reloaded_pred4e):
        assert p4e == r4e
| |
"""
This is the main module for running the BERNAISE code.
More specific info will follow in a later commit.
"""
import dolfin as df
from common.cmd import parse_command_line, help_menu
from common.io import create_initial_folders, load_checkpoint, save_solution, \
load_parameters, load_mesh
__author__ = "Gaute Linga"
cmd_kwargs = parse_command_line()
# Check if user has called for help
if cmd_kwargs.get("help", False):
help_menu()
exit()
# Import problem and default parameters
default_problem = "simple"
exec("from problems.{} import *".format(
cmd_kwargs.get("problem", default_problem)))
# Problem specific parameters
parameters.update(problem())
# Internalize cmd arguments and mesh
vars().update(import_problem_hook(**vars()))
# If loading from checkpoint, update parameters from file, and then
# again from command line arguments.
if restart_folder:
info_red("Loading parameters from checkpoint.")
load_parameters(parameters, os.path.join(
restart_folder, "parameters.dat"))
internalize_cmd_kwargs(parameters, cmd_kwargs)
vars().update(parameters)
info_red("Loading mesh from checkpoint.")
mesh = load_mesh(os.path.join(restart_folder, "fields.h5"),
use_partition_from_file=True)
# Import solver functionality
exec("from solvers.{} import *".format(solver))
# Get subproblems
subproblems = get_subproblems(**vars())
# Declare finite elements
elements = dict()
for name, (family, degree, is_vector) in base_elements.items():
if is_vector:
elements[name] = df.VectorElement(family, mesh.ufl_cell(), degree)
else:
elements[name] = df.FiniteElement(family, mesh.ufl_cell(), degree)
# Declare function spaces
spaces = dict()
for name, subproblem in subproblems.items():
if len(subproblem) > 1:
spaces[name] = df.FunctionSpace(
mesh, df.MixedElement(
[elements[s["element"]] for s in subproblem]),
constrained_domain=constrained_domain(**vars()))
# If there is only one field in the subproblem, don't bother with
# the MixedElement.
elif len(subproblem) == 1:
spaces[name] = df.FunctionSpace(
mesh, elements[subproblem[0]["element"]],
constrained_domain=constrained_domain(**vars()))
else:
info_on_red("Something went wrong here!")
exit("")
# dim = mesh.topology().dim() # In case the velocity fields should be
# # segregated at some point
fields = []
field_to_subspace = dict()
field_to_subproblem = dict()
for name, subproblem in subproblems.items():
if len(subproblem) > 1:
for i, s in enumerate(subproblem):
field = s["name"]
fields.append(field)
field_to_subspace[field] = spaces[name].sub(i)
field_to_subproblem[field] = (name, i)
else:
field = subproblem[0]["name"]
fields.append(field)
field_to_subspace[field] = spaces[name]
field_to_subproblem[field] = (name, -1)
# Create initial folders for storing results
newfolder, tstepfiles = create_initial_folders(folder, restart_folder,
fields, tstep, parameters)
# Create overarching test and trial functions
test_functions = dict()
trial_functions = dict()
for name, subproblem in subproblems.items():
if len(subproblem) > 1:
test_functions[name] = df.TestFunctions(spaces[name])
trial_functions[name] = df.TrialFunctions(spaces[name])
else:
test_functions[name] = df.TestFunction(spaces[name])
trial_functions[name] = df.TrialFunction(spaces[name])
# Create work dictionaries for all subproblems
w_ = dict((subproblem, df.Function(space, name=subproblem))
for subproblem, space in spaces.items())
w_1 = dict((subproblem, df.Function(space, name=subproblem+"_1"))
for subproblem, space in spaces.items())
w_tmp = dict((subproblem, df.Function(space, name=subproblem+"_tmp"))
for subproblem, space in spaces.items())
# Shortcuts to the fields
x_ = dict()
for name, subproblem in subproblems.items():
if len(subproblem) > 1:
w_loc = df.split(w_[name])
for i, field in enumerate(subproblem):
x_[field["name"]] = w_loc[i]
else:
x_[subproblem[0]["name"]] = w_[name]
# If continuing from previously, restart from checkpoint
load_checkpoint(restart_folder, w_, w_1)
# Get boundary conditions, from fields to subproblems
bcs_tuple = create_bcs(**vars())
if len(bcs_tuple) == 3:
boundaries, bcs, bcs_pointwise = bcs_tuple
elif len(bcs_tuple) == 2:
boundaries, bcs = bcs_tuple
bcs_pointwise = None
else:
info_on_red("Wrong implementation of create_bcs.")
exit()
# Set up subdomains
subdomains = df.MeshFunction("size_t", mesh, mesh.topology().dim()-1)
subdomains.set_all(0)
boundary_to_mark = dict()
mark_to_boundary = dict()
for i, (boundary_name, markers) in enumerate(boundaries.items()):
for marker in markers:
marker.mark(subdomains, i+1)
boundary_to_mark[boundary_name] = i+1
mark_to_boundary[i] = boundary_name
# Subdomains check
if dump_subdomains:
subdomains_xdmf = df.XDMFFile("subdomains_dump.xdmf")
subdomains_xdmf.write(subdomains)
# Set up dirichlet part of bcs
dirichlet_bcs = dict()
for subproblem_name in subproblems.keys():
dirichlet_bcs[subproblem_name] = []
# Neumann BCs (per field)
neumann_bcs = dict()
for field in fields:
neumann_bcs[field] = dict()
for boundary_name, bcs_fields in bcs.items():
for field, bc in bcs_fields.items():
subproblem_name = field_to_subproblem[field][0]
subspace = field_to_subspace[field]
mark = boundary_to_mark[boundary_name]
if bc.is_dbc():
dirichlet_bcs[subproblem_name].append(
bc.dbc(subspace, subdomains, mark))
if bc.is_nbc():
neumann_bcs[field][boundary_name] = bc.nbc()
# Pointwise dirichlet bcs
for field, (value, c_code) in bcs_pointwise.items():
subproblem_name = field_to_subproblem[field][0]
subspace = field_to_subspace[field]
if not isinstance(value, df.Expression):
value = df.Constant(value)
dirichlet_bcs[subproblem_name].append(
df.DirichletBC(subspace, value, c_code, "pointwise"))
# Compute some mesh related stuff
dx = df.dx
ds = df.Measure("ds", domain=mesh, subdomain_data=subdomains)
normal = df.FacetNormal(mesh)
# Initialize solutions
w_init_fields = initialize(**vars())
if w_init_fields:
for name, subproblem in subproblems.items():
w_init_vector = []
if len(subproblem) > 1:
for i, s in enumerate(subproblem):
field = s["name"]
# Only change initial state if it is given in w_init_fields.
if field in w_init_fields:
w_init_field = w_init_fields[field]
else:
# Otherwise take the default value of that field.
w_init_field = w_[name].sub(i)
# Use df.project(df.as_vector(...)) with care...
num_subspaces = w_init_field.function_space().num_sub_spaces()
if num_subspaces == 0:
w_init_vector.append(w_init_field)
else:
for j in range(num_subspaces):
w_init_vector.append(w_init_field.sub(j))
# assert len(w_init_vector) == w_[name].value_size()
w_init = df.project(
df.as_vector(tuple(w_init_vector)), w_[name].function_space(),
solver_type="gmres", preconditioner_type="default")
else:
field = subproblem[0]["name"]
if field in w_init_fields:
w_init_field = w_init_fields[field]
else:
# Take default value...
w_init_field = w_[name]
w_init = df.project(w_init_field, w_[name].function_space(),
solver_type="gmres",
preconditioner_type="default")
w_[name].interpolate(w_init)
w_1[name].interpolate(w_init)
# Get rhs source terms (if any)
q_rhs = rhs_source(t=t_0, **vars())
# Setup problem
vars().update(setup(**vars()))
# Problem-specific hook before time loop
vars().update(start_hook(**vars()))
stop = False
t = t_0
# Initial state to XDMF
stop = save_solution(**vars())
total_computing_time = 0.
total_num_tsteps = 0
tstep_0 = tstep
timer = df.Timer("Simulation loop")
timer.start()
while not stop:
tstep_hook(**vars())
solve(**vars())
update(**vars())
t += dt
tstep += 1
stop = save_solution(**vars())
if tstep % info_intv == 0 or stop:
info_green("Time = {0:f}, timestep = {1:d}".format(t, tstep))
split_computing_time = timer.stop()
split_num_tsteps = tstep-tstep_0
timer.start()
tstep_0 = tstep
total_computing_time += split_computing_time
total_num_tsteps += split_num_tsteps
info_cyan("Computing time for previous {0:d}"
" timesteps: {1:f} seconds"
" ({2:f} seconds/timestep)".format(
split_num_tsteps, split_computing_time,
split_computing_time/split_num_tsteps))
df.list_timings(df.TimingClear.clear, [df.TimingType.wall])
if total_num_tsteps > 0:
info_cyan("Total computing time for all {0:d}"
" timesteps: {1:f} seconds"
" ({2:f} seconds/timestep)".format(
total_num_tsteps, total_computing_time,
total_computing_time/total_num_tsteps))
end_hook(**vars())
| |
#!/usr/bin/env python
"""A script to generate FileCheck statements for mlir unit tests.
This script is a utility to add FileCheck patterns to an mlir file.
NOTE: The input .mlir is expected to be the output from the parser, not a
stripped down variant.
Example usage:
$ generate-test-checks.py foo.mlir
$ mlir-opt foo.mlir -transformation | generate-test-checks.py
The script will heuristically insert CHECK/CHECK-LABEL commands for each line
within the file. By default this script will also try to insert string
substitution blocks for all SSA value names. The script is designed to make
adding checks to a test case fast, it is *not* designed to be authoritative
about what constitutes a good test!
"""
# Copyright 2019 The MLIR Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os # Used to advertise this file's name ("autogenerated_note").
import re
import sys
import string
# Banner prepended to the generated output so readers can tell the CHECK
# lines were produced by this script rather than written by hand.
ADVERT = '// NOTE: Assertions have been autogenerated by '
# Regex command to match an SSA identifier.
SSA_RE_STR = '[0-9]+|[a-zA-Z$._-][a-zA-Z0-9$._-]*'
SSA_RE = re.compile(SSA_RE_STR)
class SSAVariableNamer:
  """Allocates FileCheck substitution names (VAL_0, VAL_1, ...) for SSA values.

  Scopes are stacked to mirror region nesting in the input: names are always
  recorded in the innermost scope, and the counter is global so every minted
  name is unique across the whole file.
  """

  def __init__(self):
    self.scopes = []
    self.name_counter = 0

  def generate_name(self, ssa_name):
    """Mint a fresh substitution name for `ssa_name` in the current scope."""
    variable = f'VAL_{self.name_counter}'
    self.name_counter += 1
    self.scopes[-1][ssa_name] = variable
    return variable

  def push_name_scope(self):
    """Open a new, empty variable-name scope."""
    self.scopes.append({})

  def pop_name_scope(self):
    """Discard the innermost variable-name scope."""
    self.scopes.pop()
# Process a line of input that has been split at each SSA identifier '%'.
def process_line(line_chunks, variable_namer):
  """Rebuild a '%'-split line, substituting FileCheck variables for SSA names.

  Each chunk in `line_chunks` is the text that followed a '%' in the original
  line. A previously seen identifier is replaced with its existing
  '[[VAL_n]]' substitution; an unseen one gets a fresh '[[VAL_n:%.*]]'
  capture recorded via `variable_namer`. Returns the rebuilt text with a
  trailing newline.
  """
  output_line = ''
  for chunk in line_chunks:
    m = SSA_RE.match(chunk)
    # Fix: the original crashed (AttributeError on None) when a '%' was not
    # followed by a valid SSA identifier (e.g. '%' inside a string literal).
    # Restore the stripped '%' verbatim and continue instead.
    if m is None:
      output_line += '%' + chunk
      continue
    ssa_name = m.group(0)
    # Re-use an existing substitution from any enclosing scope.
    variable = None
    for scope in variable_namer.scopes:
      variable = scope.get(ssa_name)
      if variable is not None:
        break
    if variable is not None:
      output_line += '[[' + variable + ']]'
    else:
      # First sighting: define a new capture for this SSA value.
      variable = variable_namer.generate_name(ssa_name)
      output_line += '[[' + variable + ':%.*]]'
    # Append the text that followed the identifier unchanged.
    output_line += chunk[len(ssa_name):]
  return output_line + '\n'
# Pre-process a line of input to remove any character sequences that will be
# problematic with FileCheck.
def preprocess_line(line):
  """Escape bracket sequences that FileCheck would treat specially.

  '[[' opens a FileCheck variable, and '[%' would collide with the SSA
  substitutions inserted later, so both are rewritten into literal-match
  blocks.
  """
  escaped = line.replace('[[', '{{\\[\\[}}')
  return escaped.replace('[%', '{{\\[}}%')
def main():
  """Read an .mlir file, emit heuristic CHECK/CHECK-LABEL lines for it."""
  from argparse import RawTextHelpFormatter
  parser = argparse.ArgumentParser(
      description=__doc__, formatter_class=RawTextHelpFormatter)
  parser.add_argument(
      '--check-prefix', default='CHECK', help='Prefix to use from check file.')
  parser.add_argument(
      '-o',
      '--output',
      nargs='?',
      type=argparse.FileType('w'),
      default=sys.stdout)
  parser.add_argument(
      'input',
      nargs='?',
      type=argparse.FileType('r'),
      default=sys.stdin)
  args = parser.parse_args()
  # Open the given input file.
  input_lines = [l.rstrip() for l in args.input]
  args.input.close()
  output_lines = []
  # Generate a note used for the generated check file.
  script_name = os.path.basename(__file__)
  autogenerated_note = (ADVERT + 'utils/' + script_name)
  output_lines.append(autogenerated_note + '\n')
  # A map containing data used for naming SSA value names.
  variable_namer = SSAVariableNamer()
  for input_line in input_lines:
    if not input_line:
      continue
    lstripped_input_line = input_line.lstrip()
    # Lines with blocks begin with a ^. These lines have a trailing comment
    # that needs to be stripped.
    is_block = lstripped_input_line[0] == '^'
    if is_block:
      input_line = input_line.rsplit('//', 1)[0].rstrip()
    # Top-level operations are heuristically the operations at nesting level 1.
    # NOTE(review): indexing input_line[2] assumes the printer's fixed
    # indentation width — confirm against the mlir-opt output format.
    is_toplevel_op = (not is_block and input_line.startswith(' ') and
                      input_line[2] != ' ' and input_line[2] != '}')
    # If the line starts with a '}', pop the last name scope.
    if lstripped_input_line[0] == '}':
      variable_namer.pop_name_scope()
    # If the line ends with a '{', push a new name scope.
    if input_line[-1] == '{':
      variable_namer.push_name_scope()
    # Preprocess the input to remove any sequences that may be problematic with
    # FileCheck.
    input_line = preprocess_line(input_line)
    # Split the line at the each SSA value name.
    ssa_split = input_line.split('%')
    # If this is a top-level operation use 'CHECK-LABEL', otherwise 'CHECK:'.
    if not is_toplevel_op or not ssa_split[0]:
      output_line = '// ' + args.check_prefix + ': '
      # Pad to align with the 'LABEL' statements.
      output_line += (' ' * len('-LABEL'))
      # Output the first line chunk that does not contain an SSA name.
      output_line += ssa_split[0]
      # Process the rest of the input line.
      output_line += process_line(ssa_split[1:], variable_namer)
    else:
      # Append a newline to the output to separate the logical blocks.
      output_lines.append('\n')
      output_line = '// ' + args.check_prefix + '-LABEL: '
      # Output the first line chunk that does not contain an SSA name for the
      # label.
      output_line += ssa_split[0] + '\n'
      # Process the rest of the input line on a separate check line.
      if len(ssa_split) > 1:
        output_line += '// ' + args.check_prefix + '-SAME: '
        # Pad to align with the original position in the line.
        output_line += ' ' * len(ssa_split[0])
        # Process the rest of the line.
        output_line += process_line(ssa_split[1:], variable_namer)
    # Append the output line.
    output_lines.append(output_line)
  # Write the output.
  for output_line in output_lines:
    args.output.write(output_line)
  args.output.write('\n')
  args.output.close()
if __name__ == '__main__':
  main()
| |
from datetime import (
date,
datetime,
)
import itertools
import numpy as np
import pytest
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import (
Index,
MultiIndex,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
def test_constructor_single_level():
    """A one-level MultiIndex is valid and keeps its level values and name."""
    single = MultiIndex(
        levels=[["foo", "bar", "baz", "qux"]], codes=[[0, 1, 2, 3]], names=["first"]
    )
    assert isinstance(single, MultiIndex)
    tm.assert_index_equal(
        single.levels[0], Index(["foo", "bar", "baz", "qux"], name="first")
    )
    assert single.names == ["first"]
def test_constructor_no_levels():
    """Empty or missing levels/codes must raise on construction."""
    with pytest.raises(ValueError, match="non-zero number of levels/codes"):
        MultiIndex(levels=[], codes=[])
    missing = "Must pass both levels and codes"
    with pytest.raises(TypeError, match=missing):
        MultiIndex(levels=[])
    with pytest.raises(TypeError, match=missing):
        MultiIndex(codes=[])
def test_constructor_nonhashable_names():
    """Unhashable (list) names are rejected at construction and rename (GH 20527)."""
    msg = r"MultiIndex\.name must be a hashable type"
    with pytest.raises(TypeError, match=msg):
        MultiIndex(
            levels=[[1, 2], ["one", "two"]],
            codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
            names=(["foo"], ["bar"]),
        )
    mi = MultiIndex(
        levels=[[1, 2], ["one", "two"]],
        codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
        names=("foo", "bar"),
    )
    bad_names = [["foor"], ["barr"]]
    # Renaming after construction must be rejected the same way.
    with pytest.raises(TypeError, match=msg):
        mi.rename(names=bad_names)
    with pytest.raises(TypeError, match=msg):
        mi.set_names(names=bad_names)
def test_constructor_mismatched_codes_levels(idx):
    """Levels and codes must agree in count, length and value range."""
    with pytest.raises(ValueError, match="Length of levels and codes must be the same"):
        MultiIndex(
            levels=["a"], codes=[np.array([1]), np.array([2]), np.array([3])]
        )
    length_error = (
        r"On level 0, code max \(3\) >= length of level \(1\)\. "
        "NOTE: this index is in an inconsistent state"
    )
    label_error = r"Unequal code lengths: \[4, 2\]"
    code_value_error = r"On level 0, code value \(-2\) < -1"
    # important to check that it's looking at the right thing.
    with pytest.raises(ValueError, match=length_error):
        MultiIndex(levels=[["a"], ["b"]], codes=[[0, 1, 2, 3], [0, 3, 4, 1]])
    with pytest.raises(ValueError, match=label_error):
        MultiIndex(levels=[["a"], ["b"]], codes=[[0, 0, 0, 0], [0, 0]])
    # The same validation is applied through the external API.
    with pytest.raises(ValueError, match=length_error):
        idx.copy().set_levels([["a"], ["b"]])
    with pytest.raises(ValueError, match=label_error):
        idx.copy().set_codes([[0, 0, 0, 0], [0, 0]])
    # With verify_integrity=False the inconsistent codes are accepted silently.
    idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]], verify_integrity=False)
    # Codes below -1 are never valid.
    with pytest.raises(ValueError, match=code_value_error):
        MultiIndex(levels=[["a"], ["b"]], codes=[[0, -2], [0, 0]])
def test_na_levels():
    """Codes pointing at missing level values (NaN/NaT/None) collapse to -1 (GH26408)."""
    all_na = [np.nan, None, pd.NaT, 128, 2]
    result = MultiIndex(levels=[all_na], codes=[[0, -1, 1, 2, 3, 4]])
    expected = MultiIndex(levels=[all_na], codes=[[-1, -1, -1, -1, 3, 4]])
    tm.assert_index_equal(result, expected)
    mixed = [np.nan, "s", pd.NaT, 128, None]
    result = MultiIndex(levels=[mixed], codes=[[0, -1, 1, 2, 3, 4]])
    expected = MultiIndex(levels=[mixed], codes=[[-1, -1, 1, -1, 3, -1]])
    tm.assert_index_equal(result, expected)
    # The same normalization happens through set_levels ...
    result = MultiIndex(
        levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]]
    ).set_levels([mixed])
    tm.assert_index_equal(result, expected)
    # ... and through set_codes.
    result = MultiIndex(
        levels=[mixed], codes=[[1, 2, 2, 2, 2, 2]]
    ).set_codes([[0, -1, 1, 2, 3, 4]])
    tm.assert_index_equal(result, expected)
def test_copy_in_constructor():
    """With copy=True the MultiIndex is insulated from later input mutation."""
    levels = np.array(["a", "b", "c"])
    codes = np.array([1, 1, 2, 0, 0, 1, 1])
    original_code = codes[0]
    mi = MultiIndex(levels=[levels, levels], codes=[codes, codes], copy=True)
    assert mi.codes[0][0] == original_code
    codes[0] = 15
    assert mi.codes[0][0] == original_code
    original_level = levels[0]
    levels[0] = "PANDA"
    assert mi.levels[0][0] == original_level
# ----------------------------------------------------------------------------
# from_arrays
# ----------------------------------------------------------------------------
def test_from_arrays(idx):
arrays = [
np.asarray(lev).take(level_codes)
for lev, level_codes in zip(idx.levels, idx.codes)
]
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp("20130101")], ["a", "b"]])
assert result.levels[0].equals(Index([Timestamp("20130101")]))
assert result.levels[1].equals(Index(["a", "b"]))
def test_from_arrays_iterator(idx):
    """from_arrays accepts an iterator of arrays but rejects non-iterables (GH 18434)."""
    materialized = [
        np.asarray(level).take(level_codes)
        for level, level_codes in zip(idx.levels, idx.codes)
    ]
    rebuilt = MultiIndex.from_arrays(iter(materialized), names=idx.names)
    tm.assert_index_equal(rebuilt, idx)
    # A scalar is not a sequence of array-likes.
    with pytest.raises(
        TypeError, match="Input must be a list / sequence of array-likes."
    ):
        MultiIndex.from_arrays(0)
def test_from_arrays_tuples(idx):
arrays = tuple(
tuple(np.asarray(lev).take(level_codes))
for lev, level_codes in zip(idx.levels, idx.codes)
)
# tuple of tuples as input
result = MultiIndex.from_arrays(arrays, names=idx.names)
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize(
    ("idx1", "idx2"),
    [
        (
            pd.period_range("2011-01-01", freq="D", periods=3),
            pd.period_range("2015-01-01", freq="H", periods=3),
        ),
        (
            date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"),
            date_range("2015-01-01 10:00", freq="H", periods=3, tz="Asia/Tokyo"),
        ),
        (
            pd.timedelta_range("1 days", freq="D", periods=3),
            pd.timedelta_range("2 hours", freq="H", periods=3),
        ),
    ],
)
def test_from_arrays_index_series_period_datetimetz_and_timedelta(idx1, idx2):
    """Datetime-like dtypes survive from_arrays, from Index and Series alike."""
    from_indexes = MultiIndex.from_arrays([idx1, idx2])
    tm.assert_index_equal(from_indexes.get_level_values(0), idx1)
    tm.assert_index_equal(from_indexes.get_level_values(1), idx2)
    from_series = MultiIndex.from_arrays([Series(idx1), Series(idx2)])
    tm.assert_index_equal(from_series.get_level_values(0), idx1)
    tm.assert_index_equal(from_series.get_level_values(1), idx2)
    tm.assert_index_equal(from_indexes, from_series)
def test_from_arrays_index_datetimelike_mixed():
    """Mixed datetime/timedelta/period inputs each keep their dtype."""
    inputs = [
        date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"),
        date_range("2015-01-01 10:00", freq="H", periods=3),
        pd.timedelta_range("1 days", freq="D", periods=3),
        pd.period_range("2011-01-01", freq="D", periods=3),
    ]
    from_indexes = MultiIndex.from_arrays(inputs)
    for position, expected_level in enumerate(inputs):
        tm.assert_index_equal(from_indexes.get_level_values(position), expected_level)
    from_series = MultiIndex.from_arrays([Series(values) for values in inputs])
    for position, expected_level in enumerate(inputs):
        tm.assert_index_equal(from_series.get_level_values(position), expected_level)
    tm.assert_index_equal(from_indexes, from_series)
def test_from_arrays_index_series_categorical():
    """Categorical inputs keep categories and ordered flag (GH13743)."""
    unordered = pd.CategoricalIndex(
        list("abcaab"), categories=list("bac"), ordered=False
    )
    ordered = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=True)
    # Same expectations whether given as Index, Series, or raw Categorical.
    for build in (lambda x: x, Series, lambda x: x.values):
        result = MultiIndex.from_arrays([build(unordered), build(ordered)])
        tm.assert_index_equal(result.get_level_values(0), unordered)
        tm.assert_index_equal(result.get_level_values(1), ordered)
def test_from_arrays_empty():
    """Zero arrays raise; one or more empty arrays give empty indexes."""
    with pytest.raises(ValueError, match="Must pass non-zero number of levels/codes"):
        MultiIndex.from_arrays(arrays=[])
    # A single empty array yields an empty one-level MultiIndex.
    result = MultiIndex.from_arrays(arrays=[[]], names=["A"])
    assert isinstance(result, MultiIndex)
    tm.assert_index_equal(result.levels[0], Index([], name="A"))
    assert result.names == ["A"]
    # The same holds for any number of levels.
    for width in [2, 3]:
        names = list("ABC")[:width]
        result = MultiIndex.from_arrays(arrays=[[]] * width, names=names)
        expected = MultiIndex(levels=[[]] * width, codes=[[]] * width, names=names)
        tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
    "invalid_sequence_of_arrays",
    [
        1,
        [1],
        [1, 2],
        [[1], 2],
        [1, [2]],
        "a",
        ["a"],
        ["a", "b"],
        [["a"], "b"],
        (1,),
        (1, 2),
        ([1], 2),
        (1, [2]),
        "a",
        ("a",),
        ("a", "b"),
        (["a"], "b"),
        [(1,), 2],
        [1, (2,)],
        [("a",), "b"],
        ((1,), 2),
        (1, (2,)),
        (("a",), "b"),
    ],
)
def test_from_arrays_invalid_input(invalid_sequence_of_arrays):
    """Anything that is not a sequence of array-likes is rejected."""
    with pytest.raises(TypeError, match="Input must be a list / sequence of array-likes"):
        MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays)
@pytest.mark.parametrize(
    "idx1, idx2", [([1, 2, 3], ["a", "b"]), ([], ["a", "b"]), ([1, 2, 3], [])]
)
def test_from_arrays_different_lengths(idx1, idx2):
    """Arrays of unequal length cannot form a MultiIndex (gh-13599)."""
    with pytest.raises(ValueError, match="^all arrays must be same length$"):
        MultiIndex.from_arrays([idx1, idx2])
def test_from_arrays_respects_none_names():
    """Explicit names=None overrides the Series names instead of inheriting (GH27292)."""
    numbers = Series([1, 2, 3], name="foo")
    letters = Series(["a", "b", "c"], name="bar")
    result = MultiIndex.from_arrays([numbers, letters], names=None)
    expected = MultiIndex(
        levels=[[1, 2, 3], ["a", "b", "c"]], codes=[[0, 1, 2], [0, 1, 2]], names=None
    )
    tm.assert_index_equal(result, expected)
# ----------------------------------------------------------------------------
# from_tuples
# ----------------------------------------------------------------------------
def test_from_tuples():
    """An empty list is ambiguous and raises; tuples otherwise round-trip."""
    with pytest.raises(TypeError, match="Cannot infer number of levels from empty list"):
        MultiIndex.from_tuples([])
    result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=["a", "b"])
    expected = MultiIndex(
        levels=[[1, 3], [2, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"]
    )
    tm.assert_index_equal(result, expected)
def test_from_tuples_iterator():
    """A zip (iterator) of tuples is accepted; a non-iterable raises (GH 18434)."""
    expected = MultiIndex(
        levels=[[1, 3], [2, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"]
    )
    result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=["a", "b"])
    tm.assert_index_equal(result, expected)
    with pytest.raises(TypeError, match="Input must be a list / sequence of tuple-likes."):
        MultiIndex.from_tuples(0)
def test_from_tuples_empty():
    """An empty tuple list with explicit names matches empty from_arrays (GH 16777)."""
    expected = MultiIndex.from_arrays(arrays=[[], []], names=["a", "b"])
    tm.assert_index_equal(MultiIndex.from_tuples([], names=["a", "b"]), expected)
def test_from_tuples_index_values(idx):
result = MultiIndex.from_tuples(idx)
assert (result.values == idx.values).all()
def test_tuples_with_name_string():
    """A scalar name is invalid for tuple input (GH 15110, GH 14848)."""
    tuples = [(0, 0, 1), (0, 1, 0), (1, 0, 0)]
    msg = "Names should be list-like for a MultiIndex"
    for bad_name in ("abc", "a"):
        with pytest.raises(ValueError, match=msg):
            Index(tuples, name=bad_name)
def test_from_tuples_with_tuple_label():
    """A tuple may itself be a label inside a MultiIndex level (GH 15457)."""
    expected = pd.DataFrame(
        [[2, 1, 2], [4, (1, 2), 3]], columns=["a", "b", "c"]
    ).set_index(["a", "b"])
    mi = MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=("a", "b"))
    result = pd.DataFrame([2, 3], columns=["c"], index=mi)
    tm.assert_frame_equal(expected, result)
# ----------------------------------------------------------------------------
# from_product
# ----------------------------------------------------------------------------
def test_from_product_empty_zero_levels():
    """from_product with no iterables at all must raise."""
    with pytest.raises(ValueError, match="Must pass non-zero number of levels/codes"):
        MultiIndex.from_product([])
def test_from_product_empty_one_level():
    """One empty iterable yields an empty level that keeps its name."""
    result = MultiIndex.from_product([[]], names=["A"])
    tm.assert_index_equal(result.levels[0], Index([], name="A"))
    assert result.names == ["A"]
@pytest.mark.parametrize(
    "first, second", [([], []), (["foo", "bar", "baz"], []), ([], ["a", "b", "c"])]
)
def test_from_product_empty_two_levels(first, second):
    """The product with any empty operand is empty, but levels are kept."""
    result = MultiIndex.from_product([first, second], names=["A", "B"])
    expected = MultiIndex(levels=[first, second], codes=[[], []], names=["A", "B"])
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("N", list(range(4)))
def test_from_product_empty_three_levels(N):
    """An empty operand empties the product regardless of other level sizes (GH12258)."""
    middle = list(range(N))
    result = MultiIndex.from_product([[], middle, []], names=["A", "B", "C"])
    expected = MultiIndex(
        levels=[[], middle, []], codes=[[], [], []], names=["A", "B", "C"]
    )
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
    "invalid_input", [1, [1], [1, 2], [[1], 2], "a", ["a"], ["a", "b"], [["a"], "b"]]
)
def test_from_product_invalid_input(invalid_input):
    """Inputs that are not a sequence of iterables are rejected."""
    msg = r"Input must be a list / sequence of iterables|Input must be list-like"
    with pytest.raises(TypeError, match=msg):
        MultiIndex.from_product(iterables=invalid_input)
def test_from_product_datetimeindex():
    """Datetime values appear as Timestamps in the materialized tuples."""
    days = date_range("2000-01-01", periods=2)
    mi = MultiIndex.from_product([[1, 2], days])
    # Expected row-major cartesian product as an object ndarray of tuples.
    expected = construct_1d_object_array_from_listlike(
        [(number, stamp) for number in [1, 2] for stamp in days]
    )
    tm.assert_numpy_array_equal(mi.values, expected)
def test_from_product_rangeindex():
    """RangeIndex survives factorization and is preserved in the levels."""
    rng = Index(range(5))
    mi = MultiIndex.from_product([rng, ["a", "b"]])
    tm.assert_index_equal(mi._levels[0], rng, exact=True)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize("f", [lambda x: x, lambda x: Series(x), lambda x: x.values])
def test_from_product_index_series_categorical(ordered, f):
    """Categorical dtype is preserved through from_product (GH13743)."""
    cat = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=ordered)
    expected = pd.CategoricalIndex(
        list("abcaab") + list("abcaab"), categories=list("bac"), ordered=ordered
    )
    result = MultiIndex.from_product([["foo", "bar"], f(cat)])
    tm.assert_index_equal(result.get_level_values(1), expected)
def test_from_product():
    """from_product enumerates the cartesian product in row-major order."""
    outer = ["foo", "bar", "buz"]
    inner = ["a", "b", "c"]
    names = ["first", "second"]
    result = MultiIndex.from_product([outer, inner], names=names)
    expected = MultiIndex.from_tuples(
        [(o, i) for o in outer for i in inner], names=names
    )
    tm.assert_index_equal(result, expected)
def test_from_product_iterator():
    """An iterator of iterables works; a non-iterable raises (GH 18434)."""
    outer = ["foo", "bar", "buz"]
    inner = ["a", "b", "c"]
    names = ["first", "second"]
    expected = MultiIndex.from_tuples(
        [(o, i) for o in outer for i in inner], names=names
    )
    result = MultiIndex.from_product(iter([outer, inner]), names=names)
    tm.assert_index_equal(result, expected)
    with pytest.raises(TypeError, match="Input must be a list / sequence of iterables."):
        MultiIndex.from_product(0)
@pytest.mark.parametrize(
    "a, b, expected_names",
    [
        (
            Series([1, 2, 3], name="foo"),
            Series(["a", "b"], name="bar"),
            ["foo", "bar"],
        ),
        (Series([1, 2, 3], name="foo"), ["a", "b"], ["foo", None]),
        ([1, 2, 3], ["a", "b"], None),
    ],
)
def test_from_product_infer_names(a, b, expected_names):
    """Level names are inferred from Series names, else left unset (GH27292)."""
    result = MultiIndex.from_product([a, b])
    expected = MultiIndex(
        levels=[[1, 2, 3], ["a", "b"]],
        codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
        names=expected_names,
    )
    tm.assert_index_equal(result, expected)
def test_from_product_respects_none_names():
    """Explicit names=None overrides the Series names instead of inheriting (GH27292)."""
    numbers = Series([1, 2, 3], name="foo")
    letters = Series(["a", "b"], name="bar")
    result = MultiIndex.from_product([numbers, letters], names=None)
    expected = MultiIndex(
        levels=[[1, 2, 3], ["a", "b"]],
        codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
        names=None,
    )
    tm.assert_index_equal(result, expected)
def test_from_product_readonly():
    """A read-only ndarray input is handled like a writable one (GH#15286)."""
    values = np.array(range(3))
    expected = MultiIndex.from_product([values, ["a", "b"]])
    values.setflags(write=False)
    result = MultiIndex.from_product([values, ["a", "b"]])
    tm.assert_index_equal(result, expected)
def test_create_index_existing_name(idx):
    """Index(existing_index) inherits the object's name unless one is given."""
    # GH11193, when an existing index is passed, and a new name is not
    # specified, the new index should inherit the previous object name
    index = idx
    index.names = ["foo", "bar"]
    result = Index(index)
    # NOTE(review): the expected value assumes Index() produces an
    # object-dtype Index of tuples here — this depends on pandas-version
    # constructor semantics; confirm against the version under test.
    expected = Index(
        Index(
            [
                ("foo", "one"),
                ("foo", "two"),
                ("bar", "one"),
                ("baz", "two"),
                ("qux", "one"),
                ("qux", "two"),
            ],
            dtype="object",
        )
    )
    tm.assert_index_equal(result, expected)
    # An explicit name overrides the inherited one.
    result = Index(index, name="A")
    expected = Index(
        Index(
            [
                ("foo", "one"),
                ("foo", "two"),
                ("bar", "one"),
                ("baz", "two"),
                ("qux", "one"),
                ("qux", "two"),
            ],
            dtype="object",
        ),
        name="A",
    )
    tm.assert_index_equal(result, expected)
# ----------------------------------------------------------------------------
# from_frame
# ----------------------------------------------------------------------------
def test_from_frame():
    """Each DataFrame column becomes a level; column labels become names (GH 22420)."""
    frame = pd.DataFrame(
        [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]], columns=["L1", "L2"]
    )
    expected = MultiIndex.from_tuples(
        [("a", "a"), ("a", "b"), ("b", "a"), ("b", "b")], names=["L1", "L2"]
    )
    tm.assert_index_equal(expected, MultiIndex.from_frame(frame))
@pytest.mark.parametrize(
    "non_frame",
    [
        Series([1, 2, 3, 4]),
        [1, 2, 3, 4],
        [[1, 2], [3, 4], [5, 6]],
        Index([1, 2, 3, 4]),
        np.array([[1, 2], [3, 4], [5, 6]]),
        27,
    ],
)
def test_from_frame_error(non_frame):
    # GH 22420: anything that is not a DataFrame must be rejected.
    msg = "Input must be a DataFrame"
    with pytest.raises(TypeError, match=msg):
        MultiIndex.from_frame(non_frame)
def test_from_frame_dtype_fidelity():
    # GH 22420: column dtypes (tz-aware datetimes, ints, ordered
    # categoricals, objects) must survive the trip into level dtypes.
    dates = date_range("19910905", periods=6, tz="US/Eastern")
    cats = pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True)
    ints = [1, 1, 1, 2, 2, 2]
    strs = ["x", "x", "y", "z", "x", "y"]

    frame = pd.DataFrame({"dates": dates, "a": ints, "b": cats, "c": strs})
    original_dtypes = frame.dtypes.to_dict()

    expected_mi = MultiIndex.from_arrays(
        [dates, ints, cats, strs], names=["dates", "a", "b", "c"]
    )
    mi = MultiIndex.from_frame(frame)
    mi_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)}

    tm.assert_index_equal(expected_mi, mi)
    assert original_dtypes == mi_dtypes
@pytest.mark.parametrize(
    "names_in,names_out", [(None, [("L1", "x"), ("L2", "y")]), (["x", "y"], ["x", "y"])]
)
def test_from_frame_valid_names(names_in, names_out):
    # GH 22420: explicit names win; otherwise the frame's MultiIndex
    # column labels become the level names.
    frame = pd.DataFrame(
        [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]],
        columns=MultiIndex.from_tuples([("L1", "x"), ("L2", "y")]),
    )
    result = MultiIndex.from_frame(frame, names=names_in)
    assert result.names == names_out
@pytest.mark.parametrize(
    "names,expected_error_msg",
    [
        ("bad_input", "Names should be list-like for a MultiIndex"),
        (["a", "b", "c"], "Length of names must match number of levels in MultiIndex"),
    ],
)
def test_from_frame_invalid_names(names, expected_error_msg):
    # GH 22420: names that are not list-like, or whose length does not
    # match the number of levels, must raise ValueError.
    frame = pd.DataFrame(
        [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]],
        columns=MultiIndex.from_tuples([("L1", "x"), ("L2", "y")]),
    )
    with pytest.raises(ValueError, match=expected_error_msg):
        MultiIndex.from_frame(frame, names=names)
def test_index_equal_empty_iterable():
    # GH#16844: an empty MultiIndex built directly must compare equal to
    # one built via from_arrays with empty inputs.
    direct = MultiIndex(levels=[[], []], codes=[[], []], names=["a", "b"])
    via_arrays = MultiIndex.from_arrays(arrays=[[], []], names=["a", "b"])
    tm.assert_index_equal(direct, via_arrays)
def test_raise_invalid_sortorder():
    # GH#28518: the constructor must reject a sortorder claim that exceeds
    # the actual lexsort depth of the supplied codes.
    levels = [[0, 1], [0, 1, 2]]

    # Fully lexsorted codes: claiming sortorder=2 is legitimate.
    MultiIndex(
        levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], sortorder=2
    )

    with pytest.raises(ValueError, match=r".* sortorder 2 with lexsort_depth 1.*"):
        # Second level unsorted within the first: true depth is only 1.
        MultiIndex(
            levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=2
        )

    with pytest.raises(ValueError, match=r".* sortorder 1 with lexsort_depth 0.*"):
        # First level itself unsorted: true depth is 0.
        MultiIndex(
            levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=1
        )
def test_datetimeindex():
    # tz-aware inputs must keep their tz on the resulting levels.
    tz_idx1 = pd.DatetimeIndex(
        ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"] * 2, tz="Asia/Tokyo"
    )
    tz_idx2 = date_range("2010/01/01", periods=6, freq="M", tz="US/Eastern")
    idx = MultiIndex.from_arrays([tz_idx1, tz_idx2])

    expected1 = pd.DatetimeIndex(
        ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"], tz="Asia/Tokyo"
    )
    tm.assert_index_equal(idx.levels[0], expected1)
    tm.assert_index_equal(idx.levels[1], tz_idx2)

    # from datetime combos
    # GH 7888: every mix of datetime-like scalars infers DatetimeIndex levels.
    date1 = np.datetime64("today")
    date2 = datetime.today()
    date3 = Timestamp.today()
    for d1, d2 in itertools.product([date1, date2, date3], repeat=2):
        index = MultiIndex.from_product([[d1], [d2]])
        assert isinstance(index.levels[0], pd.DatetimeIndex)
        assert isinstance(index.levels[1], pd.DatetimeIndex)

    # but NOT date objects, matching Index behavior
    date4 = date.today()
    index = MultiIndex.from_product([[date4], [date2]])
    assert not isinstance(index.levels[0], pd.DatetimeIndex)
    assert isinstance(index.levels[1], pd.DatetimeIndex)
def test_constructor_with_tz():
    index = pd.DatetimeIndex(
        ["2013/01/01 09:00", "2013/01/02 09:00"], name="dt1", tz="US/Pacific"
    )
    columns = pd.DatetimeIndex(
        ["2014/01/01 09:00", "2014/01/02 09:00"], name="dt2", tz="Asia/Tokyo"
    )

    # Both the raw tz-aware indexes and Series wrapping them must yield a
    # MultiIndex that preserves names and tz-aware levels.
    for arrays in ([index, columns], [Series(index), Series(columns)]):
        result = MultiIndex.from_arrays(arrays)
        assert result.names == ["dt1", "dt2"]
        tm.assert_index_equal(result.levels[0], index)
        tm.assert_index_equal(result.levels[1], columns)
def test_multiindex_inference_consistency():
    # Level dtype inference must match the base Index class:
    # datetime.date objects stay object dtype, never datetime64.
    today = date.today()
    arr = [today, today]

    assert Index(arr).dtype == object

    for mi in (
        MultiIndex.from_arrays([arr]),
        MultiIndex.from_product([arr]),
        MultiIndex.from_tuples([(value,) for value in arr]),
    ):
        assert mi.levels[0].dtype == object
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Identity service."""
from keystone.common import dependency
from keystone.common import logging
from keystone.common import manager
from keystone import config
from keystone import exception
CONF = config.CONF
LOG = logging.getLogger(__name__)
def filter_user(user_ref):
    """Filter out private items in a user dict.

    'password', 'tenants' and 'groups' are never returned.

    :returns: user_ref
    """
    if not user_ref:
        # None / empty ref: nothing to scrub, hand it back unchanged.
        return user_ref

    filtered = user_ref.copy()
    for secret in ('password', 'tenants', 'groups', 'domains'):
        filtered.pop(secret, None)
    # The copy above is shallow, so 'extra' is shared with the caller's
    # dict; scrub its sensitive keys in place, as before.
    try:
        extra = filtered['extra']
    except KeyError:
        pass
    else:
        extra.pop('password', None)
        extra.pop('tenants', None)
    return filtered
@dependency.provider('identity_api')
class Manager(manager.Manager):
    """Default pivot point for the Identity backend.

    See :mod:`keystone.common.manager.Manager` for more details on how this
    dynamically calls the backend.

    """

    def __init__(self):
        super(Manager, self).__init__(CONF.identity.driver)

    def authenticate(self, context, user_id=None,
                     tenant_id=None, password=None):
        """Authenticate a given user and password and
        authorize them for a tenant.
        :returns: (user_ref, tenant_ref, metadata_ref)
        :raises: AssertionError
        """
        user_ref = self.driver.authenticate_user(user_id, password)
        return self.driver.authorize_for_project(user_ref, tenant_id)

    def create_user(self, context, user_id, user_ref):
        # Default 'enabled' to True so a new user can log in immediately.
        user = dict(user_ref)
        user.setdefault('enabled', True)
        return self.driver.create_user(user_id, user)

    def create_group(self, context, group_id, group_ref):
        # Backends expect 'description' to always be present.
        group = dict(group_ref)
        group.setdefault('description', '')
        return self.driver.create_group(group_id, group)

    def create_project(self, context, tenant_id, tenant_ref):
        # Fill in the optional fields backends expect to exist.
        tenant = dict(tenant_ref)
        tenant.setdefault('enabled', True)
        tenant.setdefault('description', '')
        return self.driver.create_project(tenant_id, tenant)
class Driver(object):
    """Interface description for an Identity driver.

    Concrete backends implement these methods; the few methods below that
    contain logic are default implementations shared by all backends.
    """

    def authenticate_user(self, user_id, password):
        """Authenticate a given user and password.
        :returns: user_ref
        :raises: AssertionError
        """
        raise exception.NotImplemented()

    def authorize_for_project(self, tenant_id, user_ref):
        """Authenticate a given user for a tenant.

        NOTE(review): Manager.authenticate calls this as
        ``authorize_for_project(user_ref, tenant_id)`` — the parameter order
        in this signature looks swapped relative to that call site; confirm
        against the concrete drivers before relying on either order.

        :returns: (user_ref, tenant_ref, metadata_ref)
        :raises: AssertionError
        """
        raise exception.NotImplemented()

    def get_project_by_name(self, tenant_name, domain_id):
        """Get a tenant by name.
        :returns: tenant_ref
        :raises: keystone.exception.ProjectNotFound
        """
        raise exception.NotImplemented()

    def get_user_by_name(self, user_name, domain_id):
        """Get a user by name.
        :returns: user_ref
        :raises: keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()

    def add_user_to_project(self, tenant_id, user_id):
        """Add user to a tenant by creating a default role relationship.
        :raises: keystone.exception.ProjectNotFound,
                 keystone.exception.UserNotFound
        """
        # Membership is modelled as a grant of the configured member role.
        self.add_role_to_user_and_project(user_id,
                                          tenant_id,
                                          config.CONF.member_role_id)

    def remove_user_from_project(self, tenant_id, user_id):
        """Remove user from a tenant

        :raises: keystone.exception.ProjectNotFound,
                 keystone.exception.UserNotFound
        """
        roles = self.get_roles_for_user_and_project(user_id, tenant_id)
        if not roles:
            # No role grants at all means the user is not in this tenant.
            raise exception.NotFound(tenant_id)
        for role_id in roles:
            self.remove_role_from_user_and_project(user_id, tenant_id, role_id)

    def get_project_users(self, tenant_id):
        """Lists all users with a relationship to the specified project.
        :returns: a list of user_refs or an empty set.
        :raises: keystone.exception.ProjectNotFound
        """
        raise exception.NotImplemented()

    def get_projects_for_user(self, user_id):
        """Get the tenants associated with a given user.
        :returns: a list of tenant_id's.
        :raises: keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()

    def get_roles_for_user_and_project(self, user_id, tenant_id):
        """Get the roles associated with a user within given tenant.
        :returns: a list of role ids.
        :raises: keystone.exception.UserNotFound,
                 keystone.exception.ProjectNotFound
        """
        raise exception.NotImplemented()

    def get_roles_for_user_and_domain(self, user_id, domain_id):
        """Get the roles associated with a user within given domain.
        :returns: a list of role ids.
        :raises: keystone.exception.UserNotFound,
                 keystone.exception.DomainNotFound
        """

        def update_metadata_for_group_domain_roles(self, metadata_ref,
                                                   user_id, domain_id):
            # Merge in role grants from every group the user belongs to.
            group_refs = self.list_groups_for_user(user_id=user_id)
            for x in group_refs:
                try:
                    metadata_ref.update(
                        self.get_metadata(group_id=x['id'],
                                          domain_id=domain_id))
                except exception.MetadataNotFound:
                    # no group grant, skip
                    pass

        def update_metadata_for_user_domain_roles(self, metadata_ref,
                                                  user_id, domain_id):
            # Merge in role grants assigned to the user directly.
            try:
                metadata_ref.update(self.get_metadata(user_id=user_id,
                                                      domain_id=domain_id))
            except exception.MetadataNotFound:
                pass

        # Validate that both the user and the domain exist; these raise
        # UserNotFound / DomainNotFound respectively.
        self.get_user(user_id)
        self.get_domain(domain_id)
        metadata_ref = {}
        update_metadata_for_user_domain_roles(self, metadata_ref,
                                              user_id, domain_id)
        update_metadata_for_group_domain_roles(self, metadata_ref,
                                               user_id, domain_id)
        # De-duplicate role ids gathered from user and group grants.
        return list(set(metadata_ref.get('roles', [])))

    def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
        """Add a role to a user within given tenant.
        :raises: keystone.exception.UserNotFound,
                 keystone.exception.ProjectNotFound,
                 keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()

    def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
        """Remove a role from a user within given tenant.
        :raises: keystone.exception.UserNotFound,
                 keystone.exception.ProjectNotFound,
                 keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()

    # metadata crud

    def get_metadata(self, user_id=None, tenant_id=None,
                     domain_id=None, group_id=None):
        """Gets the metadata for the specified user/group on project/domain.
        :raises: keystone.exception.MetadataNotFound
        :returns: metadata
        """
        raise exception.NotImplemented()

    def create_metadata(self, user_id, tenant_id, metadata,
                        domain_id=None, group_id=None):
        """Creates the metadata for the specified user/group on project/domain.
        :returns: metadata created
        """
        raise exception.NotImplemented()

    def update_metadata(self, user_id, tenant_id, metadata,
                        domain_id=None, group_id=None):
        """Updates the metadata for the specified user/group on project/domain.
        :returns: metadata updated
        """
        raise exception.NotImplemented()

    # domain crud

    def create_domain(self, domain_id, domain):
        """Creates a new domain.
        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def list_domains(self):
        """List all domains in the system.
        :returns: a list of domain_refs or an empty list.
        """
        raise exception.NotImplemented()

    def get_domain(self, domain_id):
        """Get a domain by ID.
        :returns: domain_ref
        :raises: keystone.exception.DomainNotFound
        """
        raise exception.NotImplemented()

    def get_domain_by_name(self, domain_name):
        """Get a domain by name.
        :returns: domain_ref
        :raises: keystone.exception.DomainNotFound
        """
        raise exception.NotImplemented()

    def update_domain(self, domain_id, domain):
        """Updates an existing domain.
        :raises: keystone.exception.DomainNotFound,
                 keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def delete_domain(self, domain_id):
        """Deletes an existing domain.
        :raises: keystone.exception.DomainNotFound
        """
        raise exception.NotImplemented()

    # project crud

    def create_project(self, project_id, project):
        """Creates a new project.
        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def list_projects(self):
        """List all projects in the system.
        :returns: a list of project_refs or an empty list.
        """
        raise exception.NotImplemented()

    def list_user_projects(self, user_id):
        """List all projects associated with a given user.
        :returns: a list of project_refs or an empty list.
        """
        raise exception.NotImplemented()

    def get_project(self, project_id):
        """Get a project by ID.
        :returns: project_ref
        :raises: keystone.exception.ProjectNotFound
        """
        raise exception.NotImplemented()

    def update_project(self, project_id, project):
        """Updates an existing project.
        :raises: keystone.exception.ProjectNotFound,
                 keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def delete_project(self, project_id):
        """Deletes an existing project.
        :raises: keystone.exception.ProjectNotFound
        """
        raise exception.NotImplemented()

    # user crud

    def create_user(self, user_id, user):
        """Creates a new user.
        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def list_users(self):
        """List all users in the system.
        :returns: a list of user_refs or an empty list.
        """
        raise exception.NotImplemented()

    def list_users_in_group(self, group_id):
        """List all users in a group.
        :returns: a list of user_refs or an empty list.
        """
        raise exception.NotImplemented()

    def get_user(self, user_id):
        """Get a user by ID.
        :returns: user_ref
        :raises: keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()

    def update_user(self, user_id, user):
        """Updates an existing user.
        :raises: keystone.exception.UserNotFound,
                 keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def add_user_to_group(self, user_id, group_id):
        """Adds a user to a group.
        :raises: keystone.exception.UserNotFound,
                 keystone.exception.GroupNotFound
        """
        raise exception.NotImplemented()

    def check_user_in_group(self, user_id, group_id):
        """Checks if a user is a member of a group.
        :raises: keystone.exception.UserNotFound,
                 keystone.exception.GroupNotFound
        """
        raise exception.NotImplemented()

    def remove_user_from_group(self, user_id, group_id):
        """Removes a user from a group.
        :raises: keystone.exception.NotFound
        """
        raise exception.NotImplemented()

    def delete_user(self, user_id):
        """Deletes an existing user.
        :raises: keystone.exception.UserNotFound
        """
        raise exception.NotImplemented()

    # role crud

    def create_role(self, role_id, role):
        """Creates a new role.
        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def list_roles(self):
        """List all roles in the system.
        :returns: a list of role_refs or an empty list.
        """
        raise exception.NotImplemented()

    def get_role(self, role_id):
        """Get a role by ID.
        :returns: role_ref
        :raises: keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()

    def update_role(self, role_id, role):
        """Updates an existing role.
        :raises: keystone.exception.RoleNotFound,
                 keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def delete_role(self, role_id):
        """Deletes an existing role.
        :raises: keystone.exception.RoleNotFound
        """
        raise exception.NotImplemented()

    # group crud

    def create_group(self, group_id, group):
        """Creates a new group.
        :raises: keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def list_groups(self):
        """List all groups in the system.
        :returns: a list of group_refs or an empty list.
        """
        raise exception.NotImplemented()

    def list_groups_for_user(self, user_id):
        """List all groups a user is in
        :returns: a list of group_refs or an empty list.
        """
        raise exception.NotImplemented()

    def get_group(self, group_id):
        """Get a group by ID.
        :returns: group_ref
        :raises: keystone.exception.GroupNotFound
        """
        raise exception.NotImplemented()

    def update_group(self, group_id, group):
        """Updates an existing group.
        :raises: keystone.exception.GroupNotFound,
                 keystone.exception.Conflict
        """
        raise exception.NotImplemented()

    def delete_group(self, group_id):
        """Deletes an existing group.
        :raises: keystone.exception.GroupNotFound
        """
        raise exception.NotImplemented()
| |
# TrumpScript Tokenizer
# 1/16/2016
import os
import random
import re
import sys
from src.trumpscript.allowed_words import ALLOWED
from src.trumpscript.constants import *
from src.trumpscript.disallowed_words import DISALLOWED
class Tokenizer:
    """Converts TrumpScript source text into a flat list of token mappings.

    Each token is a dict with keys 'type', 'value' and 'line' (see toke()).
    """

    # Single-character operators and punctuation -> token type.
    _SYMBOL_TOKENS = {
        "+": T_Plus,
        "-": T_Minus,
        "*": T_Times,
        "/": T_Over,
        "<": T_Less,
        ">": T_Greater,
        ",": T_LParen,
        ";": T_RParen,
        ":": T_LBrace,
        "!": T_RBrace,
        "?": T_Question,
    }

    # Reserved words (and their synonyms) -> token type.
    _KEYWORD_TOKENS = {
        "is": T_Is, "are": T_Is,
        "if": T_If,
        "else": T_Else, "otherwise": T_Else,
        "true": T_True, "facts": T_True, "truth": T_True, "fact": T_True,
        "false": T_False, "lies": T_False, "nonsense": T_False, "lie": T_False,
        "not": T_Not,
        "and": T_And,
        "or": T_Or,
        "make": T_Make,
        "tell": T_Print, "say": T_Print,
        # English form of the operators.
        "plus": T_Plus,
        "minus": T_Minus,
        "times": T_Times,
        "over": T_Over,
        "less": T_Less, "fewer": T_Less, "smaller": T_Less,
        "more": T_Greater, "greater": T_Greater, "larger": T_Greater,
    }

    @staticmethod
    def toke(token_type, token_value, line) -> dict:
        """
        Create a mapping for the given token
        :param token_type: the type of the token
        :param token_value: The token's value
        :param line: The line number for this token
        :return: A mapping of the properties to their values
        """
        return {"type": token_type, "value": token_value, "line": line}

    @staticmethod
    def tokenize(filename):
        """
        Tokenize the given file
        :param filename:
        :return: The tokens in the file
        """
        Tokenizer._no_pc()
        tokens = Tokenizer._first_pass(filename)
        tokens = Tokenizer._second_pass(tokens)
        return tokens

    @staticmethod
    def _no_pc() -> None:
        """
        Make sure the currently-running OS is not Windows
        :return:
        """
        if os.name == 'nt':
            Tokenizer._error(0, 'os')

    @staticmethod
    def _first_pass(filename) -> list:
        """
        Tokenize the given file
        :param filename: the file to tokenize
        :return: The tokens in the file
        """
        end_word = re.compile(r"[:!,;\.\s\?]")
        with open(filename, 'r') as src:
            data = src.read().lower()
        tokens = []
        line = 1
        i = 0
        while i < len(data):
            c = data[i]
            if c.isspace() or c == ".":
                # Whitespace and periods just separate tokens; track
                # newlines so reported line numbers stay accurate.
                if c == "\n":
                    line += 1
            # Operators (special symbol form), punctuation, closures.
            elif c in Tokenizer._SYMBOL_TOKENS:
                tokens.append(Tokenizer.toke(Tokenizer._SYMBOL_TOKENS[c], None, line))
            # Integers (no floating point).
            elif c.isdigit():
                num = ""
                # BUG FIX: the original indexed data[i] without a bounds
                # check and raised IndexError when the file ended in a digit.
                while i < len(data) and data[i].isdigit():
                    num += data[i]
                    i += 1
                tokens.append(Tokenizer.toke(T_Num, int(num), line))
                i -= 1  # Read one char too many, readjust.
            # Words and keywords (apostrophes are allowed inside words).
            elif c.isalpha():
                word = ""
                while i < len(data) and (data[i].isalpha() or data[i] == "'"):
                    word += data[i]
                    i += 1
                if i < len(data) and not end_word.match(data[i]):
                    # A word must end at punctuation or whitespace.
                    Tokenizer._error(line, 'nonword')
                i -= 1  # Read one char too many, readjust.
                if word in Tokenizer._KEYWORD_TOKENS:
                    tokens.append(
                        Tokenizer.toke(Tokenizer._KEYWORD_TOKENS[word], None, line))
                else:
                    # Just a word; interpreting it is the parser's job.
                    tokens.append(Tokenizer.toke(T_Word, word, line))
            # Strings.
            elif c == '"':
                i += 1
                quote = ""
                # BUG FIX: check bounds before indexing; the original could
                # raise IndexError on a quote left open at end-of-file.
                while i < len(data) and data[i] != '"':
                    quote += data[i]
                    i += 1
                if i >= len(data):
                    Tokenizer._error(line, 'unterminated_quote')
                tokens.append(Tokenizer.toke(T_Quote, quote, line))
            else:
                # Any other character is illegal.
                Tokenizer._error(line, 'nonword')
            i += 1
        return tokens

    @staticmethod
    def _second_pass(tokens):
        """
        Makes the second pass for tokenization purposes
        :param tokens: The tokens on which we're taking a second pass
        :return: The tokens after the second pass
        """
        # Make sure we do "America is great"
        if not Tokenizer._check_for_freedom(tokens):
            Tokenizer._error(tokens[-1]['line'], 'freedom')
        # Convert "as long as" to while
        tokens = Tokenizer._combine_whiles(tokens)
        # Ensure words are English
        Tokenizer._ensure_freedom(tokens)
        # Ensure all numbers are greater than 1 million, and that 4.5B is
        # converted to 10B
        Tokenizer._fudge_the_numbers(tokens)
        return tokens

    @staticmethod
    def _fudge_the_numbers(tokens) -> None:
        """
        Make sure all numbers have values in excess of 1M, and convert 4.5B
        to 10B if we encounter it
        :param tokens: The tokens to enforce these rules on
        :return: None, throws an error if rules are violated. Also mutates
                 tokens in-place
        """
        million = 10 ** 6
        forbes_worth = 4.5 * 10 ** 9
        real_worth = 10 * 10 ** 9
        for token in tokens:
            if token['type'] == T_Num:
                value = token['value']
                if value < million:
                    Tokenizer._error(token['line'], 'too_small')
                if value == forbes_worth:
                    token['value'] = real_worth

    @staticmethod
    def _is_word_allowed(word) -> bool:
        """
        Check to see if a given word is allowed
        :param word: Word to check and see if it's allowed
        :return: true if the word is valid, false otherwise
        """
        # First, make sure we haven't explicitly banned the word
        if word in DISALLOWED:
            return False
        # Now see if it's simple English, or some variation on huuuuge
        return word in ALLOWED or re.match('^[Hh][Uu]+[Gg][Ee]$', word) is not None

    @staticmethod
    def _ensure_freedom(tokens) -> None:
        """
        Make sure all the variables are in our corpus of allowed words
        :param tokens: the tokens to filter
        :return: None, throws error upon infraction of rule
        """
        for token in tokens:
            if token['type'] == T_Word and not Tokenizer._is_word_allowed(token['value']):
                # Show the offending word before bailing out.
                print(token['value'] + "?")
                Tokenizer._error(token['line'], 'nonword')

    @staticmethod
    def _combine_whiles(tokens) -> list:
        """
        Combine the words "as long as" to make a while token
        :param tokens: The tokens to combine on
        :return: The tokens with "as long as" collapsed into T_While
        """
        def is_word(tok, value):
            # True when tok is a plain word token with the given text.
            return tok['type'] == T_Word and tok['value'] == value

        combine_at = []
        for idx in range(len(tokens)):
            if is_word(tokens[idx], 'as') and idx + 2 < len(tokens):
                if is_word(tokens[idx + 1], 'long') and is_word(tokens[idx + 2], 'as'):
                    combine_at.append(idx)
        # Cover the degenerate case like "as long as long as"
        non_overlapping = []
        for value in combine_at:
            if value - 2 not in non_overlapping:
                non_overlapping.append(value)
        # Now combine the tokens and return; iterate in reverse so earlier
        # indices stay valid while we splice.
        for idx in reversed(non_overlapping):
            line = tokens[idx]['line']
            for _ in range(3):
                tokens.pop(idx)
            tokens.insert(idx, Tokenizer.toke(T_While, None, line))
        return tokens

    @staticmethod
    def _check_for_freedom(tokens) -> bool:
        """
        Make sure that in the tokens passed, the last three are tokens
        representing the phrase "America is great" (and strip them off)
        :param tokens: The tokens to verify
        :return: True if the check holds, false otherwise
        """
        last_three = tokens[-3:]
        if len(last_three) != 3:
            return False
        # Tokens for "America is great"
        expected = [Tokenizer.toke(T_Word, 'america', 0),
                    Tokenizer.toke(T_Is, None, 0),
                    Tokenizer.toke(T_Word, 'great', 0)]
        # Make sure our types and values match each of the expected
        for idx in range(3):
            if expected[idx]['type'] != last_three[idx]['type'] or \
                    expected[idx]['value'] != last_three[idx]['value']:
                return False
        for _ in range(3):
            tokens.pop()
        return True

    @staticmethod
    def _error(line, message_code) -> None:
        """
        Prints the error message and then aborts the program
        :param line: The line the error occurred on
        :param message_code: String code associated with the error message
        :return: None
        """
        print("Parsing error:")
        print("What are you doing on line " + str(line) + "?")
        if message_code in ERROR_CODES:
            print(random.choice(ERROR_CODES[message_code]))
        else:
            print(random.choice(ERROR_CODES['default']))
        sys.exit(2)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModelCLI tool.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import pickle
import shutil
import sys
from absl.testing import parameterized
import numpy as np
from six import StringIO
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import save
from tensorflow.python.tools import saved_model_cli
from tensorflow.python.training.tracking import tracking
SAVED_MODEL_PATH = ('cc/saved_model/testdata/half_plus_two/00000123')
@contextlib.contextmanager
def captured_output():
  """Temporarily redirect sys.stdout/sys.stderr into StringIO buffers.

  Yields the (stdout, stderr) buffers; the real streams are restored on
  exit even if the body raises.
  """
  out_buf, err_buf = StringIO(), StringIO()
  saved_streams = sys.stdout, sys.stderr
  sys.stdout, sys.stderr = out_buf, err_buf
  try:
    yield out_buf, err_buf
  finally:
    sys.stdout, sys.stderr = saved_streams
class SavedModelCLITestCase(test.TestCase, parameterized.TestCase):
def testShowCommandAll(self):
  """`show --all` on the half_plus_two fixture prints every SignatureDef."""
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  self.parser = saved_model_cli.create_parser()
  args = self.parser.parse_args(['show', '--dir', base_path, '--all'])
  with captured_output() as (out, err):
    saved_model_cli.show(args)
  output = out.getvalue().strip()
  # Expected listing of all six SignatureDefs in the fixture model.
  # NOTE(review): the exact indentation/blank lines inside this literal must
  # mirror saved_model_cli's output format — confirm against a real run.
  # pylint: disable=line-too-long
  exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:

signature_def['classify_x2_to_y3']:
  The given SavedModel SignatureDef contains the following input(s):
    inputs['inputs'] tensor_info:
        dtype: DT_FLOAT
        shape: (-1, 1)
        name: x2:0
  The given SavedModel SignatureDef contains the following output(s):
    outputs['scores'] tensor_info:
        dtype: DT_FLOAT
        shape: (-1, 1)
        name: y3:0
  Method name is: tensorflow/serving/classify

signature_def['classify_x_to_y']:
  The given SavedModel SignatureDef contains the following input(s):
    inputs['inputs'] tensor_info:
        dtype: DT_STRING
        shape: unknown_rank
        name: tf_example:0
  The given SavedModel SignatureDef contains the following output(s):
    outputs['scores'] tensor_info:
        dtype: DT_FLOAT
        shape: (-1, 1)
        name: y:0
  Method name is: tensorflow/serving/classify

signature_def['regress_x2_to_y3']:
  The given SavedModel SignatureDef contains the following input(s):
    inputs['inputs'] tensor_info:
        dtype: DT_FLOAT
        shape: (-1, 1)
        name: x2:0
  The given SavedModel SignatureDef contains the following output(s):
    outputs['outputs'] tensor_info:
        dtype: DT_FLOAT
        shape: (-1, 1)
        name: y3:0
  Method name is: tensorflow/serving/regress

signature_def['regress_x_to_y']:
  The given SavedModel SignatureDef contains the following input(s):
    inputs['inputs'] tensor_info:
        dtype: DT_STRING
        shape: unknown_rank
        name: tf_example:0
  The given SavedModel SignatureDef contains the following output(s):
    outputs['outputs'] tensor_info:
        dtype: DT_FLOAT
        shape: (-1, 1)
        name: y:0
  Method name is: tensorflow/serving/regress

signature_def['regress_x_to_y2']:
  The given SavedModel SignatureDef contains the following input(s):
    inputs['inputs'] tensor_info:
        dtype: DT_STRING
        shape: unknown_rank
        name: tf_example:0
  The given SavedModel SignatureDef contains the following output(s):
    outputs['outputs'] tensor_info:
        dtype: DT_FLOAT
        shape: (-1, 1)
        name: y2:0
  Method name is: tensorflow/serving/regress

signature_def['serving_default']:
  The given SavedModel SignatureDef contains the following input(s):
    inputs['x'] tensor_info:
        dtype: DT_FLOAT
        shape: (-1, 1)
        name: x:0
  The given SavedModel SignatureDef contains the following output(s):
    outputs['y'] tensor_info:
        dtype: DT_FLOAT
        shape: (-1, 1)
        name: y:0
  Method name is: tensorflow/serving/predict"""
  # pylint: enable=line-too-long
  self.maxDiff = None  # Produce a useful error msg if the comparison fails
  self.assertMultiLineEqual(output, exp_out)
  self.assertEqual(err.getvalue().strip(), '')
def testShowAllWithFunctions(self):
  """`show --all` also lists polymorphic tf.function signatures."""

  class DummyModel(tracking.AutoTrackable):
    """Model with callable polymorphic functions specified."""

    @def_function.function
    def func1(self, a, b, c):
      if c:
        return a + b
      else:
        return a * b

    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32)
    ])
    def func2(self, x):
      return x + 2

    @def_function.function
    def __call__(self, y, c=7):
      return y + 2 * c

  saved_model_dir = os.path.join(test.get_temp_dir(), 'dummy_model')
  dummy_model = DummyModel()
  # Call with specific values to create new polymorphic function traces.
  dummy_model.func1(
      constant_op.constant(5), constant_op.constant(9), True)
  dummy_model(constant_op.constant(5))
  save.save(dummy_model, saved_model_dir)
  self.parser = saved_model_cli.create_parser()
  args = self.parser.parse_args(['show', '--dir', saved_model_dir, '--all'])
  with captured_output() as (out, err):
    saved_model_cli.show(args)
  output = out.getvalue().strip()
  # NOTE(review): the exact indentation/blank lines inside this literal must
  # mirror saved_model_cli's output format — confirm against a real run.
  exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:

signature_def['__saved_model_init_op']:
  The given SavedModel SignatureDef contains the following input(s):
  The given SavedModel SignatureDef contains the following output(s):
    outputs['__saved_model_init_op'] tensor_info:
        dtype: DT_INVALID
        shape: unknown_rank
        name: NoOp
  Method name is:

signature_def['serving_default']:
  The given SavedModel SignatureDef contains the following input(s):
    inputs['x'] tensor_info:
        dtype: DT_FLOAT
        shape: (2, 2)
        name: serving_default_x:0
  The given SavedModel SignatureDef contains the following output(s):
    outputs['output_0'] tensor_info:
        dtype: DT_FLOAT
        shape: (2, 2)
        name: PartitionedCall:0
  Method name is: tensorflow/serving/predict

Defined Functions:
  Function Name: '__call__'
    Option #1
      Callable with:
        Argument #1
          y: TensorSpec(shape=(), dtype=tf.int32, name='y')
        Argument #2
          DType: int
          Value: 7
  Function Name: 'func1'
    Option #1
      Callable with:
        Argument #1
          a: TensorSpec(shape=(), dtype=tf.int32, name='a')
        Argument #2
          b: TensorSpec(shape=(), dtype=tf.int32, name='b')
        Argument #3
          DType: bool
          Value: True
  Function Name: 'func2'
    Option #1
      Callable with:
        Argument #1
          x: TensorSpec(shape=(2, 2), dtype=tf.float32, name='x')
""".strip()  # pylint: enable=line-too-long
  self.maxDiff = None  # Produce a useful error msg if the comparison fails
  self.assertMultiLineEqual(output, exp_out)
  self.assertEqual(err.getvalue().strip(), '')
def testShowAllWithPureConcreteFunction(self):
  """`show --all` lists a saved pure ConcreteFunction under Defined Functions."""

  class DummyModel(tracking.AutoTrackable):
    """Model with a callable concrete function."""

    def __init__(self):
      # Trace the function up-front so that only the concrete
      # (non-polymorphic) function is attached to the object being saved.
      function = def_function.function(
          self.multiply,
          input_signature=[
              tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
              tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)
          ])
      self.pure_concrete_function = function.get_concrete_function()
      super(DummyModel, self).__init__()

    def multiply(self, a, b):
      return a * b

  saved_model_dir = os.path.join(test.get_temp_dir(), 'dummy_model')
  dummy_model = DummyModel()
  save.save(dummy_model, saved_model_dir)
  self.parser = saved_model_cli.create_parser()
  args = self.parser.parse_args(['show', '--dir', saved_model_dir, '--all'])
  with captured_output() as (out, err):
    saved_model_cli.show(args)
  output = out.getvalue().strip()
  # NOTE(review): expected CLI output below; per-line leading whitespace may
  # have been lost in transit — confirm against actual `show --all` output.
  exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['__saved_model_init_op']:
The given SavedModel SignatureDef contains the following input(s):
The given SavedModel SignatureDef contains the following output(s):
outputs['__saved_model_init_op'] tensor_info:
dtype: DT_INVALID
shape: unknown_rank
name: NoOp
Method name is:
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['a'] tensor_info:
dtype: DT_FLOAT
shape: ()
name: serving_default_a:0
inputs['b'] tensor_info:
dtype: DT_FLOAT
shape: ()
name: serving_default_b:0
The given SavedModel SignatureDef contains the following output(s):
outputs['output_0'] tensor_info:
dtype: DT_FLOAT
shape: ()
name: PartitionedCall:0
Method name is: tensorflow/serving/predict
Defined Functions:
Function Name: 'pure_concrete_function'
Option #1
Callable with:
Argument #1
a: TensorSpec(shape=(), dtype=tf.float32, name='a')
Argument #2
b: TensorSpec(shape=(), dtype=tf.float32, name='b')
""".strip()  # pylint: enable=line-too-long
  self.maxDiff = None  # Produce a useful error msg if the comparison fails
  self.assertMultiLineEqual(output, exp_out)
  self.assertEqual(err.getvalue().strip(), '')
def testShowCommandTags(self):
  """`show --dir` with no tag-set lists the model's available tag-sets."""
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  self.parser = saved_model_cli.create_parser()
  parsed = self.parser.parse_args(['show', '--dir', model_dir])
  with captured_output() as (stdout, stderr):
    saved_model_cli.show(parsed)
  expected = 'The given SavedModel contains the following tag-sets:\n\'serve\''
  self.assertMultiLineEqual(stdout.getvalue().strip(), expected)
  self.assertEqual(stderr.getvalue().strip(), '')
def testShowCommandSignature(self):
  """`show --tag_set` lists every SignatureDef key in that tag-set."""
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  self.parser = saved_model_cli.create_parser()
  parsed = self.parser.parse_args(
      ['show', '--dir', model_dir, '--tag_set', 'serve'])
  with captured_output() as (stdout, stderr):
    saved_model_cli.show(parsed)
  exp_header = ('The given SavedModel MetaGraphDef contains SignatureDefs '
                'with the following keys:')
  exp_start = 'SignatureDef key: '
  exp_keys = [
      '"classify_x2_to_y3"', '"classify_x_to_y"', '"regress_x2_to_y3"',
      '"regress_x_to_y"', '"regress_x_to_y2"', '"serving_default"'
  ]
  expected_lines = [exp_header]
  for exp_key in exp_keys:
    expected_lines.append(exp_start + exp_key)
  # Order of signatures does not matter
  self.assertMultiLineEqual(stdout.getvalue().strip(),
                            '\n'.join(expected_lines))
  self.assertEqual(stderr.getvalue().strip(), '')
def testShowCommandErrorNoTagSet(self):
  """`show` with a nonexistent tag-set raises RuntimeError."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  parsed = self.parser.parse_args(
      ['show', '--dir', model_dir, '--tag_set', 'badtagset'])
  with self.assertRaises(RuntimeError):
    saved_model_cli.show(parsed)
def testShowCommandInputsOutputs(self):
  """`show --signature_def` prints the signature's inputs/outputs/method."""
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  self.parser = saved_model_cli.create_parser()
  parsed = self.parser.parse_args([
      'show', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'serving_default'
  ])
  with captured_output() as (stdout, stderr):
    saved_model_cli.show(parsed)
  expected_output = (
      'The given SavedModel SignatureDef contains the following input(s):\n'
      ' inputs[\'x\'] tensor_info:\n'
      ' dtype: DT_FLOAT\n shape: (-1, 1)\n name: x:0\n'
      'The given SavedModel SignatureDef contains the following output(s):\n'
      ' outputs[\'y\'] tensor_info:\n'
      ' dtype: DT_FLOAT\n shape: (-1, 1)\n name: y:0\n'
      'Method name is: tensorflow/serving/predict')
  self.assertEqual(stdout.getvalue().strip(), expected_output)
  self.assertEqual(stderr.getvalue().strip(), '')
def testPrintREFTypeTensor(self):
  """_print_tensor_info prints reference dtypes (e.g. DT_FLOAT_REF)."""
  ref_tensor_info = meta_graph_pb2.TensorInfo()
  ref_tensor_info.dtype = types_pb2.DT_FLOAT_REF
  with captured_output() as (out, err):
    saved_model_cli._print_tensor_info(ref_tensor_info)
  # assertIn reports the actual output on failure, unlike assertTrue(x in y).
  self.assertIn('DT_FLOAT_REF', out.getvalue().strip())
  self.assertEqual(err.getvalue().strip(), '')
def testInputPreProcessFormats(self):
  """--inputs and --input_exprs strings parse into the expected dicts."""
  input_str = 'input1=/path/file.txt[ab3];input2=file2'
  input_expr_str = 'input3=np.zeros([2,2]);input4=[4,5]'
  input_dict = saved_model_cli.preprocess_inputs_arg_string(input_str)
  input_expr_dict = saved_model_cli.preprocess_input_exprs_arg_string(
      input_expr_str)
  # assertEqual (rather than assertTrue(a == b)) shows both values on
  # failure; the stray debug print() of input3 is removed.
  self.assertEqual(input_dict['input1'], ('/path/file.txt', 'ab3'))
  self.assertEqual(input_dict['input2'], ('file2', None))
  self.assertAllClose(input_expr_dict['input3'], np.zeros([2, 2]))
  self.assertAllClose(input_expr_dict['input4'], [4, 5])
  self.assertEqual(len(input_dict), 2)
  self.assertEqual(len(input_expr_dict), 2)
def testInputPreProcessFileNames(self):
  """Windows-style paths (drive letters, spaces, tensor names) parse."""
  input_str = (r'inputx=C:\Program Files\data.npz[v:0];'
               r'input:0=c:\PROGRA~1\data.npy')
  input_dict = saved_model_cli.preprocess_inputs_arg_string(input_str)
  # assertEqual gives a value diff on failure, unlike assertTrue(a == b).
  self.assertEqual(input_dict['inputx'],
                   (r'C:\Program Files\data.npz', 'v:0'))
  self.assertEqual(input_dict['input:0'], (r'c:\PROGRA~1\data.npy', None))
def testInputPreProcessErrorBadFormat(self):
  """Malformed --inputs / --input_exprs strings raise RuntimeError."""
  for bad_inputs in ('inputx=file[[v1]v2', 'inputx:file'):
    with self.assertRaises(RuntimeError):
      saved_model_cli.preprocess_inputs_arg_string(bad_inputs)
  with self.assertRaises(RuntimeError):
    saved_model_cli.preprocess_input_exprs_arg_string('inputx:np.zeros((5))')
def testInputParserNPY(self):
  """Inputs load from .npy files, with or without a [name] suffix."""
  x0 = np.array([[1], [2]])
  x1 = np.array(range(6)).reshape(2, 3)
  input0_path = os.path.join(test.get_temp_dir(), 'input0.npy')
  input1_path = os.path.join(test.get_temp_dir(), 'input1.npy')
  np.save(input0_path, x0)
  np.save(input1_path, x1)
  input_str = 'x0=' + input0_path + '[x0];x1=' + input1_path
  feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
      input_str, '', '')
  # assertAllEqual reports mismatching elements on failure, unlike
  # assertTrue(np.all(...)); also consistent with the rest of this file.
  self.assertAllEqual(feed_dict['x0'], x0)
  self.assertAllEqual(feed_dict['x1'], x1)
def testInputParserNPZ(self):
  """An .npz feeds inputs both by [name] and by its sole array by default."""
  x0 = np.array([[1], [2]])
  input_path = os.path.join(test.get_temp_dir(), 'input.npz')
  np.savez(input_path, a=x0)
  input_str = 'x=' + input_path + '[a];y=' + input_path
  feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
      input_str, '', '')
  # assertAllEqual gives element-wise diffs, unlike assertTrue(np.all(...)).
  self.assertAllEqual(feed_dict['x'], x0)
  self.assertAllEqual(feed_dict['y'], x0)
def testInputParserPickle(self):
  """Pickled dicts and arrays load; [key] selects a dict entry."""
  pkl0 = {'a': 5, 'b': np.array(range(4))}
  pkl1 = np.array([1])
  pkl2 = np.array([[1], [3]])
  input_path0 = os.path.join(test.get_temp_dir(), 'pickle0.pkl')
  input_path1 = os.path.join(test.get_temp_dir(), 'pickle1.pkl')
  input_path2 = os.path.join(test.get_temp_dir(), 'pickle2.pkl')
  with open(input_path0, 'wb') as f:
    pickle.dump(pkl0, f)
  with open(input_path1, 'wb') as f:
    pickle.dump(pkl1, f)
  with open(input_path2, 'wb') as f:
    pickle.dump(pkl2, f)
  # 'y' asks for key [c] of a non-dict pickle; the parser falls back to the
  # whole object.
  input_str = 'x=' + input_path0 + '[b];y=' + input_path1 + '[c];'
  input_str += 'z=' + input_path2
  feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
      input_str, '', '')
  # assertAllEqual gives element-wise diffs, unlike assertTrue(np.all(...)).
  self.assertAllEqual(feed_dict['x'], pkl0['b'])
  self.assertAllEqual(feed_dict['y'], pkl1)
  self.assertAllEqual(feed_dict['z'], pkl2)
def testInputParserPythonExpression(self):
  """--input_exprs values are evaluated as numpy expressions."""
  x1 = np.ones([2, 10])
  x2 = np.array([[1], [2], [3]])
  x3 = np.mgrid[0:5, 0:5]
  x4 = [[3], [4]]
  input_expr_str = ('x1=np.ones([2,10]);x2=np.array([[1],[2],[3]]);'
                    'x3=np.mgrid[0:5,0:5];x4=[[3],[4]]')
  feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
      '', input_expr_str, '')
  # assertAllEqual gives element-wise diffs, unlike assertTrue(np.all(...)).
  self.assertAllEqual(feed_dict['x1'], x1)
  self.assertAllEqual(feed_dict['x2'], x2)
  self.assertAllEqual(feed_dict['x3'], x3)
  self.assertAllEqual(feed_dict['x4'], x4)
def testInputParserBoth(self):
  """--inputs and --input_exprs can be combined in one call."""
  x0 = np.array([[1], [2]])
  input_path = os.path.join(test.get_temp_dir(), 'input.npz')
  np.savez(input_path, a=x0)
  x1 = np.ones([2, 10])
  input_str = 'x0=' + input_path + '[a]'
  input_expr_str = 'x1=np.ones([2,10])'
  feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
      input_str, input_expr_str, '')
  # assertAllEqual gives element-wise diffs, unlike assertTrue(np.all(...)).
  self.assertAllEqual(feed_dict['x0'], x0)
  self.assertAllEqual(feed_dict['x1'], x1)
def testInputParserBothDuplicate(self):
  """On a duplicate key, the --input_exprs value overrides --inputs."""
  x0 = np.array([[1], [2]])
  input_path = os.path.join(test.get_temp_dir(), 'input.npz')
  np.savez(input_path, a=x0)
  x1 = np.ones([2, 10])
  input_str = 'x0=' + input_path + '[a]'
  input_expr_str = 'x0=np.ones([2,10])'
  feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
      input_str, input_expr_str, '')
  # The expression value wins over the file value for the same key.
  self.assertAllEqual(feed_dict['x0'], x1)
def testInputParserErrorNoName(self):
  """An .npz with several arrays and no [name] raises RuntimeError."""
  arr_a = np.array([[1], [2]])
  arr_b = np.array(range(5))
  npz_path = os.path.join(test.get_temp_dir(), 'input.npz')
  np.savez(npz_path, a=arr_a, b=arr_b)
  with self.assertRaises(RuntimeError):
    saved_model_cli.load_inputs_from_input_arg_string('x=' + npz_path, '', '')
def testInputParserErrorWrongName(self):
  """Requesting a name absent from the .npz raises RuntimeError."""
  arr_a = np.array([[1], [2]])
  arr_b = np.array(range(5))
  npz_path = os.path.join(test.get_temp_dir(), 'input.npz')
  np.savez(npz_path, a=arr_a, b=arr_b)
  with self.assertRaises(RuntimeError):
    saved_model_cli.load_inputs_from_input_arg_string(
        'x=' + npz_path + '[c]', '', '')
def testRunCommandInputExamples(self):
  """`run --input_examples` feeds tf.Example rows and writes outputs.npy."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  out_dir = os.path.join(test.get_temp_dir(), 'new_dir')
  parsed = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'regress_x_to_y', '--input_examples',
      'inputs=[{"x":[8.0],"x2":[5.0]}, {"x":[4.0],"x2":[3.0]}]', '--outdir',
      out_dir
  ])
  saved_model_cli.run(parsed)
  y_actual = np.load(os.path.join(out_dir, 'outputs.npy'))
  self.assertAllEqual(np.array([[6.0], [4.0]]), y_actual)
def testRunCommandExistingOutdir(self):
  """`run` writes outputs.npy into an already-existing --outdir."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  x_used = np.array([[1], [2]])
  x_unused = np.zeros((6, 3))
  inputs_path = os.path.join(test.get_temp_dir(), 'testRunCommand_inputs.npz')
  np.savez(inputs_path, x0=x_used, x1=x_unused)
  result_path = os.path.join(test.get_temp_dir(), 'outputs.npy')
  if os.path.exists(result_path):
    os.remove(result_path)
  parsed = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'regress_x2_to_y3', '--inputs', 'inputs=' + inputs_path + '[x0]',
      '--outdir',
      test.get_temp_dir()
  ])
  saved_model_cli.run(parsed)
  self.assertAllClose(np.array([[3.5], [4.0]]), np.load(result_path))
def testRunCommandNewOutdir(self):
  """`run` creates a fresh --outdir and writes y.npy into it."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  x_used = np.array([[1], [2]])
  x_unused = np.zeros((6, 3))
  inputs_path = os.path.join(test.get_temp_dir(),
                             'testRunCommandNewOutdir_inputs.npz')
  out_dir = os.path.join(test.get_temp_dir(), 'new_dir')
  if os.path.isdir(out_dir):
    shutil.rmtree(out_dir)
  np.savez(inputs_path, x0=x_used, x1=x_unused)
  parsed = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'serving_default', '--inputs', 'x=' + inputs_path + '[x0]', '--outdir',
      out_dir
  ])
  saved_model_cli.run(parsed)
  self.assertAllClose(np.array([[2.5], [3.0]]),
                      np.load(os.path.join(out_dir, 'y.npy')))
def testRunCommandOutOverwrite(self):
  """--overwrite replaces an existing output file instead of erroring."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  x_used = np.array([[1], [2]])
  x_unused = np.zeros((6, 3))
  inputs_path = os.path.join(test.get_temp_dir(),
                             'testRunCommandOutOverwrite_inputs.npz')
  np.savez(inputs_path, x0=x_used, x1=x_unused)
  result_path = os.path.join(test.get_temp_dir(), 'y.npy')
  # Pre-create the output file so run() must overwrite it.
  with open(result_path, 'a'):
    pass
  parsed = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'serving_default', '--inputs', 'x=' + inputs_path + '[x0]', '--outdir',
      test.get_temp_dir(), '--overwrite'
  ])
  saved_model_cli.run(parsed)
  self.assertAllClose(np.array([[2.5], [3.0]]), np.load(result_path))
def testRunCommandInvalidInputKeyError(self):
  """Feeding a key not present in the signature raises ValueError."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  parsed = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'regress_x2_to_y3', '--input_exprs', 'x2=np.ones((3,1))'
  ])
  with self.assertRaises(ValueError):
    saved_model_cli.run(parsed)
def testRunCommandInputExamplesNotListError(self):
  """--input_examples whose value is not a list raises ValueError."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  out_dir = os.path.join(test.get_temp_dir(), 'new_dir')
  parsed = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'regress_x_to_y', '--input_examples', 'inputs={"x":8.0,"x2":5.0}',
      '--outdir', out_dir
  ])
  with self.assertRaisesRegexp(ValueError, 'must be a list'):
    saved_model_cli.run(parsed)
def testRunCommandInputExamplesFeatureValueNotListError(self):
  """A scalar (non-list) feature value in --input_examples raises ValueError."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  out_dir = os.path.join(test.get_temp_dir(), 'new_dir')
  parsed = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'regress_x_to_y', '--input_examples', 'inputs=[{"x":8.0,"x2":5.0}]',
      '--outdir', out_dir
  ])
  with self.assertRaisesRegexp(ValueError, 'feature value must be a list'):
    saved_model_cli.run(parsed)
def testRunCommandInputExamplesFeatureBadType(self):
  """A nested-list feature value (unsupported type) raises ValueError."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  out_dir = os.path.join(test.get_temp_dir(), 'new_dir')
  parsed = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'regress_x_to_y', '--input_examples', 'inputs=[{"x":[[1],[2]]}]',
      '--outdir', out_dir
  ])
  with self.assertRaisesRegexp(ValueError, 'is not supported'):
    saved_model_cli.run(parsed)
def testRunCommandOutputFileExistError(self):
  """Without --overwrite, an existing output file raises RuntimeError."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  x_used = np.array([[1], [2]])
  x_unused = np.zeros((6, 3))
  inputs_path = os.path.join(test.get_temp_dir(),
                             'testRunCommandOutOverwrite_inputs.npz')
  np.savez(inputs_path, x0=x_used, x1=x_unused)
  result_path = os.path.join(test.get_temp_dir(), 'y.npy')
  # Pre-create the output file; run() must refuse to clobber it.
  with open(result_path, 'a'):
    pass
  parsed = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'serving_default', '--inputs', 'x=' + inputs_path + '[x0]', '--outdir',
      test.get_temp_dir()
  ])
  with self.assertRaises(RuntimeError):
    saved_model_cli.run(parsed)
def testRunCommandInputNotGivenError(self):
  """`run` with no inputs/exprs/examples raises AttributeError."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  parsed = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'serving_default'
  ])
  with self.assertRaises(AttributeError):
    saved_model_cli.run(parsed)
def testRunCommandWithDebuggerEnabled(self):
  """--tf_debug wraps the session in LocalCLIDebugWrapperSession."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  x_used = np.array([[1], [2]])
  x_unused = np.zeros((6, 3))
  inputs_path = os.path.join(test.get_temp_dir(),
                             'testRunCommandNewOutdir_inputs.npz')
  out_dir = os.path.join(test.get_temp_dir(), 'new_dir')
  if os.path.isdir(out_dir):
    shutil.rmtree(out_dir)
  np.savez(inputs_path, x0=x_used, x1=x_unused)
  parsed = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'serving_default', '--inputs', 'x=' + inputs_path + '[x0]', '--outdir',
      out_dir, '--tf_debug'
  ])

  def fake_wrapper_session(sess):
    # Pass the session straight through; we only check that the debug
    # wrapper constructor was invoked.
    return sess

  with test.mock.patch.object(local_cli_wrapper,
                              'LocalCLIDebugWrapperSession',
                              side_effect=fake_wrapper_session,
                              autospec=True) as wrapper_mock:
    saved_model_cli.run(parsed)
    wrapper_mock.assert_called_with(test.mock.ANY)
  self.assertAllClose(np.array([[2.5], [3.0]]),
                      np.load(os.path.join(out_dir, 'y.npy')))
def testScanCommand(self):
  """`scan` of a clean model reports no blacklisted ops."""
  self.parser = saved_model_cli.create_parser()
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  args = self.parser.parse_args(['scan', '--dir', base_path])
  with captured_output() as (out, _):
    saved_model_cli.scan(args)
  output = out.getvalue().strip()
  # assertIn reports the actual output on failure, unlike assertTrue(... in).
  self.assertIn('does not contain blacklisted ops', output)
def testScanCommandFoundBlacklistedOp(self):
  """`scan` names blacklisted ops found in the graph.

  Temporarily narrows the blacklist to 'VariableV2' (which the test model
  contains) and checks that scan reports it.
  """
  self.parser = saved_model_cli.create_parser()
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  args = self.parser.parse_args(
      ['scan', '--dir', base_path, '--tag_set', 'serve'])
  op_blacklist = saved_model_cli._OP_BLACKLIST
  saved_model_cli._OP_BLACKLIST = set(['VariableV2'])
  try:
    with captured_output() as (out, _):
      saved_model_cli.scan(args)
  finally:
    # Restore the real blacklist even if scan() raises; otherwise the
    # patched value would leak into subsequent tests.
    saved_model_cli._OP_BLACKLIST = op_blacklist
  output = out.getvalue().strip()
  self.assertIn('\'VariableV2\'', output)
def testAOTCompileCPUWrongSignatureDefKey(self):
  """aot_compile_cpu with an unknown --signature_def_key raises ValueError."""
  if not test.is_built_with_xla():
    self.skipTest('Skipping test because XLA is not compiled in.')
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  out_prefix = os.path.join(test.get_temp_dir(), 'aot_compile_cpu_dir')
  parsed = self.parser.parse_args(
      ['aot_compile_cpu', '--dir', model_dir, '--tag_set', 'serve',
       '--output_prefix', out_prefix,
       '--cpp_class', 'Compiled',
       '--signature_def_key', 'MISSING'])
  with self.assertRaisesRegexp(ValueError, 'Unable to find signature_def'):
    saved_model_cli.aot_compile_cpu(parsed)
class AOTCompileDummyModel(tracking.AutoTrackable):
  """Model compatible with XLA compilation."""

  def __init__(self):
    # `var` is only read by func2/func3; `write_var` is assigned in
    # func_write, exercising writable-variable handling in AOT compilation.
    self.var = variables.Variable(1.0, name='my_var')
    self.write_var = variables.Variable(1.0, name='write_var')

  @def_function.function(input_signature=[
      tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32),
      # Test unused inputs.
      tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
  ])
  def func2(self, x, y):
    # `y` is deliberately unused so freezing can prune it from the graph.
    del y
    return {'res': x + self.var}

  @def_function.function(input_signature=[
      # Test large inputs.
      tensor_spec.TensorSpec(shape=(2048, 16), dtype=dtypes.float32),
      tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
  ])
  def func3(self, x, y):
    del y
    return {'res': x + self.var}

  @def_function.function(input_signature=[
      tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
      tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
  ])
  def func_write(self, x, y):
    del y
    # Mutates write_var, so the generated code needs a non-const parameter.
    self.write_var.assign(x + self.var)
    return {'res': self.write_var}
@parameterized.named_parameters(
    ('VariablesToFeedNone', '', 'func2'),
    ('VariablesToFeedAll', 'all', 'func2'),
    ('VariablesToFeedMyVar', 'my_var', 'func2'),
    ('VariablesToFeedNoneLargeConstant', '', 'func3'),
    ('WriteToWriteVar', 'all', 'func_write'),
)
def testAOTCompileCPUFreezesAndCompiles(self, variables_to_feed, func):
  """aot_compile_cpu freezes the graph and emits object/header/makefile.

  Args:
    variables_to_feed: value for --variables_to_feed ('' for none, 'all',
      or a variable name such as 'my_var').
    func: name of the AOTCompileDummyModel method to export as the 'func'
      signature.
  """
  if not test.is_built_with_xla():
    self.skipTest('Skipping test because XLA is not compiled in.')
  saved_model_dir = os.path.join(test.get_temp_dir(), 'dummy_model')
  dummy_model = self.AOTCompileDummyModel()
  func = getattr(dummy_model, func)
  with self.cached_session():
    self.evaluate(dummy_model.var.initializer)
    self.evaluate(dummy_model.write_var.initializer)
    save.save(dummy_model, saved_model_dir, signatures={'func': func})
  self.parser = saved_model_cli.create_parser()
  output_prefix = os.path.join(test.get_temp_dir(), 'aot_compile_cpu_dir/out')
  args = self.parser.parse_args([
      'aot_compile_cpu', '--dir', saved_model_dir, '--tag_set', 'serve',
      '--signature_def_key', 'func',
      '--output_prefix', output_prefix, '--variables_to_feed',
      variables_to_feed, '--cpp_class', 'Generated'
  ])  # Use the default serving signature_key.
  with test.mock.patch.object(logging, 'warn') as captured_warn:
    saved_model_cli.aot_compile_cpu(args)
  # The unused signature input 'y' should be pruned, with a warning logged.
  self.assertRegexpMatches(
      str(captured_warn.call_args),
      'Signature input key \'y\'.*has been pruned while freezing the graph.')
  self.assertTrue(file_io.file_exists('{}.o'.format(output_prefix)))
  self.assertTrue(file_io.file_exists('{}.h'.format(output_prefix)))
  self.assertTrue(file_io.file_exists('{}_metadata.o'.format(output_prefix)))
  self.assertTrue(
      file_io.file_exists('{}_makefile.inc'.format(output_prefix)))
  header_contents = file_io.read_file_to_string('{}.h'.format(output_prefix))
  self.assertIn('class Generated', header_contents)
  self.assertIn('arg_feed_x_data', header_contents)
  self.assertIn('result_fetch_res_data', header_contents)
  # arg_y got filtered out as it's not used by the output.
  self.assertNotIn('arg_feed_y_data', header_contents)
  if variables_to_feed:
    # Read-only-variables' setters preserve constness.
    self.assertIn('set_var_param_my_var_data(const float', header_contents)
    self.assertNotIn('set_var_param_my_var_data(float', header_contents)
  if func == dummy_model.func_write:
    # Writeable variables setters do not preserve constness.
    self.assertIn('set_var_param_write_var_data(float', header_contents)
    self.assertNotIn(
        'set_var_param_write_var_data(const float', header_contents)
  makefile_contents = file_io.read_file_to_string(
      '{}_makefile.inc'.format(output_prefix))
  self.assertIn('-D_GLIBCXX_USE_CXX11_ABI=', makefile_contents)
if __name__ == '__main__':
  # Run all tests in this module via the TensorFlow test runner.
  test.main()
| |
from nose.tools import *
from tests.base import ApiTestCase
from tests.factories import AuthUserFactory, PreprintFactory, PreprintProviderFactory
from api.base.settings.defaults import API_BASE
class TestPreprintRelationshipPreprintProvider(ApiTestCase):
    """API tests for the preprint -> preprint_providers relationship endpoint.

    Exercises GET/POST/PATCH/PUT/DELETE semantics and the permission split
    between the admin creator and a read-write contributor.
    """

    def setUp(self):
        """Create an admin creator, a read-write contributor, a provider-less
        preprint, and two providers to attach/detach in the tests."""
        super(TestPreprintRelationshipPreprintProvider, self).setUp()
        self.user = AuthUserFactory()
        self.read_write_user = AuthUserFactory()
        self.preprint = PreprintFactory(creator=self.user, providers=None)
        self.preprint.add_contributor(self.read_write_user)
        self.preprint.save()
        self.preprint_provider_one = PreprintProviderFactory()
        self.preprint_provider_two = PreprintProviderFactory()
        self.preprint_preprint_providers_url = self.create_url(self.preprint._id)

    def create_url(self, preprint_id):
        """Return the relationship URL for *preprint_id*."""
        return '/{0}preprints/{1}/relationships/preprint_providers/'.format(API_BASE, preprint_id)

    def create_payload(self, *preprint_provider_ids):
        """Build a JSON-API relationship payload for the given provider ids."""
        data = []
        for provider_id in preprint_provider_ids:
            data.append({'type': 'preprint_providers', 'id': provider_id})
        return {'data': data}

    def test_add_preprint_providers(self):
        """POST by the creator attaches both providers and returns 201."""
        assert_equal(self.preprint.preprint_providers, None)
        res = self.app.post_json_api(
            self.preprint_preprint_providers_url,
            self.create_payload(self.preprint_provider_one._id, self.preprint_provider_two._id),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 201)
        # check the relationship
        self.preprint.reload()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)
        assert_in(self.preprint_provider_two, self.preprint.preprint_providers)

    def test_add_through_patch_one_provider_while_removing_other(self):
        """PATCH replaces the provider set wholesale (one removed, one added)."""
        self.preprint.preprint_providers = [self.preprint_provider_one]
        self.preprint.save()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)
        assert_not_in(self.preprint_provider_two, self.preprint.preprint_providers)
        res = self.app.patch_json_api(
            self.preprint_preprint_providers_url,
            self.create_payload(self.preprint_provider_two._id),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 200)
        self.preprint.reload()
        assert_not_in(self.preprint_provider_one, self.preprint.preprint_providers)
        assert_in(self.preprint_provider_two, self.preprint.preprint_providers)

    def test_add_through_post_to_preprint_with_provider(self):
        """POST adds to, rather than replaces, the existing provider set."""
        self.preprint.preprint_providers = [self.preprint_provider_one]
        self.preprint.save()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)
        assert_not_in(self.preprint_provider_two, self.preprint.preprint_providers)
        res = self.app.post_json_api(
            self.preprint_preprint_providers_url,
            self.create_payload(self.preprint_provider_two._id),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 201)
        self.preprint.reload()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)
        assert_in(self.preprint_provider_two, self.preprint.preprint_providers)

    def test_add_provider_with_no_permissions(self):
        """A user with no role on the preprint gets 403 on POST."""
        new_user = AuthUserFactory()
        new_user.save()
        res = self.app.post_json_api(
            self.preprint_preprint_providers_url,
            self.create_payload(self.preprint_provider_one._id),
            auth=new_user.auth,
            expect_errors=True,
        )
        assert_equal(res.status_code, 403)

    def test_delete_nothing(self):
        """DELETE with an empty payload is a 204 no-op."""
        res = self.app.delete_json_api(
            self.preprint_preprint_providers_url,
            self.create_payload(),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 204)

    def test_remove_providers(self):
        """PUT with an empty payload clears all providers."""
        self.preprint.preprint_providers = [self.preprint_provider_one]
        self.preprint.save()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)
        res = self.app.put_json_api(
            self.preprint_preprint_providers_url,
            self.create_payload(),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 200)
        self.preprint.reload()
        assert_equal(self.preprint.preprint_providers, [])

    def test_remove_providers_with_no_auth(self):
        """Unauthenticated PUT is rejected with 401."""
        res = self.app.put_json_api(
            self.preprint_preprint_providers_url,
            self.create_payload(),
            expect_errors=True
        )
        assert_equal(res.status_code, 401)

    def test_using_post_making_no_changes_returns_204(self):
        """POSTing an already-attached provider is a 204 no-op."""
        self.preprint.preprint_providers = [self.preprint_provider_one]
        self.preprint.save()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)
        res = self.app.post_json_api(
            self.preprint_preprint_providers_url,
            self.create_payload(self.preprint_provider_one._id),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 204)
        self.preprint.reload()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)

    def test_delete_user_is_admin(self):
        """The admin creator can DELETE an attached provider (204)."""
        self.preprint.preprint_providers = [self.preprint_provider_one]
        self.preprint.save()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)
        res = self.app.delete_json_api(
            self.preprint_preprint_providers_url,
            self.create_payload(self.preprint_provider_one._id),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 204)
        self.preprint.reload()
        assert_not_in(self.preprint_provider_one, self.preprint.preprint_providers)

    def test_delete_provider_user_is_read_write(self):
        """A read-write contributor cannot DELETE a provider (403)."""
        self.preprint.preprint_providers = [self.preprint_provider_one]
        self.preprint.save()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)
        res = self.app.delete_json_api(
            self.preprint_preprint_providers_url,
            self.create_payload(self.preprint_provider_one._id),
            auth=self.read_write_user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 403)
        self.preprint.reload()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)

    def test_add_provider_user_is_read_write(self):
        """A read-write contributor cannot POST a new provider (403)."""
        self.preprint.preprint_providers = []
        self.preprint.preprint_providers.append(self.preprint_provider_one)
        self.preprint.save()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)
        res = self.app.post_json_api(
            self.preprint_preprint_providers_url,
            self.create_payload(self.preprint_provider_two._id),
            auth=self.read_write_user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 403)
        self.preprint.reload()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)

    def test_change_provider_user_is_read_write(self):
        """A read-write contributor cannot PUT (replace) providers (403)."""
        self.preprint.preprint_providers = []
        self.preprint.preprint_providers.append(self.preprint_provider_one)
        self.preprint.save()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)
        res = self.app.put_json_api(
            self.preprint_preprint_providers_url,
            self.create_payload(self.preprint_provider_two._id),
            auth=self.read_write_user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 403)
        self.preprint.reload()
        assert_in(self.preprint_provider_one, self.preprint.preprint_providers)

    def test_get_relationship_information(self):
        """GET on the relationship URL returns 200 for the creator."""
        res = self.app.get(self.preprint_preprint_providers_url,auth=self.user.auth)
        assert_equal(res.status_code, 200)

    def test_invalid_relationship_type(self):
        """A payload with the wrong JSON-API type yields 409 Conflict."""
        invalid_type_payload = self.create_payload(self.preprint_provider_one._id)
        invalid_type_payload['data'][0]['type'] = 'socks'
        res = self.app.put_json_api(
            self.preprint_preprint_providers_url,
            invalid_type_payload,
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 409)

    def test_provider_does_not_exist(self):
        """An unknown provider id yields 404."""
        res = self.app.post_json_api(
            self.preprint_preprint_providers_url,
            self.create_payload('nope nope nope'),
            auth=self.user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 404)
| |
#!/usr/bin/env python
# CREATED:2013-03-08 15:25:18 by Brian McFee <brm2132@columbia.edu>
# unit tests for librosa.filters
#
# This test suite verifies that librosa core routines match (numerically) the output
# of various DPWE matlab implementations on a broad range of input parameters.
#
# All test data is generated by the Matlab script "makeTestData.m".
# Each test loads in a .mat file which contains the input and desired output for a given
# function. The test then runs the librosa implementation and verifies the results
# against the desired output, typically via numpy.allclose().
#
# Disable cache
import os
# Drop any user-configured librosa cache directory so tests run uncached;
# an absent key is fine.
try:
    os.environ.pop("LIBROSA_CACHE_DIR")
except KeyError:
    pass
from contextlib2 import nullcontext as dnr
import glob
import numpy as np
import scipy.io
import pytest
import librosa
# -- utilities --#
def files(pattern):
    """Return the files matching *pattern*, sorted lexicographically."""
    return sorted(glob.glob(pattern))
def load(infile):
    """Load a MATLAB .mat fixture, decoding char arrays as Python strings."""
    return scipy.io.loadmat(infile, chars_as_strings=True)
# -- --#
# -- Tests --#
@pytest.mark.parametrize(
    "infile", files(os.path.join("tests", "data", "feature-hz_to_mel-*.mat"))
)
def test_hz_to_mel(infile):
    """hz_to_mel matches the Matlab reference fixture in *infile*."""
    data = load(infile)
    converted = librosa.hz_to_mel(data["f"], data["htk"])
    assert np.allclose(converted, data["result"])
@pytest.mark.parametrize(
    "infile", files(os.path.join("tests", "data", "feature-mel_to_hz-*.mat"))
)
def test_mel_to_hz(infile):
    """mel_to_hz matches the fixture, for array and scalar inputs."""
    data = load(infile)
    assert np.allclose(librosa.mel_to_hz(data["f"], data["htk"]),
                       data["result"])
    # Test for scalar conversion too
    assert np.allclose(librosa.mel_to_hz(data["f"][0], data["htk"]),
                       data["result"][0])
@pytest.mark.parametrize(
    "infile", files(os.path.join("tests", "data", "feature-hz_to_octs-*.mat"))
)
def test_hz_to_octs(infile):
    """hz_to_octs matches the Matlab reference fixture in *infile*."""
    data = load(infile)
    octaves = librosa.hz_to_octs(data["f"])
    assert np.allclose(octaves, data["result"])
@pytest.mark.parametrize(
    "infile", files(os.path.join("tests", "data", "feature-melfb-*.mat"))
)
def test_melfb(infile):
    """filters.mel matches the Matlab mel filterbank fixture."""
    data = load(infile)
    fb = librosa.filters.mel(
        data["sr"][0, 0],
        data["nfft"][0, 0],
        n_mels=data["nfilts"][0, 0],
        fmin=data["fmin"][0, 0],
        fmax=data["fmax"][0, 0],
        htk=data["htk"][0, 0],
    )
    # Our version only returns the real-valued part; zero-pad to the
    # fixture's full width before comparing.
    pad_width = int(data["nfft"][0] // 2 - 1)
    fb = np.pad(fb, [(0, 0), (0, pad_width)], mode="constant")
    assert fb.shape == data["wts"].shape
    assert np.allclose(fb, data["wts"])
@pytest.mark.parametrize(
    "infile", files(os.path.join("tests", "data", "feature-melfbnorm-*.mat"))
)
def test_melfbnorm(infile):
    """filters.mel with an explicit norm matches the Matlab fixture."""
    data = load(infile)
    # An empty 'norm' entry in the fixture means "pass None".
    norm = None if data["norm"].shape[-1] == 0 else data["norm"][0, 0]
    fb = librosa.filters.mel(
        data["sr"][0, 0],
        data["nfft"][0, 0],
        n_mels=data["nfilts"][0, 0],
        fmin=data["fmin"][0, 0],
        fmax=data["fmax"][0, 0],
        htk=data["htk"][0, 0],
        norm=norm,
    )
    # Zero-pad the real-valued part out to the fixture's full width.
    pad_width = int(data["nfft"][0] // 2 - 1)
    fb = np.pad(fb, [(0, 0), (0, pad_width)], mode="constant")
    assert fb.shape == data["wts"].shape
    assert np.allclose(fb, data["wts"])
@pytest.mark.parametrize("norm", [1, 2, np.inf])
def test_mel_norm(norm):
    """Each mel filter row is normalized to unit norm of the given order."""
    fb = librosa.filters.mel(22050, 2048, norm=norm)
    if norm == np.inf:
        row_norms = np.max(np.abs(fb), axis=1)
    elif norm == 2:
        row_norms = np.sum(np.abs(fb ** 2), axis=1)
    else:  # norm == 1
        row_norms = np.sum(np.abs(fb), axis=1)
    assert np.allclose(row_norms, 1)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_mel_badnorm():
    """An unsupported norm argument must raise ParameterError."""
    librosa.filters.mel(22050, 2048, norm="garbage")
def test_mel_gap():
    """A low fmax with many HTK mel bands produces empty filters; a warning is expected."""
    # This configuration is known to trigger some empty filters.
    with pytest.warns(UserWarning, match="Empty filters"):
        librosa.filters.mel(44100, 1024, n_mels=128, fmin=0, fmax=2000, htk=True)
@pytest.mark.parametrize(
    "infile", files(os.path.join("tests", "data", "feature-chromafb-*.mat"))
)
def test_chromafb(infile):
    """Compare librosa.filters.chroma against a reference chroma filterbank from a .mat fixture."""
    DATA = load(infile)

    octwidth = DATA["octwidth"][0, 0]
    # The fixture encodes "no octave weighting" as 0; the librosa API uses None.
    if octwidth == 0:
        octwidth = None

    # Convert A440 parameter to tuning parameter
    A440 = DATA["a440"][0, 0]
    tuning = DATA["nchroma"][0, 0] * (np.log2(A440) - np.log2(440.0))

    wts = librosa.filters.chroma(
        DATA["sr"][0, 0],
        DATA["nfft"][0, 0],
        DATA["nchroma"][0, 0],
        tuning=tuning,
        ctroct=DATA["ctroct"][0, 0],
        octwidth=octwidth,
        norm=2,
        base_c=False,
    )

    # Our version only returns the real-valued part.
    # Pad out.
    wts = np.pad(wts, [(0, 0), (0, int(DATA["nfft"][0, 0] // 2 - 1))], mode="constant")

    assert wts.shape == DATA["wts"].shape
    assert np.allclose(wts, DATA["wts"])
# Testing two tones, 261.63 Hz and 440 Hz
@pytest.mark.parametrize("freq", [261.63, 440])
def test_chroma_issue1295(freq):
    """Regression test for issue #1295: peak chroma bin location for pure tones."""
    tone = librosa.tone(frequency=freq, sr=22050, duration=1)
    chroma = librosa.feature.chroma_stft(
        y=tone, sr=22050, n_chroma=120, base_c=True
    )
    peak = np.unravel_index(chroma.argmax(), chroma.shape)
    # Expected (chroma bin, frame) peak per frequency.
    expected = {261.63: (5, 43), 440: (86, 43)}
    assert peak == expected[freq]
@pytest.mark.parametrize("n", [16, 16.0, 16.25, 16.75])
@pytest.mark.parametrize(
    "window_name",
    [
        "barthann",
        "bartlett",
        "blackman",
        "blackmanharris",
        "bohman",
        "boxcar",
        "cosine",
        "flattop",
        "hamming",
        "hann",
        "hanning",
        "nuttall",
        "parzen",
        "triang",
    ],
)
def test__window(n, window_name):
    """Fractional-length window wrapper: integer n matches scipy exactly;
    fractional n must zero out everything past floor(n)."""
    window = getattr(scipy.signal.windows, window_name)

    # __float_window wraps a scipy window constructor to accept float lengths.
    wdec = librosa.filters.__float_window(window)

    if n == int(n):
        n = int(n)
        # Integral lengths must reproduce the scipy window exactly.
        assert np.allclose(wdec(n), window(n))
    else:
        wf = wdec(n)
        fn = int(np.floor(n))
        # The tail beyond the floored length must be all zeros.
        assert not np.any(wf[fn:])
@pytest.mark.parametrize("sr", [11025])
@pytest.mark.parametrize("fmin", [None, librosa.note_to_hz("C3")])
@pytest.mark.parametrize("n_bins", [12, 24])
@pytest.mark.parametrize("bins_per_octave", [12, 24])
@pytest.mark.parametrize("filter_scale", [1, 2])
@pytest.mark.parametrize("norm", [1, 2])
@pytest.mark.parametrize("pad_fft", [False, True])
def test_constant_q(sr, fmin, n_bins, bins_per_octave, filter_scale, pad_fft, norm):
    """Sanity checks on constant-Q filter construction: filter lengths,
    bin count, power-of-two FFT padding, and vanishing negative frequencies."""
    F, lengths = librosa.filters.constant_q(
        sr,
        fmin=fmin,
        n_bins=n_bins,
        bins_per_octave=bins_per_octave,
        filter_scale=filter_scale,
        pad_fft=pad_fft,
        norm=norm,
    )

    # No filter's effective length may exceed the (possibly padded) width.
    assert np.all(lengths <= F.shape[1])
    assert len(F) == n_bins

    if not pad_fft:
        return

    # With pad_fft, the padded width must be an exact power of two.
    assert np.mod(np.log2(F.shape[1]), 1.0) == 0.0

    # Check for vanishing negative frequencies
    F_fft = np.abs(np.fft.fft(F, axis=1))
    # Normalize by row-wise peak
    F_fft = F_fft / np.max(F_fft, axis=1, keepdims=True)
    assert not np.any(F_fft[:, -F_fft.shape[1] // 2 :] > 1e-4)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize(
    "sr,fmin,n_bins,bins_per_octave,filter_scale,norm",
    [
        # fmin at Nyquist; negative fmin; negative bins_per_octave;
        # negative n_bins; negative filter_scale; negative norm.
        (11025, 11025 / 2.0, 1, 12, 1, 1),
        (11025, -60, 1, 12, 1, 1),
        (11025, 60, 1, -12, 1, 1),
        (11025, 60, -1, 12, 1, 1),
        (11025, 60, 1, 12, -1, 1),
        (11025, 60, 1, 12, 1, -1),
    ],
)
def test_constant_q_badparams(sr, fmin, n_bins, bins_per_octave, filter_scale, norm):
    """Each parameter row is invalid and must raise ParameterError."""
    librosa.filters.constant_q(
        sr,
        fmin=fmin,
        n_bins=n_bins,
        bins_per_octave=bins_per_octave,
        filter_scale=filter_scale,
        pad_fft=True,
        norm=norm,
    )
def test_window_bandwidth():
    """String and callable specs of the same window resolve to the same bandwidth."""
    by_name = librosa.filters.window_bandwidth("hann")
    by_callable = librosa.filters.window_bandwidth(scipy.signal.hann)
    assert by_name == by_callable
def test_window_bandwidth_dynamic():
    """A window constructor absent from the lookup table is measured dynamically.

    A boxcar-like (all-ones) window has an equivalent noise bandwidth of exactly 1.
    """
    def boxcar_like(n):
        return np.ones(n)

    assert librosa.filters.window_bandwidth(boxcar_like) == 1
@pytest.mark.xfail(raises=ValueError)
def test_window_bandwidth_missing():
    """An unknown window name must raise ValueError."""
    librosa.filters.window_bandwidth("made up window name")
def binstr(m):
    """Render a 2-d binary mask as text: '.' marks nonzero cells, ' ' marks zeros.

    Rows are joined with newlines; useful for eyeballing sparse matrices in
    test failure output.
    """
    return "\n".join(
        "".join("." if cell else " " for cell in row) for row in m
    )
@pytest.mark.parametrize("n_octaves", [2, 3, 4])
@pytest.mark.parametrize("semitones", [1, 3])
@pytest.mark.parametrize("n_chroma", [12, 24, 36])
@pytest.mark.parametrize("fmin", [None] + list(librosa.midi_to_hz(range(48, 61))))
@pytest.mark.parametrize("base_c", [False, True])
@pytest.mark.parametrize("window", [None, [1]])
def test_cq_to_chroma(n_octaves, semitones, n_chroma, fmin, base_c, window):
    """cq_to_chroma must map each CQT bin to the chroma bin of its MIDI pitch
    class; incompatible bins_per_octave/n_chroma combinations must raise."""
    bins_per_octave = 12 * semitones
    n_bins = n_octaves * bins_per_octave

    # n_chroma must evenly divide bins_per_octave; otherwise ParameterError.
    if np.mod(bins_per_octave, n_chroma) != 0:
        ctx = pytest.raises(librosa.ParameterError)
    else:
        ctx = dnr()

    with ctx:
        # Fake up a cqt matrix with the corresponding midi notes
        if fmin is None:
            midi_base = 24  # C2
        else:
            midi_base = librosa.hz_to_midi(fmin)

        midi_notes = np.linspace(
            midi_base,
            midi_base + n_bins * 12.0 / bins_per_octave,
            endpoint=False,
            num=n_bins,
        )
        # We don't care past 2 decimals here.
        # the log2 inside hz_to_midi can cause problems though.
        midi_notes = np.around(midi_notes, decimals=2)
        # Diagonal matrix: bin i carries the value of its own midi note.
        C = np.diag(midi_notes)

        cq2chr = librosa.filters.cq_to_chroma(
            n_input=C.shape[0],
            bins_per_octave=bins_per_octave,
            n_chroma=n_chroma,
            fmin=fmin,
            base_c=base_c,
        )

        chroma = cq2chr.dot(C)
        for i in range(n_chroma):
            # Collect the midi-note values that landed in chroma bin i.
            v = chroma[i][chroma[i] != 0]
            v = np.around(v, decimals=2)

            # Pitch-class residual: base-C puts C at 0, otherwise A (midi 9).
            if base_c:
                resid = np.mod(v, 12)
            else:
                resid = np.mod(v - 9, 12)

            resid = np.round(resid * n_chroma / 12.0)
            # Every note in bin i must share bin i's pitch class.
            assert np.allclose(np.mod(i - resid, 12), 0.0), i - resid
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_get_window_fail():
    """A None window specification must raise ParameterError."""
    librosa.filters.get_window(None, 32)
# Fix: the parametrize list contained "hann" twice, running an identical
# case redundantly (a leftover from a py2 str/unicode pair).
@pytest.mark.parametrize("window", ["hann", 4.0, ("kaiser", 4.0)])
def test_get_window(window):
    """get_window must agree with scipy for name, float (kaiser beta), and tuple specs."""
    w1 = librosa.filters.get_window(window, 32)
    w2 = scipy.signal.get_window(window, 32)
    assert np.allclose(w1, w2)
def test_get_window_func():
    """A callable window spec matches scipy's equivalent named window."""
    ours = librosa.filters.get_window(scipy.signal.boxcar, 32)
    theirs = scipy.signal.get_window("boxcar", 32)
    assert np.allclose(ours, theirs)
@pytest.mark.parametrize(
    "pre_win", [scipy.signal.hann(16), list(scipy.signal.hann(16)), [1, 1, 1]]
)
def test_get_window_pre(pre_win):
    """A precomputed window (ndarray or list) must be passed through unchanged."""
    result = librosa.filters.get_window(pre_win, len(pre_win))
    assert np.allclose(result, pre_win)
def test_semitone_filterbank():
    """Compare semitone_filterbank (both 'ba' and 'sos' layouts) against the
    Chroma Toolbox's elliptical reference filterbank coefficients."""
    # We test against Chroma Toolbox' elliptical semitone filterbank
    # load data from chroma toolbox
    gt_fb = scipy.io.loadmat(
        os.path.join(
            "tests", "data", "filter-muliratefb-MIDI_FB_ellip_pitch_60_96_22050_Q25"
        ),
        squeeze_me=True,
    )["h"]

    # standard parameters reproduce settings from chroma toolbox
    mut_ft_ba, mut_srs_ba = librosa.filters.semitone_filterbank(flayout="ba")
    mut_ft_sos, mut_srs_sos = librosa.filters.semitone_filterbank(flayout="sos")

    for cur_filter_id in range(len(mut_ft_ba)):
        # The reference bank starts 23 filters earlier than ours.
        cur_filter_gt = gt_fb[cur_filter_id + 23]
        cur_filter_mut = mut_ft_ba[cur_filter_id]
        # Convert the sos layout back to (b, a) for a like-for-like comparison.
        cur_filter_mut_sos = scipy.signal.sos2tf(mut_ft_sos[cur_filter_id])

        # Reference stores (a, b); librosa's 'ba' layout is (b, a).
        cur_a_gt = cur_filter_gt[0]
        cur_b_gt = cur_filter_gt[1]
        cur_a_mut = cur_filter_mut[1]
        cur_b_mut = cur_filter_mut[0]
        cur_a_mut_sos = cur_filter_mut_sos[1]
        cur_b_mut_sos = cur_filter_mut_sos[0]

        # we deviate from the chroma toolboxes for pitches 94 and 95
        # (filters 70 and 71) by processing them with a higher samplerate
        if (cur_filter_id != 70) and (cur_filter_id != 71):
            assert np.allclose(cur_a_gt, cur_a_mut)
            assert np.allclose(cur_b_gt, cur_b_mut, atol=1e-4)
            assert np.allclose(cur_a_gt, cur_a_mut_sos)
            assert np.allclose(cur_b_gt, cur_b_mut_sos, atol=1e-4)
@pytest.mark.parametrize("n", [9, 17])
@pytest.mark.parametrize("window", ["hann", "rect"])
@pytest.mark.parametrize("angle", [None, np.pi / 4, np.pi / 6])
@pytest.mark.parametrize("slope", [1, 2, 0.5])
@pytest.mark.parametrize("zero_mean", [False, True])
def test_diagonal_filter(n, window, angle, slope, zero_mean):
    """Check diagonal_filter: shape in the unrotated case, normalization
    (zero-mean or unit-sum), and that swapping the angle/slope transposes
    the kernel."""
    kernel = librosa.filters.diagonal_filter(
        window, n, slope=slope, angle=angle, zero_mean=zero_mean
    )

    # In the no-rotation case, check that the filter is shaped correctly
    if angle == np.pi / 4 and not zero_mean:
        win_unnorm = librosa.filters.get_window(window, n, fftbins=False)
        win_unnorm /= win_unnorm.sum()
        assert np.allclose(np.diag(kernel), win_unnorm)

    # First check: zero-mean
    if zero_mean:
        assert np.isclose(kernel.sum(), 0)
    else:
        # Non-zero-mean kernels are non-negative and normalized to unit sum.
        assert np.isclose(kernel.sum(), 1) and np.all(kernel >= 0)

    # Now check if the angle transposes correctly
    if angle is None:
        # If we're using the slope API, then the transposed kernel
        # will have slope 1/slope
        k2 = librosa.filters.diagonal_filter(
            window, n, slope=1.0 / slope, angle=angle, zero_mean=zero_mean
        )
    else:
        # If we're using the angle API, then the transposed kernel
        # will have angle pi/2 - angle
        k2 = librosa.filters.diagonal_filter(
            window, n, slope=slope, angle=np.pi / 2 - angle, zero_mean=zero_mean
        )

    assert np.allclose(k2, kernel.T)
| |
"""
Discretizers classes, to be used in lime_tabular
"""
from abc import ABCMeta, abstractmethod

import numpy as np
import scipy
import scipy.stats
import sklearn
import sklearn.tree
from sklearn.utils import check_random_state
class BaseDiscretizer():
    """
    Abstract class - Build a class that inherits from this class to implement
    a custom discretizer.
    Method bins() is to be redefined in the child class, as it is the actual
    custom part of the discretizer.
    """

    __metaclass__ = ABCMeta  # abstract class (py2-style declaration)

    def __init__(self, data, categorical_features, feature_names, labels=None, random_state=None,
                 data_stats=None):
        """Initializer

        Args:
            data: numpy 2d array
            categorical_features: list of indices (ints) corresponding to the
                categorical columns. These features will not be discretized.
                Everything else will be considered continuous, and will be
                discretized.
            feature_names: list of names (strings) corresponding to the columns
                in the training data.
            labels: optional 1d array of target labels, forwarded to bins()
                (some binning strategies are supervised).
            random_state: seed or RandomState used for undiscretize sampling.
            data_stats: must have 'means', 'stds', 'mins' and 'maxs', use this
                if you don't want these values to be computed from data
        """
        # Every column not declared categorical is discretized.
        self.to_discretize = ([x for x in range(data.shape[1])
                               if x not in categorical_features])
        self.data_stats = data_stats
        self.names = {}       # feature -> human-readable bin labels
        self.lambdas = {}     # feature -> function mapping values to bin ids
        self.means = {}       # feature -> per-bin mean
        self.stds = {}        # feature -> per-bin std (never exactly zero)
        self.mins = {}        # feature -> per-bin lower bounds
        self.maxs = {}        # feature -> per-bin upper bounds
        self.random_state = check_random_state(random_state)

        # To override when implementing a custom binning
        bins = self.bins(data, labels)
        # Deduplicate/sort boundaries per feature.
        bins = [np.unique(x) for x in bins]

        # Read the stats from data_stats if exists
        if data_stats:
            self.means = self.data_stats.get("means")
            self.stds = self.data_stats.get("stds")
            self.mins = self.data_stats.get("mins")
            self.maxs = self.data_stats.get("maxs")

        for feature, qts in zip(self.to_discretize, bins):
            n_bins = qts.shape[0]  # Actually number of borders (= #bins-1)
            boundaries = np.min(data[:, feature]), np.max(data[:, feature])
            name = feature_names[feature]

            # Human-readable bin descriptions: "x <= b0", "b0 < x <= b1", ..., "x > bk".
            self.names[feature] = ['%s <= %.2f' % (name, qts[0])]
            for i in range(n_bins - 1):
                self.names[feature].append('%.2f < %s <= %.2f' %
                                           (qts[i], name, qts[i + 1]))
            self.names[feature].append('%s > %.2f' % (name, qts[n_bins - 1]))

            # qts=qts default binds the current boundaries (avoids the
            # late-binding closure pitfall inside this loop).
            self.lambdas[feature] = lambda x, qts=qts: np.searchsorted(qts, x)
            discretized = self.lambdas[feature](data[:, feature])

            # If data stats are provided no need to compute the below set of details
            if data_stats:
                continue

            self.means[feature] = []
            self.stds[feature] = []
            for x in range(n_bins + 1):
                selection = data[discretized == x, feature]
                # Empty bins get mean 0 and (near-)zero std.
                mean = 0 if len(selection) == 0 else np.mean(selection)
                self.means[feature].append(mean)
                std = 0 if len(selection) == 0 else np.std(selection)
                # Epsilon keeps later divisions by std well-defined.
                std += 0.00000000001
                self.stds[feature].append(std)
            self.mins[feature] = [boundaries[0]] + qts.tolist()
            self.maxs[feature] = qts.tolist() + [boundaries[1]]

    @abstractmethod
    def bins(self, data, labels):
        """
        To be overridden
        Returns for each feature to discretize the boundaries
        that form each bin of the discretizer
        """
        raise NotImplementedError("Must override bins() method")

    def discretize(self, data):
        """Discretizes the data.
        Args:
            data: numpy 2d or 1d array
        Returns:
            numpy array of same dimension, discretized.
        """
        ret = data.copy()
        for feature in self.lambdas:
            if len(data.shape) == 1:
                # 1d input: a single sample; discretize one scalar per feature.
                ret[feature] = int(self.lambdas[feature](ret[feature]))
            else:
                ret[:, feature] = self.lambdas[feature](
                    ret[:, feature]).astype(int)
        return ret

    def get_undiscretize_values(self, feature, values):
        """Sample plausible continuous values for the given bin ids of *feature*.

        Draws from a truncated normal over each bin's [min, max] range using
        the bin's stored mean/std; degenerate bins (min == max in z-space)
        return the boundary value itself.
        """
        mins = np.array(self.mins[feature])[values]
        maxs = np.array(self.maxs[feature])[values]

        means = np.array(self.means[feature])[values]
        stds = np.array(self.stds[feature])[values]

        # truncnorm takes boundaries in standard-score (z) space.
        minz = (mins - means) / stds
        maxz = (maxs - means) / stds
        min_max_unequal = (minz != maxz)

        ret = minz
        ret[np.where(min_max_unequal)] = scipy.stats.truncnorm.rvs(
            minz[min_max_unequal],
            maxz[min_max_unequal],
            loc=means[min_max_unequal],
            scale=stds[min_max_unequal],
            random_state=self.random_state
        )
        return ret

    def undiscretize(self, data):
        # Inverse of discretize(): replace each bin id with a sampled
        # continuous value from that bin's distribution.
        ret = data.copy()
        for feature in self.means:
            if len(data.shape) == 1:
                ret[feature] = self.get_undiscretize_values(
                    feature, ret[feature].astype(int).reshape(-1, 1)
                )
            else:
                ret[:, feature] = self.get_undiscretize_values(
                    feature, ret[:, feature].astype(int)
                )
        return ret
class StatsDiscretizer(BaseDiscretizer):
    """
    Discretizer that reads precomputed statistics (including bin boundaries)
    from a data_stats mapping, used when discretize_continuous is true.
    """

    def __init__(self, data, categorical_features, feature_names, labels=None, random_state=None,
                 data_stats=None):
        BaseDiscretizer.__init__(self, data, categorical_features,
                                 feature_names, labels=labels,
                                 random_state=random_state,
                                 data_stats=data_stats)

    def bins(self, data, labels):
        """Collect bin boundaries per continuous feature from data_stats['bins']."""
        stat_bins = self.data_stats.get("bins")
        collected = []
        if stat_bins is not None:
            for feature in self.to_discretize:
                boundaries = stat_bins.get(feature)
                if boundaries is not None:
                    collected.append(np.array(boundaries))
        return collected
class QuartileDiscretizer(BaseDiscretizer):
    """Discretizer that bins each continuous feature at its quartiles."""

    def __init__(self, data, categorical_features, feature_names, labels=None, random_state=None):
        BaseDiscretizer.__init__(self, data, categorical_features,
                                 feature_names, labels=labels,
                                 random_state=random_state)

    def bins(self, data, labels):
        """Return the 25th/50th/75th percentiles of each feature to discretize."""
        return [np.array(np.percentile(data[:, feature], [25, 50, 75]))
                for feature in self.to_discretize]
class DecileDiscretizer(BaseDiscretizer):
    """Discretizer that bins each continuous feature at its deciles."""

    def __init__(self, data, categorical_features, feature_names, labels=None, random_state=None):
        BaseDiscretizer.__init__(self, data, categorical_features,
                                 feature_names, labels=labels,
                                 random_state=random_state)

    def bins(self, data, labels):
        """Return the 10th..90th percentiles (deciles) of each feature to discretize."""
        deciles = [10, 20, 30, 40, 50, 60, 70, 80, 90]
        return [np.array(np.percentile(data[:, feature], deciles))
                for feature in self.to_discretize]
class EntropyDiscretizer(BaseDiscretizer):
    """Supervised discretizer: bin boundaries are the split thresholds of a
    shallow entropy decision tree (max_depth=3, so at most 8 bins) fit on
    each feature against the supplied labels."""

    def __init__(self, data, categorical_features, feature_names, labels=None, random_state=None):
        if labels is None:
            # Fix: the original message was built with a backslash line
            # continuation and contained embedded indentation whitespace.
            raise ValueError('Labels must be not None when using EntropyDiscretizer')
        BaseDiscretizer.__init__(self, data, categorical_features,
                                 feature_names, labels=labels,
                                 random_state=random_state)

    def bins(self, data, labels):
        """Return, per continuous feature, the sorted split thresholds of an
        entropy-criterion tree; falls back to the feature median when the
        tree produces no splits."""
        bins = []
        for feature in self.to_discretize:
            # Entropy splitting / at most 8 bins so max_depth=3
            dt = sklearn.tree.DecisionTreeClassifier(criterion='entropy',
                                                     max_depth=3,
                                                     random_state=self.random_state)
            x = np.reshape(data[:, feature], (-1, 1))
            dt.fit(x, labels)
            # Internal nodes (children_left > -1) carry the split thresholds.
            qts = dt.tree_.threshold[np.where(dt.tree_.children_left > -1)]
            if qts.shape[0] == 0:
                # Degenerate case: the tree never split; use the median.
                qts = np.array([np.median(data[:, feature])])
            else:
                qts = np.sort(qts)
            bins.append(qts)
        return bins
| |
"""Logging
"""
import sys
import os
import logging
from pip import backwardcompat
import colorama, pkg_resources
def _color_wrap(*colors):
    """Return a callable that prefixes its input with the given ANSI color
    codes and appends a style reset."""
    prefix = "".join(colors)

    def colorize(text):
        return prefix + text + colorama.Style.RESET_ALL

    return colorize
def should_color(consumer, environ, std=(sys.stdout, sys.stderr)):
    """Decide whether output to *consumer* should be colorized.

    Colorize only stdout/stderr, and only when the stream is a tty or the
    TERM environment variable is "ANSI".
    """
    # Unwrap colorama's Windows adapter to inspect the underlying stream.
    real_consumer = (consumer if not isinstance(consumer, colorama.AnsiToWin32)
                     else consumer.wrapped)

    # If consumer isn't stdout or stderr we shouldn't colorize it
    if real_consumer not in std:
        return False

    # If consumer is a tty we should color it
    if hasattr(real_consumer, "isatty") and real_consumer.isatty():
        return True

    # If we have an ANSI term we should color it
    if environ.get("TERM") == "ANSI":
        return True

    # If anything else we should not color it
    return False
def should_warn(current_version, removal_version):
    """Return True if *current_version* is old enough to merit only a warning
    (rather than an error) about a feature removed in *removal_version*.

    Only the first two version components (major.minor) are significant;
    the warning threshold is one minor version before removal.
    """
    # Keep only major.minor for both versions.
    current = ".".join(current_version.split(".")[:2])
    removal = ".".join(removal_version.split(".")[:2])

    # Decrement the removal minor version by one to get the warn threshold.
    major, minor = removal.split(".")
    warn_version = "%s.%s" % (major, int(minor) - 1)

    # Warn while we are still strictly below the threshold version.
    return (pkg_resources.parse_version(current)
            < pkg_resources.parse_version(warn_version))
class Logger(object):
    """
    Logging object for use in command-line script. Allows ranges of
    levels, to avoid some redundancy of displayed information.
    """

    VERBOSE_DEBUG = logging.DEBUG - 1
    DEBUG = logging.DEBUG
    INFO = logging.INFO
    # Midpoint between INFO and WARN; floor division keeps the level an
    # integer, consistent with the stdlib logging levels (was a float `/`).
    NOTIFY = (logging.INFO + logging.WARN) // 2
    WARN = WARNING = logging.WARN
    ERROR = logging.ERROR
    FATAL = logging.FATAL

    LEVELS = [VERBOSE_DEBUG, DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]

    # Colorizer applied to the rendered message, keyed by level.
    COLORS = {
        WARN: _color_wrap(colorama.Fore.YELLOW),
        ERROR: _color_wrap(colorama.Fore.RED),
        FATAL: _color_wrap(colorama.Fore.RED),
    }

    def __init__(self):
        # (level, consumer) pairs; a consumer is a writable stream or callable.
        self.consumers = []
        self.indent = 0
        self.explicit_levels = False
        self.in_progress = None
        self.in_progress_hanging = False
        # Fix: previously only assigned in start_progress(); initialize here
        # so show_progress() never hits an AttributeError.
        self.last_message = None

    def add_consumers(self, *consumers):
        """Register (level, consumer) pairs, wrapping streams on Windows so
        ANSI escapes are translated by colorama."""
        if sys.platform.startswith("win"):
            for level, consumer in consumers:
                if hasattr(consumer, "write"):
                    self.consumers.append(
                        (level, colorama.AnsiToWin32(consumer)),
                    )
                else:
                    self.consumers.append((level, consumer))
        else:
            self.consumers.extend(consumers)

    def debug(self, msg, *args, **kw):
        self.log(self.DEBUG, msg, *args, **kw)

    def info(self, msg, *args, **kw):
        self.log(self.INFO, msg, *args, **kw)

    def notify(self, msg, *args, **kw):
        self.log(self.NOTIFY, msg, *args, **kw)

    def warn(self, msg, *args, **kw):
        self.log(self.WARN, msg, *args, **kw)

    def error(self, msg, *args, **kw):
        self.log(self.ERROR, msg, *args, **kw)

    def fatal(self, msg, *args, **kw):
        self.log(self.FATAL, msg, *args, **kw)

    def deprecated(self, removal_version, msg, *args, **kwargs):
        """
        Logs deprecation message which is log level WARN if the
        ``removal_version`` is > 1 minor release away and log level ERROR
        otherwise.

        removal_version should be the version that the deprecated feature is
        expected to be removed in, so something that will not exist in
        version 1.7, but will in 1.6 would have a removal_version of 1.7.
        """
        from pip import __version__

        if should_warn(__version__, removal_version):
            self.warn(msg, *args, **kwargs)
        else:
            self.error(msg, *args, **kwargs)

    def log(self, level, msg, *args, **kw):
        """Render *msg* with %-style positional or keyword args and dispatch
        it to every consumer whose level matches."""
        if args:
            if kw:
                raise TypeError(
                    "You may give positional or keyword arguments, not both")
        args = args or kw

        # render
        if args:
            rendered = msg % args
        else:
            rendered = msg
        rendered = ' ' * self.indent + rendered
        if self.explicit_levels:
            ## FIXME: should this be a name, not a level number?
            rendered = '%02i %s' % (level, rendered)
        for consumer_level, consumer in self.consumers:
            if self.level_matches(level, consumer_level):
                if (self.in_progress_hanging
                        and consumer in (sys.stdout, sys.stderr)):
                    # Terminate a hanging progress line before logging.
                    self.in_progress_hanging = False
                    sys.stdout.write('\n')
                    sys.stdout.flush()
                if hasattr(consumer, 'write'):
                    write_content = rendered + '\n'
                    if should_color(consumer, os.environ):
                        # We are printing to stdout or stderr and it supports
                        # colors so render our text colored
                        colorizer = self.COLORS.get(level, lambda x: x)
                        write_content = colorizer(write_content)
                    consumer.write(write_content)
                    if hasattr(consumer, 'flush'):
                        consumer.flush()
                else:
                    # Consumer is a callable (e.g. a function or list.append).
                    consumer(rendered)

    def _show_progress(self):
        """Should we display download progress?"""
        return (self.stdout_level_matches(self.NOTIFY) and sys.stdout.isatty())

    def start_progress(self, msg):
        """Begin a progress scope; subsequent show_progress() calls extend it."""
        assert not self.in_progress, (
            "Tried to start_progress(%r) while in_progress %r"
            % (msg, self.in_progress))
        if self._show_progress():
            sys.stdout.write(' ' * self.indent + msg)
            sys.stdout.flush()
            self.in_progress_hanging = True
        else:
            self.in_progress_hanging = False
        self.in_progress = msg
        self.last_message = None

    def end_progress(self, msg='done.'):
        """Close the current progress scope, writing *msg* as its tail."""
        assert self.in_progress, (
            "Tried to end_progress without start_progress")
        if self._show_progress():
            if not self.in_progress_hanging:
                # Some message has been printed out since start_progress
                sys.stdout.write('...' + self.in_progress + msg + '\n')
                sys.stdout.flush()
            else:
                # These erase any messages shown with show_progress
                # (besides .'s).  Fix: call on self instead of the module
                # global `logger` singleton.
                self.show_progress('')
                self.show_progress('')
                sys.stdout.write(msg + '\n')
                sys.stdout.flush()
        self.in_progress = None
        self.in_progress_hanging = False

    def show_progress(self, message=None):
        """If we are in a progress scope, and no log messages have been
        shown, write out another '.'"""
        if self.in_progress_hanging:
            if message is None:
                sys.stdout.write('.')
                sys.stdout.flush()
            else:
                if self.last_message:
                    # Pad so a shorter message fully overwrites the last one.
                    padding = ' ' * max(0, len(self.last_message) - len(message))
                else:
                    padding = ''
                sys.stdout.write('\r%s%s%s%s' %
                                 (' ' * self.indent, self.in_progress, message, padding))
                sys.stdout.flush()
                self.last_message = message

    def stdout_level_matches(self, level):
        """Returns true if a message at this level will go to stdout"""
        return self.level_matches(level, self._stdout_level())

    def _stdout_level(self):
        """Returns the level that stdout runs at"""
        for level, consumer in self.consumers:
            if consumer is sys.stdout:
                return level
        return self.FATAL

    def level_matches(self, level, consumer_level):
        """
        >>> l = Logger()
        >>> l.level_matches(3, 4)
        False
        >>> l.level_matches(3, 2)
        True
        >>> l.level_matches(slice(None, 3), 3)
        False
        >>> l.level_matches(slice(None, 3), 2)
        True
        >>> l.level_matches(slice(1, 3), 1)
        True
        >>> l.level_matches(slice(2, 3), 1)
        False
        """
        if isinstance(level, slice):
            start, stop = level.start, level.stop
            if start is not None and start > consumer_level:
                return False
            # Fix: this condition used `or`, which rejected every slice with
            # stop=None and contradicted the doctest above
            # (slice(None, 3) vs 2 must match).
            if stop is not None and stop <= consumer_level:
                return False
            return True
        else:
            return level >= consumer_level

    @classmethod
    def level_for_integer(cls, level):
        """Map a small integer (e.g. verbosity count) to a LEVELS entry,
        clamping out-of-range values to the ends."""
        levels = cls.LEVELS
        if level < 0:
            return levels[0]
        if level >= len(levels):
            return levels[-1]
        return levels[level]

    def move_stdout_to_stderr(self):
        """Redirect every stdout consumer to stderr (e.g. when stdout must
        carry machine-readable output)."""
        to_remove = []
        to_add = []
        for consumer_level, consumer in self.consumers:
            if consumer == sys.stdout:
                to_remove.append((consumer_level, consumer))
                to_add.append((consumer_level, sys.stderr))
        for item in to_remove:
            self.consumers.remove(item)
        self.consumers.extend(to_add)
logger = Logger()
| |
import pytest
from tests.utils import load_fixtures
@load_fixtures("tests/fixtures/test_fixture.yaml")
@pytest.mark.parametrize(
    "login, password",
    [
        ("tom", "testpassword"),
        ("kate", "testpassword"),
        ("paul", "testpassword"),
        (None, None),
    ],
)
@pytest.mark.django_db
def test_node_superuser_required_permission(client, test_kwargs, login, password):
    """Only the fixture's superuser account ("tom") can resolve a
    superuser-protected node; all other logins (and anonymous) get None."""
    client.login(username=login, password=password)
    query = """
    query{
        superuserPet(id: "U3VwZXJVc2VyUmVxdWlyZWRQZXROb2RlOjE="){
            name,
            race,
        }
    }
    """
    response = client.post(data=query, **test_kwargs)
    result = response.json()
    if login == "tom":
        assert result["data"] == {"superuserPet": {"name": "Snakey", "race": "snake"}}
    else:
        assert result["data"] == {"superuserPet": None}
@load_fixtures("tests/fixtures/test_fixture.yaml")
@pytest.mark.parametrize(
    "login, password",
    [
        ("tom", "testpassword"),
        ("kate", "testpassword"),
        ("paul", "testpassword"),
        (None, None),
    ],
)
@pytest.mark.django_db
def test_filter_superuser_required_permission(client, test_kwargs, login, password):
    """Only the fixture's superuser ("tom") sees edges in a superuser-protected
    connection; everyone else gets an empty edge list."""
    client.login(username=login, password=password)
    query = """
    query{
        allSuperuserPets{
            edges{
                node{
                    id,
                    name,
                    race,
                }
            }
        }
    }
    """
    response = client.post(data=query, **test_kwargs)
    result = response.json()
    if login == "tom":
        assert result["data"] == {
            "allSuperuserPets": {
                "edges": [
                    {
                        "node": {
                            "id": "U3VwZXJVc2VyUmVxdWlyZWRQZXROb2RlOjE=",
                            "name": "Snakey",
                            "race": "snake",
                        }
                    },
                    {
                        "node": {
                            "id": "U3VwZXJVc2VyUmVxdWlyZWRQZXROb2RlOjI=",
                            "name": "Pawn",
                            "race": "cat",
                        }
                    },
                    {
                        "node": {
                            "id": "U3VwZXJVc2VyUmVxdWlyZWRQZXROb2RlOjM=",
                            "name": "Rex",
                            "race": "dog",
                        }
                    },
                ]
            }
        }
    else:
        assert result["data"] == {"allSuperuserPets": {"edges": []}}
@load_fixtures("tests/fixtures/test_fixture.yaml")
@pytest.mark.parametrize(
    "login, password",
    [
        ("tom", "testpassword"),
        ("kate", "testpassword"),
        ("paul", "testpassword"),
        (None, None),
    ],
)
@pytest.mark.django_db
def test_node_staff_required_permission(client, test_kwargs, login, password):
    """Only the fixture's staff account ("tom") can resolve a staff-protected
    node; all other logins (and anonymous) get None."""
    client.login(username=login, password=password)
    query = """
    query{
        staffPet(id: "U3RhZmZSZXF1aXJlZFBldE5vZGU6MQ=="){
            name,
            race,
        }
    }
    """
    response = client.post(data=query, **test_kwargs)
    result = response.json()
    if login == "tom":
        assert result["data"] == {"staffPet": {"name": "Snakey", "race": "snake"}}
    else:
        assert result["data"] == {"staffPet": None}
@load_fixtures("tests/fixtures/test_fixture.yaml")
@pytest.mark.parametrize(
    "login, password",
    [
        ("tom", "testpassword"),
        ("kate", "testpassword"),
        ("paul", "testpassword"),
        (None, None),
    ],
)
@pytest.mark.django_db
def test_filter_staff_required_permission(client, test_kwargs, login, password):
    """Only the fixture's staff account ("tom") sees edges in a staff-protected
    connection; everyone else gets an empty edge list."""
    client.login(username=login, password=password)
    query = """
    query{
        allStaffPets{
            edges{
                node{
                    id,
                    name,
                    race,
                }
            }
        }
    }
    """
    response = client.post(data=query, **test_kwargs)
    result = response.json()
    if login == "tom":
        assert result["data"] == {
            "allStaffPets": {
                "edges": [
                    {
                        "node": {
                            "id": "U3RhZmZSZXF1aXJlZFBldE5vZGU6MQ==",
                            "name": "Snakey",
                            "race": "snake",
                        }
                    },
                    {
                        "node": {
                            "id": "U3RhZmZSZXF1aXJlZFBldE5vZGU6Mg==",
                            "name": "Pawn",
                            "race": "cat",
                        }
                    },
                    {
                        "node": {
                            "id": "U3RhZmZSZXF1aXJlZFBldE5vZGU6Mw==",
                            "name": "Rex",
                            "race": "dog",
                        }
                    },
                ]
            }
        }
    else:
        assert result["data"] == {"allStaffPets": {"edges": []}}
@load_fixtures("tests/fixtures/test_fixture.yaml")
@pytest.mark.parametrize(
    "login, password",
    [
        ("tom", "testpassword"),
        ("kate", "testpassword"),
        ("paul", "testpassword"),
        (None, None),
    ],
)
@pytest.mark.django_db
def test_node_allow_authenticated_permission(client, test_kwargs, login, password):
    """Any authenticated user can resolve an authenticated-only node;
    anonymous requests get None."""
    client.login(username=login, password=password)
    query = """
    query{
        userPet(id: "QWxsb3dBdXRoZW50aWNhdGVkUGV0Tm9kZTox"){
            name,
            race,
        }
    }
    """
    response = client.post(data=query, **test_kwargs)
    result = response.json()
    if login in ("tom", "kate", "paul"):
        assert result["data"] == {"userPet": {"name": "Snakey", "race": "snake"}}
    else:
        assert result["data"] == {"userPet": None}
@load_fixtures("tests/fixtures/test_fixture.yaml")
@pytest.mark.parametrize(
    "login, password",
    [
        ("tom", "testpassword"),
        ("kate", "testpassword"),
        ("paul", "testpassword"),
        (None, None),
    ],
)
@pytest.mark.django_db
def test_filter_allow_authenticated_permission(client, test_kwargs, login, password):
    """Any authenticated user sees all edges in an authenticated-only
    connection; anonymous requests get an empty edge list."""
    client.login(username=login, password=password)
    query = """
    query{
        allUserPets{
            edges{
                node{
                    id,
                    name,
                    race
                }
            }
        }
    }
    """
    response = client.post(data=query, **test_kwargs)
    result = response.json()
    if login in ("tom", "kate", "paul"):
        assert result["data"] == {
            "allUserPets": {
                "edges": [
                    {
                        "node": {
                            "id": "QWxsb3dBdXRoZW50aWNhdGVkUGV0Tm9kZTox",
                            "name": "Snakey",
                            "race": "snake",
                        }
                    },
                    {
                        "node": {
                            "id": "QWxsb3dBdXRoZW50aWNhdGVkUGV0Tm9kZToy",
                            "name": "Pawn",
                            "race": "cat",
                        }
                    },
                    {
                        "node": {
                            "id": "QWxsb3dBdXRoZW50aWNhdGVkUGV0Tm9kZToz",
                            "name": "Rex",
                            "race": "dog",
                        }
                    },
                ]
            }
        }
    else:
        assert result["data"] == {"allUserPets": {"edges": []}}
@load_fixtures("tests/fixtures/test_fixture.yaml")
@pytest.mark.parametrize(
    "login, password",
    [
        ("tom", "testpassword"),
        ("kate", "testpassword"),
        ("paul", "testpassword"),
        (None, None),
    ],
)
@pytest.mark.django_db
def test_node_allow_any_permission(client, test_kwargs, login, password):
    """An allow-any node resolves for every login, including anonymous."""
    client.login(username=login, password=password)
    query = """
    query{
        pet(id: "QWxsb3dBbnlQZXROb2RlOjE="){
            name,
            race,
        }
    }
    """
    response = client.post(data=query, **test_kwargs)
    result = response.json()
    # Same expected payload regardless of who (if anyone) is logged in.
    assert result["data"] == {"pet": {"name": "Snakey", "race": "snake"}}
@load_fixtures("tests/fixtures/test_fixture.yaml")
@pytest.mark.parametrize(
    "login, password",
    [
        ("tom", "testpassword"),
        ("kate", "testpassword"),
        ("paul", "testpassword"),
        (None, None),
    ],
)
@pytest.mark.django_db
def test_filter_allow_any_permission(client, test_kwargs, login, password):
    """An allow-any connection returns all edges for every login, including anonymous."""
    client.login(username=login, password=password)
    query = """
    query{
        allPets{
            edges{
                node{
                    id,
                    name,
                    race
                }
            }
        }
    }
    """
    response = client.post(data=query, **test_kwargs)
    result = response.json()
    # Same expected payload regardless of who (if anyone) is logged in.
    assert result["data"] == {
        "allPets": {
            "edges": [
                {
                    "node": {
                        "id": "QWxsb3dBbnlQZXROb2RlOjE=",
                        "name": "Snakey",
                        "race": "snake",
                    }
                },
                {
                    "node": {
                        "id": "QWxsb3dBbnlQZXROb2RlOjI=",
                        "name": "Pawn",
                        "race": "cat",
                    }
                },
                {
                    "node": {
                        "id": "QWxsb3dBbnlQZXROb2RlOjM=",
                        "name": "Rex",
                        "race": "dog",
                    }
                },
            ]
        }
    }
@load_fixtures("tests/fixtures/test_fixture.yaml")
@pytest.mark.parametrize(
    "login, password",
    [
        ("tom", "testpassword"),
        ("kate", "testpassword"),
        ("paul", "testpassword"),
        (None, None),
    ],
)
@pytest.mark.django_db
def test_node_non_existent_object(client, test_kwargs, login, password):
    """Querying a node id that doesn't exist in the fixture resolves to None
    for every login, rather than erroring."""
    client.login(username=login, password=password)
    query = """
    query{
        pet(id: "QWxsb3dBbnlQZXROb2RlOjE1"){
            name,
            race,
        }
    }
    """
    response = client.post(data=query, **test_kwargs)
    result = response.json()
    assert result["data"] == {"pet": None}
@load_fixtures("tests/fixtures/test_fixture.yaml")
@pytest.mark.parametrize(
    "query_name, id, expected",
    [
        # OR with a denying permission still allows.
        (
            "allowOrNotAllowPet",
            "QWxsb3dPck5vdEFsbG93UGV0Tm9kZTox",
            {"allowOrNotAllowPet": {"name": "Snakey"}},
        ),
        # AND with a denying permission denies.
        (
            "allowAndNotAllowPet",
            "QWxsb3dBbmROb3RBbGxvd1BldE5vZGU6MQ==",
            {"allowAndNotAllowPet": None},
        ),
        # AND with a negated denying permission allows.
        (
            "allowAndNotNotAllowPet",
            "QWxsb3dBbmROb3ROb3RBbGxvd1BldE5vZGU6MQ==",
            {"allowAndNotNotAllowPet": {"name": "Snakey"}},
        ),
        # Double negation of a denying permission allows.
        (
            "notNotAllowPet",
            "Tm90Tm90QWxsb3dQZXROb2RlOjE=",
            {"notNotAllowPet": {"name": "Snakey"}},
        ),
    ],
)
@pytest.mark.django_db
def test_permission_operator_composing(client, test_kwargs, query_name, id, expected):
    """Permissions composed with OR / AND / NOT operators resolve as expected."""
    client.login(username="tom", password="testpassword")
    query = """
    query{
        %s(id: "%s"){
            name,
        }
    }
    """ % (
        query_name,
        id,
    )
    response = client.post(data=query, **test_kwargs)
    result = response.json()
    assert result["data"] == expected
| |
# Copyright (C) 2003-2007 John Rochester <john@jrochester.org>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
SSH Agent interface
"""
import os
import socket
import struct
import sys
import threading
import time
import tempfile
import stat
from select import select
from paramiko.common import asbytes, io_sleep
from paramiko.py3compat import byte_chr
from paramiko.ssh_exception import SSHException, AuthenticationException
from paramiko.message import Message
from paramiko.pkey import PKey
from paramiko.util import retry_on_signal
cSSH2_AGENTC_REQUEST_IDENTITIES = byte_chr(11)
SSH2_AGENT_IDENTITIES_ANSWER = 12
cSSH2_AGENTC_SIGN_REQUEST = byte_chr(13)
SSH2_AGENT_SIGN_RESPONSE = 14
class AgentSSH(object):
    """Shared plumbing for speaking the ssh-agent wire protocol over a
    connection object that supports ``send``/``recv``."""

    def __init__(self):
        # No agent connection yet; the key cache stays empty until _connect().
        self._conn = None
        self._keys = ()

    def get_keys(self):
        """
        Return the list of keys available through the SSH agent, if any. If
        no SSH agent was running (or it couldn't be contacted), an empty list
        will be returned.
        :return:
            a tuple of `.AgentKey` objects representing keys available on the
            SSH agent
        """
        return self._keys

    def _connect(self, conn):
        # Ask the agent for its identities and cache them as AgentKey objects.
        self._conn = conn
        ptype, result = self._send_message(cSSH2_AGENTC_REQUEST_IDENTITIES)
        if ptype != SSH2_AGENT_IDENTITIES_ANSWER:
            raise SSHException("could not get keys from ssh-agent")
        collected = []
        for _ in range(result.get_int()):
            collected.append(AgentKey(self, result.get_binary()))
            # Skip the per-key comment string; only the blob matters.
            result.get_string()
        self._keys = tuple(collected)

    def _close(self):
        # Drop the connection (if any) and forget the cached keys.
        if self._conn is not None:
            self._conn.close()
        self._conn = None
        self._keys = ()

    def _send_message(self, msg):
        # Frame the payload with a 4-byte big-endian length prefix, then
        # read back exactly one framed response from the agent.
        payload = asbytes(msg)
        self._conn.send(struct.pack(">I", len(payload)) + payload)
        length_prefix = self._read_all(4)
        reply = Message(self._read_all(struct.unpack(">I", length_prefix)[0]))
        return ord(reply.get_byte()), reply

    def _read_all(self, wanted):
        # Loop on recv() until exactly `wanted` bytes have arrived; an
        # empty read means the agent went away mid-message.
        buf = self._conn.recv(wanted)
        while len(buf) < wanted:
            if len(buf) == 0:
                raise SSHException("lost ssh-agent")
            chunk = self._conn.recv(wanted - len(buf))
            if len(chunk) == 0:
                raise SSHException("lost ssh-agent")
            buf += chunk
        return buf
class AgentProxyThread(threading.Thread):
    """
    Class in charge of communication between two channels.

    Subclasses provide ``get_connection()`` returning a (socket-like, addr)
    pair; this thread then shuttles bytes between that endpoint and the
    agent's connection (``self._agent._conn``) until either side closes.
    """
    def __init__(self, agent):
        # agent: object exposing connect() and a _conn attribute (see
        # AgentClientProxy / AgentServerProxy elsewhere in this file).
        threading.Thread.__init__(self, target=self.run)
        self._agent = agent
        self._exit = False
    def run(self):
        # Establish both endpoints, then relay until _exit is set.
        try:
            (r, addr) = self.get_connection()
            # Found that r should be either
            # a socket from the socket library or None
            self.__inr = r
            # The address should be an IP address as a string? or None
            self.__addr = addr
            self._agent.connect()
            if not isinstance(self._agent, int) and (
                self._agent._conn is None
                or not hasattr(self._agent._conn, "fileno")
            ):
                raise AuthenticationException("Unable to connect to SSH agent")
            self._communicate()
        except:
            # XXX Not sure what to do here ... raise or pass ?
            # NOTE(review): bare except re-raises everything, including
            # KeyboardInterrupt/SystemExit; consider narrowing to Exception.
            raise
    def _communicate(self):
        # Relay loop: put our endpoint in non-blocking mode, then poll both
        # file descriptors with a 0.5s select timeout, copying up to 512
        # bytes per wakeup in each direction. An empty recv() on either side
        # means that side closed, so shut the whole relay down.
        import fcntl
        oldflags = fcntl.fcntl(self.__inr, fcntl.F_GETFL)
        fcntl.fcntl(self.__inr, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
        while not self._exit:
            events = select([self._agent._conn, self.__inr], [], [], 0.5)
            for fd in events[0]:
                if self._agent._conn == fd:
                    data = self._agent._conn.recv(512)
                    if len(data) != 0:
                        self.__inr.send(data)
                    else:
                        self._close()
                        break
                elif self.__inr == fd:
                    data = self.__inr.recv(512)
                    if len(data) != 0:
                        self._agent._conn.send(data)
                    else:
                        self._close()
                        break
            time.sleep(io_sleep)
    def _close(self):
        # Stop the relay loop and close both endpoints.
        self._exit = True
        self.__inr.close()
        self._agent._conn.close()
class AgentLocalProxy(AgentProxyThread):
    """
    Class to be used when wanting to ask a local SSH Agent being
    asked from a remote fake agent (so use a unix socket for ex.)
    """

    def __init__(self, agent):
        AgentProxyThread.__init__(self, agent)

    def get_connection(self):
        """
        Return a pair of socket object and string address.
        May block!
        """
        # Listen on the agent's private unix-socket path and hand back the
        # first accepted connection. (The original wrapped this in a
        # ``try: ... except: raise`` no-op, which has been removed;
        # exceptions propagate unchanged.)
        conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        conn.bind(self._agent._get_filename())
        conn.listen(1)
        (r, addr) = conn.accept()
        return r, addr
class AgentRemoteProxy(AgentProxyThread):
    """
    Class to be used when wanting to ask a remote SSH Agent.

    The "connection" here is simply the already-open channel that was
    handed to the constructor; no address is associated with it.
    """

    def __init__(self, agent, chan):
        AgentProxyThread.__init__(self, agent)
        self.__chan = chan

    def get_connection(self):
        # Relay endpoint is the channel itself; there is no peer address.
        return self.__chan, None
class AgentClientProxy(object):
    """
    Class proxying request as a client:
    #. client ask for a request_forward_agent()
    #. server creates a proxy and a fake SSH Agent
    #. server ask for establishing a connection when needed,
    calling the forward_agent_handler at client side.
    #. the forward_agent_handler launch a thread for connecting
    the remote fake agent and the local agent
    #. Communication occurs ...
    """

    def __init__(self, chanRemote):
        self._conn = None
        self.__chanR = chanRemote
        # Spawn the relay thread immediately; it will call connect() back.
        self.thread = AgentRemoteProxy(self, chanRemote)
        self.thread.start()

    def __del__(self):
        self.close()

    def connect(self):
        """
        Method automatically called by ``AgentProxyThread.run``.

        Attempts, in order: the unix socket named by ``SSH_AUTH_SOCK``
        (posix), then Pageant (win32). On failure it simply returns,
        leaving ``self._conn`` as ``None``.
        """
        if ("SSH_AUTH_SOCK" in os.environ) and (sys.platform != "win32"):
            conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                retry_on_signal(
                    lambda: conn.connect(os.environ["SSH_AUTH_SOCK"])
                )
            # Fix: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt. Only ordinary errors mean
            # the agent is gone.
            except Exception:
                # probably a dangling env var: the ssh agent is gone
                return
        elif sys.platform == "win32":
            import paramiko.win_pageant as win_pageant
            if win_pageant.can_talk_to_agent():
                conn = win_pageant.PageantConnection()
            else:
                return
        else:
            # no agent support
            return
        self._conn = conn

    def close(self):
        """
        Close the current connection and terminate the agent
        Should be called manually
        """
        if hasattr(self, "thread"):
            self.thread._exit = True
            self.thread.join(1000)
        if self._conn is not None:
            self._conn.close()
class AgentServerProxy(AgentSSH):
    """
    :param .Transport t: Transport used for SSH Agent communication forwarding
    :raises: `.SSHException` -- mostly if we lost the agent
    """

    def __init__(self, t):
        AgentSSH.__init__(self)
        self.__t = t
        # Private scratch directory (owner-only perms) holding the unix
        # socket path that the local proxy thread will listen on.
        self._dir = tempfile.mkdtemp("sshproxy")
        os.chmod(self._dir, stat.S_IRWXU)
        self._file = self._dir + "/sshproxy.ssh"
        self.thread = AgentLocalProxy(self)
        self.thread.start()

    def __del__(self):
        self.close()

    def connect(self):
        # Open a forwarded-agent channel over the transport and hand it to
        # the AgentSSH machinery.
        channel = self.__t.open_forward_agent_channel()
        if channel is None:
            raise SSHException("lost ssh-agent")
        channel.set_name("auth-agent")
        self._connect(channel)

    def close(self):
        """
        Terminate the agent, clean the files, close connections
        Should be called manually
        """
        os.remove(self._file)
        os.rmdir(self._dir)
        self.thread._exit = True
        self.thread.join(1000)
        self._close()

    def get_env(self):
        """
        Helper for the environment under unix.
        :return:
            a dict containing the ``SSH_AUTH_SOCK`` environment variable
        """
        return {"SSH_AUTH_SOCK": self._get_filename()}

    def _get_filename(self):
        # Path of the proxy's unix socket.
        return self._file
class AgentRequestHandler(object):
    """
    Primary/default implementation of SSH agent forwarding functionality.
    Simply instantiate this class, handing it a live command-executing session
    object, and it will handle forwarding any local SSH agent processes it
    finds.
    For example::
        # Connect
        client = SSHClient()
        client.connect(host, port, username)
        # Obtain session
        session = client.get_transport().open_session()
        # Forward local agent
        AgentRequestHandler(session)
        # Commands executed after this point will see the forwarded agent on
        # the remote end.
        session.exec_command("git clone https://my.git.repository/")
    """

    def __init__(self, chanClient):
        self._conn = None
        self.__chanC = chanClient
        # Register ourselves so the server can open agent channels back.
        chanClient.request_forward_agent(self._forward_agent_handler)
        self.__clientProxys = []

    def _forward_agent_handler(self, chanRemote):
        # One proxy per forwarded connection the server opens.
        self.__clientProxys.append(AgentClientProxy(chanRemote))

    def __del__(self):
        self.close()

    def close(self):
        # Shut down every proxy we spawned.
        for proxy in self.__clientProxys:
            proxy.close()
class Agent(AgentSSH):
    """
    Client interface for using private keys from an SSH agent running on the
    local machine. If an SSH agent is running, this class can be used to
    connect to it and retrieve `.PKey` objects which can be used when
    attempting to authenticate to remote SSH servers.
    Upon initialization, a session with the local machine's SSH agent is
    opened, if one is running. If no agent is running, initialization will
    succeed, but `get_keys` will return an empty tuple.
    :raises: `.SSHException` --
        if an SSH agent is found, but speaks an incompatible protocol
    """

    def __init__(self):
        AgentSSH.__init__(self)
        if ("SSH_AUTH_SOCK" in os.environ) and (sys.platform != "win32"):
            conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                conn.connect(os.environ["SSH_AUTH_SOCK"])
            # Fix: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt; only ordinary errors should be
            # treated as "no agent available".
            except Exception:
                # probably a dangling env var: the ssh agent is gone
                return
        elif sys.platform == "win32":
            from . import win_pageant
            if win_pageant.can_talk_to_agent():
                conn = win_pageant.PageantConnection()
            else:
                return
        else:
            # no agent support
            return
        self._connect(conn)

    def close(self):
        """
        Close the SSH agent connection.
        """
        self._close()
class AgentKey(PKey):
    """
    Private key held in a local SSH agent. This type of key can be used for
    authenticating to a remote server (signing). Most other key operations
    work as expected.
    """

    def __init__(self, agent, blob):
        self.agent = agent
        self.blob = blob
        self.public_blob = None
        # The key type name is the first string field of the blob.
        self.name = Message(blob).get_text()

    def asbytes(self):
        # The agent only ever hands us the public blob.
        return self.blob

    def __str__(self):
        return self.asbytes()

    def get_name(self):
        return self.name

    def sign_ssh_data(self, data):
        # Ask the owning agent to produce the signature on our behalf.
        request = Message()
        request.add_byte(cSSH2_AGENTC_SIGN_REQUEST)
        request.add_string(self.blob)
        request.add_string(data)
        request.add_int(0)
        ptype, reply = self.agent._send_message(request)
        if ptype != SSH2_AGENT_SIGN_RESPONSE:
            raise SSHException("key cannot be used for signing")
        return reply.get_binary()
| |
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The Manager runs a series of tests (TestType interface) against a set
of test files. If a test file fails a TestType, it returns a list of TestFailure
objects to the Manager. The Manager then aggregates the TestFailures to
create a final report.
"""
import datetime
import json
import logging
import random
import sys
import time
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.tool import grammar
_log = logging.getLogger(__name__)
# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
TestExpectations = test_expectations.TestExpectations
class Manager(object):
    """A class for managing running a series of tests on a series of layout
    test files."""
    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
            port: an object implementing port-specific functionality
            options: a dictionary of command line options
            printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None
        # Path fragments used to classify tests by directory.
        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.VIRTUAL_HTTP_SUBDIR = port.TEST_PATH_SEPARATOR.join([
            'virtual', 'stable', 'http'])
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        # Maximum number of archived results folders kept on disk.
        self.ARCHIVED_RESULTS_LIMIT = 25
        self._http_server_started = False
        self._wptserve_started = False
        self._websockets_server_started = False
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
    def _collect_tests(self, args):
        # Delegates discovery to LayoutTestFinder (honors --test-list / --fastest).
        return self._finder.find_tests(args, test_list=self._options.test_list,
            fastest_percentile=self._options.fastest)
    def _is_http_test(self, test):
        # A test needs the HTTP server if it lives under http/, is a
        # websocket test, or is a virtual http test.
        return (
            test.startswith(self.HTTP_SUBDIR) or
            self._is_websocket_test(test) or
            self.VIRTUAL_HTTP_SUBDIR in test
        )
    def _is_inspector_test(self, test):
        return self.INSPECTOR_SUBDIR in test
    def _is_websocket_test(self, test):
        # WPT tests run under WPTServe and are never websocket tests here.
        if self._port.is_wpt_enabled() and self._port.is_wpt_test(test):
            return False
        return self.WEBSOCKET_SUBDIR in test
    def _http_tests(self, test_names):
        # Subset of test_names that require the HTTP server.
        return set(test for test in test_names if self._is_http_test(test))
    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
    def _prepare_lists(self, paths, test_names):
        """Split discovered tests into (tests_to_run, tests_to_skip), apply
        the requested ordering, and restrict to this chunk when sharding."""
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]
        if not tests_to_run:
            return tests_to_run, tests_to_skip
        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)
        elif self._options.order == 'random-seeded':
            rnd = random.Random()
            rnd.seed(4)  # http://xkcd.com/221/
            rnd.shuffle(tests_to_run)
        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)
        return tests_to_run, tests_to_skip
    def _test_input_for_file(self, test_file):
        # Builds a TestInput with the per-test timeout (slow tests get the
        # longer slow_time_out_ms) and lock requirement.
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file),
            should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))
    def _test_requires_lock(self, test_file):
        """Return True if the test needs to be locked when
        running multiple copies of NRWTs. Perf tests are locked
        because heavy load caused by running other tests in parallel
        might cause some of them to timeout."""
        return self._is_http_test(test_file) or self._is_perf_test(test_file)
    def _test_is_expected_missing(self, test_file):
        # True if the expectations mark the test MISSING or needing rebaseline.
        expectations = self._expectations.model().get_expectations(test_file)
        return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations
    def _test_is_slow(self, test_file):
        return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)
    def needs_servers(self, test_names):
        return any(self._test_requires_lock(test_name) for test_name in test_names)
    def _rename_results_folder(self):
        """Archive the previous results directory by renaming it with the
        mtime of its results.html appended."""
        try:
            timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html"))))
        except (IOError, OSError), e:
            # It might be possible that results.html was not generated in previous run, because the test
            # run was interrupted even before testing started. In those cases, don't archive the folder.
            # Simply override the current folder contents with new results.
            import errno
            if e.errno == errno.EEXIST or e.errno == errno.ENOENT:
                self._printer.write_update("No results.html file found in previous run, skipping it.")
            return None
        archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp))
        archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name)
        self._filesystem.move(self._results_directory, archived_path)
    def _delete_dirs(self, dir_list):
        for dir in dir_list:
            self._filesystem.rmtree(dir)
    def _limit_archived_results_count(self):
        # Keep only the most recent ARCHIVED_RESULTS_LIMIT archived result
        # folders (sorted by mtime); delete the oldest ones.
        results_directory_path = self._filesystem.dirname(self._results_directory)
        file_list = self._filesystem.listdir(results_directory_path)
        results_directories = []
        for dir in file_list:
            file_path = self._filesystem.join(results_directory_path, dir)
            if self._filesystem.isdir(file_path) and self._results_directory in file_path:
                results_directories.append(file_path)
        results_directories.sort(key=lambda x: self._filesystem.mtime(x))
        self._printer.write_update("Clobbering excess archived results in %s" % results_directory_path)
        self._delete_dirs(results_directories[:-self.ARCHIVED_RESULTS_LIMIT])
    def _set_up_run(self, test_names):
        """Check the build and system deps, start the pixel-test helper, and
        prepare the results directory. Returns OK_EXIT_STATUS or an error
        exit code from the checks."""
        self._printer.write_update("Checking build ...")
        if self._options.build:
            exit_code = self._port.check_build(self.needs_servers(test_names), self._printer)
            if exit_code:
                _log.error("Build check failed")
                return exit_code
        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        if self._options.pixel_tests:
            self._printer.write_update("Starting pixel test helper ...")
            self._port.start_helper()
        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            exit_code = self._port.check_sys_deps(self.needs_servers(test_names))
            if exit_code:
                self._port.stop_helper()
                return exit_code
        if self._options.clobber_old_results:
            self._clobber_old_results()
        elif self._filesystem.exists(self._results_directory):
            self._limit_archived_results_count()
            # Rename the existing results folder for archiving.
            self._rename_results_folder()
        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)
        self._port.setup_test_run()
        return test_run_results.OK_EXIT_STATUS
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update("Collecting tests ...")
        running_all_tests = False
        try:
            paths, test_names, running_all_tests = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names)
        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)
        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures
        enabled_pixel_tests_in_retry = False
        try:
            self._start_servers(tests_to_run)
            num_workers = self._port.num_workers(int(self._options.child_processes))
            initial_results = self._run_tests(
                tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                num_workers)
            # Don't retry failures when interrupted by user or failures limit exception.
            should_retry_failures = should_retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            tests_to_retry = self._tests_to_retry(initial_results)
            all_retry_results = []
            if should_retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
                for retry_attempt in xrange(1, self._options.num_retries + 1):
                    if not tests_to_retry:
                        break
                    _log.info('')
                    _log.info('Retrying %s, attempt %d of %d...' %
                        (grammar.pluralize('unexpected failure', len(tests_to_retry)),
                        retry_attempt, self._options.num_retries))
                    retry_results = self._run_tests(tests_to_retry,
                        tests_to_skip=set(),
                        repeat_each=1,
                        iterations=1,
                        num_workers=num_workers,
                        retry_attempt=retry_attempt)
                    all_retry_results.append(retry_results)
                    tests_to_retry = self._tests_to_retry(retry_results)
                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
        finally:
            self._stop_servers()
            self._clean_up_run()
        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        self._printer.write_update("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        for retry_attempt_results in all_retry_results:
            self._look_for_new_crash_logs(retry_attempt_results, start_time)
        _log.debug("summarizing results")
        summarized_full_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(
            self._port, self._expectations, initial_results, all_retry_results,
            enabled_pixel_tests_in_retry, only_include_failing=True)
        # The exit code is the number of unexpected regressions, capped below.
        exit_code = summarized_failing_results['num_regressions']
        if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
            _log.warning('num regressions (%d) exceeds max exit status (%d)' %
                (exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS))
            exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS
        if not self._options.dry_run:
            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)
            if self._options.write_full_results_to:
                self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
                    self._options.write_full_results_to)
            self._upload_json_files()
            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
            else:
                if initial_results.interrupted:
                    exit_code = test_run_results.EARLY_EXIT_STATUS
                if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
        self._check_for_stale_w3c_dir()
        return test_run_results.RunDetails(
            exit_code, summarized_full_results, summarized_failing_results,
            initial_results, all_retry_results, enabled_pixel_tests_in_retry)
    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations,
                   num_workers, retry_attempt=0):
        # Expand the (iterations x repeat_each) cross product into TestInputs
        # and hand them to the LayoutTestRunner.
        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return self._runner.run_tests(self._expectations, test_inputs,
            tests_to_skip, num_workers, retry_attempt)
    def _start_servers(self, tests_to_run):
        # Start only the servers that the selected tests actually need,
        # remembering which ones we started so _stop_servers can undo it.
        if self._port.is_wpt_enabled() and any(self._port.is_wpt_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WPTServe ...')
            self._port.start_wptserve()
            self._wptserve_started = True
        if self._port.requires_http_server() or any((self._is_http_test(test) or self._is_inspector_test(test)) for test in tests_to_run):
            self._printer.write_update('Starting HTTP server ...')
            self._port.start_http_server(additional_dirs={}, number_of_drivers=self._options.max_locked_shards)
            self._http_server_started = True
        if any(self._is_websocket_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WebSocket server ...')
            self._port.start_websocket_server()
            self._websockets_server_started = True
    def _stop_servers(self):
        # Stop exactly the servers _start_servers started.
        if self._wptserve_started:
            self._printer.write_update('Stopping WPTServe ...')
            self._wptserve_started = False
            self._port.stop_wptserve()
        if self._http_server_started:
            self._printer.write_update('Stopping HTTP server ...')
            self._http_server_started = False
            self._port.stop_http_server()
        if self._websockets_server_started:
            self._printer.write_update('Stopping WebSocket server ...')
            self._websockets_server_started = False
            self._port.stop_websocket_server()
    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()
    def _check_for_stale_w3c_dir(self):
        # TODO(dpranke): Remove this check after 1/1/2015 and let people deal with the warnings.
        # Remove the check in port/base.py as well.
        fs = self._port.host.filesystem
        layout_tests_dir = self._port.layout_tests_dir()
        if fs.isdir(fs.join(layout_tests_dir, 'w3c')):
            _log.warning('WARNING: You still have the old LayoutTests/w3c directory in your checkout. You should delete it!')
    def _force_pixel_tests_if_needed(self):
        # Turn pixel tests on for the retry pass (restarting the helper);
        # returns True if we changed the setting so run() can undo it.
        if self._options.pixel_tests:
            return False
        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        self._port.start_helper()
        return True
    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
        under stress do a second pass at the end of the test run.
        run_results: the results of the test run
        start_time: time the tests started at. We're looking for crash
        logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                if failure.has_log:
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])
        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)
        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)
    def _clobber_old_results(self):
        # Delete every archived results folder next to the results directory.
        dir_above_results_path = self._filesystem.dirname(self._results_directory)
        self._printer.write_update("Clobbering old results in %s" % dir_above_results_path)
        if not self._filesystem.exists(dir_above_results_path):
            return
        file_list = self._filesystem.listdir(dir_above_results_path)
        results_directories = []
        for dir in file_list:
            file_path = self._filesystem.join(dir_above_results_path, dir)
            if self._filesystem.isdir(file_path) and self._results_directory in file_path:
                results_directories.append(file_path)
        self._delete_dirs(results_directories)
        # Port specific clean-up.
        self._port.clobber_old_port_specific_results()
    def _tests_to_retry(self, run_results):
        # TODO(ojan): This should also check that result.type != test_expectations.MISSING since retrying missing expectations is silly.
        # But that's a bit tricky since we only consider the last retry attempt for the count of unexpected regressions.
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]
    def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results, running_all_tests):
        _log.debug("Writing JSON files in %s." % self._results_directory)
        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
        # Save out the times data so we can use it for --fastest in the future.
        if running_all_tests:
            bot_test_times_path = self._port.bot_test_times_path()
            self._filesystem.maybe_make_directory(self._filesystem.dirname(bot_test_times_path))
            json_results_generator.write_json(self._filesystem, times_trie, bot_test_times_path)
        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)
        full_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
        # We write failing_results.json out as jsonp because we need to load it from a file url for results.html and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS")
        _log.debug("Finished writing JSON files.")
    def _upload_json_files(self):
        # Upload the generated JSON to the configured test-results server;
        # requires both --test-results-server and --master-name.
        if not self._options.test_results_server:
            return
        if not self._options.master_name:
            _log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
            return
        _log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
        attrs = [("builder", self._options.builder_name),
                 ("testtype", "layout-tests"),
                 ("master", self._options.master_name)]
        files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]]
        url = "http://%s/testfile/upload" % self._options.test_results_server
        # Set uploading timeout in case appengine server is having problems.
        # 120 seconds are more than enough to upload test results.
        uploader = FileUploader(url, 120)
        try:
            response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
            if response:
                if response.code == 200:
                    _log.debug("JSON uploaded.")
                else:
                    _log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
            else:
                _log.error("JSON upload failed; no response returned")
        except Exception, err:
            _log.error("Upload failed: %s" % err)
    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)
    def _stats_trie(self, initial_results):
        # Build a trie of per-test timing/worker stats for stats.json,
        # excluding skipped tests.
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1
        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie
| |
# -*- coding: utf-8 -*-
"""
Pdb debugger class.
Modified from the standard pdb.Pdb class to avoid including readline, so that
the command line completion of other programs which include this isn't
damaged.
In the future, this class will be expanded with improvements over the standard
pdb.
The code in this file is mainly lifted out of cmd.py in Python 2.2, with minor
changes. Licensing should therefore be under the standard Python terms. For
details on the PSF (Python Software Foundation) standard license, see:
http://www.python.org/2.2.3/license.html"""
#*****************************************************************************
#
# This file is licensed under the PSF license.
#
# Copyright (C) 2001 Python Software Foundation, www.python.org
# Copyright (C) 2005-2006 Fernando Perez. <fperez@colorado.edu>
#
#
#*****************************************************************************
from __future__ import print_function
import bdb
import functools
import inspect
import sys
import warnings
from IPython import get_ipython
from IPython.utils import PyColorize, ulinecache
from IPython.utils import coloransi, py3compat
from IPython.core.excolors import exception_colors
from IPython.testing.skipdoctest import skip_doctest
# Prompt string shown by the IPython debugger REPL.
prompt = 'ipdb> '
#We have to check this directly from sys.argv, config struct not yet available
from pdb import Pdb as OldPdb
# Allow the set_trace code to operate outside of an ipython instance, even if
# it does so with some limitations. The rest of this support is implemented in
# the Tracer constructor.
def make_arrow(pad):
    """Generate the leading arrow in front of traceback or debugger.

    ``pad`` is the total width available: the arrow is dashes followed by
    '> ', degrading to a bare '>' (or nothing) when there is no room.
    """
    if pad == 1:
        return '>'
    if pad >= 2:
        dashes = '-' * (pad - 2)
        return dashes + '> '
    return ''
def BdbQuit_excepthook(et, ev, tb, excepthook=None):
    """Exception hook which handles `BdbQuit` exceptions.

    All other exceptions are processed using the `excepthook`
    parameter.
    """
    warnings.warn("`BdbQuit_excepthook` is deprecated since version 5.1",
                  DeprecationWarning)
    # Quitting the debugger is not an error; just announce it.
    if et == bdb.BdbQuit:
        print('Exiting Debugger.')
        return
    if excepthook is not None:
        excepthook(et, ev, tb)
        return
    # Backwards compatibility. Raise deprecation warning?
    BdbQuit_excepthook.excepthook_ori(et, ev, tb)
def BdbQuit_IPython_excepthook(self, et, ev, tb, tb_offset=None):
    """IPython custom-exception handler that simply announces debugger exit."""
    warnings.warn(
        "`BdbQuit_IPython_excepthook` is deprecated since version 5.1",
        DeprecationWarning)
    message = 'Exiting Debugger.'
    print(message)
class Tracer(object):
    """
    DEPRECATED

    Class for local debugging, similar to pdb.set_trace.

    Instances of this class, when called, behave like pdb.set_trace, but
    providing IPython's enhanced capabilities.

    This is implemented as a class which must be initialized in your own code
    and not as a standalone function because we need to detect at runtime
    whether IPython is already active or not. That detection is done in the
    constructor, ensuring that this code plays nicely with a running IPython,
    while functioning acceptably (though with limitations) if outside of it.
    """
    @skip_doctest
    def __init__(self, colors=None):
        """
        DEPRECATED

        Create a local debugger instance.

        Parameters
        ----------
        colors : str, optional
            The name of the color scheme to use, it must be one of IPython's
            valid color schemes.  If not given, the function will default to
            the current IPython scheme when running inside IPython, and to
            'NoColor' otherwise.

        Examples
        --------
        ::

            from IPython.core.debugger import Tracer; debug_here = Tracer()

        Later in your code::

            debug_here()  # -> will open up the debugger at that point.

        Once the debugger activates, you can use all of its regular commands to
        step through code, set breakpoints, etc.  See the pdb documentation
        from the Python standard library for usage details.
        """
        warnings.warn("`Tracer` is deprecated since version 5.1, directly use "
                      "`IPython.core.debugger.Pdb.set_trace()`",
                      DeprecationWarning)

        # Detect at runtime whether we are inside a live IPython session.
        ip = get_ipython()
        if ip is None:
            # Outside of ipython, we set our own exception hook manually
            # (wrapping whatever hook was installed before us).
            sys.excepthook = functools.partial(BdbQuit_excepthook,
                                               excepthook=sys.excepthook)
            def_colors = 'NoColor'
        else:
            # In ipython, we use its custom exception handler mechanism
            def_colors = ip.colors
            ip.set_custom_exc((bdb.BdbQuit,), BdbQuit_IPython_excepthook)

        if colors is None:
            colors = def_colors

        # The stdlib debugger internally uses a modified repr from the `repr`
        # module, that limits the length of printed strings to a hardcoded
        # limit of 30 characters. That much trimming is too aggressive, let's
        # at least raise that limit to 80 chars, which should be enough for
        # most interactive uses.
        try:
            try:
                from reprlib import aRepr  # Py 3
            except ImportError:
                from repr import aRepr  # Py 2
            aRepr.maxstring = 80
        except:
            # This is only a user-facing convenience, so any error we encounter
            # here can be warned about but can be otherwise ignored. These
            # printouts will tell us about problems if this API changes
            import traceback
            traceback.print_exc()

        # The actual debugger instance used when the Tracer is called.
        self.debugger = Pdb(colors)

    def __call__(self):
        """Starts an interactive debugger at the point where called.

        This is similar to the pdb.set_trace() function from the std lib, but
        using IPython's enhanced debugger."""
        self.debugger.set_trace(sys._getframe().f_back)
def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
    """Make new_fn have old_fn's doc string. This is particularly useful
    for the ``do_...`` commands that hook into the help system.

    Adapted from a comp.lang.python posting by Duncan Booth.

    Parameters
    ----------
    new_fn : callable actually executed by the wrapper.
    old_fn : callable whose documentation (and, via ``functools.wraps``,
        name/metadata) the wrapper should expose to the help system.
    additional_text : str appended to ``old_fn``'s docstring, if any.
    """
    # functools.wraps copies __name__/__doc__/etc. from old_fn so the
    # wrapper introspects like the command it documents (the bare inner
    # function used to show up as "wrapper" in help/tracebacks).
    @functools.wraps(old_fn)
    def wrapper(*args, **kw):
        return new_fn(*args, **kw)
    if old_fn.__doc__:
        wrapper.__doc__ = old_fn.__doc__ + additional_text
    return wrapper
def _file_lines(fname):
"""Return the contents of a named file as a list of lines.
This function never raises an IOError exception: if the file can't be
read, it simply returns an empty list."""
try:
outfile = open(fname)
except IOError:
return []
else:
out = outfile.readlines()
outfile.close()
return out
class Pdb(OldPdb, object):
"""Modified Pdb class, does not load readline.
for a standalone version that uses prompt_toolkit, see
`IPython.terminal.debugger.TerminalPdb` and
`IPython.terminal.debugger.set_trace()`
"""
def __init__(self, color_scheme=None, completekey=None,
             stdin=None, stdout=None, context=5):
    """Create an IPython-flavoured Pdb.

    Parameters
    ----------
    color_scheme : deprecated; the scheme is now taken from the shell.
    completekey, stdin, stdout : forwarded unchanged to ``pdb.Pdb``.
    context : number of source lines to show around the current line;
        must coerce to a positive integer, otherwise ValueError is raised.
    """
    # Parent constructor:
    try:
        self.context = int(context)
        if self.context <= 0:
            raise ValueError("Context must be a positive integer")
    except (TypeError, ValueError):
        raise ValueError("Context must be a positive integer")
    OldPdb.__init__(self, completekey, stdin, stdout)

    # IPython changes...
    self.shell = get_ipython()

    if self.shell is None:
        # No IPython instance running, we must create one
        from IPython.terminal.interactiveshell import \
            TerminalInteractiveShell
        self.shell = TerminalInteractiveShell.instance()

    if color_scheme is not None:
        warnings.warn(
            "The `color_scheme` argument is deprecated since version 5.1",
            DeprecationWarning)
    else:
        # Default to whatever scheme the running shell uses.
        color_scheme = self.shell.colors

    self.aliases = {}

    # Create color table: we copy the default one from the traceback
    # module and add a few attributes needed for debugging
    self.color_scheme_table = exception_colors()

    # shorthands
    C = coloransi.TermColors
    cst = self.color_scheme_table

    # Per-scheme colors for the prompt and breakpoint markers.
    cst['NoColor'].colors.prompt = C.NoColor
    cst['NoColor'].colors.breakpoint_enabled = C.NoColor
    cst['NoColor'].colors.breakpoint_disabled = C.NoColor

    cst['Linux'].colors.prompt = C.Green
    cst['Linux'].colors.breakpoint_enabled = C.LightRed
    cst['Linux'].colors.breakpoint_disabled = C.Red

    cst['LightBG'].colors.prompt = C.Blue
    cst['LightBG'].colors.breakpoint_enabled = C.LightRed
    cst['LightBG'].colors.breakpoint_disabled = C.Red

    cst['Neutral'].colors.prompt = C.Blue
    cst['Neutral'].colors.breakpoint_enabled = C.LightRed
    cst['Neutral'].colors.breakpoint_disabled = C.Red

    self.set_colors(color_scheme)

    # Add a python parser so we can syntax highlight source while
    # debugging.
    self.parser = PyColorize.Parser()

    # Set the prompt - the default prompt is '(Pdb)'
    self.prompt = prompt
def set_colors(self, scheme):
    """Shorthand access to the color table scheme selector method."""
    table = self.color_scheme_table
    table.set_active_scheme(scheme)
def trace_dispatch(self, frame, event, arg):
    """Dispatch a trace event, swallowing BdbQuit so that quitting the
    debugger does not propagate an exception into the traced program."""
    try:
        return super(Pdb, self).trace_dispatch(frame, event, arg)
    except bdb.BdbQuit:
        return None
def interaction(self, frame, traceback):
    """Run the pdb interaction loop; Ctrl-C aborts it with a short notice."""
    try:
        OldPdb.interaction(self, frame, traceback)
    except KeyboardInterrupt:
        notice = '\n' + self.shell.get_exception_only()
        sys.stdout.write(notice)
def parseline(self, line):
    """Parse one command line, adding pdb++-style "smart command mode".

    A ``!!`` prefix forces standard pdb parsing.  Otherwise, if the first
    word of the line names a variable visible from the current frame, the
    whole line is treated as a Python statement instead of a command.
    """
    if line.startswith("!!"):
        # Force standard behavior.
        return super(Pdb, self).parseline(line[2:])
    # "Smart command mode" from pdb++: don't execute commands if a variable
    # with the same name exists.
    cmd, arg, newline = super(Pdb, self).parseline(line)

    # Fix for #9611: Do not trigger smart command if the command is `exit`
    # or `quit` and it would resolve to their *global* value (the
    # `ExitAutocall` object). Just checking that it is not present in the
    # locals dict is not enough as locals and globals match at the
    # toplevel.
    if ((cmd in self.curframe.f_locals or cmd in self.curframe.f_globals)
            and not (cmd in ["exit", "quit"]
                     and (self.curframe.f_locals is self.curframe.f_globals
                          or cmd not in self.curframe.f_locals))):
        return super(Pdb, self).parseline("!" + line)
    return super(Pdb, self).parseline(line)
def new_do_up(self, arg):
    # Thin wrapper over the stock pdb 'up'; the alias below copies its docs.
    OldPdb.do_up(self, arg)
do_u = do_up = decorate_fn_with_doc(new_do_up, OldPdb.do_up)
def new_do_down(self, arg):
    # Thin wrapper over the stock pdb 'down'; the alias below copies its docs.
    OldPdb.do_down(self, arg)
do_d = do_down = decorate_fn_with_doc(new_do_down, OldPdb.do_down)
def new_do_frame(self, arg):
    # Thin wrapper over the stock pdb 'frame' command.
    OldPdb.do_frame(self, arg)
def new_do_quit(self, arg):
    # Restore the completer state stashed when the debugger started
    # (if any) before handing control to the stock quit.
    if hasattr(self, 'old_all_completions'):
        self.shell.Completer.all_completions = self.old_all_completions
    return OldPdb.do_quit(self, arg)
do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
def new_do_restart(self, arg):
    """Restart command. In the context of ipython this is exactly the same
    thing as 'quit'."""
    # There is no program to re-exec inside an interactive shell.
    self.msg("Restart doesn't make sense here. Using 'quit' instead.")
    return self.do_quit(arg)
def print_stack_trace(self, context=None):
    """Print every entry of the current stack, `context` lines each.

    Raises ValueError if `context` does not coerce to a positive int.
    A Ctrl-C simply stops the printout.
    """
    if context is None:
        context = self.context
    try:
        context = int(context)
        if context <= 0:
            raise ValueError("Context must be a positive integer")
    except (TypeError, ValueError):
        raise ValueError("Context must be a positive integer")
    try:
        for frame_lineno in self.stack:
            self.print_stack_entry(frame_lineno, context=context)
    except KeyboardInterrupt:
        pass
def print_stack_entry(self, frame_lineno, prompt_prefix='\n-> ',
                      context=None):
    """Print one (frame, lineno) stack entry and sync any editor hook.

    `prompt_prefix` is unused here; it is kept for pdb API compatibility.
    Raises ValueError if `context` does not coerce to a positive int.
    """
    if context is None:
        context = self.context
    try:
        context = int(context)
        if context <= 0:
            raise ValueError("Context must be a positive integer")
    except (TypeError, ValueError):
        raise ValueError("Context must be a positive integer")
    print(self.format_stack_entry(frame_lineno, '', context))

    # vds: >>
    frame, lineno = frame_lineno
    filename = frame.f_code.co_filename
    # Let an attached editor (if any) jump to the current location.
    self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
    # vds: <<
def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
    """Return a colorized, multi-line description of one stack entry.

    Parameters
    ----------
    frame_lineno : tuple
        The ``(frame, lineno)`` pair to render.
    lprefix : str
        Unused here; kept for pdb API compatibility.
    context : int or None
        Number of source lines to show around ``lineno``; defaults to
        ``self.context``.

    Raises
    ------
    ValueError
        If ``context`` does not coerce to a positive integer.
    """
    if context is None:
        context = self.context
    try:
        context = int(context)
        if context <= 0:
            raise ValueError("Context must be a positive integer")
    except (TypeError, ValueError):
        # BUG FIX: this method used to only *print* the complaint and then
        # continue with an invalid context value; raise ValueError like the
        # sibling methods (print_stack_trace / print_stack_entry) do.
        raise ValueError("Context must be a positive integer")
    try:
        import reprlib  # Py 3
    except ImportError:
        import repr as reprlib  # Py 2

    ret = []

    Colors = self.color_scheme_table.active_colors
    ColorsNormal = Colors.Normal
    tpl_link = u'%s%%s%s' % (Colors.filenameEm, ColorsNormal)
    tpl_call = u'%s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
    tpl_line = u'%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
    tpl_line_em = u'%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line,
                                         ColorsNormal)

    frame, lineno = frame_lineno

    # Show the frame's return value (set by pdb on 'return' events), if any.
    return_value = ''
    if '__return__' in frame.f_locals:
        rv = frame.f_locals['__return__']
        #return_value += '->'
        return_value += reprlib.repr(rv) + '\n'
    ret.append(return_value)

    #s = filename + '(' + `lineno` + ')'
    filename = self.canonic(frame.f_code.co_filename)
    link = tpl_link % py3compat.cast_unicode(filename)

    if frame.f_code.co_name:
        func = frame.f_code.co_name
    else:
        func = "<lambda>"

    call = ''
    if func != '?':
        if '__args__' in frame.f_locals:
            args = reprlib.repr(frame.f_locals['__args__'])
        else:
            args = '()'
        call = tpl_call % (func, args)

    # The level info should be generated in the same format pdb uses, to
    # avoid breaking the pdbtrack functionality of python-mode in *emacs.
    if frame is self.curframe:
        ret.append('> ')
    else:
        ret.append('  ')
    ret.append(u'%s(%s)%s\n' % (link, lineno, call))

    # Clamp the context window to the available source lines.
    start = lineno - 1 - context//2
    lines = ulinecache.getlines(filename)
    start = min(start, len(lines) - context)
    start = max(start, 0)
    lines = lines[start : start + context]

    for i, line in enumerate(lines):
        show_arrow = (start + 1 + i == lineno)
        # Emphasize every line of the current frame, plus the arrow line.
        linetpl = (frame is self.curframe or show_arrow) \
                  and tpl_line_em \
                  or tpl_line
        ret.append(self.__format_line(linetpl, filename,
                                      start + 1 + i, line,
                                      arrow=show_arrow))
    return ''.join(ret)
def __format_line(self, tpl_line, filename, lineno, line, arrow=False):
    """Render one source line with its number, breakpoint mark and
    optional leading arrow, syntax-highlighted via the IPython parser."""
    bp_mark = ""
    bp_mark_color = ""

    # Syntax-highlight the line; fall back to the raw text on parse errors.
    scheme = self.color_scheme_table.active_scheme_name
    new_line, err = self.parser.format2(line, 'str', scheme)
    if not err: line = new_line

    bp = None
    if lineno in self.get_file_breaks(filename):
        # Several breakpoints may share a line; display the newest one.
        bps = self.get_breaks(filename, lineno)
        bp = bps[-1]

    if bp:
        Colors = self.color_scheme_table.active_colors
        bp_mark = str(bp.number)
        bp_mark_color = Colors.breakpoint_enabled
        if not bp.enabled:
            bp_mark_color = Colors.breakpoint_disabled

    # Fixed-width gutter so the arrow, breakpoint number and line number
    # line up across consecutive lines.
    numbers_width = 7
    if arrow:
        # This is the line with the error
        pad = numbers_width - len(str(lineno)) - len(bp_mark)
        num = '%s%s' % (make_arrow(pad), str(lineno))
    else:
        num = '%*s' % (numbers_width - len(bp_mark), str(lineno))

    return tpl_line % (bp_mark_color + bp_mark, num, line)
def print_list_lines(self, filename, first, last):
    """The printing (as opposed to the parsing part of a 'list'
    command."""
    try:
        Colors = self.color_scheme_table.active_colors
        ColorsNormal = Colors.Normal
        tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
        tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
        src = []
        # '<string>' means code executed via exec; use the recorded
        # filename, when one was stashed, so listing still works.
        if filename == "<string>" and hasattr(self, "_exec_filename"):
            filename = self._exec_filename

        for lineno in range(first, last+1):
            line = ulinecache.getline(filename, lineno)
            if not line:
                break

            # Emphasize (and arrow) the line the debugger is stopped at.
            if lineno == self.curframe.f_lineno:
                line = self.__format_line(tpl_line_em, filename, lineno, line, arrow=True)
            else:
                line = self.__format_line(tpl_line, filename, lineno, line, arrow=False)

            src.append(line)
            # Remember the last line shown so a bare 'list' continues here.
            self.lineno = lineno

        print(''.join(src))

    except KeyboardInterrupt:
        pass
def do_list(self, arg):
    """List source around the current line, like pdb's 'list'.

    With no argument, continues from the last listing; 'list a, b' shows
    lines a..b (b < a means b is a count); 'list n' centers near n.
    """
    self.lastcmd = 'list'
    last = None
    if arg:
        try:
            # NOTE(review): eval of the user-typed argument mirrors the
            # stdlib pdb implementation; globals/locals are empty dicts.
            x = eval(arg, {}, {})
            if type(x) == type(()):
                first, last = x
                first = int(first)
                last = int(last)
                if last < first:
                    # Assume it's a count
                    last = first + last
            else:
                first = max(1, int(x) - 5)
        except:
            print('*** Error in argument:', repr(arg))
            return
    elif self.lineno is None:
        first = max(1, self.curframe.f_lineno - 5)
    else:
        # Bare 'list' continues just past the previous listing.
        first = self.lineno + 1
    if last is None:
        last = first + 10
    self.print_list_lines(self.curframe.f_code.co_filename, first, last)

    # vds: >>
    lineno = first
    filename = self.curframe.f_code.co_filename
    # Let an attached editor (if any) follow the listing.
    self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
    # vds: <<

do_l = do_list
def getsourcelines(self, obj):
    """Return (source_lines, first_lineno) for the block containing obj."""
    lines, lineno = inspect.findsource(obj)
    if inspect.ismodule(obj):
        # Whole-module listing starts at line 1.
        return lines, 1
    if inspect.isframe(obj) and obj.f_globals is obj.f_locals:
        # must be a module frame: do not try to cut a block out of it
        return lines, 1
    return inspect.getblock(lines[lineno:]), lineno + 1
def do_longlist(self, arg):
    """List the whole source block of the current frame."""
    self.lastcmd = 'longlist'
    try:
        lines, lineno = self.getsourcelines(self.curframe)
    except OSError as err:
        self.error(err)
        return
    filename = self.curframe.f_code.co_filename
    self.print_list_lines(filename, lineno, lineno + len(lines))
do_ll = do_longlist
def do_pdef(self, arg):
    """Print the call signature for any callable object.
    The debugger interface to %pdef"""
    frame = self.curframe
    namespaces = [('Locals', frame.f_locals),
                  ('Globals', frame.f_globals)]
    magic = self.shell.find_line_magic('pdef')
    magic(arg, namespaces=namespaces)
def do_pdoc(self, arg):
    """Print the docstring for an object.
    The debugger interface to %pdoc."""
    frame = self.curframe
    namespaces = [('Locals', frame.f_locals),
                  ('Globals', frame.f_globals)]
    magic = self.shell.find_line_magic('pdoc')
    magic(arg, namespaces=namespaces)
def do_pfile(self, arg):
    """Print (or run through pager) the file where an object is defined.
    The debugger interface to %pfile.
    """
    frame = self.curframe
    namespaces = [('Locals', frame.f_locals),
                  ('Globals', frame.f_globals)]
    magic = self.shell.find_line_magic('pfile')
    magic(arg, namespaces=namespaces)
def do_pinfo(self, arg):
    """Provide detailed information about an object.
    The debugger interface to %pinfo, i.e., obj?."""
    frame = self.curframe
    namespaces = [('Locals', frame.f_locals),
                  ('Globals', frame.f_globals)]
    magic = self.shell.find_line_magic('pinfo')
    magic(arg, namespaces=namespaces)
def do_pinfo2(self, arg):
    """Provide extra detailed information about an object.
    The debugger interface to %pinfo2, i.e., obj??."""
    frame = self.curframe
    namespaces = [('Locals', frame.f_locals),
                  ('Globals', frame.f_globals)]
    magic = self.shell.find_line_magic('pinfo2')
    magic(arg, namespaces=namespaces)
def do_psource(self, arg):
    """Print (or run through pager) the source code for an object."""
    frame = self.curframe
    namespaces = [('Locals', frame.f_locals),
                  ('Globals', frame.f_globals)]
    magic = self.shell.find_line_magic('psource')
    magic(arg, namespaces=namespaces)
if sys.version_info > (3, ):
    def do_where(self, arg):
        """w(here)
        Print a stack trace, with the most recent frame at the bottom.
        An arrow indicates the "current frame", which determines the
        context of most commands. 'bt' is an alias for this command.

        Take a number as argument as an (optional) number of context line to
        print"""
        if arg:
            # Robustness fix: a non-numeric argument used to crash the
            # command with an uncaught ValueError from int().
            try:
                context = int(arg)
            except ValueError as err:
                self.error(err)
                return
            self.print_stack_trace(context)
        else:
            self.print_stack_trace()

    do_w = do_where
| |
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013,2014,2015,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import argparse
from collections import defaultdict
try:
import ms.version
except ImportError:
pass
else:
ms.version.addpkg('lxml', '3.2.5')
from lxml import etree
# Module-level flag: set as soon as any problem has been reported.
was_error = False


def error(message):
    """Report a problem on stderr and remember that one occurred."""
    global was_error
    print("ERROR: %s" % message, file=sys.stderr)
    was_error = True
def read_docbook(file, commands):
    """Parse one DocBook command page and record which options it documents.

    Fills ``commands[cmd][optname]`` with flags: 'docbook' (mentioned in the
    synopsis), 'synopsis_has_arg', 'body' (described in the Options refsect1)
    and 'body_has_arg'.  Parse problems are reported via error().
    """
    cmd, xml = os.path.splitext(os.path.basename(file))

    try:
        tree = etree.parse(file)
    except etree.XMLSyntaxError as err:
        error("Failed to parse %s: %s" % (file, err))
        return

    tree.xinclude()

    # This code is somewhat depends on the style the existing documentation is
    # written. If the style changes considerably, then the code may need to be
    # adapted.
    ns = "http://docbook.org/ns/docbook"

    refsynopsisdiv = tree.find("{%s}refsynopsisdiv" % ns)
    if refsynopsisdiv is None:
        error("No refsynopsisdiv in %s. Maybe namespace is wrong?" % file)
        # BUG FIX: without this return the code fell through and crashed
        # with AttributeError on refsynopsisdiv.findall(...) below.
        return

    commands[cmd] = defaultdict(dict)

    # Collect all options mentioned in the synopsis
    # TODO: use xpath?
    for section in refsynopsisdiv.findall("{%s}cmdsynopsis" % ns):
        for option in section.getiterator("{%s}option" % ns):
            optname = option.text.strip()
            commands[cmd][optname]["docbook"] = True
            if option.findall("{%s}replaceable" % ns):
                commands[cmd][optname]["synopsis_has_arg"] = True

    # Collect all options described in a refsect1 titled "Options"
    # TODO: use xpath?
    for section in tree.findall("{%s}refsect1" % ns):
        title = section.find("{%s}title" % ns)
        if title.text.strip().lower() != "options":
            continue

        # Look for varlistentry/term/option
        # TODO: use xpath?
        for varentry in section.getiterator("{%s}varlistentry" % ns):
            term = varentry.find("{%s}term" % ns)
            for option in term.getiterator("{%s}option" % ns):
                optname = option.text.strip()
                commands[cmd][optname]["body"] = True
                if option.findall("{%s}replaceable" % ns):
                    commands[cmd][optname]["body_has_arg"] = True
def process_input_xml(file, commands, default_options):
    """Record the options input.xml defines, per command and globally."""

    def _variants(attrib):
        # Long-form option names a single <option> definition produces.
        names = ["--" + attrib["name"]]
        if "reverse" in attrib:
            names.append("--" + attrib["reverse"])
        elif attrib["type"] == "boolean":
            names.append("--no" + attrib["name"])
        return names

    tree = etree.parse(file)

    # Options attached to the wildcard command apply to every command.
    for option in tree.xpath("command[@name='*']//option"):
        for name in _variants(option.attrib):
            default_options[name] = True
        if "short" in option.attrib:
            default_options["-" + option.attrib["short"]] = True

    for section in tree.findall("command"):
        cmd = section.attrib["name"]
        if cmd == '*':
            # Definition of the global options, skip
            continue
        if cmd not in commands:
            # No DocBook documentation, skip
            continue
        for option in section.xpath(".//option"):
            takes_arg = (option.attrib["type"] != "flag" and
                         option.attrib["type"] != "boolean")
            for optname in _variants(option.attrib):
                commands[cmd][optname]["inputxml"] = True
                if takes_arg:
                    commands[cmd][optname]["inputxml_has_arg"] = True

    # TODO: merge default options into the command-specific options? That
    # would mean a bit more code here, but less special casing in
    # check_errors()
def check_errors(commands, default_options):
    """Cross-check DocBook documentation flags against input.xml flags,
    reporting every mismatch via error()."""
    for cmd in sorted(commands):
        for option, flags in sorted(commands[cmd].items()):
            documented = "docbook" in flags      # in the synopsis
            described = "body" in flags          # in the Options section
            defined = "inputxml" in flags        # known to input.xml
            is_default = option in default_options
            syn_arg = "synopsis_has_arg" in flags
            body_arg = "body_has_arg" in flags
            xml_arg = "inputxml_has_arg" in flags

            if described and not documented:
                error("Command %s, option %s is documented but not mentioned "
                      "in the synopsis." % (cmd, option))
            if documented and not described:
                error("Command %s, option %s is mentioned in the synopsis, "
                      "but not described in the body." % (cmd, option))
            if defined and not documented:
                error("Command %s, option %s is not documented." %
                      (cmd, option))
            if not defined and (documented or described) and not is_default:
                error("Command %s, option %s is documented, but does "
                      "not exist." % (cmd, option))
            if syn_arg and not body_arg:
                error("Command %s, option %s has an argument in the synopsis, "
                      "but not in the description." % (cmd, option))
            if body_arg and not syn_arg:
                error("Command %s, option %s has an argument in the "
                      "description, but not in the synopsis." % (cmd, option))
            if xml_arg and not syn_arg and not is_default:
                error("Command %s, option %s has an argument in input.xml, "
                      "but not in the documentation." % (cmd, option))
            if not xml_arg and syn_arg and not is_default:
                error("Command %s, option %s has an argument in the "
                      "synposis, but not in input.xml." % (cmd, option))
def main():
    """Parse arguments, scan every command XML file and report problems.

    Exits with status 1 (SystemExit) if any check failed.
    """
    _DIR = os.path.dirname(os.path.realpath(__file__))
    _ETCDIR = os.path.join(_DIR, "..", "etc")
    _DOCDIR = os.path.join(_DIR, "commands")

    parser = argparse.ArgumentParser(description='Documentation checker')
    parser.add_argument("--input_xml", default=os.path.join(_ETCDIR,
                                                            "input.xml"),
                        help="Location of input.xml")
    parser.add_argument("--command_dir", default=_DOCDIR,
                        help="Directory containing the command documentations")
    args = parser.parse_args()

    commands = defaultdict(dict)
    # NOTE: the loop variable used to be named 'file', shadowing the builtin.
    for filename in os.listdir(args.command_dir):
        if not filename.endswith(".xml"):
            continue
        read_docbook(os.path.join(args.command_dir, filename), commands)

    default_options = {}
    process_input_xml(args.input_xml, commands, default_options)

    check_errors(commands, default_options)
    if was_error:
        raise SystemExit(1)


if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
"""
@package mi.dataset.parser
@file marine-integrations/mi/dataset/parser/ctdpf_j_cspp.py
@author Joe Padula
@brief Parser for the ctdpf_j_cspp dataset driver
Release notes:
Modified by Chris Goodrich, December 2014 to remove state stuff
Initial Release
"""
__author__ = 'Joe Padula'
__license__ = 'Apache 2.0'
import numpy
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.data_particle import DataParticle
from mi.core.exceptions import RecoverableSampleException
from mi.dataset.parser.common_regexes import END_OF_LINE_REGEX, \
FLOAT_REGEX, MULTIPLE_TAB_REGEX
from mi.dataset.parser.cspp_base import CsppParser, \
Y_OR_N_REGEX, \
CsppMetadataDataParticle, \
MetadataRawDataKey, \
encode_y_or_n
# regex for the data record: six tab-separated fields ending at EOL.
# Capture-group order must stay in sync with DataMatchesGroupNumber.
DATA_REGEX = r'(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # Profiler Timestamp
DATA_REGEX += '(' + Y_OR_N_REGEX + ')' + MULTIPLE_TAB_REGEX  # Suspect Timestamp
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # Temperature
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # Conductivity
DATA_REGEX += '(' + FLOAT_REGEX + ')' + MULTIPLE_TAB_REGEX  # Pressure
DATA_REGEX += '(' + FLOAT_REGEX + ')' + END_OF_LINE_REGEX  # Salinity
class DataMatchesGroupNumber(BaseEnum):
    """
    An enum for group match indices for a data record chunk.
    Used to access the match groups in the particle raw data
    """
    # 1-based indices following the capture-group order of DATA_REGEX.
    PROFILER_TIMESTAMP = 1
    SUSPECT_TIMESTAMP = 2
    TEMPERATURE = 3
    CONDUCTIVITY = 4
    PRESSURE = 5
    SALINITY = 6
class DataParticleType(BaseEnum):
    """
    The data particle types that this parser can generate
    """
    # Stream names: recovered vs. telemetered, metadata vs. instrument.
    METADATA_RECOVERED = 'ctdpf_j_cspp_metadata_recovered'
    INSTRUMENT_RECOVERED = 'ctdpf_j_cspp_instrument_recovered'
    METADATA_TELEMETERED = 'ctdpf_j_cspp_metadata'
    INSTRUMENT_TELEMETERED = 'ctdpf_j_cspp_instrument'
class CtdpfJCsppParserDataParticleKey(BaseEnum):
    """
    The data particle keys associated with ctdpf_j_cspp data instrument particle parameters
    """
    # Output parameter names, one per DATA_REGEX capture group.
    PROFILER_TIMESTAMP = 'profiler_timestamp'
    SUSPECT_TIMESTAMP = 'suspect_timestamp'
    TEMPERATURE = 'temperature'
    CONDUCTIVITY = 'conductivity'
    PRESSURE = 'pressure'
    SALINITY = 'salinity'
class CtdpfJCsppMetadataDataParticle(CsppMetadataDataParticle):
    """
    Base Class for building a ctdpf_j_cspp metadata particle
    """

    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into
        an array of dictionaries defining the data in the particle
        with the appropriate tag.
        @throws RecoverableSampleException If there is a problem with sample creation
        """
        results = []

        try:
            # Append the base metadata parsed values to the results to return
            results += self._build_metadata_parsed_values()

            data_match = self.raw_data[MetadataRawDataKey.DATA_MATCH]

            # Set the internal timestamp.
            # FIX: `numpy.float` (a deprecated alias of the builtin) was
            # removed in NumPy 1.20; the builtin float is the equivalent.
            internal_timestamp_unix = float(data_match.group(
                DataMatchesGroupNumber.PROFILER_TIMESTAMP))
            self.set_internal_timestamp(unix_time=internal_timestamp_unix)

        except (ValueError, TypeError, IndexError) as ex:
            log.warn("Exception when building parsed values")
            raise RecoverableSampleException(
                "Error (%s) while decoding parameters in data: [%s]"
                % (ex, self.raw_data))

        return results
class CtdpfJCsppMetadataRecoveredDataParticle(CtdpfJCsppMetadataDataParticle):
    """
    Class for building a ctdpf_j_cspp recovered metadata particle
    """
    # Stream name this particle is published under.
    _data_particle_type = DataParticleType.METADATA_RECOVERED
class CtdpfJCsppMetadataTelemeteredDataParticle(CtdpfJCsppMetadataDataParticle):
    """
    Class for building a ctdpf_j_cspp telemetered metadata particle
    """
    # Stream name this particle is published under.
    _data_particle_type = DataParticleType.METADATA_TELEMETERED
class CtdpfJCsppInstrumentDataParticle(DataParticle):
    """
    Base Class for building a ctdpf_j_cspp instrument data particle
    """

    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into
        an array of dictionaries defining the data in the particle
        with the appropriate tag.
        @throws RecoverableSampleException If there is a problem with sample creation
        """
        results = []

        try:
            # FIX: the profiler timestamp was encoded with `numpy.float`,
            # a deprecated alias removed in NumPy 1.20; use the builtin
            # float, which is also what the other fields already use.
            results.append(self._encode_value(CtdpfJCsppParserDataParticleKey.PROFILER_TIMESTAMP,
                                              self.raw_data.group(DataMatchesGroupNumber.PROFILER_TIMESTAMP),
                                              float))
            results.append(self._encode_value(CtdpfJCsppParserDataParticleKey.SUSPECT_TIMESTAMP,
                                              self.raw_data.group(DataMatchesGroupNumber.SUSPECT_TIMESTAMP),
                                              encode_y_or_n))
            results.append(self._encode_value(CtdpfJCsppParserDataParticleKey.TEMPERATURE,
                                              self.raw_data.group(DataMatchesGroupNumber.TEMPERATURE),
                                              float))
            results.append(self._encode_value(CtdpfJCsppParserDataParticleKey.CONDUCTIVITY,
                                              self.raw_data.group(DataMatchesGroupNumber.CONDUCTIVITY),
                                              float))
            results.append(self._encode_value(CtdpfJCsppParserDataParticleKey.PRESSURE,
                                              self.raw_data.group(DataMatchesGroupNumber.PRESSURE),
                                              float))
            results.append(self._encode_value(CtdpfJCsppParserDataParticleKey.SALINITY,
                                              self.raw_data.group(DataMatchesGroupNumber.SALINITY),
                                              float))

            # Set the internal timestamp (numpy.float removed here as well).
            internal_timestamp_unix = float(self.raw_data.group(
                DataMatchesGroupNumber.PROFILER_TIMESTAMP))
            self.set_internal_timestamp(unix_time=internal_timestamp_unix)

        except (ValueError, TypeError, IndexError) as ex:
            log.warn("Exception when building parsed values")
            raise RecoverableSampleException(
                "Error (%s) while decoding parameters in data: [%s]"
                % (ex, self.raw_data))

        return results
class CtdpfJCsppInstrumentRecoveredDataParticle(CtdpfJCsppInstrumentDataParticle):
    """
    Class for building a ctdpf_j_cspp recovered instrument data particle
    """
    # Stream name this particle is published under.
    _data_particle_type = DataParticleType.INSTRUMENT_RECOVERED
class CtdpfJCsppInstrumentTelemeteredDataParticle(CtdpfJCsppInstrumentDataParticle):
    """
    Class for building a ctdpf_j_cspp telemetered instrument data particle
    """
    # Stream name this particle is published under.
    _data_particle_type = DataParticleType.INSTRUMENT_TELEMETERED
class CtdpfJCsppParser(CsppParser):
    """Parser for ctdpf_j_cspp; all record handling lives in CsppParser."""

    def __init__(self,
                 config,
                 stream_handle,
                 exception_callback):
        """
        This method is a constructor that will instantiate an CtdpfJCsppParser object.
        @param config The configuration for this CtdpfJCsppParser parser
        @param stream_handle The handle to the data stream containing the ctdpf_j_cspp data
        @param exception_callback The function to call to report exceptions
        """
        # Call the superclass constructor; DATA_REGEX selects the records,
        # and no ignore pattern is needed for this stream.
        super(CtdpfJCsppParser, self).__init__(config,
                                               stream_handle,
                                               exception_callback,
                                               DATA_REGEX,
                                               ignore_matcher=None)
| |
#!/usr/bin/python
#
# Copyright 2016 Michael Sparks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from __future__ import absolute_import
import re
import ply
# ------------------------------------------------------------
import ply.lex as lex
# Lexer states: BLOCKS is the INITIAL-equivalent state used while measuring
# a line's indentation; NORMAL tokenizes the line's contents; ENDBLOCKS
# closes out pending blocks.
states = (
    ('NORMAL', 'exclusive'),     # Normal state for parsing
    ('BLOCKS', 'exclusive'),     # Used for checking block structure - this is also our INITIAL state
    ('ENDBLOCKS', 'exclusive'),  # Used for closing block structure
)

# Number of columns a tab character counts for when measuring indentation.
tabsize = 8

keywords = [ "and", "not", "or",
             "True", "False",
             "class", "def", "yield", "return",
             "while", "for", "in", "if", "elif", "else", "break", "continue",
             "from", "import",
             "pass",
             # "print" # DISABLED, due to removal of print statement in favour of print function
             #         # DISABLED, LEFT IN CODE TO ASSIST WITH yield implementation later.
           ]

tokens = [
    'NUMBER',
    'INTEGER',
    'FLOAT',
    'HEX',
    'OCTAL',
    'BINARY',
    'STRING',
    'IDENTIFIER',
    'BOOLEAN',
    'CHARACTER',
    'UNSIGNEDLONG',
    'SIGNEDLONG'
]

punctuation = [ "COMMA", "PARENL", "PARENR", "COLON", "TIMES", "DIVIDE", "PLUS", "MINUS", "POWER", "DOT",
                "COMPARISON_OPERATOR",
                "ASSIGN"
              ]

structural = [ "EOL", "INDENT", "DEDENT" ]

# Every keyword (except the boolean literals) doubles as its own token type.
tokens += [ x.upper() for x in keywords if (x not in [ "True", "False" ])]
tokens += punctuation
tokens += structural
## Regular expression rules for simple cases
# NOTE: PLY sorts string rules by decreasing regex length, so POWER ('**')
# is tried before TIMES ('*').
t_NORMAL_COLON = r':'
t_NORMAL_PLUS = r'\+'
t_NORMAL_MINUS = r'-'
t_NORMAL_TIMES = r'\*'
t_NORMAL_DIVIDE = r'/'
t_NORMAL_POWER = r'\*\*'
t_NORMAL_PARENL = r'\('
t_NORMAL_PARENR = r'\)'
t_NORMAL_ASSIGN = r'='
t_NORMAL_COMMA = r','
t_NORMAL_COMPARISON_OPERATOR = r'(<>|==|>=|<=|!=|<|>|in|not +in|is|is +not)'

# Things ignored inside various states
t_NORMAL_ignore = ' \t'
t_INITIAL_BLOCKS_ignore = ''
t_ENDBLOCKS_ignore = ''
def t_NORMAL_SCHARACTER(t):
    r"c'([^\\']|(\\.))'"
    # Single-quoted character literal: strip the c'...' wrapper and
    # un-escape quotes, then re-tag as a CHARACTER token.
    body = t.value[2:-1]
    body = body.replace("\\'", "'")
    body = body.replace('\\"', '"')
    t.value = body
    t.type = "CHARACTER"
    return t
def t_NORMAL_DCHARACTER(t):
    r'c"([^\\"]|(\\.))"'
    # Double-quoted character literal: same treatment as the single-quoted
    # form — unwrap, un-escape quotes, re-tag as CHARACTER.
    body = t.value[2:-1]
    body = body.replace("\\'", "'")
    body = body.replace('\\"', '"')
    t.value = body
    t.type = "CHARACTER"
    return t
def t_NORMAL_UNSIGNEDLONG(t):
    r'\d+l'
    # Drop the trailing 'l' suffix and convert the remaining digits.
    digits = t.value[:-1]
    t.value = int(digits)
    return t
def t_NORMAL_SIGNEDLONG(t):
    r'\d+L'
    # Drop the trailing 'L' suffix and convert the remaining digits.
    digits = t.value[:-1]
    t.value = int(digits)
    return t
def t_NORMAL_IDENTIFIER(t):
    r'[a-zA-Z_][a-zA-Z0-9_]*'
    # Identifier or reserved word.  Names found in the module-level
    # `keywords` list are promoted to their own token types; the boolean
    # literals become BOOLEAN tokens carrying an actual Python bool.
    word = t.value
    if word not in keywords:
        return t
    if word == "True" or word == "False":
        t.type = "BOOLEAN"
        t.value = (word == "True")
    else:
        # Any other keyword is reported as its own upper-cased token type.
        t.type = word.upper()
    return t
def t_NORMAL_SQUOTESTRING(t):
    r"'([^\\']|(\\.))*'"
    # Single-quoted string literal: strip the quotes and unescape \' only.
    t.value = t.value[1:-1]
    t.value = t.value.replace('\\\'', '\'')
    t.type = "STRING"
    return t
def t_NORMAL_DQUOTESTRING(t):
    r'"([^\\"]|(\\.))*"'
    # Double-quoted string literal: strip the quotes and unescape \" only.
    # Both quote styles collapse into the single STRING token type.
    t.value = t.value[1:-1]
    t.value = t.value.replace('\\\"', '\"')
    t.type = "STRING"
    return t
# A regular expression rule with some action code
def t_NORMAL_BINARY(t):
    r'0b[01]+'
    # Binary integer literal, e.g. 0b1011 -> 11.
    #
    # FIX: the regex used to be r'0b\d+', which also matched digits 2-9
    # (e.g. "0b123"); int(value, 2) then raised ValueError and crashed the
    # lexer.  Restricting the rule to [01] leaves malformed literals to the
    # other rules / the error handler instead.
    t.value = int(t.value, 2)
    return t
def t_NORMAL_OCTAL(t):
    r'0o[0-7]+'
    # Octal integer literal, e.g. 0o17 -> 15.
    #
    # FIX: the regex used to be r'0o\d+', which also matched the digits 8
    # and 9 (e.g. "0o8"); int(value, 8) then raised ValueError and crashed
    # the lexer.  Only true octal digits are accepted now.
    t.value = int(t.value, 8)
    return t
def t_NORMAL_HEX(t):
    r'0x[0-9a-fA-F]+'
    # Hexadecimal integer literal, e.g. 0x1f or 0x1F -> 31.
    #
    # FIX: the regex used to be r'0x([abcdef]|\d)+', which rejected
    # uppercase hex digits, so "0xFF" split into NUMBER "0" plus
    # IDENTIFIER "xFF".  int(value, 16) already accepts both cases.
    t.value = int(t.value, 16)
    return t
def t_NORMAL_FLOAT(t):
    r'\d+\.\d+'
    # Floating point literal.  Defined before NUMBER so "1.5" is lexed as
    # one FLOAT rather than NUMBER DOT NUMBER (PLY tries function rules in
    # definition order).
    t.value = float(t.value)
    return t
t_NORMAL_DOT = r'\.'
def t_NORMAL_NUMBER(t):
    r'\d+'
    # Plain decimal integer; the hex/octal/binary/long forms are captured
    # by the earlier, more specific rules.
    t.value = int(t.value)
    return t
# Define a rule so we can track line numbers
def t_INITIAL_NORMAL_ENDBLOCKS_EOL(t):
    r'\n+'
    # End of line while scanning tokens: emit EOL, reset the measured
    # indent, and switch to BLOCKS so the next line's leading whitespace
    # is measured again.
    t.lexer.curr_indent = 0
    t.lexer.lineno += len(t.value)
    t.lexer.begin('BLOCKS') # We are always in BLOCKS state if we reach EOL
    return t
def t_BLOCKS_EOL(t):
    r'\n+'
    # Newline reached while still measuring indentation (i.e. a blank
    # line): emit EOL and stay in BLOCKS for the following line.
    t.lexer.curr_indent = 0
    t.lexer.lineno += len(t.value)
    return t
def t_NORMAL_INCLUDELINE(t):
    r'\#include.*'
    # Collect '#include' directives on lexer.includes for later processing.
    # Nothing is returned, so the line produces no token for the parser.
    print("WE SAW A #INCLUDE line! :-)")  # debug trace
    print("It was this:", repr(t.value))  # debug trace
    t.lexer.includes.append(t.value)
def t_INITIAL_BLOCKS_WS(t):
    r'[ \t]+'
    # All leading whitespace on a line.
    #
    # Record the line's indent width on the lexer (a tab expands to
    # `tabsize` columns, a space counts as one) so the INDENT/DEDENT
    # machinery can compare nesting levels.  No token is emitted here.
    t.lexer.curr_indent = sum(tabsize if ch == "\t" else 1 for ch in t.value)
def t_INITIAL_BLOCKS_INDENT(t):
    r'[^ \t\n]'
    #
    # Trigger on first non-whitespace character on the line
    #
    # Put it back in the lexer, because we don't want to consume it.
    t.lexer.lexpos -= 1
    # Decide whether to switch to ENDBLOCKS or NORMAL mode
    curr_indent = t.lexer.curr_indent
    # Pop every indentation level deeper than this line; each popped level
    # must later be paid back as one DEDENT token.
    dedents_needed = 0
    while t.lexer.indents[-1] > curr_indent:
        t.lexer.indents.pop()
        dedents_needed += 1
    if dedents_needed > 0:
        # Hand over to ENDBLOCKS, which emits the DEDENTs one at a time.
        t.lexer.dedents_needed = dedents_needed
        t.lexer.begin('ENDBLOCKS')
        return
    # Not closing a block, so parsing inside a block
    # If it's a new one, add it to the "lexer.indents" stack
    if curr_indent > t.lexer.indents[-1]:
        t.lexer.indents.append(t.lexer.curr_indent)
        # The pushed-back character re-triggers this rule on the next call;
        # the indent then matches the stack top and we fall through to NORMAL.
        print("EMITTING INDENT", t)  # debug trace
        return t
    t.lexer.begin('NORMAL')
def t_ENDBLOCKS_DEDENT(t):
    r'.'
    # We use the lexer to re-call this function as many times as we need to
    # emit a DEDENT token
    #
    # We do this by decrementing our counter, and pushing back the token that
    # brought us here
    # When the counter reaches 0, we switch to the NORMAL state.
    #
    t.lexer.lexpos -= 1
    # This allows us to emit as many DEDENT tokens as necessary.
    if t.lexer.dedents_needed > 0:
        t.lexer.dedents_needed -= 1
        print("EMITTING DEDENT", t)  # debug trace
        return t
    # All DEDENTs paid back: resume ordinary scanning of this line.
    t.lexer.begin('NORMAL')
# Error handling rule
def t_ANY_error(t):
    # Report the offending character and skip one character so lexing
    # can continue (applies in every lexer state).
    print("Illegal character '%s'" % t.value[0], t)
    t.lexer.skip(1)
def build_lexer():
    """Build and return a PLY lexer initialized for a fresh parse.

    The returned lexer carries the bookkeeping attributes used by the
    indentation rules above: ``includes`` (collected '#include' lines),
    ``curr_indent`` (indent width of the current line) and ``indents``
    (the stack of open indentation levels, starting at column 0).
    """
    # FIX: `re` was referenced here (reflags) without being imported
    # anywhere in this module, raising NameError at call time.
    import re
    # Build the lexer
    lexer = lex.lex(reflags=re.MULTILINE)
    lexer.includes = []
    lexer.lineno = 1
    lexer.curr_indent = 0
    lexer.indents = [0]
    return lexer
# lexer = build_lexer()
if __name__ == "__main__":
    # Manual smoke test: lex a sample covering every keyword, boolean
    # literals, assignments, an indented block and several numeric literal
    # forms, then dump the token stream.
    lexer = build_lexer()
    # Test it out
    data = '''\
and not or
True False
class def yield return
while for in if elif else break continue
from import
pass
print
first = 1
second = 2
third = 3
if 1:
    print first, second, third
print 1, 2, "hello"
print 1, 2.1, 0x20, 0b10101, 0100, True, False, "hello"
print -1, -2.1, -0x20, -0b10101, -0100, True, False, "hello"
'''
    # Give the lexer some input
    lexer.input(data)
    # Tokenize
    while True:
        tok = lexer.token()
        if not tok:
            break # No more input
        print(tok)
| |
""" Tools for doing common subexpression elimination.
"""
from __future__ import print_function, division
from sympy.core import Basic, Mul, Add, Pow, sympify, Symbol, Tuple
from sympy.core.singleton import S
from sympy.core.function import _coeff_isneg
from sympy.core.exprtools import factor_terms
from sympy.core.compatibility import iterable, range
from sympy.utilities.iterables import filter_symbols, \
numbered_symbols, sift, topological_sort, ordered
from . import cse_opts
# (preprocessor, postprocessor) pairs which are commonly useful. They should
# each take a sympy expression and return a possibly transformed expression.
# When used in the function ``cse()``, the target expressions will be transformed
# by each of the preprocessor functions in order. After the common
# subexpressions are eliminated, each resulting expression will have the
# postprocessor functions transform them in *reverse* order in order to undo the
# transformation if necessary. This allows the algorithm to operate on
# a representation of the expressions that allows for more optimization
# opportunities.
# ``None`` can be used to specify no transformation for either the preprocessor or
# postprocessor.
# Default "basic" optimization pipeline: the Sub pre/post pair plus
# factor_terms (which needs no postprocessing step, hence the None).
basic_optimizations = [(cse_opts.sub_pre, cse_opts.sub_post),
                       (factor_terms, None)]
# sometimes we want the output in a different format; non-trivial
# transformations can be put here for users
# ===============================================================
def reps_toposort(r):
    """Sort replacements `r` so (k1, v1) appears before (k2, v2)
    if k2 is in v1's free symbols. This orders items in the
    way that cse returns its results (hence, in order to use the
    replacements in a substitution option it would make sense
    to reverse the order).
    Examples
    ========
    >>> from sympy.simplify.cse_main import reps_toposort
    >>> from sympy.abc import x, y
    >>> from sympy import Eq
    >>> for l, r in reps_toposort([(x, y + 1), (y, 2)]):
    ...     print(Eq(l, r))
    ...
    Eq(y, 2)
    Eq(x, y + 1)
    """
    r = sympify(r)
    # Edge (i, j) means replacement i must come before replacement j
    # because j's value references i's symbol.
    edges = [(i, j)
             for i, (k1, v1) in enumerate(r)
             for j, (k2, v2) in enumerate(r)
             if k1 in v2.free_symbols]
    return [r[i] for i in topological_sort((range(len(r)), edges))]
def cse_separate(r, e):
    """Move expressions that are in the form (symbol, expr) out of the
    expressions and sort them into the replacements using the reps_toposort.
    Examples
    ========
    >>> from sympy.simplify.cse_main import cse_separate
    >>> from sympy.abc import x, y, z
    >>> from sympy import cos, exp, cse, Eq, symbols
    >>> x0, x1 = symbols('x:2')
    >>> eq = (x + 1 + exp((x + 1)/(y + 1)) + cos(y + 1))
    >>> cse([eq, Eq(x, z + 1), z - 2], postprocess=cse_separate) in [
    ... [[(x0, y + 1), (x, z + 1), (x1, x + 1)],
    ...  [x1 + exp(x1/x0) + cos(x0), z - 2]],
    ... [[(x1, y + 1), (x, z + 1), (x0, x + 1)],
    ...  [x0 + exp(x0/x1) + cos(x1), z - 2]]]
    ...
    True
    """
    # Split the output expressions into "Symbol = expr" assignments (which
    # belong with the replacements) and everything else.
    by_assignment = sift(e, lambda w: w.is_Equality and w.lhs.is_Symbol)
    extra_reps = [w.args for w in by_assignment[True]]
    return [reps_toposort(r + extra_reps), by_assignment[False]]
# ====end of cse postprocess idioms===========================
def preprocess_for_cse(expr, optimizations):
    """Run every preprocessor over ``expr`` before CSE is performed.

    Parameters
    ----------
    expr : sympy expression
        The target expression to optimize.
    optimizations : list of (callable, callable) pairs
        The (preprocessor, postprocessor) pairs; only the preprocessors
        (the first element of each pair) are applied here, in order.

    Returns
    -------
    expr : sympy expression
        The transformed expression.
    """
    for pre, _post in optimizations:
        if pre is None:
            continue
        expr = pre(expr)
    return expr
def postprocess_for_cse(expr, optimizations):
    """Undo the preprocessing transformations after CSE has run.

    Parameters
    ----------
    expr : sympy expression
        The target expression to transform.
    optimizations : list of (callable, callable) pairs, optional
        The (preprocessor, postprocessor) pairs.  The postprocessors (the
        second element of each pair) are applied in reversed order so the
        effects of the preprocessors are undone correctly.

    Returns
    -------
    expr : sympy expression
        The transformed expression.
    """
    for _pre, post in optimizations[::-1]:
        if post is None:
            continue
        expr = post(expr)
    return expr
def opt_cse(exprs, order='canonical'):
    """Find optimization opportunities in Adds, Muls, Pows and negative
    coefficient Muls
    Parameters
    ----------
    exprs : list of sympy expressions
        The expressions to optimize.
    order : string, 'none' or 'canonical'
        The order by which Mul and Add arguments are processed. For large
        expressions where speed is a concern, use the setting order='none'.
    Returns
    -------
    opt_subs : dictionary of expression substitutions
        The expression substitutions which can be useful to optimize CSE.
    Examples
    ========
    >>> from sympy.simplify.cse_main import opt_cse
    >>> from sympy.abc import x
    >>> opt_subs = opt_cse([x**-2])
    >>> print(opt_subs)
    {x**(-2): 1/(x**2)}
    """
    opt_subs = dict()
    adds = set()
    muls = set()
    seen_subexp = set()
    def _find_opts(expr):
        # Walk the expression tree once, collecting every Mul/Add seen and
        # rewriting negations and negative powers into unevaluated forms
        # that expose more sharing opportunities.
        if expr.is_Atom or expr.is_Order:
            return
        if iterable(expr):
            list(map(_find_opts, expr))
            return
        if expr in seen_subexp:
            return expr
        seen_subexp.add(expr)
        list(map(_find_opts, expr.args))
        if _coeff_isneg(expr):
            # Represent -e as (-1)*e without evaluating, so e itself can be
            # shared with other occurrences.
            neg_expr = -expr
            if not neg_expr.is_Atom:
                opt_subs[expr] = Mul(S.NegativeOne, neg_expr, evaluate=False)
                seen_subexp.add(neg_expr)
                expr = neg_expr
        if expr.is_Mul:
            muls.add(expr)
        elif expr.is_Add:
            adds.add(expr)
        elif expr.is_Pow:
            if _coeff_isneg(expr.exp):
                # x**-n -> (x**n)**-1 so the positive power can be shared.
                opt_subs[expr] = Pow(Pow(expr.base, -expr.exp), S.NegativeOne,
                                     evaluate=False)
    for e in exprs:
        if isinstance(e, Basic):
            _find_opts(e)
    ## Process Adds and commutative Muls
    def _match_common_args(Func, funcs):
        # For every pair of Adds (or commutative Muls) that share more than
        # one argument, factor the shared arguments into a common
        # subexpression Func(*com_args) so tree_cse can pull it out.
        if order != 'none':
            funcs = list(ordered(funcs))
        else:
            funcs = sorted(funcs, key=lambda x: len(x.args))
        func_args = [set(e.args) for e in funcs]
        for i in range(len(func_args)):
            for j in range(i + 1, len(func_args)):
                com_args = func_args[i].intersection(func_args[j])
                if len(com_args) > 1:
                    com_func = Func(*com_args)
                    # for all sets, replace the common symbols by the function
                    # over them, to allow recursive matches
                    diff_i = func_args[i].difference(com_args)
                    func_args[i] = diff_i | set([com_func])
                    if diff_i:
                        opt_subs[funcs[i]] = Func(Func(*diff_i), com_func,
                                                  evaluate=False)
                    diff_j = func_args[j].difference(com_args)
                    func_args[j] = diff_j | set([com_func])
                    opt_subs[funcs[j]] = Func(Func(*diff_j), com_func,
                                              evaluate=False)
                    # Later entries containing the full common set get the
                    # same factored rewrite.
                    for k in range(j + 1, len(func_args)):
                        if not com_args.difference(func_args[k]):
                            diff_k = func_args[k].difference(com_args)
                            func_args[k] = diff_k | set([com_func])
                            opt_subs[funcs[k]] = Func(Func(*diff_k), com_func,
                                                      evaluate=False)
    # split muls into commutative
    comutative_muls = set()
    for m in muls:
        c, nc = m.args_cnc(cset=True)
        if c:
            c_mul = Mul(*c)
            if nc:
                # Keep the commutative part separate from the
                # non-commutative tail so it can be matched below.
                opt_subs[m] = Mul(c_mul, Mul(*nc), evaluate=False)
            if len(c) > 1:
                comutative_muls.add(c_mul)
    _match_common_args(Add, adds)
    _match_common_args(Mul, comutative_muls)
    return opt_subs
def tree_cse(exprs, symbols, opt_subs=None, order='canonical'):
    """Perform raw CSE on expression tree, taking opt_subs into account.
    Parameters
    ==========
    exprs : list of sympy expressions
        The expressions to reduce.
    symbols : infinite iterator yielding unique Symbols
        The symbols used to label the common subexpressions which are pulled
        out.
    opt_subs : dictionary of expression substitutions
        The expressions to be substituted before any CSE action is performed.
    order : string, 'none' or 'canonical'
        The order by which Mul and Add arguments are processed. For large
        expressions where speed is a concern, use the setting order='none'.

    Returns
    =======
    (replacements, reduced_exprs) : the (symbol, subexpression) pairs that
        were pulled out, and the input expressions rewritten in terms of them.
    """
    if opt_subs is None:
        opt_subs = dict()
    ## Find repeated sub-expressions
    to_eliminate = set()
    seen_subexp = set()
    def _find_repeated(expr):
        # First pass: any subexpression encountered twice is marked for
        # elimination.  opt_subs rewrites are applied before descending so
        # sharing is detected on the optimized forms.
        if expr.is_Atom or expr.is_Order:
            return
        if iterable(expr):
            args = expr
        else:
            if expr in seen_subexp:
                to_eliminate.add(expr)
                return
            seen_subexp.add(expr)
            if expr in opt_subs:
                expr = opt_subs[expr]
            args = expr.args
        list(map(_find_repeated, args))
    for e in exprs:
        if isinstance(e, Basic):
            _find_repeated(e)
    ## Rebuild tree
    replacements = []
    subs = dict()
    def _rebuild(expr):
        # Second pass: rebuild each expression bottom-up, replacing every
        # marked subexpression with a fresh symbol and recording the
        # (symbol, subexpression) pair in `replacements`.
        if not expr.args:
            return expr
        if iterable(expr):
            new_args = [_rebuild(arg) for arg in expr]
            return expr.func(*new_args)
        if expr in subs:
            return subs[expr]
        orig_expr = expr
        if expr in opt_subs:
            expr = opt_subs[expr]
        # If enabled, parse Muls and Adds arguments by order to ensure
        # replacement order independent from hashes
        if order != 'none':
            if expr.is_Mul:
                c, nc = expr.args_cnc()
                args = list(ordered(c)) + nc
            elif expr.is_Add:
                args = list(ordered(expr.args))
            else:
                args = expr.args
        else:
            args = expr.args
        new_args = list(map(_rebuild, args))
        if new_args != args:
            new_expr = expr.func(*new_args)
        else:
            new_expr = expr
        if orig_expr in to_eliminate:
            try:
                sym = next(symbols)
            except StopIteration:
                raise ValueError("Symbols iterator ran out of symbols.")
            subs[orig_expr] = sym
            replacements.append((sym, new_expr))
            return sym
        else:
            return new_expr
    reduced_exprs = []
    for e in exprs:
        if isinstance(e, Basic):
            reduced_e = _rebuild(e)
        else:
            # Non-Basic entries (e.g. plain numbers) pass through untouched.
            reduced_e = e
        reduced_exprs.append(reduced_e)
    return replacements, reduced_exprs
def cse(exprs, symbols=None, optimizations=None, postprocess=None,
        order='canonical'):
    """ Perform common subexpression elimination on an expression.
    Parameters
    ==========
    exprs : list of sympy expressions, or a single sympy expression
        The expressions to reduce.
    symbols : infinite iterator yielding unique Symbols
        The symbols used to label the common subexpressions which are pulled
        out. The ``numbered_symbols`` generator is useful. The default is a
        stream of symbols of the form "x0", "x1", etc. This must be an
        infinite iterator.
    optimizations : list of (callable, callable) pairs
        The (preprocessor, postprocessor) pairs of external optimization
        functions. Optionally 'basic' can be passed for a set of predefined
        basic optimizations. Such 'basic' optimizations were used by default
        in old implementation, however they can be really slow on larger
        expressions. Now, no pre or post optimizations are made by default.
    postprocess : a function which accepts the two return values of cse and
        returns the desired form of output from cse, e.g. if you want the
        replacements reversed the function might be the following lambda:
        lambda r, e: return reversed(r), e
    order : string, 'none' or 'canonical'
        The order by which Mul and Add arguments are processed. If set to
        'canonical', arguments will be canonically ordered. If set to 'none',
        ordering will be faster but dependent on expressions hashes, thus
        machine dependent and variable. For large expressions where speed is a
        concern, use the setting order='none'.
    Returns
    =======
    replacements : list of (Symbol, expression) pairs
        All of the common subexpressions that were replaced. Subexpressions
        earlier in this list might show up in subexpressions later in this
        list.
    reduced_exprs : list of sympy expressions
        The reduced expressions with all of the replacements above.
    Examples
    ========
    >>> from sympy import cse, SparseMatrix
    >>> from sympy.abc import x, y, z, w
    >>> cse(((w + x + y + z)*(w + y + z))/(w + x)**3)
    ([(x0, y + z), (x1, w + x)], [(w + x0)*(x0 + x1)/x1**3])
    Note that currently, y + z will not get substituted if -y - z is used.
    >>> cse(((w + x + y + z)*(w - y - z))/(w + x)**3)
    ([(x0, w + x)], [(w - y - z)*(x0 + y + z)/x0**3])
    List of expressions with recursive substitutions:
    >>> m = SparseMatrix([x + y, x + y + z])
    >>> cse([(x+y)**2, x + y + z, y + z, x + z + y, m])
    ([(x0, x + y), (x1, x0 + z)], [x0**2, x1, y + z, x1, Matrix([
    [x0],
    [x1]])])
    Note: the type and mutability of input matrices is retained.
    >>> isinstance(_[1][-1], SparseMatrix)
    True
    """
    from sympy.matrices import (MatrixBase, Matrix, ImmutableMatrix,
                                SparseMatrix, ImmutableSparseMatrix)
    # Handle the case if just one expression was passed.
    if isinstance(exprs, (Basic, MatrixBase)):
        exprs = [exprs]
    # Remember the original inputs so matrix results can be rebuilt with
    # their original type and mutability at the end.
    copy = exprs
    # Flatten matrices into Tuples so the tree walkers can process them.
    temp = []
    for e in exprs:
        if isinstance(e, (Matrix, ImmutableMatrix)):
            temp.append(Tuple(*e._mat))
        elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)):
            temp.append(Tuple(*e._smat.items()))
        else:
            temp.append(e)
    exprs = temp
    del temp
    if optimizations is None:
        optimizations = list()
    elif optimizations == 'basic':
        optimizations = basic_optimizations
    # Preprocess the expressions to give us better optimization opportunities.
    reduced_exprs = [preprocess_for_cse(e, optimizations) for e in exprs]
    # Symbols already present in the input must never be used as labels.
    excluded_symbols = set().union(*[expr.atoms(Symbol)
                                     for expr in reduced_exprs])
    if symbols is None:
        symbols = numbered_symbols()
    else:
        # In case we get passed an iterable with an __iter__ method instead of
        # an actual iterator.
        symbols = iter(symbols)
    symbols = filter_symbols(symbols, excluded_symbols)
    # Find other optimization opportunities.
    opt_subs = opt_cse(reduced_exprs, order)
    # Main CSE algorithm.
    replacements, reduced_exprs = tree_cse(reduced_exprs, symbols, opt_subs,
                                           order)
    # Postprocess the expressions to return the expressions to canonical form.
    exprs = copy
    for i, (sym, subtree) in enumerate(replacements):
        subtree = postprocess_for_cse(subtree, optimizations)
        replacements[i] = (sym, subtree)
    reduced_exprs = [postprocess_for_cse(e, optimizations)
                     for e in reduced_exprs]
    # Get the matrices back
    for i, e in enumerate(exprs):
        if isinstance(e, (Matrix, ImmutableMatrix)):
            reduced_exprs[i] = Matrix(e.rows, e.cols, reduced_exprs[i])
            if isinstance(e, ImmutableMatrix):
                reduced_exprs[i] = reduced_exprs[i].as_immutable()
        elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)):
            m = SparseMatrix(e.rows, e.cols, {})
            for k, v in reduced_exprs[i]:
                m[k] = v
            if isinstance(e, ImmutableSparseMatrix):
                m = m.as_immutable()
            reduced_exprs[i] = m
    if postprocess is None:
        return replacements, reduced_exprs
    return postprocess(replacements, reduced_exprs)
| |
#! /usr/bin/env python
from __future__ import division, absolute_import, print_function
# System imports
from distutils.util import get_platform
import os
import sys
import unittest
# Import NumPy
import numpy as np
# NumPy version probe: very old (0.x) NumPy raised TypeError for bad
# sequence arguments where newer versions raise ValueError; the tests use
# BadListError so they pass against either.
major, minor = [ int(d) for d in np.__version__.split(".")[:2] ]
if major == 0:
    BadListError = TypeError
else:
    BadListError = ValueError
import Array
######################################################################
class Array1TestCase(unittest.TestCase):
    """Tests for the SWIG-wrapped 1-D ``Array.Array1`` class.

    FIX: ``failUnless`` (deprecated since Python 2.7, removed in Python
    3.12) is replaced throughout by its modern alias ``assertTrue``.
    """
    def setUp(self):
        self.length = 5
        self.array1 = Array.Array1(self.length)
    def testConstructor0(self):
        "Test Array1 default constructor"
        a = Array.Array1()
        self.assertTrue(isinstance(a, Array.Array1))
        self.assertTrue(len(a) == 0)
    def testConstructor1(self):
        "Test Array1 length constructor"
        self.assertTrue(isinstance(self.array1, Array.Array1))
    def testConstructor2(self):
        "Test Array1 array constructor"
        na = np.arange(self.length)
        aa = Array.Array1(na)
        self.assertTrue(isinstance(aa, Array.Array1))
    def testConstructor3(self):
        "Test Array1 copy constructor"
        for i in range(self.array1.length()): self.array1[i] = i
        arrayCopy = Array.Array1(self.array1)
        self.assertTrue(arrayCopy == self.array1)
    def testConstructorBad(self):
        "Test Array1 length constructor, negative"
        self.assertRaises(ValueError, Array.Array1, -4)
    def testLength(self):
        "Test Array1 length method"
        self.assertTrue(self.array1.length() == self.length)
    def testLen(self):
        "Test Array1 __len__ method"
        self.assertTrue(len(self.array1) == self.length)
    def testResize0(self):
        "Test Array1 resize method, length"
        newLen = 2 * self.length
        self.array1.resize(newLen)
        self.assertTrue(len(self.array1) == newLen)
    def testResize1(self):
        "Test Array1 resize method, array"
        a = np.zeros((2*self.length,), dtype='l')
        self.array1.resize(a)
        self.assertTrue(len(self.array1) == a.size)
    def testResizeBad(self):
        "Test Array1 resize method, negative length"
        self.assertRaises(ValueError, self.array1.resize, -5)
    def testSetGet(self):
        "Test Array1 __setitem__, __getitem__ methods"
        n = self.length
        for i in range(n):
            self.array1[i] = i*i
        for i in range(n):
            self.assertTrue(self.array1[i] == i*i)
    def testSetBad1(self):
        "Test Array1 __setitem__ method, negative index"
        self.assertRaises(IndexError, self.array1.__setitem__, -1, 0)
    def testSetBad2(self):
        "Test Array1 __setitem__ method, out-of-range index"
        self.assertRaises(IndexError, self.array1.__setitem__, self.length+1, 0)
    def testGetBad1(self):
        "Test Array1 __getitem__ method, negative index"
        self.assertRaises(IndexError, self.array1.__getitem__, -1)
    def testGetBad2(self):
        "Test Array1 __getitem__ method, out-of-range index"
        self.assertRaises(IndexError, self.array1.__getitem__, self.length+1)
    def testAsString(self):
        "Test Array1 asString method"
        for i in range(self.array1.length()): self.array1[i] = i+1
        self.assertTrue(self.array1.asString() == "[ 1, 2, 3, 4, 5 ]")
    def testStr(self):
        "Test Array1 __str__ method"
        for i in range(self.array1.length()): self.array1[i] = i-2
        self.assertTrue(str(self.array1) == "[ -2, -1, 0, 1, 2 ]")
    def testView(self):
        "Test Array1 view method"
        for i in range(self.array1.length()): self.array1[i] = i+1
        a = self.array1.view()
        self.assertTrue(isinstance(a, np.ndarray))
        self.assertTrue(len(a) == self.length)
        self.assertTrue((a == [1, 2, 3, 4, 5]).all())
######################################################################
class Array2TestCase(unittest.TestCase):
    """Tests for the SWIG-wrapped 2-D ``Array.Array2`` class.

    FIX: ``failUnless`` (deprecated since Python 2.7, removed in Python
    3.12) is replaced throughout by its modern alias ``assertTrue``.
    """
    def setUp(self):
        self.nrows = 5
        self.ncols = 4
        self.array2 = Array.Array2(self.nrows, self.ncols)
    def testConstructor0(self):
        "Test Array2 default constructor"
        a = Array.Array2()
        self.assertTrue(isinstance(a, Array.Array2))
        self.assertTrue(len(a) == 0)
    def testConstructor1(self):
        "Test Array2 nrows, ncols constructor"
        self.assertTrue(isinstance(self.array2, Array.Array2))
    def testConstructor2(self):
        "Test Array2 array constructor"
        na = np.zeros((3, 4), dtype="l")
        aa = Array.Array2(na)
        self.assertTrue(isinstance(aa, Array.Array2))
    def testConstructor3(self):
        "Test Array2 copy constructor"
        for i in range(self.nrows):
            for j in range(self.ncols):
                self.array2[i][j] = i * j
        arrayCopy = Array.Array2(self.array2)
        self.assertTrue(arrayCopy == self.array2)
    def testConstructorBad1(self):
        "Test Array2 nrows, ncols constructor, negative nrows"
        self.assertRaises(ValueError, Array.Array2, -4, 4)
    def testConstructorBad2(self):
        "Test Array2 nrows, ncols constructor, negative ncols"
        self.assertRaises(ValueError, Array.Array2, 4, -4)
    def testNrows(self):
        "Test Array2 nrows method"
        self.assertTrue(self.array2.nrows() == self.nrows)
    def testNcols(self):
        "Test Array2 ncols method"
        self.assertTrue(self.array2.ncols() == self.ncols)
    def testLen(self):
        "Test Array2 __len__ method"
        self.assertTrue(len(self.array2) == self.nrows*self.ncols)
    def testResize0(self):
        "Test Array2 resize method, size"
        newRows = 2 * self.nrows
        newCols = 2 * self.ncols
        self.array2.resize(newRows, newCols)
        self.assertTrue(len(self.array2) == newRows * newCols)
    def testResize1(self):
        "Test Array2 resize method, array"
        a = np.zeros((2*self.nrows, 2*self.ncols), dtype='l')
        self.array2.resize(a)
        self.assertTrue(len(self.array2) == a.size)
    def testResizeBad1(self):
        "Test Array2 resize method, negative nrows"
        self.assertRaises(ValueError, self.array2.resize, -5, 5)
    def testResizeBad2(self):
        "Test Array2 resize method, negative ncols"
        self.assertRaises(ValueError, self.array2.resize, 5, -5)
    def testSetGet1(self):
        "Test Array2 __setitem__, __getitem__ methods"
        m = self.nrows
        n = self.ncols
        array1 = [ ]
        a = np.arange(n, dtype="l")
        for i in range(m):
            array1.append(Array.Array1(i*a))
        for i in range(m):
            self.array2[i] = array1[i]
        for i in range(m):
            self.assertTrue(self.array2[i] == array1[i])
    def testSetGet2(self):
        "Test Array2 chained __setitem__, __getitem__ methods"
        m = self.nrows
        n = self.ncols
        for i in range(m):
            for j in range(n):
                self.array2[i][j] = i*j
        for i in range(m):
            for j in range(n):
                self.assertTrue(self.array2[i][j] == i*j)
    def testSetBad1(self):
        "Test Array2 __setitem__ method, negative index"
        a = Array.Array1(self.ncols)
        self.assertRaises(IndexError, self.array2.__setitem__, -1, a)
    def testSetBad2(self):
        "Test Array2 __setitem__ method, out-of-range index"
        a = Array.Array1(self.ncols)
        self.assertRaises(IndexError, self.array2.__setitem__, self.nrows+1, a)
    def testGetBad1(self):
        "Test Array2 __getitem__ method, negative index"
        self.assertRaises(IndexError, self.array2.__getitem__, -1)
    def testGetBad2(self):
        "Test Array2 __getitem__ method, out-of-range index"
        self.assertRaises(IndexError, self.array2.__getitem__, self.nrows+1)
    def testAsString(self):
        "Test Array2 asString method"
        result = """\
[ [ 0, 1, 2, 3 ],
  [ 1, 2, 3, 4 ],
  [ 2, 3, 4, 5 ],
  [ 3, 4, 5, 6 ],
  [ 4, 5, 6, 7 ] ]
"""
        for i in range(self.nrows):
            for j in range(self.ncols):
                self.array2[i][j] = i+j
        self.assertTrue(self.array2.asString() == result)
    def testStr(self):
        "Test Array2 __str__ method"
        result = """\
[ [ 0, -1, -2, -3 ],
  [ 1, 0, -1, -2 ],
  [ 2, 1, 0, -1 ],
  [ 3, 2, 1, 0 ],
  [ 4, 3, 2, 1 ] ]
"""
        for i in range(self.nrows):
            for j in range(self.ncols):
                self.array2[i][j] = i-j
        self.assertTrue(str(self.array2) == result)
    def testView(self):
        "Test Array2 view method"
        a = self.array2.view()
        self.assertTrue(isinstance(a, np.ndarray))
        self.assertTrue(len(a) == self.nrows)
######################################################################
class ArrayZTestCase(unittest.TestCase):
    """Tests for the SWIG-wrapped complex-valued ``Array.ArrayZ`` class.

    FIX: ``failUnless`` (deprecated since Python 2.7, removed in Python
    3.12) is replaced throughout by its modern alias ``assertTrue``.
    """
    def setUp(self):
        self.length = 5
        self.array3 = Array.ArrayZ(self.length)
    def testConstructor0(self):
        "Test ArrayZ default constructor"
        a = Array.ArrayZ()
        self.assertTrue(isinstance(a, Array.ArrayZ))
        self.assertTrue(len(a) == 0)
    def testConstructor1(self):
        "Test ArrayZ length constructor"
        self.assertTrue(isinstance(self.array3, Array.ArrayZ))
    def testConstructor2(self):
        "Test ArrayZ array constructor"
        na = np.arange(self.length, dtype=np.complex128)
        aa = Array.ArrayZ(na)
        self.assertTrue(isinstance(aa, Array.ArrayZ))
    def testConstructor3(self):
        "Test ArrayZ copy constructor"
        for i in range(self.array3.length()): self.array3[i] = complex(i,-i)
        arrayCopy = Array.ArrayZ(self.array3)
        self.assertTrue(arrayCopy == self.array3)
    def testConstructorBad(self):
        "Test ArrayZ length constructor, negative"
        self.assertRaises(ValueError, Array.ArrayZ, -4)
    def testLength(self):
        "Test ArrayZ length method"
        self.assertTrue(self.array3.length() == self.length)
    def testLen(self):
        "Test ArrayZ __len__ method"
        self.assertTrue(len(self.array3) == self.length)
    def testResize0(self):
        "Test ArrayZ resize method, length"
        newLen = 2 * self.length
        self.array3.resize(newLen)
        self.assertTrue(len(self.array3) == newLen)
    def testResize1(self):
        "Test ArrayZ resize method, array"
        a = np.zeros((2*self.length,), dtype=np.complex128)
        self.array3.resize(a)
        self.assertTrue(len(self.array3) == a.size)
    def testResizeBad(self):
        "Test ArrayZ resize method, negative length"
        self.assertRaises(ValueError, self.array3.resize, -5)
    def testSetGet(self):
        "Test ArrayZ __setitem__, __getitem__ methods"
        n = self.length
        for i in range(n):
            self.array3[i] = i*i
        for i in range(n):
            self.assertTrue(self.array3[i] == i*i)
    def testSetBad1(self):
        "Test ArrayZ __setitem__ method, negative index"
        self.assertRaises(IndexError, self.array3.__setitem__, -1, 0)
    def testSetBad2(self):
        "Test ArrayZ __setitem__ method, out-of-range index"
        self.assertRaises(IndexError, self.array3.__setitem__, self.length+1, 0)
    def testGetBad1(self):
        "Test ArrayZ __getitem__ method, negative index"
        self.assertRaises(IndexError, self.array3.__getitem__, -1)
    def testGetBad2(self):
        "Test ArrayZ __getitem__ method, out-of-range index"
        self.assertRaises(IndexError, self.array3.__getitem__, self.length+1)
    def testAsString(self):
        "Test ArrayZ asString method"
        for i in range(self.array3.length()): self.array3[i] = complex(i+1,-i-1)
        self.assertTrue(self.array3.asString() == "[ (1,-1), (2,-2), (3,-3), (4,-4), (5,-5) ]")
    def testStr(self):
        "Test ArrayZ __str__ method"
        for i in range(self.array3.length()): self.array3[i] = complex(i-2,(i-2)*2)
        self.assertTrue(str(self.array3) == "[ (-2,-4), (-1,-2), (0,0), (1,2), (2,4) ]")
    def testView(self):
        "Test ArrayZ view method"
        for i in range(self.array3.length()): self.array3[i] = complex(i+1,i+2)
        a = self.array3.view()
        self.assertTrue(isinstance(a, np.ndarray))
        self.assertTrue(len(a) == self.length)
        self.assertTrue((a == [1+2j, 2+3j, 3+4j, 4+5j, 5+6j]).all())
######################################################################
if __name__ == "__main__":
    # Build the test suite.
    # FIX: unittest.makeSuite was deprecated in Python 3.11 and removed in
    # 3.13; use TestLoader.loadTestsFromTestCase instead.
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTest(loader.loadTestsFromTestCase(Array1TestCase))
    suite.addTest(loader.loadTestsFromTestCase(Array2TestCase))
    suite.addTest(loader.loadTestsFromTestCase(ArrayZTestCase))
    # Execute the test suite
    print("Testing Classes of Module Array")
    print("NumPy version", np.__version__)
    print()
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    # Exit status is the number of failing tests, for shell-level checks.
    sys.exit(len(result.errors) + len(result.failures))
| |
from image_registration.fft_tools import correlate2d,fast_ffts,dftups,upsample_image,zoom,shift
import image_registration # for doctests
import iterative_zoom
import warnings
import numpy as np
__all__ = ['chi2_shift','chi2_shift_iterzoom','chi2n_map']
def chi2_shift(im1, im2, err=None, upsample_factor='auto', boundary='wrap',
nthreads=1, use_numpy_fft=False, zeromean=False, nfitted=2,
verbose=False, return_error=True, return_chi2array=False,
max_auto_size=512, max_nsig=1.1):
"""
Find the offsets between image 1 and image 2 using the DFT upsampling method
(http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation/content/html/efficient_subpixel_registration.html)
combined with :math:`\chi^2` to measure the errors on the fit
Equation 1 gives the :math:`\chi^2` value as a function of shift, where Y
is the model as a function of shift:
.. math::
\chi^2(dx,dy) & = & \Sigma_{ij} \\frac{(X_{ij}-Y_{ij}(dx,dy))^2}{\sigma_{ij}^2} \\\\
..
& = & \Sigma_{ij} \left[ X_{ij}^2/\sigma_{ij}^2 - 2X_{ij}Y_{ij}(dx,dy)/\sigma_{ij}^2 + Y_{ij}(dx,dy)^2/\sigma_{ij}^2 \\right] \\\\
Equation 2-4:
.. math::
Term~1: f(dx,dy) & = & \Sigma_{ij} \\frac{X_{ij}^2}{\sigma_{ij}^2} \\\\
f(dx,dy) & = & f(0,0) , \\forall dx,dy \\\\
Term~2: g(dx,dy) & = & -2 \Sigma_{ij} \\frac{X_{ij}Y_{ij}(dx,dy)}{\sigma_{ij}^2} = -2 \Sigma_{ij} \left(\\frac{X_{ij}}{\sigma_{ij}^2}\\right) Y_{ij}(dx,dy) \\\\
Term~3: h(dx,dy) & = & \Sigma_{ij} \\frac{Y_{ij}(dx,dy)^2}{\sigma_{ij}^2} = \Sigma_{ij} \left(\\frac{1}{\sigma_{ij}^2}\\right) Y^2_{ij}(dx,dy)
The cross-correlation can be computed with fourier transforms, and is defined
.. math::
CC_{m,n}(x,y) = \Sigma_{ij} x^*_{ij} y_{(n+i)(m+j)}
which can then be applied to our problem, noting that the cross-correlation
has the same form as term 2 and 3 in :math:`\chi^2` (term 1 is a constant,
with no dependence on the shift)
.. math::
Term~2: & CC(X/\sigma^2,Y)[dx,dy] & = & \Sigma_{ij} \left(\\frac{X_{ij}}{\sigma_{ij}^2}\\right)^* Y_{ij}(dx,dy) \\\\
Term~3: & CC(\sigma^{-2},Y^2)[dx,dy] & = & \Sigma_{ij} \left(\\frac{1}{\sigma_{ij}^2}\\right)^* Y^2_{ij}(dx,dy) \\\\
Technically, only terms 2 and 3 has any effect on the resulting image,
since term 1 is the same for all shifts, and the quantity of interest is
:math:`\Delta \chi^2` when determining the best-fit shift and error.
Parameters
----------
im1 : np.ndarray
im2 : np.ndarray
The images to register.
err : np.ndarray
Per-pixel error in image 2
boundary : 'wrap','constant','reflect','nearest'
Option to pass to map_coordinates for determining what to do with
shifts outside of the boundaries.
upsample_factor : int or 'auto'
upsampling factor; governs accuracy of fit (1/usfac is best accuracy)
(can be "automatically" determined based on chi^2 error)
return_error : bool
Returns the "fit error" (1-sigma in x and y) based on the delta-chi2
values
return_chi2_array : bool
Returns the x and y shifts and the chi2 as a function of those shifts
in addition to other returned parameters. i.e., the last return from
this function will be a tuple (x, y, chi2)
zeromean : bool
Subtract the mean from the images before cross-correlating? If no, you
may get a 0,0 offset because the DC levels are strongly correlated.
verbose : bool
Print error message if upsampling factor is inadequate to measure errors
use_numpy_fft : bool
Force use numpy's fft over fftw? (only matters if you have fftw
installed)
nthreads : bool
Number of threads to use for fft (only matters if you have fftw
installed)
nfitted : int
number of degrees of freedom in the fit (used for chi^2 computations).
Should probably always be 2.
max_auto_size : int
Maximum zoom image size to create when using auto-upsampling
Returns
-------
dx,dy : float,float
Measures the amount im2 is offset from im1 (i.e., shift im2 by -1 *
these #'s to match im1)
errx,erry : float,float
optional, error in x and y directions
xvals,yvals,chi2n_upsampled : ndarray,ndarray,ndarray,
x,y positions (in original chi^2 coordinates) of the chi^2 values and
their corresponding chi^2 value
Examples
--------
Create a 2d array,
shift it in both directions,
then use chi2_shift to determine the shift
>>> rr = ((np.indices([100,100]) - np.array([50.,50.])[:,None,None])**2).sum(axis=0)**0.5
>>> image = np.exp(-rr**2/(3.**2*2.)) * 20
>>> shifted = np.roll(np.roll(image,12,0),5,1) + np.random.randn(100,100)
>>> dx,dy,edx,edy = chi2_shift(image, shifted, upsample_factor='auto')
>>> shifted2 = image_registration.fft_tools.shift2d(image,3.665,-4.25) + np.random.randn(100,100)
>>> dx2,dy2,edx2,edy2 = chi2_shift(image, shifted2, upsample_factor='auto')
"""
chi2,term1,term2,term3 = chi2n_map(im1, im2, err, boundary=boundary,
nthreads=nthreads, zeromean=zeromean, use_numpy_fft=use_numpy_fft,
return_all=True, reduced=False)
ymax, xmax = np.unravel_index(chi2.argmin(), chi2.shape)
# needed for ffts
im1 = np.nan_to_num(im1)
im2 = np.nan_to_num(im2)
ylen,xlen = im1.shape
xcen = xlen/2-(1-xlen%2)
ycen = ylen/2-(1-ylen%2)
# original shift calculation
yshift = ymax-ycen # shift im2 by these numbers to get im1
xshift = xmax-xcen
if verbose:
print "Coarse xmax/ymax = %i,%i, for offset %f,%f" % (xmax,ymax,xshift,yshift)
# below is sub-pixel zoom-in stuff
# find delta-chi^2 limiting values for varying DOFs
try:
import scipy.stats
# 1,2,3-sigma delta-chi2 levels
m1 = scipy.stats.chi2.ppf( 1-scipy.stats.norm.sf(1)*2, nfitted )
m2 = scipy.stats.chi2.ppf( 1-scipy.stats.norm.sf(2)*2, nfitted )
m3 = scipy.stats.chi2.ppf( 1-scipy.stats.norm.sf(3)*2, nfitted )
m_auto = scipy.stats.chi2.ppf( 1-scipy.stats.norm.sf(max_nsig)*2, nfitted )
except ImportError:
# assume m=2 (2 degrees of freedom)
m1 = 2.2957489288986364
m2 = 6.1800743062441734
m3 = 11.829158081900793
m_auto = 2.6088233328527037 # slightly >1 sigma
# biggest scale = where chi^2/n ~ 9 or 11.8 for M=2?
if upsample_factor=='auto':
# deltachi2 is not reduced deltachi2
deltachi2_lowres = (chi2 - chi2.min())
if verbose:
print "Minimum chi2: %g Max delta-chi2 (lowres): %g Min delta-chi2 (lowres): %g" % (chi2.min(),deltachi2_lowres.max(),deltachi2_lowres[deltachi2_lowres>0].min())
sigmamax_area = deltachi2_lowres<m_auto
if sigmamax_area.sum() > 1:
yy,xx = np.indices(sigmamax_area.shape)
xvals = xx[sigmamax_area]
yvals = yy[sigmamax_area]
xvrange = xvals.max()-xvals.min()
yvrange = yvals.max()-yvals.min()
size = max(xvrange,yvrange)
else:
size = 1
upsample_factor = max_auto_size/2. / size
if upsample_factor < 1:
upsample_factor = 1
s1 = s2 = max_auto_size
# zoom factor = s1 / upsample_factor = 2*size
zoom_factor = 2.*size
if verbose:
print "Selected upsample factor %0.1f for image size %i and zoom factor %0.1f (max-sigma range was %i for area %i)" % (upsample_factor, s1, zoom_factor, size, sigmamax_area.sum())
else:
s1,s2 = im1.shape
zoom_factor = s1/upsample_factor
if zoom_factor <= 1:
zoom_factor = 2
s1 = zoom_factor*upsample_factor
s2 = zoom_factor*upsample_factor
(yshifts_corrections,xshifts_corrections),chi2_ups = zoom.zoomnd(chi2,
usfac=upsample_factor, outshape=[s1,s2], offsets=[yshift,xshift],
return_xouts=True)
# deltachi2 is not reduced deltachi2
deltachi2_ups = (chi2_ups - chi2_ups.min())
if verbose:
print "Minimum chi2_ups: %g Max delta-chi2 (highres): %g Min delta-chi2 (highres): %g" % (chi2_ups.min(),deltachi2_ups.max(),deltachi2_ups[deltachi2_ups>0].min())
if verbose > 1:
pass
#if hasattr(term3_ups,'len'):
# print "term3_ups has shape ",term3_ups.shape," term2: ",term2_ups.shape," term1=",term1
#else:
# print "term2 shape: ",term2.shape," term1: ",term1," term3: ",term3_ups
# THE UPSAMPLED BEST-FIT HAS BEEN FOUND
# BELOW IS TO COMPUTE THE ERROR
errx_low,errx_high,erry_low,erry_high = chi2map_to_errors(chi2_ups, upsample_factor)
yshift_corr = yshifts_corrections.flat[chi2_ups.argmin()]-ycen
xshift_corr = xshifts_corrections.flat[chi2_ups.argmin()]-xcen
shift_xvals = xshifts_corrections-xcen
shift_yvals = yshifts_corrections-ycen
returns = [-xshift_corr,-yshift_corr]
if return_error:
returns.append( (errx_low+errx_high)/2. )
returns.append( (erry_low+erry_high)/2. )
if return_chi2array:
returns.append((shift_xvals,shift_yvals,chi2_ups))
return returns
def chi2map_to_errors(chi2map, zoomfactor=1., nsigma=1, nfitted=2):
    """
    Derive errors from a chi^2 map.

    Parameters
    ----------
    chi2map : np.ndarray
        A chi^2 map *with a minimum in bounds* and with delta-chi^2 <
        chi2stat(nsigma) in bounds
    zoomfactor : float
        The amount the chi2 map has been zoomed (i.e., the pixel scale, in
        units of small pixels per original pixel)
    nsigma : float
        How many sigma do you want the error bars to be?  Uses scipy.stats
        to invert the chi^2 distribution, or a power-law extrapolation for
        nsigma >= 8 (the exact ppf suffers from 1-0 floating point
        inaccuracy above that level)
    nfitted : int
        Number of fitted parameters.  In this case, always 2, but you could
        change your chi^2 statistic based on this

    Returns
    -------
    (-ex,+ex,-ey,+ey) where ex/ey are the x and y errors.
    """
    # Power-law approximation of the 2-dof delta-chi^2 threshold; used when
    # scipy is unavailable, or when the exact ppf would lose precision.
    approx_threshold = lambda x: 1.59358435 * x**1.80468278
    try:
        import scipy.stats

        def sigma_to_chi2(x):
            # Exact two-sided inversion of the chi^2 CDF below 8 sigma.
            if x < 8:
                return scipy.stats.chi2.ppf(1 - scipy.stats.norm.sf(x)*2, nfitted)
            return approx_threshold(x)
    except ImportError:
        # assume m=2 (2 degrees of freedom)
        sigma_to_chi2 = approx_threshold

    # Coordinate grids centered on the map, in original-pixel units.
    center = np.array(chi2map.shape)[:, np.newaxis, np.newaxis] / 2.
    yy, xx = (np.indices(chi2map.shape) - center) / zoomfactor
    best = chi2map.argmin()
    xcen = xx.flat[best]
    ycen = yy.flat[best]

    # The region inside the nsigma delta-chi^2 contour defines the error bars.
    inside = (chi2map - chi2map.min()) < sigma_to_chi2(nsigma)
    x_in = xx[inside]
    y_in = yy[inside]
    return (xcen - x_in.min(), x_in.max() - xcen,
            ycen - y_in.min(), y_in.max() - ycen)
def chi2_shift_iterzoom(im1, im2, err=None, upsample_factor='auto',
        boundary='wrap', nthreads=1, use_numpy_fft=False, zeromean=False,
        verbose=False, return_error=True, return_chi2array=False,
        zoom_shape=[10,10], rezoom_shape=[100,100], rezoom_factor=5,
        mindiff=1, **kwargs):
    """
    Find the offsets between image 1 and image 2 using an iterative DFT
    upsampling method combined with :math:`\\chi^2` to measure the errors on
    the fit

    A simpler version of :func:`chi2_shift` that only computes the
    :math:`\\chi^2` array on the largest scales, then uses a fourier
    upsampling technique to zoom in.

    Parameters
    ----------
    im1 : np.ndarray
    im2 : np.ndarray
        The images to register.
    err : np.ndarray
        Per-pixel error in image 2
    boundary : 'wrap','constant','reflect','nearest'
        Option to pass to map_coordinates for determining what to do with
        shifts outside of the boundaries.
    upsample_factor : int or 'auto'
        upsampling factor; governs accuracy of fit (1/usfac is best accuracy)
        (can be "automatically" determined based on chi^2 error)
    zeromean : bool
        Subtract the mean from the images before cross-correlating?  If no,
        you may get a 0,0 offset because the DC levels are strongly
        correlated.
    verbose : bool
        Print error message if upsampling factor is inadequate to measure
        errors
    use_numpy_fft : bool
        Force use numpy's fft over fftw?  (only matters if you have fftw
        installed)
    nthreads : bool
        Number of threads to use for fft (only matters if you have fftw
        installed)
    nfitted : int
        number of degrees of freedom in the fit (used for chi^2
        computations).  Should probably always be 2.
    zoom_shape : [int,int]
        Shape of iterative zoom image
    rezoom_shape : [int,int]
        Shape of the final output chi^2 map to use for determining the errors
    rezoom_factor : int
        Amount to zoom above the last zoom factor.  Should be <=
        rezoom_shape/zoom_shape

    Other Parameters
    ----------------
    return_error : bool
        Returns the "fit error" (1-sigma in x and y) based on the delta-chi2
        values
    return_chi2_array : bool
        Returns the x and y shifts and the chi2 as a function of those shifts
        in addition to other returned parameters.  i.e., the last return from
        this function will be a tuple (x, y, chi2)

    Returns
    -------
    dx,dy : float,float
        Measures the amount im2 is offset from im1 (i.e., shift im2 by -1 *
        these #'s to match im1)
    errx,erry : float,float
        optional, error in x and y directions
    xvals,yvals,chi2n_upsampled : ndarray,ndarray,ndarray,
        x,y positions (in original chi^2 coordinates) of the chi^2 values and
        their corresponding chi^2 value

    Examples
    --------
    Create a 2d array, shift it in both directions, then use
    chi2_shift_iterzoom to determine the shift

    >>> np.random.seed(42) # so the doctest will pass
    >>> image = np.random.randn(50,55)
    >>> shifted = np.roll(np.roll(image,12,0),5,1)
    >>> dx,dy,edx,edy = chi2_shift_iterzoom(image, shifted, upsample_factor='auto')
    >>> shifted2 = image_registration.fft_tools.shift2d(image,3.665,-4.25)
    >>> dx2,dy2,edx2,edy2 = chi2_shift_iterzoom(image, shifted2, upsample_factor='auto')
    """
    # NOTE(review): the mutable list defaults (zoom_shape etc.) are shared
    # across calls; they are not mutated here, but tuples would be safer.
    # Compute the full-resolution (one shift per pixel) chi^2 map.
    chi2,term1,term2,term3 = chi2n_map(im1, im2, err, boundary=boundary,
            nthreads=nthreads, zeromean=zeromean, use_numpy_fft=use_numpy_fft,
            return_all=True, reduced=False)
    # at this point, the chi2 map contains ALL of the information!
    # below is sub-pixel zoom-in stuff
    chi2zoom, zf, offsets = iterative_zoom.iterative_zoom(chi2,
            mindiff=mindiff, zoomshape=zoom_shape, return_zoomed=True,
            verbose=verbose, return_center=False, **kwargs)
    if np.all(chi2zoom==0):
        # if you've over-zoomed & broken things, you can zoom in by the same
        # factor but with a bigger field of view
        (yy,xx),chi2_rezoom = zoom.zoomnd(chi2, usfac=zf, offsets=offsets,
                outshape=rezoom_shape, middle_convention=np.floor,
                return_xouts=True, **kwargs)
    else:
        # Re-zoom around the located minimum with a finer grid so the
        # delta-chi^2 contour is well sampled for the error estimate.
        (yy,xx),chi2_rezoom = zoom.zoomnd(chi2, usfac=zf*rezoom_factor,
                offsets=offsets, outshape=rezoom_shape,
                middle_convention=np.floor, return_xouts=True,
                **kwargs)
    # x and y are swapped and negative
    returns = [-off for off in offsets[::-1]]
    if return_error:
        # NOTE(review): the error pixel scale assumes usfac=zf*rezoom_factor,
        # but the over-zoomed (all-zero) branch above used usfac=zf --
        # confirm whether the errors are mis-scaled in that branch.
        errx_low,errx_high,erry_low,erry_high = chi2map_to_errors(chi2_rezoom, zf*rezoom_factor)
        # Average the asymmetric bounds into symmetric 1-sigma errors.
        returns.append( (errx_low+errx_high)/2. )
        returns.append( (erry_low+erry_high)/2. )
    if return_chi2array:
        # Convert the zoomed pixel coordinates back into shift coordinates.
        yy = (chi2.shape[0]-1)/2 - yy
        xx = (chi2.shape[1]-1)/2 - xx
        returns.append((xx,yy,chi2_rezoom))
    return returns
def chi2n_map(im1, im2, err=None, boundary='wrap', nthreads=1,
        zeromean=False, use_numpy_fft=False, return_all=False, reduced=False):
    """
    Compute the chi^2 map between two images as a function of shift, split
    into its three additive terms (term1 + term2 + term3).

    Parameters
    ----------
    im1 : np.ndarray
    im2 : np.ndarray
        The images to register.
    err : np.ndarray
        Per-pixel error in image 2
    boundary : 'wrap','constant','reflect','nearest'
        Option to pass to map_coordinates for determining what to do with
        shifts outside of the boundaries.
    zeromean : bool
        Subtract the mean from the images before cross-correlating?  If no,
        you may get a 0,0 offset because the DC levels are strongly
        correlated.
    nthreads : bool
        Number of threads to use for fft (only matters if you have fftw
        installed)
    reduced : bool
        Return the reduced chi^2 array, or unreduced?
        (assumes 2 degrees of freedom for the fit)

    Returns
    -------
    chi2n : np.ndarray
        the chi^2 array
    term1 : float
        Scalar, term 1 in the chi^2 equation
    term2 : np.ndarray
        Term 2 in the equation, -2 * cross-correlation(x/sigma^2,y)
    term3 : np.ndarray | float
        If error is an array, returns an array, otherwise is a scalar float
        corresponding to term 3 in the equation
    """
    if im1.shape != im2.shape:
        raise ValueError("Images must have same shape.")

    if zeromean:
        # Subtract the mean of the finite pixels (x==x filters out NaNs).
        im1 = im1 - im1[im1 == im1].mean()
        im2 = im2 - im2[im2 == im2].mean()

    # NaNs would poison the ffts; nan_to_num also copies, so the caller's
    # arrays are never mutated below.
    im1 = np.nan_to_num(im1)
    im2 = np.nan_to_num(im2)

    if err is not None and not np.isscalar(err):
        err = np.nan_to_num(err)
        # to avoid divide-by-zero errors: mask out zero-error pixels from
        # both images and give them unit weight
        # (err is always squared, so negative errors are "sort of ok")
        zero_weight = (err == 0)
        im2[zero_weight] = 0
        im1[zero_weight] = 0
        err[zero_weight] = 1
        # we want im1 first, because it's first down below
        term3 = correlate2d(im1**2, 1. / err**2, boundary=boundary,
                nthreads=nthreads, use_numpy_fft=use_numpy_fft)
    else:  # scalar error is OK
        if err is None:
            err = 1.
        term3 = (im1**2 / err**2).sum()

    # term 1 and 2 don't rely on err being an array
    term1 = (im2**2 / err**2).sum()
    # ORDER MATTERS! cross-correlate im1,im2 not im2,im1
    term2 = -2 * correlate2d(im1, im2 / err**2, boundary=boundary,
            nthreads=nthreads, use_numpy_fft=use_numpy_fft)

    chi2 = term1 + term2 + term3
    if reduced:
        # 2 degrees of freedom
        chi2 /= im2.size - 2.

    if return_all:
        return chi2, term1, term2, term3
    return chi2
def chi2_shift_leastsq(im1, im2, err=None, mode='wrap', maxoff=None,
        return_error=True, guessx=0, guessy=0, use_fft=False,
        ignore_outside=True, verbose=False, **kwargs):
    """
    Determine the best fit offset using `scipy.ndimage.map_coordinates` to
    shift the offset image.

    *OBSOLETE* It kind of works, but is sensitive to input guess and doesn't
    reliably output errors

    Parameters
    ----------
    im1 : np.ndarray
        First image
    im2 : np.ndarray
        Second image (offset image)
    err : np.ndarray OR float
        Per-pixel error in image 2
    mode : 'wrap','constant','reflect','nearest'
        Option to pass to map_coordinates for determining what to do with
        shifts outside of the boundaries.
    maxoff : None or int
        If set, crop the data after shifting before determining chi2
        (this is a good thing to use; not using it can result in weirdness
        involving the boundaries)

    Returns
    -------
    xsh, ysh : float
        Best-fit shifts; with ``return_error`` their lmfit stderr estimates
        are appended.
    """
    if not im1.shape == im2.shape:
        raise ValueError("Images must have same shape.")

    # Zero out NaNs (copies protect the caller's image arrays).
    # NOTE: an array-valued err IS modified in place (NaN pixels get
    # infinite error so the fit ignores them).
    if np.any(np.isnan(im1)):
        im1 = im1.copy()
        im1[im1!=im1] = 0
    if np.any(np.isnan(im2)):
        im2 = im2.copy()
        if hasattr(err,'shape'):
            err[im2!=im2] = np.inf
        im2[im2!=im2] = 0

    # Remove the DC offset so the fit is not driven by background levels.
    im1 = im1-im1.mean()
    im2 = im2-im2.mean()

    # BUGFIX: the image size and center are needed by residuals() in BOTH
    # the fft and map_coordinates branches, so they are computed
    # unconditionally (previously only when use_fft=False, which caused a
    # NameError for use_fft=True with maxoff / ignore_outside set).
    # '//' keeps the centers integral under py3 true division.
    ylen,xlen = im1.shape
    xcen = xlen//2-(1-xlen%2)
    ycen = ylen//2-(1-ylen%2)
    if not use_fft:
        yy,xx = np.indices(im1.shape)

    # possible requirements for only this function
    import lmfit
    if not use_fft:
        import scipy.ndimage

    def residuals(p, im1, im2):
        # Shift im2 by the trial offset and compare against im1; the
        # residual vector is normalized by sqrt(number of samples).
        xsh, ysh = p['xsh'].value,p['ysh'].value
        if use_fft:
            shifted_img = shift.shiftnd(im2, (-ysh, -xsh))
        else: # identical to skimage
            shifted_img = scipy.ndimage.map_coordinates(im2, [yy+ysh,xx+xsh],
                    mode=mode)
        if maxoff is not None:
            xslice = slice(xcen-maxoff,xcen+maxoff,None)
            yslice = slice(ycen-maxoff,ycen+maxoff,None)
            # divide by sqrt(number of samples) = sqrt(maxoff**2)
            residuals = np.abs(np.ravel((im1[yslice,xslice]-shifted_img[yslice,xslice])) / maxoff)
        else:
            if ignore_outside:
                outsidex = min([(xlen-2*xsh)/2,xcen])
                # BUGFIX: the y extent must be limited by ycen (this
                # previously reused xcen, breaking non-square images).
                outsidey = min([(ylen-2*ysh)/2,ycen])
                xslice = slice(xcen-outsidex,xcen+outsidex,None)
                yslice = slice(ycen-outsidey,ycen+outsidey,None)
                residuals = ( np.abs( np.ravel(
                    (im1[yslice,xslice]-shifted_img[yslice,xslice]))) /
                    (2*outsidex*2*outsidey)**0.5 )
            else:
                xslice = slice(None)
                yslice = slice(None)
                residuals = np.abs(np.ravel((im1-shifted_img))) / im1.size**0.5
        if err is None:
            return residuals
        elif hasattr(err,'shape'):
            # Per-pixel errors must be shifted the same way as the image.
            if use_fft:
                shifted_err = shift.shiftnd(err, (-ysh, -xsh))
            else:
                shifted_err = scipy.ndimage.map_coordinates(err, [yy+ysh,xx+xsh], mode=mode)
            return residuals / shifted_err[yslice,xslice].flat
        else:
            return residuals / err

    fit_params = lmfit.Parameters()
    fit_params['xsh'] = lmfit.Parameter(value=guessx, max=maxoff)
    fit_params['ysh'] = lmfit.Parameter(value=guessy, max=maxoff)
    if maxoff is not None:
        fit_params['xsh'].min = -maxoff
        fit_params['ysh'].min = -maxoff

    iter_cb = per_iteration if verbose else None
    lmfitter = lmfit.minimize(residuals, fit_params, args=(im1,im2), iter_cb=iter_cb, **kwargs)

    px,py = lmfitter.params.values()
    fxsh,fysh = px.value,py.value
    efxsh,efysh = px.stderr,py.stderr
    if return_error:
        return fxsh,fysh,efxsh,efysh
    else:
        return fxsh,fysh
    # (Unreachable legacy code referencing undefined `cov`/`bestfit`/`chi2n`
    # was removed here.)
def per_iteration(pars, i, resid, *args, **kws):
    # lmfit ``iter_cb`` callback: print fit progress (parameter values and
    # the current chi^2) on each of the first 100 iterations, then on every
    # 10th iteration after that.  (Python 2 print statements, matching the
    # rest of this module.)
    if i < 100 or i % 10 == 0:
        print '====== Iteration %03i: ' % (i),
        for p in pars.values():
            print p.name , p.value,
        print " chi^2: ",(resid**2).sum()
| |
# Copyright 2015 by Fred Moolekamp
# License: BSD 3-clause
from __future__ import print_function, division
from collections import OrderedDict
from toyz.utils.errors import ToyzJobError
import math
import os
import numpy as np
from toyz.utils import core
from toyz.web import session_vars
# Set the default values for the session's global variables if they have not
# already been set (session_vars persists across jobs within one session).
viewer_variables = {
    'filepath': None,  # path of the most recently loaded image
    'img_file': None,  # the open image object (pyfits HDUList or PIL Image)
}
for v in viewer_variables:
    if not hasattr(session_vars, v):
        setattr(session_vars, v, viewer_variables[v])

# It may be desirable in the future to allow users to choose what type of
# image they want to send to the client. For now the default is sent to jpg,
# since it is the smallest image type.
# Maps the file extension used in tile paths to the PIL format name passed
# to Image.save().
img_formats = {
    'png': 'PNG',
    'bmp': 'BMP',
    'eps': 'EPS',
    'gif': 'GIF',
    'im': 'IM',
    'jpg': 'JPEG',
    'j2k': 'JPEG 2000',
    'msp': 'MSP',
    'pcx': 'PCX',
    'pbm': 'PBM',
    'pgm': 'PGM',
    'ppm': 'PPM',
    'spi': 'SPIDER',
    'tiff': 'TIFF',
    'webp': 'WEBP',
    'xbm': 'XBM'
}
def import_fits():
    """
    Return a module implementing the pyfits interface.

    Prefers ``astropy.io.fits`` and falls back to the standalone ``pyfits``
    package; raises ToyzJobError when neither is installed.
    """
    try:
        import astropy.io.fits as fits_module
    except ImportError:
        try:
            import pyfits as fits_module
        except ImportError:
            raise ToyzJobError(
                "You must have astropy or pyfits installed to view FITS images")
    return fits_module
def get_file(file_info):
    """
    Return the image object for ``file_info['filepath']``.

    Uses the module-level session cache (``session_vars``): if the requested
    path is the one already loaded, the cached object is returned; otherwise
    the file is opened (pyfits for FITS, PIL otherwise) and the cache is
    updated.
    """
    filepath = file_info['filepath']
    if session_vars.filepath == filepath:
        # Cache hit: reuse the already-open image object.
        return session_vars.img_file

    print('loading', filepath)
    if file_info['ext'] == 'fits':
        print('Detected fits image type')
        img_file = import_fits().open(filepath)
    else:
        try:
            from PIL import Image
        except ImportError:
            raise ToyzJobError(
                "You must have PIL (Python Imaging Library) installed to "
                "open files of this type"
            )
        img_file = Image.open(filepath)

    # Remember the newly opened file for subsequent requests.
    session_vars.filepath = filepath
    session_vars.img_file = img_file
    return img_file
def get_file_info(file_info):
    """
    Normalize ``file_info`` in place: derive the filename and extension from
    the filepath, fill in tile sizes, detect FITS vs ordinary image files,
    build the ``images`` (frame) map, and apply display defaults.

    Returns the same (mutated) dict.
    """
    file_split = file_info['filepath'].split('.')
    file_info['filename'] = os.path.basename(file_split[0])
    # Any '.fits' component marks a FITS file (handles e.g. '.fits.gz').
    if 'fits' in file_split[1:]:
        file_info['ext'] = 'fits'
    else:
        file_info['ext'] = file_split[-1]
    if 'tile_width' not in file_info:
        file_info['tile_width'] = 400
    if 'tile_height' not in file_info:
        file_info['tile_height'] = 200
    if 'img_type' not in file_info:
        # BUG FIX: this previously used '==' (a no-op comparison) instead of
        # '=', so 'img_type' was never actually set.
        file_info['img_type'] = 'image'
    if file_info['ext'] == 'fits':
        file_info['file_type'] = 'img_array'
        hdulist = get_file(file_info)
        file_info['hdulist'] = [hdu.__class__.__name__ for hdu in hdulist]
        if 'images' not in file_info:
            # Collect every image HDU; a single-HDU file just maps frame 0.
            if len(hdulist) > 1:
                file_info['images'] = OrderedDict(
                    [[str(n), {'frame': str(n)}] for n, hdu in enumerate(hdulist)
                        if 'imagehdu' in hdu.__class__.__name__.lower()])
            else:
                file_info['images'] = {
                    '0': {'frame': '0'}
                }
            if len(file_info['images']) == 0:
                raise ToyzJobError("FITS file does not contain any recognized image hdu's")
    else:
        file_info['file_type'] = 'img'
        file_info['images'] = {'0': {'frame': '0'}}
    # Display defaults, applied only where the caller did not supply a value.
    file_defaults = {
        'frame': next(iter(file_info['images'])),
        'resampling': 'NEAREST',
        'invert_x': False,
        'invert_y': False,
        'tile_format': 'png',
        'colormap': {
            'name': 'Spectral',
            'color_scale': 'linear',
            'invert_color': False,
            'set_bounds': False
        }
    }
    for default in file_defaults:
        if default not in file_info:
            file_info[default] = file_defaults[default]
    return file_info
def get_window(viewer):
    """
    Compute the integer pixel bounds of the viewer window from its center
    and size.

    Adds 'left', 'right', 'top', 'bottom' keys to ``viewer`` (y grows
    downward, so 'bottom' > 'top') and returns the same dict.
    """
    half_width = viewer['width'] / 2
    half_height = viewer['height'] / 2
    viewer['left'] = int(viewer['x_center'] - half_width)
    viewer['bottom'] = int(viewer['y_center'] + half_height)
    viewer['right'] = int(viewer['left'] + viewer['width'])
    viewer['top'] = int(viewer['bottom'] - viewer['height'])
    return viewer
def get_best_fit(data_width, data_height, img_viewer):
    """
    Scale and center the image so it fits entirely inside the viewer.

    Makes the image slightly smaller than the viewer size (97%), to account
    for slight differences in the image height/width from the desired tile
    sizes, then fills in the window bounds via ``get_window``.
    """
    x_scale = img_viewer['width'] / data_width * .97
    y_scale = img_viewer['height'] / data_height * .97
    # The limiting dimension determines the common scale.
    scale = min(y_scale, x_scale)
    img_viewer['x_center'] = int(math.floor(data_width / 2 * scale))
    img_viewer['y_center'] = int(math.floor(data_height / 2 * scale))
    img_viewer['scale'] = scale
    return get_window(img_viewer)
def get_img_info(file_info, img_info):
    """
    Fill in display metadata for a single image frame: colormap bounds,
    pixel dimensions, the viewer window, the display scale, and the number
    of tile rows/columns needed to cover the scaled image.

    Returns the same (mutated) ``img_info`` dict.
    """
    if file_info['ext']=='fits':
        # FITS: read the requested HDU's data to get its size and bounds.
        hdulist = get_file(file_info)
        data = hdulist[int(img_info['frame'])].data
        height, width = data.shape
        if('colormap' not in img_info):
            # NOTE(review): px_min/px_max computed in this if/else are never
            # used -- the stored values come from file_info['colormap'] or
            # the data min/max below.  Confirm before removing.
            if(file_info['colormap']['set_bounds']):
                px_min = file_info['px_min']
                px_max = file_info['px_max']
            else:
                px_min = float(data.min())
                px_max = float(data.max())
            img_info['colormap'] = file_info['colormap']
            if not file_info['colormap']['set_bounds']:
                img_info['colormap']['px_min'] = float(data.min())
                img_info['colormap']['px_max'] = float(data.max())
    else:
        # For non-FITS formats, only a single large image is loaded, which
        # PIL reads; pixel values are used in their native 0-255 range.
        try:
            from PIL import Image
        except ImportError:
            raise ToyzJobError(
                "You must have PIL (Python Imaging Library) installed to "
                "open files of this type"
            )
        img = get_file(file_info)
        img_info['colormap'] = {
            'name': 'none',
            'px_min': 0,
            'px_max': 255,
            'invert_color': False
        }
        width, height = img.size
    img_info['width'] = width
    img_info['height'] = height
    if 'scale' not in img_info:
        # No explicit scale given: derive it from the viewer (a negative
        # viewer scale means "best fit").
        if 'viewer' not in img_info or 'scale' not in img_info['viewer']:
            raise ToyzJobError("You must either supply a scale or image viewer parameters")
        if img_info['viewer']['scale']<0:
            img_info['viewer'] = get_best_fit(width, height, img_info['viewer'])
        else:
            img_info['viewer'] = get_window(img_info['viewer'])
    else:
        img_info['viewer'] = get_window(img_info['viewer'])
    img_info['scale'] = img_info['viewer']['scale']
    # Tile grid needed to cover the scaled image.
    img_info['scaled_width'] = int(math.ceil(width*img_info['scale']))
    img_info['scaled_height'] = int(math.ceil(height*img_info['scale']))
    img_info['columns'] = int(math.ceil(img_info['scaled_width']/file_info['tile_width']))
    img_info['rows'] = int(math.ceil(img_info['scaled_height']/file_info['tile_height']))
    img_defaults = {
        'invert_x': False,
        'invert_y': False,
        'tiles': {}
    }
    # Fall back to file-level settings first, then to the defaults above.
    for default in img_defaults:
        if default not in img_info:
            if default in file_info:
                img_info[default] = file_info[default]
            else:
                img_info[default] = img_defaults[default]
    #print('img_info:', img_info)
    return img_info
def get_tile_filename(file_info, img_info, x0_idx, xf_idx, y0_idx, yf_idx):
    """
    Build a unique cache filename for one tile.

    The name encodes every parameter that affects the rendered pixels
    (source file, frame, data-index bounds, scale and colormap settings),
    so any change in those parameters maps to a different cached file.
    """
    colormap = img_info['colormap']
    name_parts = (
        file_info['filename'],
        file_info['frame'],
        x0_idx, xf_idx, y0_idx, yf_idx,
        "{0:.3f}".format(img_info['scale']),
        colormap['name'],
        "{0:.2f}".format(colormap['px_min']),
        "{0:.2f}".format(colormap['px_max']),
        str(colormap['invert_color']),
    )
    new_filename = '_'.join(str(part) for part in name_parts)
    return os.path.join(img_info['save_path'],
                        new_filename + '.' + file_info['tile_format'])
def get_tile_info(file_info, img_info):
    """
    Get info for all tiles available in the viewer.  If the tile has not
    been loaded yet, it is added to the new_tiles array.

    Returns (all_tiles, new_tiles): the 'col,row' ids of every tile that
    overlaps the viewer window, and a dict describing only the tiles that
    still need to be rendered.
    """
    all_tiles = []
    new_tiles = {}
    # Visible bounds of the viewer in scaled-image pixels; when an axis is
    # inverted the window is mirrored about the scaled image size.
    if img_info['invert_x']:
        xmin = img_info['width']*img_info['scale'] - img_info['viewer']['right']
        xmax = img_info['width']*img_info['scale'] - img_info['viewer']['left']
    else:
        xmin = img_info['viewer']['left']
        xmax = img_info['viewer']['right']
    if img_info['invert_y']:
        ymin = img_info['height']*img_info['scale'] - img_info['viewer']['bottom']
        ymax = img_info['height']*img_info['scale'] - img_info['viewer']['top']
    else:
        ymin = img_info['viewer']['top']
        ymax = img_info['viewer']['bottom']
    # Range of tile columns/rows overlapping the window, clipped to the
    # image's tile grid.
    minCol = int(max(1,math.floor(xmin/file_info['tile_width'])))-1
    maxCol = int(min(img_info['columns'],math.ceil(xmax/file_info['tile_width'])))
    minRow = int(max(1,math.floor(ymin/file_info['tile_height'])))-1
    maxRow = int(min(img_info['rows'],math.ceil(ymax/file_info['tile_height'])))
    # Size of one tile in unscaled data pixels.
    block_width = int(math.ceil(file_info['tile_width']/img_info['scale']))
    block_height = int(math.ceil(file_info['tile_height']/img_info['scale']))
    for row in range(minRow,maxRow):
        # Tile bounds in scaled pixels (y0..yf) and data indices (y0_idx..yf_idx).
        y0 = row*file_info['tile_height']
        yf = (row+1)*file_info['tile_height']
        y0_idx = int(y0/img_info['scale'])
        yf_idx = min(y0_idx + block_height, img_info['height'])
        for col in range(minCol,maxCol):
            all_tiles.append(str(col)+','+str(row))
            tile_idx = str(col)+','+str(row)
            # Only build tiles the client has not already loaded.
            if (tile_idx not in img_info['tiles'] or
                    'loaded' not in img_info['tiles'][tile_idx] or
                    not img_info['tiles'][tile_idx]['loaded']):
                x0 = col*file_info['tile_width']
                xf = (col+1)*file_info['tile_width']
                x0_idx = int(x0/img_info['scale'])
                xf_idx = min(x0_idx+block_width, img_info['width'])
                # Actual on-screen size (edge tiles may be smaller).
                tile_width = int((xf_idx-x0_idx)*img_info['scale'])
                tile_height = int((yf_idx-y0_idx)*img_info['scale'])
                new_filepath = get_tile_filename(
                    file_info, img_info, x0_idx, xf_idx, y0_idx, yf_idx)
                tile = {
                    'idx': tile_idx,
                    'left': x0,
                    'right': xf,
                    'top': y0,
                    'bottom': yf,
                    'y0_idx': y0_idx,
                    'yf_idx': yf_idx,
                    'x0_idx': x0_idx,
                    'xf_idx': xf_idx,
                    'new_filepath': new_filepath,
                    'loaded': False,
                    'row': row,
                    'col': col,
                    'x': col*file_info['tile_width'],
                    'y': row*file_info['tile_height'],
                    'width': tile_width,
                    'height': tile_height
                }
                # Mirror the on-screen edges for inverted axes.
                if img_info['invert_y']:
                    tile['top'] = yf
                    tile['bottom'] = y0
                if img_info['invert_x']:
                    tile['left'] = xf
                    tile['right'] = x0
                new_tiles[tile_idx] = tile
    print('viewer:', img_info['viewer'])
    print('new tiles', new_tiles.keys())
    return all_tiles, new_tiles
def scale_data(file_info, img_info, tile_info, data):
    """
    Crop ``data`` to the tile's index bounds and resample it to the display
    scale.

    Uses scipy's nearest-neighbor zoom when available; otherwise falls back
    to ``np.kron`` for upscaling or strided index sampling for downscaling.
    Raises ToyzJobError for a non-positive scale on the fallback path.
    """
    if img_info['scale'] == 1:
        # No resampling needed: just crop.
        data = data[tile_info['y0_idx']:tile_info['yf_idx'],
                    tile_info['x0_idx']:tile_info['xf_idx']]
    else:
        try:
            import scipy.ndimage
            data = data[tile_info['y0_idx']:tile_info['yf_idx'],
                        tile_info['x0_idx']:tile_info['xf_idx']]
            # order=0 -> nearest-neighbor, the fastest spline order.
            data = scipy.ndimage.zoom(data, img_info['scale'], order=0)
        except ImportError:
            if img_info['scale'] > 1:
                data = data[tile_info['y0_idx']:tile_info['yf_idx'],
                            tile_info['x0_idx']:tile_info['xf_idx']]
                # Upscale by pixel replication.
                data = np.kron(data, np.ones((img_info['scale'], img_info['scale'])))
                #data = zoom(data, img_info['scale'], order=0)
            elif img_info['scale'] < 1 and img_info['scale'] > 0:
                # Downscale by sampling a regular grid of source indices.
                tile_width = min(file_info['tile_width'],
                    int((img_info['width']-tile_info['x0_idx'])*img_info['scale'])-1)
                tile_height = min(file_info['tile_height'],
                    int((img_info['height']-tile_info['y0_idx'])*img_info['scale'])-1)
                xmax = min(img_info['width']-1, tile_info['xf_idx'])
                ymax = min(img_info['height']-1, tile_info['yf_idx'])
                xIdx = np.linspace(tile_info['x0_idx'], xmax, tile_width)
                yIdx = np.linspace(tile_info['y0_idx'], ymax, tile_height)
                # BUG FIX: np.int was removed in NumPy 1.24; it was simply an
                # alias for the builtin int, which is used here instead.
                xIdx = np.array(xIdx, int)
                yIdx = np.reshape(np.array(yIdx, int), (yIdx.size, 1))
                data = data[yIdx, xIdx]
            else:
                raise ToyzJobError('Scale must be a positive number')
    return data
def create_tile(file_info, img_info, tile_info):
    """
    Render a single tile to disk at ``tile_info['new_filepath']``.

    FITS data is cropped/scaled, optionally flipped, mapped through a
    matplotlib colormap and saved via PIL; other formats are cropped and
    resized directly with PIL.  Returns (True, tile_info) on success or
    (False, '') for a degenerate (zero-area) tile.
    """
    try:
        from PIL import Image
    except ImportError:
        raise ToyzJobError(
            "You must have PIL (Python Imaging Library) installed to "
            "open files of this type"
        )
    if file_info['ext']=='fits':
        try:
            from matplotlib import cm as cmap
            from matplotlib.colors import Normalize, LinearSegmentedColormap
        except ImportError:
            raise ToyzJobError("You must have matplotlib installed to load FITS images")
        hdulist = get_file(file_info)
        data = hdulist[int(img_info['frame'])].data
        # If no advanced resampling algorithm is used, scale the data as
        # quickly as possible.  Otherwise crop the data (the resize with the
        # requested resampling filter happens after colormapping, below).
        if file_info['resampling'] == 'NEAREST':
            data = scale_data(file_info, img_info, tile_info, data)
        else:
            data = data[
                tile_info['y0_idx']:tile_info['yf_idx'],
                tile_info['x0_idx']:tile_info['xf_idx']]
        # FITS images have a flipped y-axis from what browsers and other
        # image formats expect
        if img_info['invert_y']:
            data = np.flipud(data)
        if img_info['invert_x']:
            data = np.fliplr(data)
        # Map pixel values through the chosen matplotlib colormap (clipped
        # to the px_min..px_max range) into 8-bit RGBA.
        norm = Normalize(img_info['colormap']['px_min'], img_info['colormap']['px_max'], True)
        colormap_name = img_info['colormap']['name']
        if img_info['colormap']['invert_color']:
            # matplotlib registers the reversed colormap under a '_r' suffix
            colormap_name = colormap_name + '_r'
        colormap = getattr(cmap, colormap_name)
        cm = cmap.ScalarMappable(norm, colormap)
        img = np.uint8(cm.to_rgba(data)*255)
        img = Image.fromarray(img)
        if file_info['resampling'] != 'NEAREST':
            img = img.resize(
                (tile_info['width'], tile_info['height']),
                getattr(Image, file_info['resampling']))
    else:
        # Non-FITS: crop the PIL image and resize with the chosen filter.
        img = get_file(file_info)
        img = img.crop((
            tile_info['x0_idx'], tile_info['y0_idx'],
            tile_info['xf_idx'],
            tile_info['yf_idx']))
        img = img.resize(
            (tile_info['width'], tile_info['height']), getattr(Image, file_info['resampling']))
    width, height = img.size
    if width>0 and height>0:
        # Make sure the cache directory exists before saving.
        path = os.path.dirname(tile_info['new_filepath'])
        core.create_paths([path])
        img.save(tile_info['new_filepath'], format=img_formats[file_info['tile_format']])
    else:
        # Degenerate (zero-area) tile: nothing to save.
        return False, ''
    return True, tile_info
def get_img_data(data_type, file_info, img_info, **kwargs):
    """
    Get data from an image or FITS file.

    ``data_type`` selects the response: 'data' returns a cutout (plus basic
    statistics) centered on ``kwargs['x']``/``kwargs['y']`` with the
    requested width/height; 'datapoint' returns the single pixel value at
    that position (0 when out of bounds).
    """
    if file_info['ext'] == 'fits':
        hdulist = get_file(file_info)
        data = hdulist[int(img_info['frame'])].data
    else:
        try:
            from PIL import Image
        except ImportError:
            raise ToyzJobError(
                "You must have PIL (Python Imaging Library) installed to "
                "open files of this type"
            )
        data = np.array(get_file(file_info))

    if data_type == 'data':
        use_scale = 'scale' in kwargs
        # Half-extents of the cutout; when a scale is given, the request is
        # in viewer pixels and must be converted to data pixels.
        if use_scale:
            half_width = int(kwargs['width'] / 2 / img_info['viewer']['scale'])
            half_height = int(kwargs['height'] / 2 / img_info['viewer']['scale'])
        else:
            half_width = int(kwargs['width'] / 2)
            half_height = int(kwargs['height'] / 2)
        x0 = max(0, kwargs['x'] - half_width)
        y0 = max(0, kwargs['y'] - half_height)
        xf = min(data.shape[1], kwargs['x'] + half_width)
        yf = min(data.shape[0], kwargs['y'] + half_height)
        if use_scale:
            cutout_bounds = {
                'x0_idx': x0,
                'y0_idx': y0,
                'xf_idx': xf,
                'yf_idx': yf
            }
            data = scale_data(file_info, img_info, cutout_bounds, data)
        else:
            data = data[y0:yf, x0:xf]
        return {
            'id': 'data',
            'min': float(data.min()),
            'max': float(data.max()),
            'mean': float(data.mean()),
            'median': float(np.median(data)),
            'std_dev': float(np.std(data)),
            'data': data.tolist()
        }

    if data_type == 'datapoint':
        in_bounds = (0 <= kwargs['x'] < data.shape[1] and
                     0 <= kwargs['y'] < data.shape[0])
        if in_bounds:
            return {
                'id': 'datapoint',
                'px_value': float(data[kwargs['y'], kwargs['x']])
            }
        return {
            'id': 'datapoint',
            'px_value': 0
        }

    raise ToyzJobError("Loading that data type has not been implemented yet")
| |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1DaemonSetList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI type string; drives to_dict().
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1DaemonSet]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # Maps python attribute name -> JSON key in the REST representation.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1DaemonSetList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
    @property
    def api_version(self):
        """Gets the api_version of this V1DaemonSetList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1DaemonSetList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1DaemonSetList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1DaemonSetList.  # noqa: E501
        :type: str
        """
        # Optional field: no client-side validation is performed.
        self._api_version = api_version
    @property
    def items(self):
        """Gets the items of this V1DaemonSetList.  # noqa: E501

        A list of daemon sets.  # noqa: E501

        :return: The items of this V1DaemonSetList.  # noqa: E501
        :rtype: list[V1DaemonSet]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1DaemonSetList.

        A list of daemon sets.  # noqa: E501

        :param items: The items of this V1DaemonSetList.  # noqa: E501
        :type: list[V1DaemonSet]
        """
        # ``items`` is a required field: reject None when client-side
        # validation is enabled.
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items
    @property
    def kind(self):
        """Gets the kind of this V1DaemonSetList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1DaemonSetList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1DaemonSetList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1DaemonSetList.  # noqa: E501
        :type: str
        """
        # Optional field: no client-side validation is performed.
        self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1DaemonSetList. # noqa: E501
:return: The metadata of this V1DaemonSetList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1DaemonSetList.
:param metadata: The metadata of this V1DaemonSetList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DaemonSetList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DaemonSetList):
return True
return self.to_dict() != other.to_dict()
| |
# Copyright IBM Corp. 2013 All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import gpfs
from cinder.volume import volume_types
# Global oslo.config handle; tests below assign CONF.gpfs_images_dir
# directly (see setUp and test_check_for_setup_error_fail_conf).
CONF = cfg.CONF
class FakeQemuImgInfo(object):
    """Bare-bones stand-in for a qemu-img info result object."""

    def __init__(self):
        # Both fields start unset; tests assign them as needed.
        self.file_format = self.backing_file = None
class GPFSDriverTestCase(test.TestCase):
    # NOTE(review): this path says "drivers.gpfs" while every mock.patch
    # below targets "drivers.ibm.gpfs" — it is only ever passed to
    # self.flags(volume_driver=...), but confirm whether it should be
    # updated to the ibm sub-package path.
    driver_name = "cinder.volume.drivers.gpfs.GPFSDriver"
    # Class attribute evaluated once at import time; setUp() replaces it
    # with a fresh per-test admin context on the instance.
    context = context.get_admin_context()
def _execute_wrapper(self, cmd, *args, **kwargs):
try:
kwargs.pop('run_as_root')
except KeyError:
pass
return utils.execute(cmd, *args, **kwargs)
    def setUp(self):
        """Create a temp GPFS mount layout and a driver wired to it."""
        super(GPFSDriverTestCase, self).setUp()
        self.volumes_path = tempfile.mkdtemp(prefix="gpfs_")
        self.images_dir = '%s/images' % self.volumes_path
        # Registered before the dirs are (re)made so cleanup runs even if
        # the rest of setUp fails part-way.
        self.addCleanup(self._cleanup, self.images_dir, self.volumes_path)
        if not os.path.exists(self.volumes_path):
            os.mkdir(self.volumes_path)
        if not os.path.exists(self.images_dir):
            os.mkdir(self.images_dir)
        self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
        self.driver = gpfs.GPFSDriver(configuration=conf.Configuration(None))
        # Route all command execution through the run_as_root-stripping
        # wrapper defined above.
        self.driver.gpfs_execute = self._execute_wrapper
        self.driver.set_execute(self._execute_wrapper)
        # Pre-populate state that do_setup() would normally discover.
        self.driver._cluster_id = '123456'
        self.driver._gpfs_device = '/dev/gpfs'
        self.driver._storage_pool = 'system'
        self.driver._encryption_state = 'yes'
        self.flags(volume_driver=self.driver_name,
                   gpfs_mount_point_base=self.volumes_path)
        # Shadows the import-time class attribute with a per-test context.
        self.context = context.get_admin_context()
        self.context.user_id = 'fake'
        self.context.project_id = 'fake'
        CONF.gpfs_images_dir = self.images_dir
def _cleanup(self, images_dir, volumes_path):
try:
os.rmdir(images_dir)
os.rmdir(volumes_path)
except OSError:
pass
    def test_different(self):
        """gpfs._different() is true only for a pair whose members differ."""
        self.assertTrue(gpfs._different((True, False)))
        self.assertFalse(gpfs._different((True, True)))
        # No pair at all is treated as "not different".
        self.assertFalse(gpfs._different(None))

    def test_sizestr(self):
        """gpfs._sizestr() renders a numeric string with a G suffix."""
        self.assertEqual('10G', gpfs._sizestr('10'))

    @mock.patch('cinder.utils.execute')
    def test_gpfs_local_execute(self, mock_exec):
        """_gpfs_local_execute() must force run_as_root=True."""
        mock_exec.return_value = 'test'
        self.driver._gpfs_local_execute('test')
        expected = [mock.call('test', run_as_root=True)]
        self.assertEqual(expected, mock_exec.mock_calls)
    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_state_ok(self, mock_exec):
        """_get_gpfs_state() passes through mmgetstate's delimited output."""
        # Canned (stdout, stderr) tuple mimicking a healthy 'active' node.
        mock_exec.return_value = ('mmgetstate::HEADER:version:reserved:'
                                  'reserved:nodeName:nodeNumber:state:quorum:'
                                  'nodesUp:totalNodes:remarks:cnfsState:\n'
                                  'mmgetstate::0:1:::devstack:3:active:2:3:3:'
                                  'quorum node:(undefined):', '')
        self.assertTrue(self.driver._get_gpfs_state().splitlines()[1].
                        startswith('mmgetstate::0:1:::devstack'))

    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_state_fail_mmgetstate(self, mock_exec):
        """A failing mmgetstate surfaces as VolumeBackendAPIException."""
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_gpfs_state)

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_state')
    def test_check_gpfs_state_ok(self, mock_get_gpfs_state):
        """An 'active' node state passes _check_gpfs_state()."""
        mock_get_gpfs_state.return_value = ('mmgetstate::HEADER:version:'
                                            'reserved:reserved:nodeName:'
                                            'nodeNumber:state:quorum:nodesUp:'
                                            'totalNodes:remarks:cnfsState:\n'
                                            'mmgetstate::0:1:::devstack:3:'
                                            'active:2:3:3:'
                                            'quorum node:(undefined):')
        self.driver._check_gpfs_state()

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_state')
    def test_check_gpfs_state_fail_not_active(self, mock_get_gpfs_state):
        """A non-active ('arbitrating') node state raises."""
        mock_get_gpfs_state.return_value = ('mmgetstate::HEADER:version:'
                                            'reserved:reserved:nodeName:'
                                            'nodeNumber:state:quorum:nodesUp:'
                                            'totalNodes:remarks:cnfsState:\n'
                                            'mmgetstate::0:1:::devstack:3:'
                                            'arbitrating:2:3:3:'
                                            'quorum node:(undefined):')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._check_gpfs_state)
    @mock.patch('cinder.utils.execute')
    def test_get_fs_from_path_ok(self, mock_exec):
        """The device column of df-style output is returned for the path."""
        mock_exec.return_value = ('Filesystem 1K-blocks '
                                  'Used Available Use%% Mounted on\n'
                                  '%s 10485760 531968 9953792'
                                  ' 6%% /gpfs0' % self.driver._gpfs_device,
                                  '')
        self.assertEqual(self.driver._gpfs_device,
                         self.driver._get_filesystem_from_path('/gpfs0'))

    @mock.patch('cinder.utils.execute')
    def test_get_fs_from_path_fail_path(self, mock_exec):
        """A different device in the output does not match _gpfs_device."""
        mock_exec.return_value = ('Filesystem 1K-blocks '
                                  'Used Available Use% Mounted on\n'
                                  'test 10485760 531968 '
                                  '9953792 6% /gpfs0', '')
        self.assertNotEqual(self.driver._gpfs_device,
                            self.driver._get_filesystem_from_path('/gpfs0'))

    @mock.patch('cinder.utils.execute')
    def test_get_fs_from_path_fail_raise(self, mock_exec):
        """A failing df command raises VolumeBackendAPIException."""
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_filesystem_from_path, '/gpfs0')

    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_cluster_id_ok(self, mock_exec):
        """The clusterId value is parsed out of mmlsconfig output."""
        mock_exec.return_value = ('mmlsconfig::HEADER:version:reserved:'
                                  'reserved:configParameter:value:nodeList:\n'
                                  'mmlsconfig::0:1:::clusterId:%s::'
                                  % self.driver._cluster_id, '')
        self.assertEqual(self.driver._cluster_id,
                         self.driver._get_gpfs_cluster_id())

    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_cluster_id_fail_id(self, mock_exec):
        """A different clusterId in the output does not match ours."""
        mock_exec.return_value = ('mmlsconfig::HEADER.:version:reserved:'
                                  'reserved:configParameter:value:nodeList:\n'
                                  'mmlsconfig::0:1:::clusterId:test::', '')
        self.assertNotEqual(self.driver._cluster_id,
                            self.driver._get_gpfs_cluster_id())

    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_cluster_id_fail_raise(self, mock_exec):
        """A failing mmlsconfig raises VolumeBackendAPIException."""
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_gpfs_cluster_id)
    @mock.patch('cinder.utils.execute')
    def test_get_fileset_from_path_ok(self, mock_exec):
        """Well-formed mmlsattr output containing 'fileset name:' parses."""
        mock_exec.return_value = ('file name: /gpfs0\n'
                                  'metadata replication: 1 max 2\n'
                                  'data replication: 1 max 2\n'
                                  'immutable: no\n'
                                  'appendOnly: no\n'
                                  'flags:\n'
                                  'storage pool name: system\n'
                                  'fileset name: root\n'
                                  'snapshot name:\n'
                                  'Windows attributes: DIRECTORY', '')
        self.driver._get_fileset_from_path('')

    @mock.patch('cinder.utils.execute')
    def test_get_fileset_from_path_fail_mmlsattr(self, mock_exec):
        """A failing mmlsattr raises VolumeBackendAPIException."""
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_fileset_from_path, '')

    @mock.patch('cinder.utils.execute')
    def test_get_fileset_from_path_fail_find_fileset(self, mock_exec):
        """Output without a 'fileset name:' line raises."""
        # Same output as the ok case but with the fileset line mangled.
        mock_exec.return_value = ('file name: /gpfs0\n'
                                  'metadata replication: 1 max 2\n'
                                  'data replication: 1 max 2\n'
                                  'immutable: no\n'
                                  'appendOnly: no\n'
                                  'flags:\n'
                                  'storage pool name: system\n'
                                  '*** name: root\n'
                                  'snapshot name:\n'
                                  'Windows attributes: DIRECTORY', '')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_fileset_from_path, '')
@mock.patch('cinder.utils.execute')
def test_verify_gpfs_pool_ok(self, mock_exec):
mock_exec.return_value = ('Storage pools in file system at \'/gpfs0\':'
'\n'
'Name Id BlkSize Data '
'Meta '
'Total Data in (KB) Free Data in (KB) '
'Total Meta in (KB) Free Meta in (KB)\n'
'system 0 256 KB yes '
'yes '
' 10485760 9953792 ( 95%) '
'10485760 9954560 ( 95%)', '')
self.assertTrue(self.driver._gpfs_device,
self.driver._verify_gpfs_pool('/dev/gpfs'))
    @mock.patch('cinder.utils.execute')
    def test_verify_gpfs_pool_fail_pool(self, mock_exec):
        """Pool-name-mismatch case for _verify_gpfs_pool().

        NOTE(review): assertTrue(x, msg) is called with the always-truthy
        device path as ``x`` and the actual verification result as the
        ``msg`` argument, so this assertion can never fail. Whether the
        intended assertion is assertTrue or assertFalse of the result
        depends on how _verify_gpfs_pool treats a non-matching pool name
        ('test' vs the driver's 'system') — confirm against the driver
        before fixing. TODO: replace with an assertion on the result.
        """
        mock_exec.return_value = ('Storage pools in file system at \'/gpfs0\':'
                                  '\n'
                                  'Name Id BlkSize Data '
                                  'Meta '
                                  'Total Data in (KB) Free Data in (KB) '
                                  'Total Meta in (KB) Free Meta in (KB)\n'
                                  'test 0 256 KB yes '
                                  'yes'
                                  ' 10485760 9953792 ( 95%)'
                                  ' 10485760 9954560 ( 95%)', '')
        self.assertTrue(self.driver._gpfs_device,
                        self.driver._verify_gpfs_pool('/dev/gpfs'))
    @mock.patch('cinder.utils.execute')
    def test_verify_gpfs_pool_fail_raise(self, mock_exec):
        """_verify_gpfs_pool() returns False when mmlspool itself fails."""
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertFalse(self.driver._verify_gpfs_pool('/dev/gpfs'))

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.utils.execute')
    def test_update_volume_storage_pool_ok(self, mock_exec, mock_verify_pool):
        """Moving a volume to a verifiable pool returns True."""
        mock_verify_pool.return_value = True
        self.assertTrue(self.driver._update_volume_storage_pool('', 'system'))

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.utils.execute')
    def test_update_volume_storage_pool_ok_pool_none(self,
                                                     mock_exec,
                                                     mock_verify_pool):
        """Passing None as the target pool still returns True."""
        mock_verify_pool.return_value = True
        self.assertTrue(self.driver._update_volume_storage_pool('', None))

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.utils.execute')
    def test_update_volume_storage_pool_fail_pool(self,
                                                  mock_exec,
                                                  mock_verify_pool):
        """An unverifiable target pool raises VolumeBackendAPIException."""
        mock_verify_pool.return_value = False
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._update_volume_storage_pool,
                          '',
                          'system')

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.utils.execute')
    def test_update_volume_storage_pool_fail_mmchattr(self,
                                                      mock_exec,
                                                      mock_verify_pool):
        """A failing command makes the pool update return False."""
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        mock_verify_pool.return_value = True
        self.assertFalse(self.driver._update_volume_storage_pool('', 'system'))
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_filesystem_from_path')
    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_fs_release_level_ok(self,
                                          mock_exec,
                                          mock_fs_from_path):
        """Version string '14.03' parses to the integer level 1403."""
        mock_exec.return_value = ('mmlsfs::HEADER:version:reserved:reserved:'
                                  'deviceName:fieldName:data:remarks:\n'
                                  'mmlsfs::0:1:::gpfs:filesystemVersion:14.03 '
                                  '(4.1.0.0):\n'
                                  'mmlsfs::0:1:::gpfs:filesystemVersionLocal:'
                                  '14.03 (4.1.0.0):\n'
                                  'mmlsfs::0:1:::gpfs:filesystemVersionManager'
                                  ':14.03 (4.1.0.0):\n'
                                  'mmlsfs::0:1:::gpfs:filesystemVersion'
                                  'Original:14.03 (4.1.0.0):\n'
                                  'mmlsfs::0:1:::gpfs:filesystemHighest'
                                  'Supported:14.03 (4.1.0.0):', '')
        mock_fs_from_path.return_value = '/dev/gpfs'
        self.assertEqual(('/dev/gpfs', 1403),
                         self.driver._get_gpfs_fs_release_level(''))

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_filesystem_from_path')
    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_fs_release_level_fail_mmlsfs(self,
                                                   mock_exec,
                                                   mock_fs_from_path):
        """A failing mmlsfs raises VolumeBackendAPIException."""
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        mock_fs_from_path.return_value = '/dev/gpfs'
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_gpfs_fs_release_level, '')

    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_cluster_release_level_ok(self, mock_exec):
        """minReleaseLevel is parsed from mmlsconfig output."""
        mock_exec.return_value = ('mmlsconfig::HEADER:version:reserved:'
                                  'reserved:configParameter:value:nodeList:\n'
                                  'mmlsconfig::0:1:::minReleaseLevel:1403::',
                                  '')
        self.assertEqual(1403, self.driver._get_gpfs_cluster_release_level())

    @mock.patch('cinder.utils.execute')
    def test_get_gpfs_cluster_release_level_fail_mmlsconfig(self, mock_exec):
        """A failing mmlsconfig raises VolumeBackendAPIException."""
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._get_gpfs_cluster_release_level)

    @mock.patch('cinder.utils.execute')
    def test_is_gpfs_path_fail_mmlsattr(self, mock_exec):
        """_is_gpfs_path() raises when the probe command fails."""
        mock_exec.side_effect = processutils.ProcessExecutionError(
            stdout='test', stderr='test')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._is_gpfs_path, '/dummy/path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_fileset_from_path')
    @mock.patch('cinder.utils.execute')
    def test_is_same_fileset_ok(self,
                                mock_exec,
                                mock_get_fileset_from_path):
        """Two paths match iff their filesets compare equal."""
        mock_get_fileset_from_path.return_value = True
        self.assertTrue(self.driver._is_same_fileset('', ''))
        # One fileset per call: differing results -> not the same fileset.
        mock_get_fileset_from_path.side_effect = [True, False]
        self.assertFalse(self.driver._is_same_fileset('', ''))

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_available_capacity')
    @mock.patch('cinder.utils.execute')
    def test_same_cluster_ok(self, mock_exec, mock_avail_capacity):
        """_same_cluster() compares the host's location_info with ours."""
        mock_avail_capacity.return_value = (10192683008, 10737418240)
        stats = self.driver.get_volume_stats()
        loc = stats['location_info']
        cap = {'location_info': loc}
        host = {'host': 'foo', 'capabilities': cap}
        self.assertTrue(self.driver._same_cluster(host))
        # Any difference in location_info means a different cluster.
        locinfo = stats['location_info'] + '_'
        loc = locinfo
        cap = {'location_info': loc}
        host = {'host': 'foo', 'capabilities': cap}
        self.assertFalse(self.driver._same_cluster(host))

    @mock.patch('cinder.utils.execute')
    def test_set_rw_permission(self, mock_exec):
        """_set_rw_permission() runs cleanly with execution mocked."""
        self.driver._set_rw_permission('')
@mock.patch('cinder.utils.execute')
def test_can_migrate_locally(self, mock_exec):
host = {'host': 'foo', 'capabilities': ''}
self.assertEqual(None, self.driver._can_migrate_locally(host))
loc = 'GPFSDriver:%s' % self.driver._cluster_id
cap = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
self.assertEqual(None, self.driver._can_migrate_locally(host))
loc = 'GPFSDriver_:%s:testpath' % self.driver._cluster_id
cap = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
self.assertEqual(None, self.driver._can_migrate_locally(host))
loc = 'GPFSDriver:%s:testpath' % (self.driver._cluster_id + '_')
cap = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
self.assertEqual(None, self.driver._can_migrate_locally(host))
loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
cap = {'location_info': loc}
host = {'host': 'foo', 'capabilities': cap}
self.assertEqual('testpath', self.driver._can_migrate_locally(host))
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_encryption_status')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_cluster_release_level')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_filesystem_from_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_cluster_id')
    @mock.patch('cinder.utils.execute')
    def test_do_setup_ok(self,
                         mock_exec,
                         mock_get_gpfs_cluster_id,
                         mock_get_filesystem_from_path,
                         mock_verify_gpfs_pool,
                         mock_get_gpfs_fs_rel_lev,
                         mock_verify_encryption_state):
        """do_setup() succeeds with every discovery step mocked happy.

        NOTE(review): the last two parameter names do not match the
        patched targets (they receive _get_gpfs_cluster_release_level and
        _get_gpfs_encryption_status respectively) — wiring is positional
        (decorators apply bottom-up), so only the names are misleading.
        """
        ctxt = self.context
        mock_get_gpfs_cluster_id.return_value = self.driver._cluster_id
        mock_get_filesystem_from_path.return_value = '/dev/gpfs'
        mock_verify_gpfs_pool.return_value = True
        mock_get_gpfs_fs_rel_lev.return_value = 1405
        mock_verify_encryption_state.return_value = 'Yes'
        self.driver.do_setup(ctxt)

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_cluster_release_level')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_filesystem_from_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_cluster_id')
    @mock.patch('cinder.utils.execute')
    def test_do_setup_no_encryption(self,
                                    mock_exec,
                                    mock_get_gpfs_cluster_id,
                                    mock_get_filesystem_from_path,
                                    mock_verify_gpfs_pool,
                                    mock_get_gpfs_fs_rel_lev):
        """do_setup() completes at release level 1403 with no
        encryption-status mock in place."""
        ctxt = self.context
        mock_get_gpfs_cluster_id.return_value = self.driver._cluster_id
        mock_get_filesystem_from_path.return_value = '/dev/gpfs'
        mock_verify_gpfs_pool.return_value = True
        mock_get_gpfs_fs_rel_lev.return_value = 1403
        self.driver.do_setup(ctxt)

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_filesystem_from_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_cluster_id')
    @mock.patch('cinder.utils.execute')
    def test_do_setup_fail_get_cluster_id(self,
                                          mock_exec,
                                          mock_get_gpfs_cluster_id,
                                          mock_get_filesystem_from_path,
                                          mock_verify_gpfs_pool):
        """A failing cluster-id lookup makes do_setup() raise."""
        ctxt = self.context
        mock_get_gpfs_cluster_id.side_effect = (
            processutils.ProcessExecutionError(stdout='test', stderr='test'))
        mock_get_filesystem_from_path.return_value = '/dev/gpfs'
        mock_verify_gpfs_pool.return_value = True
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.do_setup, ctxt)

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_filesystem_from_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_cluster_id')
    @mock.patch('cinder.utils.execute')
    def test_do_setup_fail_get_fs_from_path(self,
                                            mock_exec,
                                            mock_get_gpfs_cluster_id,
                                            mock_get_fs_from_path,
                                            mock_verify_gpfs_pool):
        """A failing filesystem lookup makes do_setup() raise."""
        ctxt = self.context
        mock_get_gpfs_cluster_id.return_value = self.driver._cluster_id
        mock_get_fs_from_path.side_effect = (
            processutils.ProcessExecutionError(stdout='test', stderr='test'))
        mock_verify_gpfs_pool.return_value = True
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.do_setup, ctxt)
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_get_filesystem_from_path')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_get_gpfs_cluster_id')
@mock.patch('cinder.utils.execute')
def test_do_setup_fail_volume(self,
mock_exec,
mock_get_gpfs_cluster_id,
mock_get_filesystem_from_path,
mock_verify_gpfs_pool):
ctxt = self.context
mock_get_gpfs_cluster_id. return_value = self.driver._cluster_id
mock_get_filesystem_from_path.return_value = '/dev/gpfs'
mock_verify_gpfs_pool.return_value = False
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, ctxt)
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._check_gpfs_state')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_get_gpfs_fs_release_level')
    def test_check_for_setup_error_fail_conf(self,
                                             mock_get_gpfs_fs_rel_lev,
                                             mock_is_gpfs_path,
                                             mock_check_gpfs_state):
        """Exercise the invalid-configuration branches of
        check_for_setup_error().

        Each section below flips one config option to a bad value,
        asserts VolumeBackendAPIException, then restores the option so
        the sections stay independent of each other.
        """
        fake_fs = '/dev/gpfs'
        fake_fs_release = 1400
        fake_cluster_release = 1201
        # fail configuration.gpfs_mount_point_base is None
        org_value = self.driver.configuration.gpfs_mount_point_base
        self.flags(volume_driver=self.driver_name, gpfs_mount_point_base=None)
        mock_get_gpfs_fs_rel_lev.return_value = (fake_fs, fake_fs_release)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.check_for_setup_error)
        self.flags(volume_driver=self.driver_name,
                   gpfs_mount_point_base=org_value)
        # fail configuration.gpfs_images_share_mode not in
        # ['copy_on_write', 'copy']
        org_value = self.driver.configuration.gpfs_images_share_mode
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode='copy_on_read')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.check_for_setup_error)
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode=org_value)
        # fail configuration.gpfs_images_share_mode and
        # configuration.gpfs_images_dir is None
        org_value_share_mode = self.driver.configuration.gpfs_images_share_mode
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode='copy')
        org_value_dir = CONF.gpfs_images_dir
        CONF.gpfs_images_dir = None
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.check_for_setup_error)
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode=org_value_share_mode)
        CONF.gpfs_images_dir = org_value_dir
        # fail configuration.gpfs_images_share_mode == 'copy_on_write' and not
        # _same_filesystem(configuration.gpfs_mount_point_base,
        # configuration.gpfs_images_dir)
        org_value = self.driver.configuration.gpfs_images_share_mode
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode='copy_on_write')
        with mock.patch('cinder.volume.drivers.ibm.gpfs._same_filesystem',
                        return_value=False):
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver.check_for_setup_error)
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode=org_value)
        # fail self.configuration.gpfs_images_share_mode == 'copy_on_write' and
        # not self._is_same_fileset(self.configuration.gpfs_mount_point_base,
        # self.configuration.gpfs_images_dir)
        org_value = self.driver.configuration.gpfs_images_share_mode
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode='copy_on_write')
        with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                        '_is_same_fileset', return_value=False):
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver.check_for_setup_error)
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode=org_value)
        # fail directory is None
        org_value_share_mode = self.driver.configuration.gpfs_images_share_mode
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode=None)
        org_value_dir = CONF.gpfs_images_dir
        CONF.gpfs_images_dir = None
        with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                        '_get_gpfs_cluster_release_level',
                        return_value=fake_cluster_release):
            # No share mode and no images dir: this combination is valid.
            self.driver.check_for_setup_error()
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode=org_value_share_mode)
        CONF.gpfs_images_dir = org_value_dir
        # fail directory.startswith('/')
        org_value_mount = self.driver.configuration.gpfs_mount_point_base
        self.flags(volume_driver=self.driver_name,
                   gpfs_mount_point_base='_' + self.volumes_path)
        org_value_dir = CONF.gpfs_images_dir
        CONF.gpfs_images_dir = None
        with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                        '_get_gpfs_cluster_release_level',
                        return_value=fake_cluster_release):
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver.check_for_setup_error)
        self.flags(volume_driver=self.driver_name,
                   gpfs_mount_point_base=org_value_mount)
        CONF.gpfs_images_dir = org_value_dir
        # fail os.path.isdir(directory)
        org_value_mount = self.driver.configuration.gpfs_mount_point_base
        self.flags(volume_driver=self.driver_name,
                   gpfs_mount_point_base=self.volumes_path + '_')
        org_value_dir = CONF.gpfs_images_dir
        CONF.gpfs_images_dir = None
        with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                        '_get_gpfs_cluster_release_level',
                        return_value=fake_cluster_release):
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.driver.check_for_setup_error)
        self.flags(volume_driver=self.driver_name,
                   gpfs_mount_point_base=org_value_mount)
        CONF.gpfs_images_dir = org_value_dir
        # fail not cluster release level >= GPFS_CLONE_MIN_RELEASE
        org_fake_cluster_release = fake_cluster_release
        fake_cluster_release = 1105
        org_value_mount = self.driver.configuration.gpfs_mount_point_base
        self.flags(volume_driver=self.driver_name,
                   gpfs_mount_point_base=self.volumes_path)
        org_value_dir = CONF.gpfs_images_dir
        CONF.gpfs_images_dir = None
        with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                        '_get_gpfs_cluster_release_level',
                        return_value=fake_cluster_release):
            with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                            '_get_gpfs_fs_release_level',
                            return_value=(fake_fs, fake_fs_release)):
                self.assertRaises(exception.VolumeBackendAPIException,
                                  self.driver.check_for_setup_error)
        fake_cluster_release = org_fake_cluster_release
        # NOTE(review): this section does not restore gpfs_mount_point_base
        # or gpfs_images_dir itself; the next section re-saves and finally
        # restores them — confirm this asymmetry is intentional.
        # fail not fs release level >= GPFS_CLONE_MIN_RELEASE
        org_fake_fs_release = fake_fs_release
        fake_fs_release = 1105
        org_value_mount = self.driver.configuration.gpfs_mount_point_base
        self.flags(volume_driver=self.driver_name,
                   gpfs_mount_point_base=self.volumes_path)
        org_value_dir = CONF.gpfs_images_dir
        CONF.gpfs_images_dir = None
        with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                        '_get_gpfs_cluster_release_level',
                        return_value=fake_cluster_release):
            with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                            '_get_gpfs_fs_release_level',
                            return_value=(fake_fs, fake_fs_release)):
                self.assertRaises(exception.VolumeBackendAPIException,
                                  self.driver.check_for_setup_error)
        self.flags(volume_driver=self.driver_name,
                   gpfs_mount_point_base=org_value_mount)
        CONF.gpfs_images_dir = org_value_dir
        fake_fs_release = org_fake_fs_release
    @mock.patch('cinder.utils.execute')
    def test_create_sparse_file(self, mock_exec):
        """_create_sparse_file() runs cleanly with execution mocked."""
        self.driver._create_sparse_file('', 100)

    @mock.patch('cinder.utils.execute')
    def test_allocate_file_blocks(self, mock_exec):
        """_allocate_file_blocks() runs cleanly for a size argument of 1."""
        self.driver._allocate_file_blocks(os.path.join(self.images_dir,
                                                       'test'), 1)

    @mock.patch('cinder.utils.execute')
    def test_gpfs_change_attributes(self, mock_exec):
        """_gpfs_change_attributes() accepts an option list and a path."""
        options = []
        options.extend(['-T', 'test'])
        self.driver._gpfs_change_attributes(options, self.images_dir)
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._mkfs')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_gpfs_change_attributes')
    def test_set_volume_attributes(self, mock_change_attributes, mock_mkfs):
        """A mix of recognised and unknown metadata keys is accepted."""
        metadata = [{'key': 'data_pool_name', 'value': 'test'},
                    {'key': 'replicas', 'value': 'test'},
                    {'key': 'dio', 'value': 'test'},
                    {'key': 'write_affinity_depth', 'value': 'test'},
                    {'key': 'block_group_factor', 'value': 'test'},
                    {'key': 'write_affinity_failure_group', 'value': 'test'},
                    {'key': 'test', 'value': 'test'},
                    {'key': 'fstype', 'value': 'test'},
                    {'key': 'fslabel', 'value': 'test'},
                    {'key': 'test', 'value': 'test'}]
        self.driver._set_volume_attributes('', '', metadata)

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_gpfs_change_attributes')
    def test_set_volume_attributes_no_attributes(self, mock_change_attributes):
        """Empty metadata with a configured storage pool runs cleanly."""
        metadata = []
        org_value = self.driver.configuration.gpfs_storage_pool
        self.flags(volume_driver=self.driver_name, gpfs_storage_pool='system')
        self.driver._set_volume_attributes('', '', metadata)
        self.flags(volume_driver=self.driver_name,
                   gpfs_storage_pool=org_value)

    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_gpfs_change_attributes')
    def test_set_volume_attributes_no_options(self, mock_change_attributes):
        """Empty metadata and an empty storage pool also run cleanly."""
        metadata = []
        org_value = self.driver.configuration.gpfs_storage_pool
        self.flags(volume_driver=self.driver_name, gpfs_storage_pool='')
        self.driver._set_volume_attributes('', '', metadata)
        self.flags(volume_driver=self.driver_name,
                   gpfs_storage_pool=org_value)
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_allocate_file_blocks')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_set_volume_attributes')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_set_rw_permission')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_create_sparse_file')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_verify_gpfs_path_state')
def test_create_volume(self,
mock_gpfs_path_state,
mock_local_path,
mock_sparse_file,
mock_rw_permission,
mock_set_volume_attributes,
mock_allocate_file_blocks,
mock_exec):
mock_local_path.return_value = 'test'
volume = self._fake_volume()
value = {}
value['value'] = 'test'
org_value = self.driver.configuration.gpfs_sparse_volumes
self.flags(volume_driver=self.driver_name, gpfs_sparse_volumes=False)
self.driver.create_volume(volume)
self.flags(volume_driver=self.driver_name,
gpfs_sparse_volumes=org_value)
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_allocate_file_blocks')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_set_volume_attributes')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_set_rw_permission')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_create_sparse_file')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_verify_gpfs_path_state')
def test_create_volume_no_sparse_volume(self,
mock_gpfs_path_state,
mock_local_path,
mock_sparse_file,
mock_rw_permission,
mock_set_volume_attributes,
mock_allocate_file_blocks,
mock_exec):
mock_local_path.return_value = 'test'
volume = self._fake_volume()
value = {}
value['value'] = 'test'
org_value = self.driver.configuration.gpfs_sparse_volumes
self.flags(volume_driver=self.driver_name, gpfs_sparse_volumes=True)
self.driver.create_volume(volume)
self.flags(volume_driver=self.driver_name,
gpfs_sparse_volumes=org_value)
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_allocate_file_blocks')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_set_volume_attributes')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_set_rw_permission')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_create_sparse_file')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_verify_gpfs_path_state')
def test_create_volume_with_metadata(self,
mock_gpfs_path_state,
mock_local_path,
mock_sparse_file,
mock_rw_permission,
mock_set_volume_attributes,
mock_allocate_file_blocks,
mock_exec):
mock_local_path.return_value = 'test'
volume = self._fake_volume()
value = {}
value['value'] = 'test'
mock_set_volume_attributes.return_value = True
metadata = [{'key': 'fake_key', 'value': 'fake_value'}]
org_value = self.driver.configuration.gpfs_sparse_volumes
self.flags(volume_driver=self.driver_name, gpfs_sparse_volumes=False)
self.driver.create_volume(volume)
self.assertTrue(self.driver._set_volume_attributes(volume, 'test',
metadata))
self.flags(volume_driver=self.driver_name,
gpfs_sparse_volumes=org_value)
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_resize_volume_file')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_set_volume_attributes')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_set_rw_permission')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.'
                'GPFSDriver._get_snapshot_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
    def test_create_volume_from_snapshot(self,
                                         mock_local_path,
                                         mock_snapshot_path,
                                         mock_gpfs_full_copy,
                                         mock_create_gpfs_copy,
                                         mock_rw_permission,
                                         mock_gpfs_redirect,
                                         mock_set_volume_attributes,
                                         mock_resize_volume_file):
        """A 5 * units.Gi resize result is reported as {'size': 5.0}."""
        mock_resize_volume_file.return_value = 5 * units.Gi
        volume = self._fake_volume()
        volume['consistencygroup_id'] = None
        snapshot = self._fake_snapshot()
        mock_snapshot_path.return_value = "/tmp/fakepath"
        self.assertEqual({'size': 5.0},
                         self.driver.create_volume_from_snapshot(volume,
                                                                 snapshot))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_resize_volume_file')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_set_volume_attributes')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_set_rw_permission')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_get_snapshot_path')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_create_volume_from_snapshot_metadata(self,
mock_local_path,
mock_snapshot_path,
mock_gpfs_full_copy,
mock_create_gpfs_copy,
mock_rw_permission,
mock_gpfs_redirect,
mock_set_volume_attributes,
mock_resize_volume_file):
mock_resize_volume_file.return_value = 5 * units.Gi
volume = self._fake_volume()
volume['consistencygroup_id'] = None
snapshot = self._fake_snapshot()
mock_snapshot_path.return_value = "/tmp/fakepath"
mock_set_volume_attributes.return_value = True
metadata = [{'key': 'fake_key', 'value': 'fake_value'}]
self.assertTrue(self.driver._set_volume_attributes(volume, 'test',
metadata))
self.assertEqual({'size': 5.0},
self.driver.create_volume_from_snapshot(volume,
snapshot))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_resize_volume_file')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_set_volume_attributes')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_set_rw_permission')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_create_gpfs_clone')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_create_cloned_volume(self,
mock_local_path,
mock_gpfs_full_copy,
mock_create_gpfs_clone,
mock_rw_permission,
mock_set_volume_attributes,
mock_resize_volume_file):
mock_resize_volume_file.return_value = 5 * units.Gi
volume = self._fake_volume()
src_volume = self._fake_volume()
self.assertEqual({'size': 5.0},
self.driver.create_cloned_volume(volume, src_volume))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_resize_volume_file')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_set_volume_attributes')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_set_rw_permission')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_create_gpfs_clone')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_create_cloned_volume_with_metadata(self,
mock_local_path,
mock_gpfs_full_copy,
mock_create_gpfs_clone,
mock_rw_permission,
mock_set_volume_attributes,
mock_resize_volume_file):
mock_resize_volume_file.return_value = 5 * units.Gi
volume = self._fake_volume()
src_volume = self._fake_volume()
mock_set_volume_attributes.return_value = True
metadata = [{'key': 'fake_key', 'value': 'fake_value'}]
self.assertTrue(self.driver._set_volume_attributes(volume, 'test',
metadata))
self.assertEqual({'size': 5.0},
self.driver.create_cloned_volume(volume, src_volume))
    @mock.patch('cinder.utils.execute')
    def test_delete_gpfs_file_ok(self, mock_exec):
        """_delete_gpfs_file succeeds for matching and non-matching listings.

        The stubbed command output mimics a clone-listing table (header plus
        one data row); the empty tuples cover the follow-up commands run to
        remove the file.  side_effect ordering must match the call order.
        """
        mock_exec.side_effect = [('Parent Depth Parent inode File name\n'
                                  '------ ----- -------------- ---------\n'
                                  ' no 2 148488 '
                                  '/gpfs0/test.txt', ''),
                                 ('', ''),
                                 ('', '')]
        self.driver._delete_gpfs_file(self.images_dir)
        self.driver._delete_gpfs_file(self.images_dir + '_')
        # Listing without the parent/depth columns still allows deletion.
        mock_exec.side_effect = [('Parent Depth Parent inode File name\n'
                                  '------ ----- -------------- ---------\n'
                                  ' '
                                  '/gpfs0/test.txt', ''),
                                 ('', '')]
        self.driver._delete_gpfs_file(self.images_dir)
    @mock.patch('os.path.exists')
    @mock.patch('cinder.utils.execute')
    def test_delete_gpfs_file_ok_parent(self, mock_exec, mock_path_exists):
        """_delete_gpfs_file handles leftover .snap/.ts/.txt sibling files.

        Each of the three scenarios consumes one [True, False, False]
        triple from os.path.exists and one three-tuple side_effect list
        from execute; the second execute result names the sibling file
        that is found next to the volume file.
        """
        mock_path_exists.side_effect = [True, False, False,
                                        True, False, False,
                                        True, False, False]
        mock_exec.side_effect = [('Parent Depth Parent inode File name\n'
                                  '------ ----- -------------- ---------\n'
                                  ' no 2 148488 '
                                  '/gpfs0/test.txt', ''),
                                 ('/gpfs0/test.snap\ntest', ''),
                                 ('', '')]
        self.driver._delete_gpfs_file(self.images_dir)
        mock_exec.side_effect = [('Parent Depth Parent inode File name\n'
                                  '------ ----- -------------- ---------\n'
                                  ' no 2 148488 '
                                  '/gpfs0/test.txt', ''),
                                 ('/gpfs0/test.ts\ntest', ''),
                                 ('', '')]
        self.driver._delete_gpfs_file(self.images_dir)
        mock_exec.side_effect = [('Parent Depth Parent inode File name\n'
                                  '------ ----- -------------- ---------\n'
                                  ' no 2 148488 '
                                  '/gpfs0/test.txt', ''),
                                 ('/gpfs0/test.txt\ntest', ''),
                                 ('', '')]
        self.driver._delete_gpfs_file(self.images_dir)
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._delete_gpfs_file')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_verify_gpfs_path_state')
def test_delete_volume(self,
mock_verify_gpfs_path_state,
mock_local_path,
mock_delete_gpfs_file):
self.driver.delete_volume('')
@mock.patch('cinder.utils.execute')
def test_gpfs_redirect_ok(self, mock_exec):
org_value = self.driver.configuration.gpfs_max_clone_depth
self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=1)
mock_exec.side_effect = [('Parent Depth Parent inode File name\n'
'------ ----- -------------- ---------\n'
' no 2 148488 '
'/gpfs0/test.txt', ''),
('', '')]
self.assertTrue(self.driver._gpfs_redirect(''))
self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=1)
mock_exec.side_effect = [('Parent Depth Parent inode File name\n'
'------ ----- -------------- ---------\n'
' no 1 148488 '
'/gpfs0/test.txt', ''),
('', '')]
self.assertFalse(self.driver._gpfs_redirect(''))
self.flags(volume_driver=self.driver_name,
gpfs_max_clone_depth=org_value)
@mock.patch('cinder.utils.execute')
def test_gpfs_redirect_fail_depth(self, mock_exec):
org_value = self.driver.configuration.gpfs_max_clone_depth
self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=0)
mock_exec.side_effect = [('Parent Depth Parent inode File name\n'
'------ ----- -------------- ---------\n'
' no 2 148488 '
'/gpfs0/test.txt', ''),
('', '')]
self.assertFalse(self.driver._gpfs_redirect(''))
self.flags(volume_driver=self.driver_name,
gpfs_max_clone_depth=org_value)
@mock.patch('cinder.utils.execute')
def test_gpfs_redirect_fail_match(self, mock_exec):
org_value = self.driver.configuration.gpfs_max_clone_depth
self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=1)
mock_exec.side_effect = [('Parent Depth Parent inode File name\n'
'------ ----- -------------- ---------\n'
' 148488 '
'/gpfs0/test.txt', ''),
('', '')]
self.assertFalse(self.driver._gpfs_redirect(''))
self.flags(volume_driver=self.driver_name,
gpfs_max_clone_depth=org_value)
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
    @mock.patch('cinder.utils.execute')
    def test_create_gpfs_clone(self,
                               mock_exec,
                               mock_redirect,
                               mock_cr_gpfs_cp,
                               mock_cr_gpfs_snap):
        """_create_gpfs_clone handles both _gpfs_redirect outcomes."""
        # First pass: _gpfs_redirect always reports True.
        mock_redirect.return_value = True
        self.driver._create_gpfs_clone('', '')
        # Second pass: the redirect reports True once, then False.
        # side_effect takes precedence over the return_value set above.
        mock_redirect.side_effect = [True, False]
        self.driver._create_gpfs_clone('', '')
@mock.patch('cinder.utils.execute')
def test_create_gpfs_copy(self, mock_exec):
self.driver._create_gpfs_copy('', '')
@mock.patch('cinder.utils.execute')
def test_create_gpfs_snap(self, mock_exec):
self.driver._create_gpfs_snap('')
self.driver._create_gpfs_snap('', '')
    @mock.patch('cinder.utils.execute')
    def test_is_gpfs_parent_file_ok(self, mock_exec):
        """_is_gpfs_parent_file parses the yes/no parent column."""
        # Two queued command outputs: a 'yes' row then a 'no' row.
        mock_exec.side_effect = [('Parent Depth Parent inode File name\n'
                                  '------ ----- -------------- ---------\n'
                                  ' yes 2 148488 '
                                  '/gpfs0/test.txt', ''),
                                 ('Parent Depth Parent inode File name\n'
                                  '------ ----- -------------- ---------\n'
                                  ' no 2 148488 '
                                  '/gpfs0/test.txt', '')]
        self.assertTrue(self.driver._is_gpfs_parent_file(''))
        self.assertFalse(self.driver._is_gpfs_parent_file(''))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_set_rw_permission')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_get_snapshot_path')
def test_create_snapshot(self,
mock_get_snapshot_path,
mock_local_path,
mock_create_gpfs_snap,
mock_set_rw_permission,
mock_gpfs_redirect):
org_value = self.driver.configuration.gpfs_mount_point_base
mock_get_snapshot_path.return_value = "/tmp/fakepath"
self.flags(volume_driver=self.driver_name,
gpfs_mount_point_base=self.volumes_path)
snapshot = {}
snapshot['volume_name'] = 'test'
self.driver.create_snapshot(snapshot)
self.flags(volume_driver=self.driver_name,
gpfs_mount_point_base=org_value)
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_get_snapshot_path')
def test_delete_snapshot(self,
mock_snapshot_path,
mock_exec):
snapshot = self._fake_snapshot()
snapshot_path = "/tmp/fakepath"
mock_snapshot_path.return_value = snapshot_path
snapshot_ts_path = '%s.ts' % snapshot_path
self.driver.delete_snapshot(snapshot)
mock_exec.assert_any_call('mv', snapshot_path,
snapshot_ts_path)
mock_exec.assert_any_call('rm', '-f', snapshot_ts_path,
check_exit_code=False)
def test_ensure_export(self):
self.assertEqual(None, self.driver.ensure_export('', ''))
def test_create_export(self):
self.assertEqual(None, self.driver.create_export('', '', {}))
def test_remove_export(self):
self.assertEqual(None, self.driver.remove_export('', ''))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_initialize_connection(self, mock_local_path):
volume = self._fake_volume()
mock_local_path.return_value = "/tmp/fakepath"
data = self.driver.initialize_connection(volume, '')
self.assertEqual('test', data['data']['name'])
self.assertEqual("/tmp/fakepath", data['data']['device_path'])
self.assertEqual('gpfs', data['driver_volume_type'])
def test_terminate_connection(self):
self.assertEqual(None, self.driver.terminate_connection('', ''))
    def test_get_volume_stats(self):
        """get_volume_stats reports GPFS backend info from capacity data."""
        fake_avail = 80 * units.Gi
        fake_size = 2 * fake_avail
        with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                        '_get_available_capacity',
                        return_value=(fake_avail, fake_size)):
            stats = self.driver.get_volume_stats()
            self.assertEqual('GPFS', stats['volume_backend_name'])
            self.assertEqual('file', stats['storage_protocol'])
            self.assertTrue(stats['gpfs_encryption_rest'])
            # Second call passes True (presumably a refresh flag — see the
            # driver's get_volume_stats signature); results must match.
            stats = self.driver.get_volume_stats(True)
            self.assertEqual('GPFS', stats['volume_backend_name'])
            self.assertEqual('file', stats['storage_protocol'])
            self.assertTrue(stats['gpfs_encryption_rest'])
@mock.patch('cinder.utils.execute')
def test_get_gpfs_encryption_status_true(self, mock_exec):
mock_exec.return_value = ('mmlsfs::HEADER:version:reserved:reserved:'
'deviceName:fieldName:data:remarks:\n'
'mmlsfs::0:1:::gpfs:encryption:Yes:', '')
self.assertEqual('Yes', self.driver._get_gpfs_encryption_status())
@mock.patch('cinder.utils.execute')
def test_get_gpfs_encryption_status_false(self, mock_exec):
mock_exec.return_value = ('mmlsfs::HEADER:version:reserved:reserved:'
'deviceName:fieldName:data:remarks:\n'
'mmlsfs::0:1:::gpfs:encryption:No:', '')
self.assertEqual('No', self.driver._get_gpfs_encryption_status())
@mock.patch('cinder.utils.execute')
def test_get_gpfs_encryption_status_fail(self, mock_exec):
mock_exec.side_effect = (
processutils.ProcessExecutionError(stdout='test', stderr='test'))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._get_gpfs_encryption_status)
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_update_volume_stats')
    def test_get_volume_stats_none_stats(self, mock_upd_vol_stats):
        """get_volume_stats copes with an unpopulated _stats attribute."""
        # Swap the cached stats for a mock, then restore the original so
        # later tests see the driver unchanged.
        _stats_org = self.driver._stats
        self.driver._stats = mock.Mock()
        self.driver._stats.return_value = None
        self.driver.get_volume_stats()
        self.driver._stats = _stats_org
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._clone_image')
def test_clone_image_pub(self, mock_exec):
self.driver.clone_image('', '', '', {'id': 1}, '')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
def test_is_cloneable_ok(self, mock_is_gpfs_path):
self.flags(volume_driver=self.driver_name,
gpfs_images_share_mode='test')
CONF.gpfs_images_dir = self.images_dir
mock_is_gpfs_path.return_value = None
self.assertEqual((True, None, os.path.join(CONF.gpfs_images_dir,
'12345')),
self.driver._is_cloneable('12345'))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
def test_is_cloneable_fail_config(self, mock_is_gpfs_path):
self.flags(volume_driver=self.driver_name, gpfs_images_share_mode='')
CONF.gpfs_images_dir = ''
mock_is_gpfs_path.return_value = None
self.assertNotEqual((True, None, os.path.join(CONF.gpfs_images_dir,
'12345')),
self.driver._is_cloneable('12345'))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
def test_is_cloneable_fail_path(self, mock_is_gpfs_path):
self.flags(volume_driver=self.driver_name,
gpfs_images_share_mode='test')
CONF.gpfs_images_dir = self.images_dir
mock_is_gpfs_path.side_effect = (
processutils.ProcessExecutionError(stdout='test', stderr='test'))
self.assertNotEqual((True, None, os.path.join(CONF.gpfs_images_dir,
'12345')),
self.driver._is_cloneable('12345'))
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_resize_volume_file')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_set_rw_permission')
    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_is_gpfs_parent_file')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_verify_gpfs_path_state')
    def test_clone_image_clonable(self,
                                  mock_verify_gpfs_path_state,
                                  mock_is_cloneable,
                                  mock_local_path,
                                  mock_is_gpfs_parent_file,
                                  mock_create_gpfs_snap,
                                  mock_qemu_img_info,
                                  mock_create_gpfs_copy,
                                  mock_conv_image,
                                  mock_set_rw_permission,
                                  mock_resize_volume_file):
        """_clone_image succeeds for a cloneable qcow2 image."""
        # (cloneable, reason, image path) as produced by _is_cloneable.
        mock_is_cloneable.return_value = (True, 'test', self.images_dir)
        mock_is_gpfs_parent_file.return_value = False
        mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('')
        volume = self._fake_volume()
        self.assertEqual(({'provider_location': None}, True),
                         self.driver._clone_image(volume, '', 1))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver'
'._verify_gpfs_path_state')
def test_clone_image_not_cloneable(self,
mock_verify_gpfs_path_state,
mock_is_cloneable):
mock_is_cloneable.return_value = (False, 'test', self.images_dir)
volume = self._fake_volume()
self.assertEqual((None, False),
self.driver._clone_image(volume, '', 1))
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_resize_volume_file')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_set_rw_permission')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_is_gpfs_parent_file')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_verify_gpfs_path_state')
    def test_clone_image_format_raw_copy_on_write(self,
                                                  mock_verify_gpfs_path_state,
                                                  mock_is_cloneable,
                                                  mock_local_path,
                                                  mock_is_gpfs_parent_file,
                                                  mock_create_gpfs_snap,
                                                  mock_qemu_img_info,
                                                  mock_create_gpfs_copy,
                                                  mock_set_rw_permission,
                                                  mock_resize_volume_file):
        """Raw image in copy_on_write share mode is snapped exactly once."""
        mock_is_cloneable.return_value = (True, 'test', self.images_dir)
        mock_local_path.return_value = self.volumes_path
        mock_is_gpfs_parent_file.return_value = False
        mock_qemu_img_info.return_value = self._fake_qemu_raw_image_info('')
        volume = self._fake_volume()
        # Save and restore the share mode so other tests are unaffected.
        org_value = self.driver.configuration.gpfs_images_share_mode
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode='copy_on_write')
        self.assertEqual(({'provider_location': None}, True),
                         self.driver._clone_image(volume, '', 1))
        mock_create_gpfs_snap.assert_called_once_with(self.images_dir)
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode=org_value)
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_resize_volume_file')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_set_rw_permission')
    @mock.patch('shutil.copyfile')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_is_gpfs_parent_file')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_verify_gpfs_path_state')
    def test_clone_image_format_raw_copy(self,
                                         mock_verify_gpfs_path_state,
                                         mock_is_cloneable,
                                         mock_local_path,
                                         mock_is_gpfs_parent_file,
                                         mock_qemu_img_info,
                                         mock_copyfile,
                                         mock_set_rw_permission,
                                         mock_resize_volume_file):
        """Raw image in 'copy' share mode is copied via shutil.copyfile."""
        mock_is_cloneable.return_value = (True, 'test', self.images_dir)
        mock_local_path.return_value = self.volumes_path
        mock_qemu_img_info.return_value = self._fake_qemu_raw_image_info('')
        volume = self._fake_volume()
        # Save and restore the share mode so other tests are unaffected.
        org_value = self.driver.configuration.gpfs_images_share_mode
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode='copy')
        self.assertEqual(({'provider_location': None}, True),
                         self.driver._clone_image(volume, '', 1))
        mock_copyfile.assert_called_once_with(self.images_dir,
                                              self.volumes_path)
        self.flags(volume_driver=self.driver_name,
                   gpfs_images_share_mode=org_value)
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_resize_volume_file')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_set_rw_permission')
    @mock.patch('cinder.image.image_utils.convert_image')
    @mock.patch('cinder.image.image_utils.qemu_img_info')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_verify_gpfs_path_state')
    def test_clone_image_format_qcow2(self,
                                      mock_verify_gpfs_path_state,
                                      mock_is_cloneable,
                                      mock_local_path,
                                      mock_qemu_img_info,
                                      mock_conv_image,
                                      mock_set_rw_permission,
                                      mock_resize_volume_file):
        """A qcow2 image is converted to raw at the volume's local path."""
        mock_is_cloneable.return_value = (True, 'test', self.images_dir)
        mock_local_path.return_value = self.volumes_path
        mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('')
        volume = self._fake_volume()
        self.assertEqual(({'provider_location': None}, True),
                         self.driver._clone_image(volume, '', 1))
        mock_conv_image.assert_called_once_with(self.images_dir,
                                                self.volumes_path,
                                                'raw')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_resize_volume_file')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
@mock.patch('cinder.image.image_utils.fetch_to_raw')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_verify_gpfs_path_state')
def test_copy_image_to_volume(self,
mock_verify_gpfs_path_state,
mock_fetch_to_raw,
mock_local_path,
mock_resize_volume_file):
volume = self._fake_volume()
self.driver.copy_image_to_volume('', volume, '', 1)
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.resize_image')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_resize_volume_file_ok(self,
mock_local_path,
mock_resize_image,
mock_qemu_img_info):
volume = self._fake_volume()
mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('')
self.assertEqual(self._fake_qemu_qcow2_image_info('').virtual_size,
self.driver._resize_volume_file(volume, 2000))
@mock.patch('cinder.image.image_utils.qemu_img_info')
@mock.patch('cinder.image.image_utils.resize_image')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_resize_volume_file_fail(self,
mock_local_path,
mock_resize_image,
mock_qemu_img_info):
volume = self._fake_volume()
mock_resize_image.side_effect = (
processutils.ProcessExecutionError(stdout='test', stderr='test'))
mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._resize_volume_file, volume, 2000)
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_resize_volume_file')
def test_extend_volume(self, mock_resize_volume_file):
volume = self._fake_volume()
self.driver.extend_volume(volume, 2000)
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
@mock.patch('cinder.image.image_utils.upload_volume')
def test_copy_volume_to_image(self, mock_upload_volume, mock_local_path):
volume = self._fake_volume()
self.driver.copy_volume_to_image('', volume, '', '')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._delete_gpfs_file')
    @mock.patch('six.moves.builtins.open')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_create_gpfs_clone')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
    def test_backup_volume(self,
                           mock_local_path,
                           mock_create_gpfs_clone,
                           mock_gpfs_redirect,
                           mock_temp_chown,
                           mock_file_open,
                           mock_delete_gpfs_file):
        """backup_volume hands the volume file to the backup service."""
        volume = self._fake_volume()
        # The driver looks the volume up through its db handle; stub it.
        self.driver.db = mock.Mock()
        self.driver.db.volume_get = mock.Mock()
        self.driver.db.volume_get.return_value = volume
        backup = {}
        backup['volume_id'] = 'test'
        backup['id'] = '123456'
        backup_service = mock.Mock()
        mock_local_path.return_value = self.volumes_path
        self.driver.backup_volume('', backup, backup_service)
@mock.patch('six.moves.builtins.open')
@mock.patch('cinder.utils.temporary_chown')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_restore_backup(self,
mock_local_path,
mock_temp_chown,
mock_file_open):
volume = self._fake_volume()
backup = {}
backup['id'] = '123456'
backup_service = mock.Mock()
mock_local_path.return_value = self.volumes_path
self.driver.restore_backup('', backup, volume, backup_service)
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_can_migrate_locally')
def test_migrate_volume_ok(self, mock_local, mock_exec):
volume = self._fake_volume()
host = {}
host = {'host': 'foo', 'capabilities': {}}
mock_local.return_value = (self.driver.configuration.
gpfs_mount_point_base + '_')
self.assertEqual((True, None),
self.driver._migrate_volume(volume, host))
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_can_migrate_locally')
def test_migrate_volume_fail_dest_path(self, mock_local, mock_exec):
volume = self._fake_volume()
host = {}
host = {'host': 'foo', 'capabilities': {}}
mock_local.return_value = None
self.assertEqual((False, None),
self.driver._migrate_volume(volume, host))
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_can_migrate_locally')
def test_migrate_volume_fail_mpb(self, mock_local, mock_exec):
volume = self._fake_volume()
host = {}
host = {'host': 'foo', 'capabilities': {}}
mock_local.return_value = (self.driver.configuration.
gpfs_mount_point_base)
mock_exec.side_effect = (
processutils.ProcessExecutionError(stdout='test', stderr='test'))
self.assertEqual((True, None),
self.driver._migrate_volume(volume, host))
@mock.patch('cinder.utils.execute')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_can_migrate_locally')
def test_migrate_volume_fail_mv(self, mock_local, mock_exec):
volume = self._fake_volume()
host = {}
host = {'host': 'foo', 'capabilities': {}}
mock_local.return_value = (
self.driver.configuration.gpfs_mount_point_base + '_')
mock_exec.side_effect = (
processutils.ProcessExecutionError(stdout='test', stderr='test'))
self.assertEqual((False, None),
self.driver._migrate_volume(volume, host))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
def test_migrate_volume_ok_pub(self, mock_migrate_volume):
self.driver.migrate_volume('', '', '')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_update_volume_storage_pool')
@mock.patch('cinder.volume.drivers.ibm.gpfs._different')
def test_retype_ok(self, mock_different, mock_strg_pool, mock_migrate_vol):
ctxt = self.context
(volume, new_type, diff, host) = self._fake_retype_arguments()
self.driver.db = mock.Mock()
mock_different.side_effect = [False, True, True]
mock_strg_pool.return_value = True
mock_migrate_vol.return_value = (True, True)
self.assertTrue(self.driver.retype(ctxt, volume, new_type, diff, host))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_update_volume_storage_pool')
@mock.patch('cinder.volume.drivers.ibm.gpfs._different')
def test_retype_diff_backend(self,
mock_different,
mock_strg_pool,
mock_migrate_vol):
ctxt = self.context
(volume, new_type, diff, host) = self._fake_retype_arguments()
mock_different.side_effect = [True, True, True]
self.assertFalse(self.driver.retype(ctxt,
volume,
new_type,
diff, host))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_update_volume_storage_pool')
@mock.patch('cinder.volume.drivers.ibm.gpfs._different')
def test_retype_diff_pools_migrated(self,
mock_different,
mock_strg_pool,
mock_migrate_vol):
ctxt = self.context
(volume, new_type, diff, host) = self._fake_retype_arguments()
self.driver.db = mock.Mock()
mock_different.side_effect = [False, False, True]
mock_strg_pool.return_value = True
mock_migrate_vol.return_value = (True, True)
self.assertTrue(self.driver.retype(ctxt, volume, new_type, diff, host))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_update_volume_storage_pool')
@mock.patch('cinder.volume.drivers.ibm.gpfs._different')
def test_retype_diff_pools(self,
mock_different,
mock_strg_pool,
mock_migrate_vol):
ctxt = self.context
(volume, new_type, diff, host) = self._fake_retype_arguments()
mock_different.side_effect = [False, False, True]
mock_strg_pool.return_value = True
mock_migrate_vol.return_value = (False, False)
self.assertFalse(self.driver.retype(ctxt,
volume,
new_type,
diff,
host))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_update_volume_storage_pool')
@mock.patch('cinder.volume.drivers.ibm.gpfs._different')
def test_retype_no_diff_hit(self,
mock_different,
mock_strg_pool,
mock_migrate_vol):
ctxt = self.context
(volume, new_type, diff, host) = self._fake_retype_arguments()
mock_different.side_effect = [False, False, False]
self.assertFalse(self.driver.retype(ctxt,
volume,
new_type,
diff,
host))
@mock.patch('cinder.utils.execute')
def test_mkfs_ok(self, mock_exec):
volume = self._fake_volume()
self.driver._mkfs(volume, 'swap')
self.driver._mkfs(volume, 'swap', 'test')
self.driver._mkfs(volume, 'ext3', 'test')
self.driver._mkfs(volume, 'vfat', 'test')
@mock.patch('cinder.utils.execute')
def test_mkfs_fail_mk(self, mock_exec):
volume = self._fake_volume()
mock_exec.side_effect = (
processutils.ProcessExecutionError(stdout='test', stderr='test'))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._mkfs, volume, 'swap', 'test')
    @mock.patch('cinder.utils.execute')
    def test_get_available_capacity_ok(self, mock_exec):
        """Available and total bytes are parsed from df-style output."""
        mock_exec.return_value = ('Filesystem 1-blocks Used '
                                  'Available Capacity Mounted on\n'
                                  '/dev/gpfs 10737418240 544735232 '
                                  '10192683008 6%% /gpfs0', '')
        # Returned as (available, total) in bytes.
        self.assertEqual((10192683008, 10737418240),
                         self.driver._get_available_capacity('/gpfs0'))
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_verify_gpfs_path_state')
    @mock.patch('cinder.utils.execute')
    def test_get_available_capacity_fail_mounted(self,
                                                 mock_exec,
                                                 mock_path_state):
        """Capacity is reported as (0, 0) when the path check fails."""
        mock_path_state.side_effect = (
            exception.VolumeBackendAPIException('test'))
        # The df-style output is never consulted once verification fails.
        mock_exec.return_value = ('Filesystem 1-blocks Used '
                                  'Available Capacity Mounted on\n'
                                  '/dev/gpfs 10737418240 544735232 '
                                  '10192683008 6%% /gpfs0', '')
        self.assertEqual((0, 0), self.driver._get_available_capacity('/gpfs0'))
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
def test_verify_gpfs_path_state_ok(self, mock_is_gpfs_path):
self.driver._verify_gpfs_path_state(self.images_dir)
@mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
def test_verify_gpfs_path_state_fail_path(self, mock_is_gpfs_path):
mock_is_gpfs_path.side_effect = (
processutils.ProcessExecutionError(stdout='test', stderr='test'))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._verify_gpfs_path_state, self.images_dir)
@mock.patch('cinder.utils.execute')
def test_create_consistencygroup(self, mock_exec):
ctxt = self.context
group = self._fake_group()
self.driver.create_consistencygroup(ctxt, group)
fsdev = self.driver._gpfs_device
cgname = "consisgroup-%s" % group['id']
cgpath = os.path.join(self.driver.configuration.gpfs_mount_point_base,
cgname)
cmd = ['mmcrfileset', fsdev, cgname, '--inode-space', 'new']
mock_exec.assert_any_call(*cmd)
cmd = ['mmlinkfileset', fsdev, cgname, '-J', cgpath]
mock_exec.assert_any_call(*cmd)
cmd = ['chmod', '770', cgpath]
mock_exec.assert_any_call(*cmd)
@mock.patch('cinder.utils.execute')
def test_create_consistencygroup_fail(self, mock_exec):
ctxt = self.context
group = self._fake_group()
mock_exec.side_effect = (
processutils.ProcessExecutionError(stdout='test', stderr='test'))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_consistencygroup, ctxt, group)
@mock.patch('cinder.utils.execute')
def test_delete_consistencygroup(self, mock_exec):
ctxt = self.context
group = self._fake_group()
group['status'] = 'available'
volume = self._fake_volume()
volume['status'] = 'available'
volumes = []
volumes.append(volume)
self.driver.db = mock.Mock()
self.driver.db.volume_get_all_by_group = mock.Mock()
self.driver.db.volume_get_all_by_group.return_value = volumes
self.driver.delete_consistencygroup(ctxt, group)
fsdev = self.driver._gpfs_device
cgname = "consisgroup-%s" % group['id']
cmd = ['mmunlinkfileset', fsdev, cgname, '-f']
mock_exec.assert_any_call(*cmd)
cmd = ['mmdelfileset', fsdev, cgname, '-f']
mock_exec.assert_any_call(*cmd)
@mock.patch('cinder.utils.execute')
def test_delete_consistencygroup_fail(self, mock_exec):
ctxt = self.context
group = self._fake_group()
group['status'] = 'available'
self.driver.db = mock.Mock()
self.driver.db.volume_get_all_by_group = mock.Mock()
self.driver.db.volume_get_all_by_group.return_value = []
mock_exec.side_effect = (
processutils.ProcessExecutionError(stdout='test', stderr='test'))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_consistencygroup, ctxt, group)
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.create_snapshot')
    def test_create_cgsnapshot(self, mock_create_snap):
        """create_cgsnapshot snapshots each member and marks it available."""
        ctxt = self.context
        cgsnap = self._fake_cgsnapshot()
        # Stub the DB lookup so the cgsnapshot appears to have one member.
        self.driver.db = mock.Mock()
        self.driver.db.snapshot_get_all_for_cgsnapshot = mock.Mock()
        snapshot1 = self._fake_snapshot()
        snapshots = [snapshot1]
        self.driver.db.snapshot_get_all_for_cgsnapshot.return_value = snapshots
        model_update, snapshots = self.driver.create_cgsnapshot(ctxt, cgsnap)
        # One driver-level snapshot per member; the member status flips.
        self.driver.create_snapshot.assert_called_once_with(snapshot1)
        self.assertEqual({'status': cgsnap['status']}, model_update)
        self.assertEqual('available', snapshot1['status'])
        self.driver.db.snapshot_get_all_for_cgsnapshot.\
            assert_called_once_with(ctxt, cgsnap['id'])
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.create_snapshot')
    def test_create_cgsnapshot_empty(self, mock_create_snap):
        """create_cgsnapshot of an empty group creates no snapshots."""
        ctxt = self.context
        cgsnap = self._fake_cgsnapshot()
        # DB reports no member snapshots for this cgsnapshot.
        self.driver.db = mock.Mock()
        self.driver.db.snapshot_get_all_for_cgsnapshot = mock.Mock()
        snapshots = []
        self.driver.db.snapshot_get_all_for_cgsnapshot.return_value = snapshots
        model_update, snapshots = self.driver.create_cgsnapshot(ctxt, cgsnap)
        self.assertFalse(self.driver.create_snapshot.called)
        self.assertEqual({'status': cgsnap['status']}, model_update)
        self.driver.db.snapshot_get_all_for_cgsnapshot.\
            assert_called_once_with(ctxt, cgsnap['id'])
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.delete_snapshot')
    def test_delete_cgsnapshot(self, mock_delete_snap):
        """delete_cgsnapshot deletes each member and marks it deleted."""
        ctxt = self.context
        cgsnap = self._fake_cgsnapshot()
        # Stub the DB lookup so the cgsnapshot appears to have one member.
        self.driver.db = mock.Mock()
        self.driver.db.snapshot_get_all_for_cgsnapshot = mock.Mock()
        snapshot1 = self._fake_snapshot()
        snapshots = [snapshot1]
        self.driver.db.snapshot_get_all_for_cgsnapshot.return_value = snapshots
        model_update, snapshots = self.driver.delete_cgsnapshot(ctxt, cgsnap)
        # One driver-level delete per member; the member status flips.
        self.driver.delete_snapshot.assert_called_once_with(snapshot1)
        self.assertEqual({'status': cgsnap['status']}, model_update)
        self.assertEqual('deleted', snapshot1['status'])
        self.driver.db.snapshot_get_all_for_cgsnapshot.\
            assert_called_once_with(ctxt, cgsnap['id'])
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.delete_snapshot')
    def test_delete_cgsnapshot_empty(self, mock_delete_snap):
        """delete_cgsnapshot of an empty group deletes no snapshots."""
        ctxt = self.context
        cgsnap = self._fake_cgsnapshot()
        # DB reports no member snapshots for this cgsnapshot.
        self.driver.db = mock.Mock()
        self.driver.db.snapshot_get_all_for_cgsnapshot = mock.Mock()
        snapshots = []
        self.driver.db.snapshot_get_all_for_cgsnapshot.return_value = snapshots
        model_update, snapshots = self.driver.delete_cgsnapshot(ctxt, cgsnap)
        self.assertFalse(self.driver.delete_snapshot.called)
        self.assertEqual({'status': cgsnap['status']}, model_update)
        self.driver.db.snapshot_get_all_for_cgsnapshot.\
            assert_called_once_with(ctxt, cgsnap['id'])
def test_local_path_volume_not_in_cg(self):
volume = self._fake_volume()
volume['consistencygroup_id'] = None
volume_path = os.path.join(
self.driver.configuration.gpfs_mount_point_base,
volume['name']
)
ret = self.driver.local_path(volume)
self.assertEqual(volume_path, ret)
def test_local_path_volume_in_cg(self):
volume = self._fake_volume()
cgname = "consisgroup-%s" % volume['consistencygroup_id']
volume_path = os.path.join(
self.driver.configuration.gpfs_mount_point_base,
cgname,
volume['name']
)
ret = self.driver.local_path(volume)
self.assertEqual(volume_path, ret)
    @mock.patch('cinder.context.get_admin_context')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
    def test_get_snapshot_path(self, mock_local_path, mock_admin_context):
        """_get_snapshot_path joins the snapshot name onto the volume's dir."""
        volume = self._fake_volume()
        # The driver looks up the parent volume via the DB to find its path.
        self.driver.db = mock.Mock()
        self.driver.db.volume_get = mock.Mock()
        self.driver.db.volume_get.return_value = volume
        # self.volumes_path comes from the test fixture setup.
        volume_path = self.volumes_path
        mock_local_path.return_value = volume_path
        snapshot = self._fake_snapshot()
        ret = self.driver._get_snapshot_path(snapshot)
        self.assertEqual(
            os.path.join(os.path.dirname(volume_path), snapshot['name']), ret
        )
@mock.patch('cinder.utils.execute')
def test_gpfs_full_copy(self, mock_exec):
src = "/tmp/vol1"
dest = "/tmp/vol2"
self.driver._gpfs_full_copy(src, dest)
mock_exec.assert_called_once_with('cp', src, dest,
check_exit_code=True)
def _fake_volume(self):
volume = {}
volume['id'] = '123456'
volume['name'] = 'test'
volume['size'] = 1000
volume['consistencygroup_id'] = 'cg-1234'
return volume
def _fake_snapshot(self):
snapshot = {}
snapshot['id'] = '12345'
snapshot['name'] = 'test-snap'
snapshot['size'] = 1000
snapshot['volume_id'] = '123456'
snapshot['status'] = 'available'
return snapshot
def _fake_volume_in_cg(self):
volume = {}
volume['id'] = '123456'
volume['name'] = 'test'
volume['size'] = 1000
volume['consistencygroup_id'] = 'fakecg'
return volume
def _fake_group(self):
group = {}
group['name'] = 'test_group'
group['id'] = '123456'
return group
def _fake_cgsnapshot(self):
cgsnap = {}
cgsnap['id'] = '123456'
cgsnap['name'] = 'testsnap'
cgsnap['consistencygroup_id'] = '123456'
cgsnap['status'] = 'available'
return cgsnap
    def _fake_qemu_qcow2_image_info(self, path):
        """Return a qemu-img info stub describing a 1 GiB qcow2 image.

        ``path`` is accepted only for signature compatibility with the
        real image-info call being stubbed; it is ignored.
        """
        data = FakeQemuImgInfo()
        data.file_format = 'qcow2'
        data.backing_file = None
        data.virtual_size = 1 * units.Gi
        return data
    def _fake_qemu_raw_image_info(self, path):
        """Return a qemu-img info stub describing a 1 GiB raw image.

        ``path`` is accepted only for signature compatibility with the
        real image-info call being stubbed; it is ignored.
        """
        data = FakeQemuImgInfo()
        data.file_format = 'raw'
        data.backing_file = None
        data.virtual_size = 1 * units.Gi
        return data
    def _fake_retype_arguments(self):
        """Build (volume, new_type, diff, host) fixtures for retype tests.

        Creates two real volume types ('bronze' vs 'gold' storage pool on
        the same backend) and computes their diff, so retype sees a
        realistic type change on the same backend/cluster.
        """
        ctxt = self.context
        # location_info embeds the driver's own cluster id so retype
        # treats the destination host as the same backend.
        loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id
        cap = {'location_info': loc}
        host = {'host': 'foo', 'capabilities': cap}
        key_specs_old = {'capabilities:storage_pool': 'bronze',
                         'volume_backend_name': 'backend1'}
        key_specs_new = {'capabilities:storage_pool': 'gold',
                         'volume_backend_name': 'backend1'}
        old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
        new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
        volume_types.get_volume_type(ctxt, old_type_ref['id'])
        new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])
        diff, _equal = volume_types.volume_types_diff(ctxt,
                                                      old_type_ref['id'],
                                                      new_type_ref['id'])
        volume = self._fake_volume()
        volume['host'] = host
        return (volume, new_type, diff, host)
class GPFSNFSDriverTestCase(test.TestCase):
    """Unit tests for the GPFS volume driver operating over an NFS mount."""
    # NOTE(review): this dotted path omits the 'ibm' package used by every
    # mock.patch target below ('cinder.volume.drivers.ibm.gpfs...') --
    # confirm whether driver_name is actually consumed anywhere.
    driver_name = "cinder.volume.drivers.gpfs.GPFSNFSDriver"
    # Canned share/mount locations and sizes shared by the tests below.
    TEST_NFS_EXPORT = 'nfs-host1:/export'
    TEST_SIZE_IN_GB = 1
    TEST_EXTEND_SIZE_IN_GB = 2
    TEST_MNT_POINT = '/mnt/nfs'
    TEST_MNT_POINT_BASE = '/mnt'
    TEST_GPFS_MNT_POINT_BASE = '/export'
    TEST_LOCAL_PATH = '/mnt/nfs/volume-123'
    TEST_VOLUME_PATH = '/export/volume-123'
    TEST_SNAP_PATH = '/export/snapshot-123'
    def _execute_wrapper(self, cmd, *args, **kwargs):
        """Execute *cmd* locally, discarding any run_as_root kwarg."""
        try:
            kwargs.pop('run_as_root')
        except KeyError:
            pass
        return utils.execute(cmd, *args, **kwargs)
    def _fake_volume(self):
        """Return a minimal volume dict that belongs to CG 'cg-1234'."""
        volume = {}
        volume['id'] = '123456'
        volume['name'] = 'test'
        volume['size'] = 1000
        volume['consistencygroup_id'] = 'cg-1234'
        return volume
    def _fake_snapshot(self):
        """Return a minimal available snapshot dict for volume 123456."""
        snapshot = {}
        snapshot['id'] = '12345'
        snapshot['name'] = 'test-snap'
        snapshot['size'] = 1000
        snapshot['volume_id'] = '123456'
        snapshot['status'] = 'available'
        return snapshot
    def setUp(self):
        """Create a GPFSNFSDriver wired to the local execute wrapper."""
        super(GPFSNFSDriverTestCase, self).setUp()
        self.driver = gpfs.GPFSNFSDriver(configuration=conf.
                                         Configuration(None))
        # Route both GPFS and generic command execution through the
        # local wrapper so no real CLI is invoked.
        self.driver.gpfs_execute = self._execute_wrapper
        self.driver.set_execute(self._execute_wrapper)
        self.context = context.get_admin_context()
        self.context.user_id = 'fake'
        self.context.project_id = 'fake'
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
                '_run_ssh')
    def test_gpfs_remote_execute(self, mock_run_ssh):
        """_gpfs_remote_execute forwards the command tuple over SSH."""
        mock_run_ssh.return_value = 'test'
        self.driver._gpfs_remote_execute('test', check_exit_code=True)
        # check_exit_code is passed through positionally to _run_ssh.
        expected = [mock.call(('test',), True)]
        self.assertEqual(expected, mock_run_ssh.mock_calls)
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
                '_ensure_shares_mounted')
    def test_update_volume_stats(self, mock_ensure):
        """Check update volume stats."""
        mock_ensure.return_value = True
        fake_avail = 80 * units.Gi
        fake_size = 2 * fake_avail
        fake_used = 10 * units.Gi
        with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
                        '_get_capacity_info',
                        return_value=(fake_avail, fake_size, fake_used)):
            stats = self.driver.get_volume_stats()
            self.assertEqual('GPFSNFS', stats['volume_backend_name'])
            self.assertEqual('file', stats['storage_protocol'])
            # A second call with refresh=True re-reports the same backend.
            stats = self.driver.get_volume_stats(True)
            self.assertEqual('GPFSNFS', stats['volume_backend_name'])
            self.assertEqual('file', stats['storage_protocol'])
    def test_get_volume_path(self):
        """_get_volume_path honors CG membership under the GPFS base."""
        self.driver.configuration.gpfs_mount_point_base = (
            self.TEST_GPFS_MNT_POINT_BASE)
        volume = self._fake_volume()
        self.assertEqual('/export/consisgroup-cg-1234/test',
                         self.driver._get_volume_path(volume))
        volume['consistencygroup_id'] = None
        self.assertEqual('/export/test',
                         self.driver._get_volume_path(volume))
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
                '_get_mount_point_for_share')
    def test_local_path(self, mock_mount_point):
        """local_path resolves under the NFS mount point, honoring CG."""
        mock_mount_point.return_value = self.TEST_MNT_POINT
        volume = self._fake_volume()
        volume['provider_location'] = self.TEST_GPFS_MNT_POINT_BASE
        self.assertEqual('/mnt/nfs/consisgroup-cg-1234/test',
                         self.driver.local_path(volume))
        volume['consistencygroup_id'] = None
        self.assertEqual('/mnt/nfs/test',
                         self.driver.local_path(volume))
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
                '_get_volume_path')
    def test_get_snapshot_path(self, mock_volume_path):
        """_get_snapshot_path joins the snapshot name onto the volume's dir."""
        volume = self._fake_volume()
        # The driver looks up the parent volume via the DB to find its path.
        self.driver.db = mock.Mock()
        self.driver.db.volume_get = mock.Mock()
        self.driver.db.volume_get.return_value = volume
        mock_volume_path.return_value = os.path.join(self.
                                                     TEST_GPFS_MNT_POINT_BASE,
                                                     volume['name'])
        snapshot = self._fake_snapshot()
        self.assertEqual('/export/test-snap',
                         self.driver._get_snapshot_path(snapshot))
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
                '_find_share')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                'create_volume')
    def test_create_volume(self,
                           mock_create_volume,
                           mock_find_share):
        """create_volume reports the chosen share as provider_location."""
        volume = self._fake_volume()
        mock_find_share.return_value = self.TEST_VOLUME_PATH
        self.assertEqual({'provider_location': self.TEST_VOLUME_PATH},
                         self.driver.create_volume(volume))
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_delete_gpfs_file')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
                'local_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
                '_get_volume_path')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_verify_gpfs_path_state')
    def test_delete_volume(self,
                           mock_verify_gpfs_path_state,
                           mock_volume_path,
                           mock_local_path,
                           mock_delete_gpfs_file):
        """delete_volume completes without error with all helpers stubbed."""
        self.driver.delete_volume('')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                'delete_snapshot')
    def test_delete_snapshot(self,
                             mock_delete_snapshot):
        """delete_snapshot delegates to the base GPFS driver."""
        self.driver.delete_snapshot('')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_resize_volume_file')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
                '_find_share')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_create_volume_from_snapshot')
    def test_create_volume_from_snapshot(self,
                                         mock_create_volume_from_snapshot,
                                         mock_find_share,
                                         mock_resize_volume_file):
        """Volume-from-snapshot reports the share as provider_location."""
        volume = self._fake_volume()
        snapshot = self._fake_snapshot()
        mock_find_share.return_value = self.TEST_VOLUME_PATH
        self.assertEqual({'provider_location': self.TEST_VOLUME_PATH},
                         self.driver.create_volume_from_snapshot(volume,
                                                                 snapshot))
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_resize_volume_file')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
                '_find_share')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_create_cloned_volume')
    def test_create_cloned_volume(self,
                                  mock_create_cloned_volume,
                                  mock_find_share,
                                  mock_resize_volume_file):
        """Cloned volume reports the share as provider_location."""
        volume = self._fake_volume()
        src_vref = self._fake_volume()
        mock_find_share.return_value = self.TEST_VOLUME_PATH
        self.assertEqual({'provider_location': self.TEST_VOLUME_PATH},
                         self.driver.create_cloned_volume(volume, src_vref))
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_delete_gpfs_file')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_do_backup')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
                '_create_backup_source')
    @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.'
                'local_path')
    def test_backup_volume(self,
                           mock_local_path,
                           mock_create_backup_source,
                           mock_do_backup,
                           mock_delete_gpfs_file):
        """backup_volume drives the backup service against the local path."""
        volume = self._fake_volume()
        self.driver.db = mock.Mock()
        self.driver.db.volume_get = mock.Mock()
        self.driver.db.volume_get.return_value = volume
        backup = {}
        backup['volume_id'] = 'test'
        backup['id'] = '123456'
        backup_service = mock.Mock()
        mock_local_path.return_value = self.TEST_VOLUME_PATH
        self.driver.backup_volume('', backup, backup_service)
| |
from __future__ import unicode_literals
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add the nullable Product._meta_title column."""
    def forwards(self, orm):
        """Apply the migration: add _meta_title to shop_product."""
        # Adding field 'Product._meta_title'
        db.add_column('shop_product', '_meta_title',
                      self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop _meta_title from shop_product."""
        # Deleting field 'Product._meta_title'
        db.delete_column('shop_product', '_meta_title')
    # Frozen snapshot of the ORM state at the time this migration was
    # generated (South uses it to build the fake 'orm' argument above).
    # Do not edit by hand; regenerate with 'schemamigration' instead.
    models = {
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'generic.assignedkeyword': {
            'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
            '_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
            'object_pk': ('django.db.models.fields.IntegerField', [], {})
        },
        'generic.keyword': {
            'Meta': {'object_name': 'Keyword'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        'generic.rating': {
            'Meta': {'object_name': 'Rating'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_pk': ('django.db.models.fields.IntegerField', [], {}),
            'value': ('django.db.models.fields.IntegerField', [], {})
        },
        'pages.page': {
            'Meta': {'ordering': "('titles',)", 'object_name': 'Page'},
            '_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            '_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '[1, 2, 3]', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            #'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
            'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['pages.Page']"}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
        },
        'shop.cart': {
            'Meta': {'object_name': 'Cart'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        'shop.cartitem': {
            'Meta': {'object_name': 'CartItem'},
            'cart': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Cart']"}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
            'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'shop.category': {
            'Meta': {'ordering': "('_order',)", 'object_name': 'Category', '_ormbases': ['pages.Page']},
            'combined': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'content': ('mezzanine.core.fields.RichTextField', [], {}),
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product_options'", 'blank': 'True', 'to': "orm['shop.ProductOption']"}),
            'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
            'price_max': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'price_min': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
            'sale': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.Sale']", 'null': 'True', 'blank': 'True'})
        },
        'shop.discountcode': {
            'Meta': {'object_name': 'DiscountCode'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'discountcode_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
            'code': ('cartridge.shop.fields.DiscountCodeField', [], {'unique': 'True', 'max_length': '20'}),
            'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
            'free_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'min_purchase': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'uses_remaining': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'shop.order': {
            'Meta': {'ordering': "('-id',)", 'object_name': 'Order'},
            'additional_instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'billing_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'billing_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'billing_detail_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'billing_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'billing_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'billing_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'billing_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'billing_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'billing_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'discount_code': ('cartridge.shop.fields.DiscountCodeField', [], {'max_length': '20', 'blank': 'True'}),
            'discount_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'shipping_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shipping_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shipping_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shipping_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shipping_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'shipping_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'shipping_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shipping_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shipping_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'shipping_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'shop.orderitem': {
            'Meta': {'object_name': 'OrderItem'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Order']"}),
            'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
            'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
        },
        'shop.product': {
            'Meta': {'object_name': 'Product'},
            '_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Category']", 'symmetrical': 'False', 'blank': 'True'}),
            'content': ('mezzanine.core.fields.RichTextField', [], {}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            #'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
            'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            #'rating': ('mezzanine.generic.fields.RatingField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.Rating']", 'frozen_by_south': 'True'}),
            'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"}),
            'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'upsell_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'upsell_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"})
        },
        'shop.productaction': {
            'Meta': {'unique_together': "(('product', 'timestamp'),)", 'object_name': 'ProductAction'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'to': "orm['shop.Product']"}),
            'timestamp': ('django.db.models.fields.IntegerField', [], {}),
            'total_cart': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'total_purchase': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'shop.productimage': {
            'Meta': {'ordering': "('_order',)", 'object_name': 'ProductImage'},
            '_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['shop.Product']"})
        },
        'shop.productoption': {
            'Meta': {'object_name': 'ProductOption'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
            'type': ('django.db.models.fields.IntegerField', [], {})
        },
        'shop.productvariation': {
            'Meta': {'ordering': "('-default',)", 'object_name': 'ProductVariation'},
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.ProductImage']", 'null': 'True', 'blank': 'True'}),
            'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'option1': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
            'option2': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variations'", 'to': "orm['shop.Product']"}),
            'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
        },
        'shop.sale': {
            'Meta': {'object_name': 'Sale'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sale_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
            'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }
    complete_apps = ['shop']
| |
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.1 Real-time Push Cloud API
## -----------------------------------
import json
import time
import hashlib
import urllib2
import tornado.httpclient
import sys
import uuid
try:
from hashlib import sha256
digestmod = sha256
except ImportError:
import Crypto.Hash.SHA256 as digestmod
sha256 = digestmod.new
import hmac
import tornado.ioloop
ioloop = tornado.ioloop.IOLoop.instance()
from PubnubCrypto import PubnubCrypto
class Pubnub():
    """Tornado-based (non-blocking) client for the PubNub 3.1 push API.

    All network I/O runs on the shared Tornado IOLoop (module-level
    ``ioloop``); results are delivered to caller-supplied callbacks.
    NOTE(review): this module is Python 2 code (``has_key``, ``iteritems``,
    ``urllib2`` import).
    """

    def __init__(
        self,
        publish_key,
        subscribe_key,
        secret_key = False,
        cipher_key = False,
        ssl_on = False,
        origin = 'pubsub.pubnub.com'
    ) :
        """
        #**
        #* Pubnub
        #*
        #* Init the Pubnub Client API
        #*
        #* @param string publish_key required key to send messages.
        #* @param string subscribe_key required key to receive messages.
        #* @param string secret_key required key to sign messages.
        #* @param boolean ssl required for 2048 bit encrypted messages.
        #* @param string origin PUBNUB Server Origin.
        #**

        ## Initiate Class
        pubnub = Pubnub( 'PUBLISH-KEY', 'SUBSCRIBE-KEY', 'SECRET-KEY', False )
        """
        self.origin = origin
        self.publish_key = publish_key
        self.subscribe_key = subscribe_key
        self.secret_key = secret_key
        self.cipher_key = cipher_key
        self.ssl = ssl_on
        # Per-channel subscription state: 'first' (connect callback already
        # fired), 'connected' (0/1 flag) and 'timetoken' (resume position).
        self.subscriptions = {}

        # Prepend the scheme matching the ssl_on flag to the origin host.
        if self.ssl :
            self.origin = 'https://' + self.origin
        else :
            self.origin = 'http://' + self.origin

    def publish( self, args ) :
        """
        #**
        #* Publish
        #*
        #* Send a message to a channel.
        #*
        #* @param array args with channel and message.
        #* @return array success information.
        #**

        ## Publish Example
        def publish_complete(info):
            print(info)

        pubnub.publish({
            'channel' : 'hello_world',
            'message' : {
                'some_text' : 'Hello my World'
            },
            'callback' : publish_complete
        })
        """
        ## Fail if bad input.
        # NOTE(review): raises KeyError (rather than returning False) when
        # 'channel' or 'message' keys are absent entirely.
        if not (args['channel'] and args['message']) :
            print('Missing Channel or Message')
            return False

        ## Capture User Input
        channel = str(args['channel'])
        message = args['message']

        if self.cipher_key :
            # Encrypt each element (list) or each value (dict), or the whole
            # payload (scalar), then JSON-encode the result.
            pc = PubnubCrypto()
            out = []
            if type( message ) == type(list()):
                for item in message:
                    encryptItem = pc.encrypt(self.cipher_key, item ).rstrip()
                    out.append(encryptItem)
                message = json.dumps(out)
            elif type( message ) == type(dict()):
                outdict = {}
                for k, item in message.iteritems():
                    encryptItem = pc.encrypt(self.cipher_key, item ).rstrip()
                    outdict[k] = encryptItem
                out.append(outdict)
                message = json.dumps(out[0])
            else:
                message = json.dumps(pc.encrypt(self.cipher_key, message).replace('\n',''))
        else :
            message = json.dumps(args['message'])

        ## Capture Callback
        # 'callback' is optional; default is an identity function.
        if args.has_key('callback') :
            callback = args['callback']
        else :
            callback = lambda x : x

        ## Sign Message
        # Signature = HMAC-SHA256 keyed with the hex digest of the secret
        # key over 'pub/sub/secret/channel/message'; '0' when unsigned.
        if self.secret_key :
            hashObject = sha256()
            hashObject.update(self.secret_key)
            hashedSecret = hashObject.hexdigest()
            # NOTE: 'hash' shadows the builtin within this method.
            hash = hmac.HMAC(hashedSecret, '/'.join([
                self.publish_key,
                self.subscribe_key,
                self.secret_key,
                channel,
                message
            ]), digestmod=digestmod)
            signature = hash.hexdigest()
        else :
            signature = '0'

        ## Send Message
        return self._request([
            'publish',
            self.publish_key,
            self.subscribe_key,
            signature,
            channel,
            '0',
            message
        ], callback );

    def subscribe( self, args ) :
        """
        #**
        #* Subscribe
        #*
        #* This is NON-BLOCKING.
        #* Listen for a message on a channel.
        #*
        #* @param array args with channel and message.
        #* @return false on fail, array on success.
        #**

        ## Subscribe Example
        def receive(message) :
            print(message)
            return True

        ## On Connect Callback
        def connected() :
            pubnub.publish({
                'channel' : 'hello_world',
                'message' : { 'some_var' : 'text' }
            })

        ## Subscribe
        pubnub.subscribe({
            'channel' : 'hello_world',
            'connect' : connected,
            'callback' : receive
        })
        """
        ## Fail if missing channel
        if not 'channel' in args :
            print('Missing Channel.')
            return False

        ## Fail if missing callback
        if not 'callback' in args :
            print('Missing Callback.')
            return False

        ## Capture User Input
        channel = str(args['channel'])
        callback = args['callback']
        # NOTE(review): unlike 'errorback' below, 'connect' has no default,
        # so a missing 'connect' key raises KeyError despite the guards above.
        connectcb = args['connect']

        if 'errorback' in args:
            errorback = args['errorback']
        else:
            errorback = lambda x: x

        ## New Channel?
        if not (channel in self.subscriptions) :
            self.subscriptions[channel] = {
                'first' : False,
                'connected' : 0,
                'timetoken' : '0'
            }

        ## Ensure Single Connection
        if self.subscriptions[channel]['connected'] :
            print("Already Connected")
            return False

        self.subscriptions[channel]['connected'] = 1

        ## SUBSCRIPTION RECURSION
        # Long-poll loop: each completed request schedules the next one,
        # either directly or via ioloop.add_timeout after a failure.
        def substabizel():
            ## STOP CONNECTION?
            if not self.subscriptions[channel]['connected']:
                return

            def sub_callback(response):
                ## STOP CONNECTION?
                if not self.subscriptions[channel]['connected']:
                    return

                ## CONNECTED CALLBACK
                # Fires exactly once, on the first completed request.
                if not self.subscriptions[channel]['first'] :
                    self.subscriptions[channel]['first'] = True
                    connectcb()

                ## PROBLEM?
                if not response:
                    # Probe the 'time' endpoint to distinguish a lost
                    # network from a transient error; retry in 1s either way.
                    def time_callback(_time):
                        if not _time:
                            ioloop.add_timeout(time.time()+1, substabizel)
                            return errorback("Lost Network Connection")
                        else:
                            ioloop.add_timeout(time.time()+1, substabizel)

                    ## ENSURE CONNECTED (Call Time Function)
                    return self.time({ 'callback' : time_callback })

                # Remember the resume position, then immediately start the
                # next long-poll BEFORE delivering the received messages.
                self.subscriptions[channel]['timetoken'] = response[1]
                substabizel()

                pc = PubnubCrypto()
                out = []
                # response[0] is the list of messages received.
                for message in response[0]:
                    if self.cipher_key :
                        # NOTE(review): 'out' is shared across loop
                        # iterations, so earlier decrypted items accumulate
                        # when one response carries several encrypted
                        # list/dict messages — verify against upstream.
                        if type( message ) == type(list()):
                            for item in message:
                                encryptItem = pc.decrypt(self.cipher_key, item )
                                out.append(encryptItem)
                            message = out
                        elif type( message ) == type(dict()):
                            outdict = {}
                            for k, item in message.iteritems():
                                encryptItem = pc.decrypt(self.cipher_key, item )
                                outdict[k] = encryptItem
                            out.append(outdict)
                            message = out[0]
                        else:
                            message = pc.decrypt(self.cipher_key, message )

                    callback(message)

            ## CONNECT TO PUBNUB SUBSCRIBE SERVERS
            try :
                self._request( [
                    'subscribe',
                    self.subscribe_key,
                    channel,
                    '0',
                    str(self.subscriptions[channel]['timetoken'])
                ], sub_callback )
            except :
                # On any request error, retry the long-poll in one second.
                ioloop.add_timeout(time.time()+1, substabizel)
                return

        ## BEGIN SUBSCRIPTION (LISTEN FOR MESSAGES)
        substabizel()

    def unsubscribe( self, args ):
        """Stop the long-poll loop for args['channel'] and reset its state."""
        channel = str(args['channel'])
        if not (channel in self.subscriptions):
            return False

        ## DISCONNECT
        # Clearing 'connected' makes the recursive substabizel() calls
        # return without re-issuing a request.
        self.subscriptions[channel]['connected'] = 0
        self.subscriptions[channel]['timetoken'] = 0
        self.subscriptions[channel]['first'] = False

    def history( self, args ) :
        """
        #**
        #* History
        #*
        #* Load history from a channel.
        #*
        #* @param array args with 'channel' and 'limit'.
        #* @return mixed false on fail, array on success.
        #*

        ## History Example
        history = pubnub.history({
            'channel' : 'hello_world',
            'limit'   : 1
        })
        print(history)
        """
        ## Capture User Input
        # Old 'and/or' default idiom: 10 when 'limit' is absent (or falsy).
        limit = args.has_key('limit') and int(args['limit']) or 10
        channel = str(args['channel'])

        ## Fail if bad input.
        if not channel :
            print('Missing Channel')
            return False

        ## Get History
        # NOTE(review): 'callback' is required here (KeyError if absent).
        return self._request( [
            'history',
            self.subscribe_key,
            channel,
            '0',
            str(limit)
        ], args['callback'] );

    def time( self, args ) :
        """
        #**
        #* Time
        #*
        #* Timestamp from PubNub Cloud.
        #*
        #* @return int timestamp.
        #*

        ## PubNub Server Time Example
        def time_complete(timestamp):
            print(timestamp)

        pubnub.time({ 'callback' : time_complete })
        """
        def complete(response) :
            # The server replies with a one-element list; unwrap it.
            args['callback'](response[0])

        self._request( [
            'time',
            '0'
        ], complete )

    def uuid(self) :
        """
        #**
        #* uuid
        #*
        #* Generate a UUID
        #*
        #* @return UUID.
        #*

        ## PubNub UUID Example
        uuid = pubnub.uuid()
        print(uuid)
        """
        # Returns a uuid.UUID object (module-level 'uuid' import), not a str.
        return uuid.uuid1()

    def _request( self, request, callback ) :
        ## Build URL
        # Join the path components, percent-encoding every character found
        # in the literal set below; all other characters pass through.
        url = self.origin + '/' + "/".join([
            "".join([ ' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
                hex(ord(ch)).replace( '0x', '%' ).upper() or
                ch for ch in list(bit)
            ]) for bit in request])
        # First path component ('publish', 'subscribe', 'history', 'time')
        # selects the decrypt strategy in complete() below.
        requestType = request[0]

        def complete(response) :
            # Tornado response handler: JSON-decode, optionally decrypt,
            # then hand the result to the caller's callback.
            if response.error:
                return callback(None)
            obj = json.loads(response.buffer.getvalue())
            pc = PubnubCrypto()
            out = []
            if self.cipher_key :
                if requestType == "history" :
                    # History responses: decrypt each message, recursing one
                    # level into list/dict messages.
                    if type(obj) == type(list()):
                        for item in obj:
                            if type(item) == type(list()):
                                for subitem in item:
                                    encryptItem = pc.decrypt(self.cipher_key, subitem )
                                    out.append(encryptItem)
                            elif type(item) == type(dict()):
                                outdict = {}
                                for k, subitem in item.iteritems():
                                    encryptItem = pc.decrypt(self.cipher_key, subitem )
                                    outdict[k] = encryptItem
                                out.append(outdict)
                            else :
                                encryptItem = pc.decrypt(self.cipher_key, item )
                                out.append(encryptItem)
                        callback(out)
                    elif type( obj ) == type(dict()):
                        for k, item in obj.iteritems():
                            encryptItem = pc.decrypt(self.cipher_key, item )
                            out.append(encryptItem)
                        callback(out)
                    else :
                        callback(obj)
                else :
                    # Non-history responses are delivered as-is even when a
                    # cipher key is configured.
                    callback(obj)
            else :
                callback(obj)

        ## Send Request Expecting JSON Response
        http = tornado.httpclient.AsyncHTTPClient()
        # NOTE: rebinds 'request' (the list parameter) to the HTTPRequest;
        # the third positional argument is presumably the headers dict —
        # confirm against the Tornado HTTPRequest API.
        request = tornado.httpclient.HTTPRequest( url, 'GET', dict({'V':'3.1','User-Agent': 'Python-Tornado','Accept-Encoding': 'gzip'}) )
        http.fetch(
            request,
            callback=complete,
            connect_timeout=200,
            request_timeout=200
        )
| |
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the nova-policy-check CLI interfaces.
"""
from io import StringIO
import fixtures
import mock
from nova.cmd import policy
import nova.conf
from nova import context as nova_context
from nova.db.main import api as db
from nova import exception
from nova.policies import base as base_policies
from nova.policies import instance_actions as ia_policies
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_instance
CONF = nova.conf.CONF
class TestPolicyCheck(test.NoDBTestCase):
    """Tests for the nova-policy CLI commands (see nova.cmd.policy)."""

    def setUp(self):
        super(TestPolicyCheck, self).setUp()
        # Capture everything the command prints so tests can assert on it.
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
        self.policy = self.useFixture(nova_fixtures.RealPolicyFixture())
        self.cmd = policy.PolicyCommands()

    # mock.patch.object decorators inject mocks bottom-up: the decorator
    # closest to the function provides the first mock argument.
    @mock.patch.object(policy.PolicyCommands, '_filter_rules')
    @mock.patch.object(policy.PolicyCommands, '_get_target')
    @mock.patch.object(policy.PolicyCommands, '_get_context')
    def test_check(self, mock_get_context, mock_get_target,
                   mock_filter_rules):
        """check() wires context/target/filter together and prints rules."""
        fake_rules = ['fake:rule', 'faux:roule']
        mock_filter_rules.return_value = fake_rules

        self.cmd.check(target=mock.sentinel.target)

        mock_get_context.assert_called_once_with()
        mock_get_target.assert_called_once_with(mock_get_context.return_value,
                                                mock.sentinel.target)
        mock_filter_rules.assert_called_once_with(
            mock_get_context.return_value, '', mock_get_target.return_value)
        # One passing rule per line on stdout.
        self.assertEqual('\n'.join(fake_rules) + '\n', self.output.getvalue())

    @mock.patch.object(nova_context, 'RequestContext')
    @mock.patch.object(policy, 'CONF')
    def test_get_context(self, mock_CONF, mock_RequestContext):
        """_get_context() builds a RequestContext from the CLI os_* opts."""
        context = self.cmd._get_context()

        self.assertEqual(mock_RequestContext.return_value, context)
        mock_RequestContext.assert_called_once_with(
            roles=mock_CONF.os_roles,
            user_id=mock_CONF.os_user_id,
            project_id=mock_CONF.os_tenant_id)

    def test_get_target_none(self):
        # No target given -> None (caller falls back to defaults).
        target = self.cmd._get_target(mock.sentinel.context, None)
        self.assertIsNone(target)

    def test_get_target_invalid_attribute(self):
        # Unknown attribute names in 'key=value' pairs are rejected.
        self.assertRaises(exception.InvalidAttribute, self.cmd._get_target,
                          mock.sentinel.context, ['nope=nada'])

    def test_get_target(self):
        """'key=value' pairs round-trip into the target dict."""
        expected_target = {
            'project_id': 'fake-proj',
            'user_id': 'fake-user',
            'quota_class': 'fake-quota-class',
            'availability_zone': 'fake-az',
        }
        given_target = ['='.join([key, val])
                        for key, val in expected_target.items()]

        actual_target = self.cmd._get_target(mock.sentinel.context,
                                             given_target)

        self.assertDictEqual(expected_target, actual_target)

    @mock.patch.object(nova_context, 'get_admin_context')
    @mock.patch.object(db, 'instance_get_by_uuid')
    def test_get_target_instance(self, mock_instance_get,
                                 mock_get_admin_context):
        """instance_id targets are resolved via a DB lookup as admin."""
        admin_context = nova_context.RequestContext(is_admin=True)
        mock_get_admin_context.return_value = admin_context
        given_target = ['instance_id=fake_id']
        mock_instance_get.return_value = fake_instance.fake_db_instance()

        target = self.cmd._get_target(mock.sentinel.context,
                                      given_target)

        # Only the instance's user/project end up in the target.
        self.assertEqual(target,
                         {'user_id': 'fake-user', 'project_id': 'fake-project'})
        mock_instance_get.assert_called_once_with(admin_context,
                                                  'fake_id')

    def _check_filter_rules(self, context=None, target=None,
                            expected_rules=None):
        """Run _filter_rules over all os-instance-actions policies.

        Defaults to an admin context and to expecting every instance-actions
        rule to pass; compares as sets (order-insensitive).
        """
        context = context or nova_context.get_admin_context()
        if expected_rules is None:
            expected_rules = [
                r.name for r in ia_policies.list_rules()]

        passing_rules = self.cmd._filter_rules(
            context, 'os-instance-actions:list', target)
        passing_rules += self.cmd._filter_rules(
            context, 'os-instance-actions:show', target)
        passing_rules += self.cmd._filter_rules(
            context, 'os-instance-actions:events', target)
        passing_rules += self.cmd._filter_rules(
            context, 'os-instance-actions:events:details', target)
        self.assertEqual(set(expected_rules), set(passing_rules))

    def test_filter_rules_non_admin(self):
        # A plain (non-admin) context passes only project-reader rules.
        context = nova_context.RequestContext()
        rule_conditions = [base_policies.PROJECT_READER]
        expected_rules = [r.name for r in ia_policies.list_rules() if
                          r.check_str in rule_conditions]
        self._check_filter_rules(context, expected_rules=expected_rules)

    def test_filter_rules_admin(self):
        # Admin context (the default) passes every rule.
        self._check_filter_rules()

    def test_filter_rules_instance_non_admin(self):
        # Non-admin context against someone else's instance: only rules
        # whose check string is RULE_ANY pass.
        db_context = nova_context.RequestContext(user_id='fake-user',
                                                 project_id='fake-project')
        instance = fake_instance.fake_instance_obj(db_context)
        context = nova_context.RequestContext()
        expected_rules = [r.name for r in ia_policies.list_rules() if
                          r.check_str == base_policies.RULE_ANY]
        self._check_filter_rules(context, instance, expected_rules)

    def test_filter_rules_instance_admin(self):
        db_context = nova_context.RequestContext(user_id='fake-user',
                                                 project_id='fake-project')
        instance = fake_instance.fake_instance_obj(db_context)
        self._check_filter_rules(target=instance)

    def test_filter_rules_instance_owner(self):
        # The owning context passes the project-reader rules on its own
        # instance.
        db_context = nova_context.RequestContext(user_id='fake-user',
                                                 project_id='fake-project')
        instance = fake_instance.fake_instance_obj(db_context)
        rule_conditions = [base_policies.PROJECT_READER]
        expected_rules = [r.name for r in ia_policies.list_rules() if
                          r.check_str in rule_conditions]
        self._check_filter_rules(db_context, instance, expected_rules)

    @mock.patch.object(policy.config, 'parse_args')
    @mock.patch.object(policy, 'CONF')
    def _check_main(self, mock_CONF, mock_parse_args,
                    category_name='check', expected_return_value=0):
        """Drive policy.main() with a mocked CONF and check its result."""
        mock_CONF.category.name = category_name
        return_value = policy.main()

        self.assertEqual(expected_return_value, return_value)
        mock_CONF.register_cli_opts.assert_called_once_with(
            policy.cli_opts)
        mock_CONF.register_cli_opt.assert_called_once_with(
            policy.category_opt)

    @mock.patch.object(policy.version, 'version_string_with_package',
                       return_value="x.x.x")
    def test_main_version(self, mock_version_string):
        self._check_main(category_name='version')
        self.assertEqual("x.x.x\n", self.output.getvalue())

    @mock.patch.object(policy.cmd_common, 'print_bash_completion')
    def test_main_bash_completion(self, mock_print_bash):
        self._check_main(category_name='bash-completion')
        mock_print_bash.assert_called_once_with(policy.CATEGORIES)

    @mock.patch.object(policy.cmd_common, 'get_action_fn')
    def test_main(self, mock_get_action_fn):
        """main() invokes the resolved action with its args/kwargs."""
        mock_fn = mock.Mock()
        mock_fn_args = [mock.sentinel.arg]
        mock_fn_kwargs = {'key': mock.sentinel.value}
        mock_get_action_fn.return_value = (mock_fn, mock_fn_args,
                                           mock_fn_kwargs)

        self._check_main(expected_return_value=mock_fn.return_value)
        mock_fn.assert_called_once_with(mock.sentinel.arg,
                                        key=mock.sentinel.value)

    @mock.patch.object(policy.cmd_common, 'get_action_fn')
    def test_main_error(self, mock_get_action_fn):
        # Any exception from the action is reported and exits with 1.
        mock_fn = mock.Mock(side_effect=Exception)
        mock_get_action_fn.return_value = (mock_fn, [], {})
        self._check_main(expected_return_value=1)
        self.assertIn("error: ", self.output.getvalue())
| |
from xscontainer import api_helper
from xscontainer import util
from xscontainer.util import log
import constants
import fcntl
import errno
import os
import paramiko
import paramiko.rsakey
import select
import socket
import StringIO
import sys
# Path of the Docker control socket inside the guest VM.
DOCKER_SOCKET_PATH = '/var/run/docker.sock'
# TCP port used for all SSH connections to the VM.
SSH_PORT = 22
# User-facing message when no reachable SSH endpoint can be found.
ERROR_CAUSE_NETWORK = (
    "Error: Cannot find a valid IP that allows SSH connections to "
    "the VM. Please make sure that Tools are installed, a "
    "network route is set up, there is a SSH server running inside "
    "the VM that is reachable from Dom0.")
class SshException(util.XSContainerException):
    """Base error for SSH communication failures with a VM."""
    pass
class VmHostKeyException(SshException):
    """The VM presented an SSH host key that differs from the one on record."""
    pass
class AuthenticationException(SshException):
    """Key-based SSH authentication with the VM failed."""
    pass
def prepare_request_cmd():
    """Return the in-guest shell command that proxies stdin/stdout
    to Docker's unix socket via ncat."""
    return "ncat -U %s" % DOCKER_SOCKET_PATH
class MyHostKeyPolicy(paramiko.MissingHostKeyPolicy):
    """Paramiko host-key policy that pins each VM's SSH host key.

    On first contact the presented key is stored via api_helper; on later
    connections a mismatch against the stored key raises
    VmHostKeyException instead of silently accepting the new key.
    """

    _session = None
    _vm_uuid = None

    def __init__(self, session, vm_uuid):
        self._session = session
        self._vm_uuid = vm_uuid

    def missing_host_key(self, client, hostname, key):
        presented_key = key.get_base64()
        known_key = api_helper.get_ssh_hostkey(self._session,
                                               self._vm_uuid)
        if not known_key:
            # First contact: remember the presented key for next time and
            # allow the connection.
            log.debug("No public key on record found for %s. Will remember."
                      % presented_key)
            api_helper.set_ssh_hostkey(self._session, self._vm_uuid,
                                       presented_key)
            return
        if presented_key == known_key:
            # Matches the pinned key - allow the connection.
            return
        # Mismatch against the pinned key - refuse the connection.
        message = ("Key for VM %s does not match the known public key."
                   % (self._vm_uuid))
        log.error(message)
        raise VmHostKeyException(message)
def prepare_ssh_client(session, vmuuid):
    """Open an authenticated paramiko SSHClient to the given VM.

    Connects as the VM's configured xscontainer user, authenticating with
    the pool's private key, and verifies the host key via MyHostKeyPolicy.
    Raises AuthenticationException, VmHostKeyException or SshException on
    failure; returns the connected client otherwise.
    """
    username = api_helper.get_vm_xscontainer_username(session, vmuuid)
    host = api_helper.get_suitable_vm_ip(session, vmuuid, SSH_PORT)
    log.info("prepare_ssh_client for vm %s, via %s@%s"
             % (vmuuid, username, host))
    client = paramiko.SSHClient()
    # Private key material comes from the pool's secret store.
    pkey = paramiko.rsakey.RSAKey.from_private_key(
        StringIO.StringIO(api_helper.get_idrsa_secret_private(session)))
    # Drop any keys loaded from known_hosts; trust is decided solely by
    # the pinned-key policy below.
    client.get_host_keys().clear()
    client.set_missing_host_key_policy(MyHostKeyPolicy(session, vmuuid))
    try:
        client.connect(host, port=SSH_PORT, username=username,
                       pkey=pkey, look_for_keys=False)
    except SshException:
        # This exception is already improved - leave it as it is
        raise
    except paramiko.AuthenticationException, exception:
        # Translate into our own exception type for callers to catch.
        message = ("prepare_ssh_client failed to authenticate with private key"
                   " on VM %s" % (vmuuid))
        log.info(message)
        raise AuthenticationException(message)
    except (paramiko.SSHException, socket.error), exception:
        # reraise as SshException, preserving the original traceback.
        raise SshException("prepare_ssh_client: %s" % exception,
                          (sys.exc_info()[2]))
    return client
def execute_docker(session, vmuuid, request):
    """Send a raw Docker API request to the VM's Docker socket over SSH
    and return the response."""
    socket_cmd = prepare_request_cmd()
    return execute_ssh(session, vmuuid, socket_cmd, request)
def execute_ssh(session, vmuuid, cmd, stdin_input=None):
    """Run a command on the VM over SSH and return its stdout.

    cmd may be a string or a list of words (joined with spaces).
    stdin_input, when given, is written to the command's stdin before
    stdin is shut down.  Raises SshException when the output exceeds
    constants.MAX_BUFFER_SIZE or the command exits non-zero.
    """
    client = None
    try:
        try:
            client = prepare_ssh_client(session, vmuuid)
            if isinstance(cmd, list):
                cmd = ' '.join(cmd)
            # Only log a stripped copy of stdin; the original is sent as-is.
            stripped_stdin_input = stdin_input
            if stripped_stdin_input:
                stripped_stdin_input = stripped_stdin_input.strip()
            log.info("execute_ssh will run '%s' with stdin '%s' on vm %s"
                     % (cmd, stripped_stdin_input, vmuuid))
            stdin, stdout, _ = client.exec_command(cmd)
            if stdin_input:
                stdin.write(stdin_input)
                stdin.channel.shutdown_write()
            output = stdout.read(constants.MAX_BUFFER_SIZE)
            # If a further byte can still be read, the output was truncated.
            if stdout.read(1) != "":
                raise SshException("too much data was returned when executing"
                                   "'%s'" % (cmd))
            returncode = stdout.channel.recv_exit_status()
            if returncode != 0:
                # NOTE(review): this logs the stdout file object itself, not
                # the captured 'output' text - confirm whether that was
                # intended.
                log.info("execute_ssh '%s' on vm %s exited with rc %d: Stdout:"
                         " %s" % (cmd, vmuuid, returncode, stdout))
                raise SshException("Returncode for '%s' is not 0" % cmd)
            return output
        except SshException:
            # This exception is already improved - leave it as it is
            raise
        except Exception, exception:
            # reraise as SshException, preserving the original traceback.
            raise SshException("execute_ssh: %s" % exception,
                               (sys.exc_info()[2]))
    finally:
        if client:
            client.close()
def execute_docker_data_listen(session, vmuuid, request,
                               stop_monitoring_request):
    """Generator yielding the Docker event stream one character at a time.

    Sends 'request' to the Docker socket over SSH and yields each byte of
    the response until EOF or until 'stop_monitoring_request' (an object
    evaluated for truthiness on every poll) becomes true.
    """
    ssh_client = prepare_ssh_client(session, vmuuid)
    try:
        cmd = prepare_request_cmd()
        log.info("execute_docker_listen_charbychar is running '%s' on VM '%s'"
                 % (cmd, vmuuid))
        stdin, stdout, _ = ssh_client.exec_command(cmd)
        stdin.write(request)
        # set non-blocking io so select.select drives the read loop
        stdout_fd = stdout.channel.fileno()
        fcntl.fcntl(stdout_fd,
                    fcntl.F_SETFL,
                    os.O_NONBLOCK | fcntl.fcntl(stdout_fd, fcntl.F_GETFL))
        while not stop_monitoring_request:
            # Wait for data (or timeout) so the stop flag is re-checked
            # at least every MONITOR_EVENTS_POLL_INTERVAL seconds.
            rlist, _, _ = select.select([stdout_fd], [], [],
                                        constants.MONITOR_EVENTS_POLL_INTERVAL)
            if not rlist:
                continue
            try:
                read_data = stdout.read(1)
                if read_data == "":
                    # EOF - remote side closed the stream.
                    break
                yield read_data
            except IOError, exception:
                log.info("IOError")
                # EAGAIN/EINTR are expected with non-blocking reads and
                # are simply retried; anything else propagates.
                if exception[0] not in (errno.EAGAIN, errno.EINTR):
                    log.info("Cleared")
                    raise
                sys.exc_clear()
    finally:
        try:
            ssh_client.close()
        except Exception:
            util.log.exception("Error when closing ssh_client for %r"
                               % ssh_client)
        # NOTE(review): 'cmd' is unbound here if prepare_request_cmd()
        # raised before its assignment - confirm this path cannot happen.
        log.info('execute_docker_listen_charbychar (%s) exited' % cmd)
def determine_error_cause(session, vmuuid):
    """Diagnose why Docker management of a VM fails; return a message.

    Probes, in order: network/IP reachability, SSH authentication and host
    key, ncat availability, presence of the Docker socket, and socket
    permissions.  Stops at the first failure that makes later checks
    pointless, accumulating human-readable advice in 'cause'.
    """
    cause = ""
    try:
        api_helper.get_suitable_vm_ip(session, vmuuid, SSH_PORT)
    except util.XSContainerException:
        cause = ERROR_CAUSE_NETWORK
        # No reason to continue, if there is no network connection
        return cause
    try:
        # Cheap end-to-end SSH check.
        execute_ssh(session, vmuuid, ['echo', 'hello world'])
    except AuthenticationException:
        cause = (cause + "Unable to verify key-based authentication. "
                 "Please prepare the VM to install a key.")
        # No reason to continue, if there is no SSH connection
        return cause
    except VmHostKeyException:
        cause = (cause + "The SSH host key of the VM has unexpectedly"
                 " changed, which could potentially be a security breach."
                 " If you think this is safe and expected, you"
                 " can reset the record stored in XS using xe"
                 " vm-param-remove uuid=<vm-uuid> param-name=other-config"
                 " param-key=xscontainer-sshhostkey")
        # No reason to continue, if there is no SSH connection
        return cause
    except SshException:
        cause = (cause + "Unable to connect to the VM using SSH. Please "
                 "check the logs inside the VM and also try manually.")
        # No reason to continue, if there is no SSH connection
        return cause
    # @todo: we could alternatively support socat
    # @todo: we could probably prepare this as part of xscontainer-prepare-vm
    try:
        execute_ssh(session, vmuuid, ['command -v ncat'])
    except util.XSContainerException:
        # Non-fatal: record the advice and keep probing the socket.
        cause = (cause + "Unable to find ncat inside the VM. Please install "
                 "ncat. ")
    try:
        execute_ssh(session, vmuuid, ['test', '-S', DOCKER_SOCKET_PATH])
    except util.XSContainerException:
        cause = (cause + "Unable to find the Docker unix socket at %s."
                 % (DOCKER_SOCKET_PATH) +
                 " Please install and run Docker.")
        # No reason to continue, if there is no docker socket
        return cause
    try:
        # Check both read and write permission on the socket.
        execute_ssh(session, vmuuid, ['test -r "%s" && test -w "%s" '
                                      % (DOCKER_SOCKET_PATH,
                                         DOCKER_SOCKET_PATH)])
    except util.XSContainerException:
        cause = (cause + "Unable to access the Docker unix socket. "
                 "Please make sure the specified user account "
                 "belongs to the docker account group.")
    if cause == "":
        cause = "Unable to determine cause of failure."
    return cause
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeline visualization for TensorFlow using Chrome Trace Format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
# The timeline target is usually imported as part of BUILD target
# "platform_test", which also includes the "platform"
# dependency. This is why the logging import here is okay.
from tensorflow.python.platform import tf_logging as logging
class AllocationMaximum(collections.namedtuple(
    'AllocationMaximum', ['timestamp', 'num_bytes', 'tensors'])):
  """Immutable record of an allocator's peak allocation within the timeline.

  Fields:
    timestamp: `tensorflow::Env::NowMicros()` at which the peak occurred.
    num_bytes: Total memory in use at that instant.
    tensors: Set of tensors live at that instant.
  """
class StepStatsAnalysis(collections.namedtuple(
    'StepStatsAnalysis', ['chrome_trace', 'allocator_maximums'])):
  """Immutable result of analyzing a step's stats.

  Fields:
    chrome_trace: dict holding the Chrome-trace analysis.
    allocator_maximums: dict mapping allocator name to AllocationMaximum.
  """
class _ChromeTraceFormatter(object):
"""A helper class for generating traces in Chrome Trace Format."""
def __init__(self, show_memory=False):
"""Constructs a new Chrome Trace formatter."""
self._show_memory = show_memory
self._events = []
self._metadata = []
def _create_event(self, ph, category, name, pid, tid, timestamp):
"""Creates a new Chrome Trace event.
For details of the file format, see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
Args:
ph: The type of event - usually a single character.
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
Returns:
A JSON compatible event object.
"""
event = {}
event['ph'] = ph
event['cat'] = category
event['name'] = name
event['pid'] = pid
event['tid'] = tid
event['ts'] = timestamp
return event
def emit_pid(self, name, pid):
"""Adds a process metadata event to the trace.
Args:
name: The process name as a string.
pid: Identifier of the process as an integer.
"""
event = {}
event['name'] = 'process_name'
event['ph'] = 'M'
event['pid'] = pid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_tid(self, name, pid, tid):
"""Adds a thread metadata event to the trace.
Args:
name: The thread name as a string.
pid: Identifier of the process as an integer.
tid: Identifier of the thread as an integer.
"""
event = {}
event['name'] = 'thread_name'
event['ph'] = 'M'
event['pid'] = pid
event['tid'] = tid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_region(self, timestamp, duration, pid, tid, category, name, args):
"""Adds a region event to the trace.
Args:
timestamp: The start timestamp of this region as a long integer.
duration: The duration of this region as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
category: The event category as a string.
name: The event name as a string.
args: A JSON-compatible dictionary of event arguments.
"""
event = self._create_event('X', category, name, pid, tid, timestamp)
event['dur'] = duration
event['args'] = args
self._events.append(event)
def emit_obj_create(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object creation event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('N', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_delete(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object deletion event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('D', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_snapshot(self, category, name, timestamp, pid, tid, object_id,
snapshot):
"""Adds an object snapshot event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
snapshot: A JSON-compatible representation of the object.
"""
event = self._create_event('O', category, name, pid, tid, timestamp)
event['id'] = object_id
event['args'] = {'snapshot': snapshot}
self._events.append(event)
def emit_flow_start(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow start event to the trace.
When matched with a flow end event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_flow_end(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow end event to the trace.
When matched with a flow start event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_counter(self, category, name, pid, timestamp, counter, value):
"""Emits a record for a single counter.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counter: Name of the counter as a string.
value: Value of the counter as an integer.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = {counter: value}
self._events.append(event)
def emit_counters(self, category, name, pid, timestamp, counters):
"""Emits a counter record for the dictionary 'counters'.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counters: Dictionary of counter values.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = counters.copy()
self._events.append(event)
def format_to_string(self, pretty=False):
"""Formats the chrome trace to a string.
Args:
pretty: (Optional.) If True, produce human-readable JSON output.
Returns:
A JSON-formatted string in Chrome Trace format.
"""
trace = {}
trace['traceEvents'] = self._metadata + self._events
if pretty:
return json.dumps(trace, indent=4, separators=(',', ': '))
else:
return json.dumps(trace, separators=(',', ':'))
class _TensorTracker(object):
"""An internal class to track the lifetime of a Tensor."""
def __init__(self, name, object_id, timestamp, pid, allocator, num_bytes):
"""Creates an object to track tensor references.
This class is not thread safe and is intended only for internal use by
the 'Timeline' class in this file.
Args:
name: The name of the Tensor as a string.
object_id: Chrome Trace object identifier assigned for this Tensor.
timestamp: The creation timestamp of this event as a long integer.
pid: Process identifier of the assicaiated device, as an integer.
allocator: Name of the allocator used to create the Tensor.
num_bytes: Number of bytes allocated (long integer).
Returns:
A 'TensorTracker' object.
"""
self._name = name
self._pid = pid
self._object_id = object_id
self._create_time = timestamp
self._allocator = allocator
self._num_bytes = num_bytes
self._ref_times = []
self._unref_times = []
@property
def name(self):
"""Name of this tensor."""
return self._name
@property
def pid(self):
"""ID of the process which created this tensor (an integer)."""
return self._pid
@property
def create_time(self):
"""Timestamp when this tensor was created (long integer)."""
return self._create_time
@property
def object_id(self):
"""Returns the object identifier of this tensor (integer)."""
return self._object_id
@property
def num_bytes(self):
"""Size of this tensor in bytes (long integer)."""
return self._num_bytes
@property
def allocator(self):
"""Name of the allocator used to create this tensor (string)."""
return self._allocator
@property
def last_unref(self):
"""Last unreference timestamp of this tensor (long integer)."""
return max(self._unref_times)
def add_ref(self, timestamp):
"""Adds a reference to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object reference as an integer.
"""
self._ref_times.append(timestamp)
def add_unref(self, timestamp):
"""Adds an unref to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object unreference as an integer.
"""
self._unref_times.append(timestamp)
class Timeline(object):
  """A class for visualizing execution timelines of TensorFlow steps."""

  def __init__(self, step_stats, graph=None):
    """Constructs a new Timeline.

    A 'Timeline' is used for visualizing the execution of a TensorFlow
    computation. It shows the timings and concurrency of execution at
    the granularity of TensorFlow Ops.
    This class is not thread safe.

    Args:
      step_stats: The 'StepStats' proto recording execution times.
      graph: (Optional) The 'Graph' that was executed.
    """
    self._step_stats = step_stats
    self._graph = graph
    self._chrome_trace = _ChromeTraceFormatter()
    self._next_pid = 0
    self._device_pids = {}  # device name -> pid for compute activity.
    self._tensor_pids = {}  # device name -> pid for tensors.
    self._tensors = {}  # tensor_name -> TensorTracker
    self._next_flow_id = 0
    self._flow_starts = {}  # tensor_name -> (timestamp, pid, tid)
    self._alloc_times = {}  # tensor_name -> ( time, allocator, size )
    self._allocator_maximums = {}  # allocator name => maximum bytes long

  def _alloc_pid(self):
    """Allocate a process Id."""
    # These are fake pids used to group trace rows, not real OS pids.
    pid = self._next_pid
    self._next_pid += 1
    return pid

  def _alloc_flow_id(self):
    """Allocate a flow Id."""
    # A fresh flow id is used for every producer->consumer arrow.
    flow_id = self._next_flow_id
    self._next_flow_id += 1
    return flow_id

  def _parse_op_label(self, label):
    """Parses the fields in a node timeline label.

    The label has the form 'name = op(input0, input1, ...)'.

    Returns:
      A (node_name, op_name, inputs) tuple, where inputs is a list of
      input-tensor names (empty if the op takes no inputs).
    """
    nn, rest = label.split(' = ')
    op, rest = rest.split('(')
    if rest == ')':
      # Nothing between the parentheses: the op has no inputs.
      inputs = []
    else:
      # Drop the trailing ')' and split the comma-separated input list.
      inputs = rest[:-1].split(', ')
    return nn, op, inputs

  def _assign_lanes(self):
    """Assigns non-overlapping lanes for the activities on each device."""
    for device_stats in self._step_stats.dev_stats:
      # TODO(pbar): Genuine thread IDs in NodeExecStats might be helpful.
      lanes = [0]
      for ns in device_stats.node_stats:
        l = -1
        # Greedily reuse the first lane that became free before this op
        # starts; lanes[i] holds the end time of that lane's last op.
        for (i, lts) in enumerate(lanes):
          if ns.all_start_micros > lts:
            l = i
            lanes[l] = ns.all_start_micros + ns.all_end_rel_micros
            break
        if l < 0:
          # Every lane is still busy: open a new one.
          l = len(lanes)
          lanes.append(ns.all_start_micros + ns.all_end_rel_micros)
        # The lane index doubles as the fake thread id in the trace.
        ns.thread_id = l

  def _emit_op(self, nodestats, pid, is_gputrace):
    """Generates a Chrome Trace event to show Op execution.

    Args:
      nodestats: The 'NodeExecStats' proto recording op execution.
      pid: The pid assigned for the device where this op ran.
      is_gputrace: If True then this op came from the GPUTracer.
    """
    node_name = nodestats.node_name
    start = nodestats.all_start_micros
    duration = nodestats.all_end_rel_micros
    tid = nodestats.thread_id
    if is_gputrace:
      # Node names should always have the form 'name:op'.
      # Appending 'unknown' guards against a missing ':op' suffix.
      fields = node_name.split(':') + ['unknown']
      node_name, op = fields[:2]
      inputs = []
    else:
      _, op, inputs = self._parse_op_label(nodestats.timeline_label)
    args = {'name': node_name, 'op': op}
    for i, iname in enumerate(inputs):
      args['input%d' % i] = iname
    self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args)

  def _emit_tensor_snapshot(self, tensor, timestamp, pid, tid, value):
    """Generate Chrome Trace snapshot event for a computed Tensor.

    Args:
      tensor: A 'TensorTracker' object.
      timestamp: The timestamp of this snapshot as a long integer.
      pid: The pid assigned for showing the device where this op ran.
      tid: The tid of the thread computing the tensor snapshot.
      value: A JSON-compliant snapshot of the object.
    """
    # Strip double quotes from the text-formatted proto so the description
    # nests cleanly inside the JSON trace.
    desc = str(value.tensor_description).replace('"', '')
    snapshot = {'tensor_description': desc}
    self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid,
                                         tid, tensor.object_id, snapshot)

  def _produce_tensor(self, name, timestamp, tensors_pid, allocator, num_bytes):
    """Creates a '_TensorTracker' for `name` and registers it.

    Args:
      name: Name of the output tensor as a string.
      timestamp: Creation timestamp (micros) as a long integer.
      tensors_pid: The fake pid used to display this device's tensors.
      allocator: Name of the allocator that produced the tensor.
      num_bytes: Number of bytes allocated (long integer).

    Returns:
      The newly created '_TensorTracker', also stored in self._tensors.
    """
    # Object ids are assigned sequentially in creation order.
    object_id = len(self._tensors)
    tensor = _TensorTracker(name, object_id, timestamp, tensors_pid, allocator,
                            num_bytes)
    self._tensors[name] = tensor
    return tensor

  def _is_gputrace_device(self, device_name):
    """Returns true if this device is part of the GPUTracer logging."""
    return '/stream:' in device_name or '/memcpy' in device_name

  def _allocate_pids(self):
    """Allocate fake process ids for each device in the StepStats."""
    self._allocators_pid = self._alloc_pid()
    self._chrome_trace.emit_pid('Allocators', self._allocators_pid)
    # Add processes in the Chrome trace to show compute and data activity.
    for dev_stats in self._step_stats.dev_stats:
      device_pid = self._alloc_pid()
      self._device_pids[dev_stats.device] = device_pid
      tensors_pid = self._alloc_pid()
      self._tensor_pids[dev_stats.device] = tensors_pid
      self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid)
      self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid)

  def _analyze_tensors(self, show_memory):
    """Analyze tensor references to track dataflow.

    Args:
      show_memory: If True, also emit object-create and snapshot events so
        the trace can display tensor lifetimes and contents.
    """
    for dev_stats in self._step_stats.dev_stats:
      device_pid = self._device_pids[dev_stats.device]
      tensors_pid = self._tensor_pids[dev_stats.device]
      for node_stats in dev_stats.node_stats:
        tid = node_stats.thread_id
        node_name = node_stats.node_name
        start_time = node_stats.all_start_micros
        end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
        for index, output in enumerate(node_stats.output):
          # Output slot 0 uses the bare node name; slot k > 0 is 'name:k'.
          if index:
            output_name = '%s:%d' % (node_name, index)
          else:
            output_name = node_name
          allocation = output.tensor_description.allocation_description
          num_bytes = allocation.requested_bytes
          allocator_name = allocation.allocator_name
          tensor = self._produce_tensor(output_name, start_time, tensors_pid,
                                        allocator_name, num_bytes)
          tensor.add_ref(start_time)
          tensor.add_unref(end_time)
          # Dataflow arrows for this tensor start where its producer ended.
          self._flow_starts[output_name] = (end_time, device_pid, tid)
          if show_memory:
            self._chrome_trace.emit_obj_create('Tensor', output_name,
                                               start_time, tensors_pid, tid,
                                               tensor.object_id)
            # NOTE(review): the snapshot is stamped at end_time - 1,
            # presumably to keep it strictly inside the op's lifetime.
            self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid,
                                       output)

  def _show_compute(self, show_dataflow):
    """Visualize the computation activity.

    Args:
      show_dataflow: If True, emit flow start/end events connecting each
        tensor's producer to its consumers.
    """
    for dev_stats in self._step_stats.dev_stats:
      device_name = dev_stats.device
      device_pid = self._device_pids[device_name]
      is_gputrace = self._is_gputrace_device(device_name)
      for node_stats in dev_stats.node_stats:
        tid = node_stats.thread_id
        start_time = node_stats.all_start_micros
        end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
        self._emit_op(node_stats, device_pid, is_gputrace)
        if is_gputrace:
          # GPUTracer records carry no timeline_label, so no dataflow here.
          continue
        _, _, inputs = self._parse_op_label(node_stats.timeline_label)
        for input_name in inputs:
          if input_name not in self._tensors:
            # This can happen when partitioning has inserted a Send/Recv.
            # We remove the numeric suffix so that the dataflow appears to
            # come from the original node. Ideally, the StepStats would
            # contain logging for the Send and Recv nodes.
            index = input_name.rfind('/_')
            if index > 0:
              input_name = input_name[:index]
          if input_name in self._tensors:
            tensor = self._tensors[input_name]
            tensor.add_ref(start_time)
            tensor.add_unref(end_time - 1)
            if show_dataflow:
              # We use a different flow ID for every graph edge.
              create_time, create_pid, create_tid = self._flow_starts[
                  input_name]
              # Don't add flows when producer and consumer ops are on the same
              # pid/tid since the horizontal arrows clutter the visualization.
              if create_pid != device_pid or create_tid != tid:
                flow_id = self._alloc_flow_id()
                self._chrome_trace.emit_flow_start(input_name, create_time,
                                                   create_pid, create_tid,
                                                   flow_id)
                self._chrome_trace.emit_flow_end(input_name, start_time,
                                                 device_pid, tid, flow_id)
          else:
            logging.vlog(1, 'Can\'t find tensor %s - removed by CSE?',
                         input_name)

  def _show_memory_counters(self):
    """Produce a counter series for each memory allocator."""
    # Iterate over all tensor trackers to build a list of allocations and
    # frees for each allocator. Then sort the lists and emit a cumulative
    # counter series for each allocator.
    allocations = {}
    for name in self._tensors:
      tensor = self._tensors[name]
      self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref,
                                         tensor.pid, 0, tensor.object_id)
      allocator = tensor.allocator
      if allocator not in allocations:
        allocations[allocator] = []
      num_bytes = tensor.num_bytes
      # One +num_bytes entry at creation and one -num_bytes at last unref.
      allocations[allocator].append((tensor.create_time, num_bytes, name))
      allocations[allocator].append((tensor.last_unref, -num_bytes, name))
    alloc_maxes = {}
    # Generate a counter series showing total allocations for each allocator.
    for allocator in allocations:
      alloc_list = allocations[allocator]
      # Tuples sort by timestamp first, putting events in chronological order.
      alloc_list.sort()
      total_bytes = 0
      alloc_tensor_set = set()
      alloc_maxes[allocator] = AllocationMaximum(
          timestamp=0, num_bytes=0, tensors=set())
      for time, num_bytes, name in alloc_list:
        total_bytes += num_bytes
        if num_bytes < 0:
          alloc_tensor_set.discard(name)
        else:
          alloc_tensor_set.add(name)
        # Track the high-water mark and the tensors live at that moment.
        if total_bytes > alloc_maxes[allocator].num_bytes:
          alloc_maxes[allocator] = AllocationMaximum(
              timestamp=time,
              num_bytes=total_bytes,
              tensors=copy.deepcopy(alloc_tensor_set))
        self._chrome_trace.emit_counter('Memory', allocator,
                                        self._allocators_pid, time, allocator,
                                        total_bytes)
    self._allocator_maximums = alloc_maxes

  def analyze_step_stats(self, show_dataflow=True, show_memory=True):
    """Analyzes the step stats and builds the Chrome trace.

    Args:
      show_dataflow: (Optional.) If True, add flow events to the trace
        connecting producers and consumers of tensors.
      show_memory: (Optional.) If True, add object snapshot events and
        memory counters to the trace.

    Returns:
      A 'StepStatsAnalysis' holding the chrome trace and the per-allocator
      allocation maximums.
    """
    self._allocate_pids()
    self._assign_lanes()
    self._analyze_tensors(show_memory)
    self._show_compute(show_dataflow)
    if show_memory:
      self._show_memory_counters()
    return StepStatsAnalysis(
        chrome_trace=self._chrome_trace,
        allocator_maximums=self._allocator_maximums)

  def generate_chrome_trace_format(self, show_dataflow=True, show_memory=False):
    """Produces a trace in Chrome Trace Format.

    Args:
      show_dataflow: (Optional.) If True, add flow events to the trace
        connecting producers and consumers of tensors.
      show_memory: (Optional.) If True, add object snapshot events to the trace
        showing the sizes and lifetimes of tensors.

    Returns:
      A JSON formatted string in Chrome Trace format.
    """
    step_stats_analysis = self.analyze_step_stats(
        show_dataflow=show_dataflow, show_memory=show_memory)
    return step_stats_analysis.chrome_trace.format_to_string(pretty=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.