repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
coderfi/ansible-modules-extras
database/mongodb_user.py
24
8978
#!/usr/bin/python # (c) 2012, Elliott Foster <elliott@fourkitchens.com> # Sponsored by Four Kitchens http://fourkitchens.com. # (c) 2014, Epic Games, Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: mongodb_user short_description: Adds or removes a user from a MongoDB database. description: - Adds or removes a user from a MongoDB database. version_added: "1.1" options: login_user: description: - The username used to authenticate with required: false default: null login_password: description: - The password used to authenticate with required: false default: null login_host: description: - The host running the database required: false default: localhost login_port: description: - The port to connect to required: false default: 27017 replica_set: version_added: "1.6" description: - Replica set to connect to (automatically connects to primary for writes) required: false default: null database: description: - The name of the database to add/remove the user from required: true user: description: - The name of the user to add or remove required: true default: null password: description: - The password to use for the user required: false default: null ssl: version_added: "1.8" description: - Whether to use an SSL connection when connecting to the database default: False roles: version_added: "1.3" description: - "The database user roles valid 
values are one or more of the following: read, 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'dbAdminAnyDatabase'" - This param requires mongodb 2.4+ and pymongo 2.5+ required: false default: "readWrite" state: state: description: - The database user state required: false default: present choices: [ "present", "absent" ] notes: - Requires the pymongo Python package on the remote host, version 2.4.2+. This can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html requirements: [ "pymongo" ] author: Elliott Foster ''' EXAMPLES = ''' # Create 'burgers' database user with name 'bob' and password '12345'. - mongodb_user: database=burgers name=bob password=12345 state=present # Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly) - mongodb_user: database=burgers name=bob password=12345 state=present ssl=True # Delete 'burgers' database user with name 'bob'. 
- mongodb_user: database=burgers name=bob state=absent # Define more users with various specific roles (if not defined, no roles is assigned, and the user will be added via pre mongo 2.2 style) - mongodb_user: database=burgers name=ben password=12345 roles='read' state=present - mongodb_user: database=burgers name=jim password=12345 roles='readWrite,dbAdmin,userAdmin' state=present - mongodb_user: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present # add a user to database in a replica set, the primary server is automatically discovered and written to - mongodb_user: database=burgers name=bob replica_set=blecher password=12345 roles='readWriteAnyDatabase' state=present ''' import ConfigParser from distutils.version import LooseVersion try: from pymongo.errors import ConnectionFailure from pymongo.errors import OperationFailure from pymongo import version as PyMongoVersion from pymongo import MongoClient except ImportError: try: # for older PyMongo 2.2 from pymongo import Connection as MongoClient except ImportError: pymongo_found = False else: pymongo_found = True else: pymongo_found = True # ========================================= # MongoDB module specific support methods. 
# def user_add(module, client, db_name, user, password, roles): db = client[db_name] if roles is None: db.add_user(user, password, False) else: try: db.add_user(user, password, None, roles=roles) except OperationFailure, e: err_msg = str(e) if LooseVersion(PyMongoVersion) <= LooseVersion('2.5'): err_msg = err_msg + ' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)' module.fail_json(msg=err_msg) def user_remove(client, db_name, user): db = client[db_name] db.remove_user(user) def load_mongocnf(): config = ConfigParser.RawConfigParser() mongocnf = os.path.expanduser('~/.mongodb.cnf') try: config.readfp(open(mongocnf)) creds = dict( user=config.get('client', 'user'), password=config.get('client', 'pass') ) except (ConfigParser.NoOptionError, IOError): return False return creds # ========================================= # Module execution. # def main(): module = AnsibleModule( argument_spec = dict( login_user=dict(default=None), login_password=dict(default=None), login_host=dict(default='localhost'), login_port=dict(default='27017'), replica_set=dict(default=None), database=dict(required=True, aliases=['db']), user=dict(required=True, aliases=['name']), password=dict(aliases=['pass']), ssl=dict(default=False), roles=dict(default=None, type='list'), state=dict(default='present', choices=['absent', 'present']), ) ) if not pymongo_found: module.fail_json(msg='the python pymongo module is required') login_user = module.params['login_user'] login_password = module.params['login_password'] login_host = module.params['login_host'] login_port = module.params['login_port'] replica_set = module.params['replica_set'] db_name = module.params['database'] user = module.params['user'] password = module.params['password'] ssl = module.params['ssl'] roles = module.params['roles'] state = module.params['state'] try: if replica_set: client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl) else: client = MongoClient(login_host, 
int(login_port), ssl=ssl) # try to authenticate as a target user to check if it already exists try: client[db_name].authenticate(user, password) if state == 'present': module.exit_json(changed=False, user=user) except OperationFailure: if state == 'absent': module.exit_json(changed=False, user=user) if login_user is None and login_password is None: mongocnf_creds = load_mongocnf() if mongocnf_creds is not False: login_user = mongocnf_creds['user'] login_password = mongocnf_creds['password'] elif login_password is None and login_user is not None: module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') if login_user is not None and login_password is not None: client.admin.authenticate(login_user, login_password) except ConnectionFailure, e: module.fail_json(msg='unable to connect to database: %s' % str(e)) if state == 'present': if password is None: module.fail_json(msg='password parameter required when adding a user') try: user_add(module, client, db_name, user, password, roles) except OperationFailure, e: module.fail_json(msg='Unable to add or update user: %s' % str(e)) elif state == 'absent': try: user_remove(client, db_name, user) except OperationFailure, e: module.fail_json(msg='Unable to remove user: %s' % str(e)) module.exit_json(changed=True, user=user) # import module snippets from ansible.module_utils.basic import * main()
gpl-3.0
ThomasWu/PersonalNewsWeb
backend/common/configuration_service_client_test.py
1
1367
import configuration_service_client as conf_client def test_basic_get(): amqp_settings = conf_client.getSystemSettings('amqp') print amqp_settings assert amqp_settings is not None assert len(amqp_settings) > 0 print 'test basic get passed' def test_setAndDrop(): system = 'test' settings = {'url': 'test'} conf_client.setSystemSettings(system, settings) received_settings = conf_client.getSystemSettings(system) assert received_settings == settings conf_client.dropSystemSettings(system) received_settings = conf_client.getSystemSettings(system) assert received_settings is None print 'test set and drop passed' def test_invalidSet(): valid_system_name = 'test' invalid_system_name = 1 valid_settings = {'url': 'test'} invalid_settings = None # test invalid system name conf_client.setSystemSettings(invalid_system_name, valid_settings) received_settings = conf_client.getSystemSettings(invalid_system_name) assert received_settings is None # test invalid settings conf_client.setSystemSettings(valid_system_name, invalid_settings) received_settings = conf_client.getSystemSettings(valid_system_name) assert received_settings is None print 'test invalid set passed' if __name__=='__main__': test_basic_get() test_setAndDrop() test_invalidSet()
mit
marifersahin/pyfolio
venv/bin/player.py
1
2205
#!/Users/polizom/Django/pyfolio/venv/bin/python2.7 # # The Python Imaging Library # $Id$ # from __future__ import print_function try: from tkinter import * except ImportError: from Tkinter import * from PIL import Image, ImageTk import sys # -------------------------------------------------------------------- # an image animation player class UI(Label): def __init__(self, master, im): if isinstance(im, list): # list of images self.im = im[1:] im = self.im[0] else: # sequence self.im = im if im.mode == "1": self.image = ImageTk.BitmapImage(im, foreground="white") else: self.image = ImageTk.PhotoImage(im) Label.__init__(self, master, image=self.image, bg="black", bd=0) self.update() try: duration = im.info["duration"] except KeyError: duration = 100 self.after(duration, self.next) def next(self): if isinstance(self.im, list): try: im = self.im[0] del self.im[0] self.image.paste(im) except IndexError: return # end of list else: try: im = self.im im.seek(im.tell() + 1) self.image.paste(im) except EOFError: return # end of file try: duration = im.info["duration"] except KeyError: duration = 100 self.after(duration, self.next) self.update_idletasks() # -------------------------------------------------------------------- # script interface if __name__ == "__main__": if not sys.argv[1:]: print("Syntax: python player.py imagefile(s)") sys.exit(1) filename = sys.argv[1] root = Tk() root.title(filename) if len(sys.argv) > 2: # list of images print("loading...") im = [] for filename in sys.argv[1:]: im.append(Image.open(filename)) else: # sequence im = Image.open(filename) UI(root, im).pack() root.mainloop()
mit
a25kk/tam
src/tam.sitecontent/tam/sitecontent/testing.py
1
1960
# -*- coding: utf-8 -*- """Base module for unittesting.""" from plone.app.testing import applyProfile from plone.app.testing import FunctionalTesting from plone.app.testing import IntegrationTesting from plone.app.testing import login from plone.app.testing import PLONE_FIXTURE from plone.app.testing import PloneSandboxLayer from plone.app.testing import setRoles from plone.app.testing import TEST_USER_ID from plone.app.testing import TEST_USER_NAME from plone.testing import z2 import unittest2 as unittest class tamSitecontentLayer(PloneSandboxLayer): defaultBases = (PLONE_FIXTURE,) def setUpZope(self, app, configurationContext): """Set up Zope.""" # Load ZCML import tam.sitecontent self.loadZCML(package=tam.sitecontent) z2.installProduct(app, 'tam.sitecontent') def setUpPloneSite(self, portal): """Set up Plone.""" # Install into Plone site using portal_setup applyProfile(portal, 'tam.sitecontent:default') # Login and create some test content setRoles(portal, TEST_USER_ID, ['Manager']) login(portal, TEST_USER_NAME) portal.invokeFactory('Folder', 'folder') # Commit so that the test browser sees these objects portal.portal_catalog.clearFindAndRebuild() import transaction transaction.commit() def tearDownZope(self, app): """Tear down Zope.""" z2.uninstallProduct(app, 'tam.sitecontent') FIXTURE = tamSitecontentLayer() INTEGRATION_TESTING = IntegrationTesting( bases=(FIXTURE,), name="tamSitecontentLayer:Integration") FUNCTIONAL_TESTING = FunctionalTesting( bases=(FIXTURE,), name="tamSitecontentLayer:Functional") class IntegrationTestCase(unittest.TestCase): """Base class for integration tests.""" layer = INTEGRATION_TESTING class FunctionalTestCase(unittest.TestCase): """Base class for functional tests.""" layer = FUNCTIONAL_TESTING
mit
bguillot/OpenUpgrade
openerp/report/render/rml2html/rml2html.py
438
15438
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2005, Fabien Pinckaers, UCL, FSA # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # ############################################################################## import sys import cStringIO from lxml import etree import copy from openerp.report.render.rml2pdf import utils class _flowable(object): def __init__(self, template, doc, localcontext = None): self._tags = { 'title': self._tag_title, 'spacer': self._tag_spacer, 'para': self._tag_para, 'section':self._section, 'nextFrame': self._tag_next_frame, 'blockTable': self._tag_table, 'pageBreak': self._tag_page_break, 'setNextTemplate': self._tag_next_template, } self.template = template self.doc = doc self.localcontext = localcontext self._cache = {} def _tag_page_break(self, node): return '<br/>'*3 def _tag_next_template(self, node): return '' def _tag_next_frame(self, node): result=self.template.frame_stop() result+='<br/>' result+=self.template.frame_start() return result def _tag_title(self, node): node.tag='h1' return etree.tostring(node) def _tag_spacer(self, node): length = 1+int(utils.unit_get(node.get('length')))/35 return "<br/>"*length def _tag_table(self, node): 
new_node = copy.deepcopy(node) for child in new_node: new_node.remove(child) new_node.tag = 'table' def process(node,new_node): for child in utils._child_get(node,self): new_child = copy.deepcopy(child) new_node.append(new_child) if len(child): for n in new_child: new_child.remove(n) process(child, new_child) else: new_child.text = utils._process_text(self, child.text) new_child.tag = 'p' try: if new_child.get('style').find('terp_tblheader')!= -1: new_node.tag = 'th' except Exception: pass process(node,new_node) if new_node.get('colWidths',False): sizes = map(lambda x: utils.unit_get(x), new_node.get('colWidths').split(',')) tr = etree.SubElement(new_node, 'tr') for s in sizes: etree.SubElement(tr, 'td', width=str(s)) return etree.tostring(new_node) def _tag_para(self, node): new_node = copy.deepcopy(node) new_node.tag = 'p' if new_node.attrib.get('style',False): new_node.set('class', new_node.get('style')) new_node.text = utils._process_text(self, node.text) return etree.tostring(new_node) def _section(self, node): result = '' for child in utils._child_get(node, self): if child.tag in self._tags: result += self._tags[child.tag](child) return result def render(self, node): result = self.template.start() result += self.template.frame_start() for n in utils._child_get(node, self): if n.tag in self._tags: result += self._tags[n.tag](n) else: pass result += self.template.frame_stop() result += self.template.end() return result.encode('utf-8').replace('"',"\'").replace('°','&deg;') class _rml_tmpl_tag(object): def __init__(self, *args): pass def tag_start(self): return '' def tag_end(self): return False def tag_stop(self): return '' def tag_mergeable(self): return True class _rml_tmpl_frame(_rml_tmpl_tag): def __init__(self, posx, width): self.width = width self.posx = posx def tag_start(self): return "<table border=\'0\' width=\'%d\'><tr><td width=\'%d\'>&nbsp;</td><td>" % (self.width+self.posx,self.posx) def tag_end(self): return True def tag_stop(self): return 
'</td></tr></table><br/>' def tag_mergeable(self): return False def merge(self, frame): pass class _rml_tmpl_draw_string(_rml_tmpl_tag): def __init__(self, node, style,localcontext = {}): self.localcontext = localcontext self.posx = utils.unit_get(node.get('x')) self.posy = utils.unit_get(node.get('y')) aligns = { 'drawString': 'left', 'drawRightString': 'right', 'drawCentredString': 'center' } align = aligns[node.tag] self.pos = [(self.posx, self.posy, align, utils._process_text(self, node.text), style.get('td'), style.font_size_get('td'))] def tag_start(self): self.pos.sort() res = "<table border='0' cellpadding='0' cellspacing='0'><tr>" posx = 0 i = 0 for (x,y,align,txt, style, fs) in self.pos: if align=="left": pos2 = len(txt)*fs res+="<td width=\'%d\'></td><td style=\'%s\' width=\'%d\'>%s</td>" % (x - posx, style, pos2, txt) posx = x+pos2 if align=="right": res+="<td width=\'%d\' align=\'right\' style=\'%s\'>%s</td>" % (x - posx, style, txt) posx = x if align=="center": res+="<td width=\'%d\' align=\'center\' style=\'%s\'>%s</td>" % ((x - posx)*2, style, txt) posx = 2*x-posx i+=1 res+='</tr></table>' return res def merge(self, ds): self.pos+=ds.pos class _rml_tmpl_draw_lines(_rml_tmpl_tag): def __init__(self, node, style, localcontext = {}): self.localcontext = localcontext coord = [utils.unit_get(x) for x in utils._process_text(self, node.text).split(' ')] self.ok = False self.posx = coord[0] self.posy = coord[1] self.width = coord[2]-coord[0] self.ok = coord[1]==coord[3] self.style = style self.style = style.get('hr') def tag_start(self): if self.ok: return "<table border=\'0\' cellpadding=\'0\' cellspacing=\'0\' width=\'%d\'><tr><td width=\'%d\'></td><td><hr width=\'100%%\' style=\'margin:0px; %s\'></td></tr></table>" % (self.posx+self.width,self.posx,self.style) else: return '' class _rml_stylesheet(object): def __init__(self, localcontext, stylesheet, doc): self.doc = doc self.localcontext = localcontext self.attrs = {} self._tags = { 'fontSize': lambda 
x: ('font-size',str(utils.unit_get(x)+5.0)+'px'), 'alignment': lambda x: ('text-align',str(x)) } result = '' for ps in stylesheet.findall('paraStyle'): attr = {} attrs = ps.attrib for key, val in attrs.items(): attr[key] = val attrs = [] for a in attr: if a in self._tags: attrs.append('%s:%s' % self._tags[a](attr[a])) if len(attrs): result += 'p.'+attr['name']+' {'+'; '.join(attrs)+'}\n' self.result = result def render(self): return self.result class _rml_draw_style(object): def __init__(self): self.style = {} self._styles = { 'fill': lambda x: {'td': {'color':x.get('color')}}, 'setFont': lambda x: {'td': {'font-size':x.get('size')+'px'}}, 'stroke': lambda x: {'hr': {'color':x.get('color')}}, } def update(self, node): if node.tag in self._styles: result = self._styles[node.tag](node) for key in result: if key in self.style: self.style[key].update(result[key]) else: self.style[key] = result[key] def font_size_get(self,tag): size = utils.unit_get(self.style.get('td', {}).get('font-size','16')) return size def get(self,tag): if not tag in self.style: return "" return ';'.join(['%s:%s' % (x[0],x[1]) for x in self.style[tag].items()]) class _rml_template(object): def __init__(self, template, localcontext=None): self.frame_pos = -1 self.localcontext = localcontext self.frames = [] self.template_order = [] self.page_template = {} self.loop = 0 self._tags = { 'drawString': _rml_tmpl_draw_string, 'drawRightString': _rml_tmpl_draw_string, 'drawCentredString': _rml_tmpl_draw_string, 'lines': _rml_tmpl_draw_lines } self.style = _rml_draw_style() rc = 'data:image/png;base64,' self.data = '' for pt in template.findall('pageTemplate'): frames = {} id = pt.get('id') self.template_order.append(id) for tmpl in pt.findall('frame'): posy = int(utils.unit_get(tmpl.get('y1'))) posx = int(utils.unit_get(tmpl.get('x1'))) frames[(posy,posx,tmpl.get('id'))] = _rml_tmpl_frame(posx, utils.unit_get(tmpl.get('width'))) for tmpl in pt.findall('pageGraphics'): for n in tmpl: if n.tag == 'image': 
self.data = rc + utils._process_text(self, n.text) if n.tag in self._tags: t = self._tags[n.tag](n, self.style,self.localcontext) frames[(t.posy,t.posx,n.tag)] = t else: self.style.update(n) keys = frames.keys() keys.sort() keys.reverse() self.page_template[id] = [] for key in range(len(keys)): if key>0 and keys[key-1][0] == keys[key][0]: if type(self.page_template[id][-1]) == type(frames[keys[key]]): if self.page_template[id][-1].tag_mergeable(): self.page_template[id][-1].merge(frames[keys[key]]) continue self.page_template[id].append(frames[keys[key]]) self.template = self.template_order[0] def _get_style(self): return self.style def set_next_template(self): self.template = self.template_order[(self.template_order.index(name)+1) % self.template_order] self.frame_pos = -1 def set_template(self, name): self.template = name self.frame_pos = -1 def frame_start(self): result = '' frames = self.page_template[self.template] ok = True while ok: self.frame_pos += 1 if self.frame_pos>=len(frames): self.frame_pos=0 self.loop=1 ok = False continue f = frames[self.frame_pos] result+=f.tag_start() ok = not f.tag_end() if ok: result+=f.tag_stop() return result def frame_stop(self): frames = self.page_template[self.template] f = frames[self.frame_pos] result=f.tag_stop() return result def start(self): return '' def end(self): result = '' while not self.loop: result += self.frame_start() result += self.frame_stop() return result class _rml_doc(object): def __init__(self, data, localcontext): self.dom = etree.XML(data) self.localcontext = localcontext self.filename = self.dom.get('filename') self.result = '' def render(self, out): self.result += '''<!DOCTYPE HTML PUBLIC "-//w3c//DTD HTML 4.0 Frameset//EN"> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <style type="text/css"> p {margin:0px; font-size:12px;} td {font-size:14px;} ''' style = self.dom.findall('stylesheet')[0] s = _rml_stylesheet(self.localcontext, style, self.dom) self.result += 
s.render() self.result+=''' </style> ''' list_story =[] for story in utils._child_get(self.dom, self, 'story'): template = _rml_template(self.dom.findall('template')[0], self.localcontext) f = _flowable(template, self.dom, localcontext = self.localcontext) story_text = f.render(story) list_story.append(story_text) del f if template.data: tag = '''<img src = '%s' width=80 height=72/>'''% template.data else: tag = '' self.result +=''' <script type="text/javascript"> var indexer = 0; var aryTest = %s ; function nextData() { if(indexer < aryTest.length -1) { indexer += 1; document.getElementById("tiny_data").innerHTML=aryTest[indexer]; } } function prevData() { if (indexer > 0) { indexer -= 1; document.getElementById("tiny_data").innerHTML=aryTest[indexer]; } } </script> </head> <body> %s <div id="tiny_data"> %s </div> <br> <input type="button" value="next" onclick="nextData();"> <input type="button" value="prev" onclick="prevData();"> </body></html>'''%(list_story,tag,list_story[0]) out.write( self.result) def parseString(data,localcontext = {}, fout=None): r = _rml_doc(data, localcontext) if fout: fp = file(fout,'wb') r.render(fp) fp.close() return fout else: fp = cStringIO.StringIO() r.render(fp) return fp.getvalue() def rml2html_help(): print 'Usage: rml2html input.rml >output.html' print 'Render the standard input (RML) and output an HTML file' sys.exit(0) if __name__=="__main__": if len(sys.argv)>1: if sys.argv[1]=='--help': rml2html_help() print parseString(file(sys.argv[1], 'r').read()), else: print 'Usage: rml2html input.rml >output.html' print 'Try \'rml2html --help\' for more information.' # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
mscuthbert/abjad
abjad/tools/pitchtools/NumberedInterval.py
2
12039
# -*- encoding: utf-8 -*- import functools from abjad.tools import mathtools from abjad.tools.pitchtools.Interval import Interval @functools.total_ordering class NumberedInterval(Interval): '''A numbered interval. :: >>> numbered_interval = pitchtools.NumberedInterval(-14) >>> numbered_interval NumberedInterval(-14) ''' ### CLASS VARIABLES ### __slots__ = ( '_number', ) ### INITIALIZER ### def __init__(self, arg=None): from abjad.tools import pitchtools if isinstance(arg, (int, float, int)): number = arg elif isinstance(arg, pitchtools.Interval): number = arg.semitones elif isinstance(arg, pitchtools.IntervalClass): interval_class = pitchtools.NumberedIntervalClass(arg) number = interval_class.number elif arg is None: number = 0 else: message = 'can not initialize {}: {!r}.' message = message.format(type(self).__name__, arg) raise TypeError(message) self._number = number ### SPECIAL METHODS ### def __abs__(self): r'''Absolute value of numbered interval. Returns new numbered interval. ''' return type(self)(abs(self._number)) def __add__(self, arg): r'''Adds `arg` to numbered interval. Returns new numbered interval. ''' if isinstance(arg, type(self)): number = self.number + arg.number return type(self)(number) message = 'must be {}: {!r}.' message = message.format(type(self), arg) raise TypeError(message) def __copy__(self): r'''Copies numbered interval. Returns new numbered interval. ''' return type(self)(self.number) def __eq__(self, arg): r'''Is true when `arg` is a numbered interval with number equal to that of this numbered interval. Otherwise false. Returns boolean. ''' if isinstance(arg, type(self)): if self.number == arg.number: return True return False def __float__(self): r'''Changes numbered interval to float. Returns float. ''' return float(self._number) def __hash__(self): r'''Hashes numbered interval. Returns integer. ''' return hash(repr(self)) def __int__(self): r'''Changes numbered interval to integer. Returns integer. 
''' return int(self._number) def __lt__(self, arg): r'''Is true when `arg` is a numbered interval with same direction number as this numbered interval and with number greater than that of this numbered interval. Otherwise false. Returns boolean. ''' if not isinstance(arg, type(self)): message = 'must be numbered interval: {!r}.' message = message.format(arg) raise TypeError(message) if not self.direction_number == arg.direction_number: message = 'can only compare intervals of same direction.' raise ValueError(message) return abs(self.number) < abs(arg.number) def __neg__(self): r'''Negates numbered interval. Returns new numbered interval. ''' return type(self)(-self._number) def __str__(self): r'''String representation of numbered interval. Returns string. ''' return self._format_string def __sub__(self, arg): r'''Subtracts `arg` from numbered interval. Returns new numbered interval. ''' if isinstance(arg, type(self)): number = self.number - arg.number return type(self)(number) message = 'must be {}: {!r}.' message = message.format(type(self), arg) raise TypeError(message) ### PRIVATE PROPERTIES ### @property def _format_string(self): return '{}{}'.format(self._direction_symbol, abs(self.number)) @property def _storage_format_specification(self): from abjad.tools import systemtools positional_argument_values = ( self.number, ) return systemtools.StorageFormatSpecification( self, is_indented=False, keyword_argument_names=(), positional_argument_values=positional_argument_values, ) ### PUBLIC METHODS ### @classmethod def from_pitch_carriers(cls, pitch_carrier_1, pitch_carrier_2): '''Makes numbered interval from `pitch_carrier_1` and `pitch_carrier_2`. :: >>> pitchtools.NumberedInterval.from_pitch_carriers( ... NamedPitch(-2), ... NamedPitch(12), ... ) NumberedInterval(14) Returns numbered interval. 
''' from abjad.tools import pitchtools # get pitches pitch_1 = pitchtools.NamedPitch.from_pitch_carrier(pitch_carrier_1) pitch_2 = pitchtools.NamedPitch.from_pitch_carrier(pitch_carrier_2) # get difference in semitones number = pitchtools.NumberedPitch(pitch_2).pitch_number - \ pitchtools.NumberedPitch(pitch_1).pitch_number # change 1.0, 2.0, ... into 1, 2, ... number = mathtools.integer_equivalent_number_to_integer(number) # return numbered interval return cls(number) ### PUBLIC PROPERTIES ### @property def direction_number(self): r'''Direction sign of numbered interval. :: >>> pitchtools.NumberedInterval(-14).direction_number -1 Returns integer. ''' return mathtools.sign(self.number) @property def direction_string(self): r'''Direction string of named interval. :: >>> pitchtools.NumberedInterval(-14).direction_string 'descending' Returns ``'ascending'``, ``'descending'`` or none. ''' if self.direction_number == -1: return 'descending' elif self.direction_number == 0: return None elif self.direction_number == 1: return 'ascending' @property def number(self): r'''Number of numbered interval. Returns number. ''' return self._number @property def numbered_interval_number(self): r'''Number of numbered interval. :: >>> pitchtools.NumberedInterval(-14).numbered_interval_number -14 Returns integer or float. ''' return self._number @property def semitones(self): r'''Semitones corresponding to numbered interval. Returns nonnegative number. ''' return self.number ### PUBLIC METHODS ### def to_named_interval(self, staff_positions): r'''Changes numbered interval to named interval that encompasses `staff_positions`. .. container:: example :: >>> numbered_interval = pitchtools.NumberedInterval(1) >>> numbered_interval.to_named_interval(2) NamedInterval('+m2') Returns named interval. 
''' from abjad.tools import pitchtools direction_number = mathtools.sign(self.number) if staff_positions == 1: quality_string = None if self.number % 12 == 11: quality_string = 'augmented' elif self.number % 12 == 0: quality_string = 'perfect' elif self.number % 12 == 1: quality_string = 'augmented' if not direction_number == 0: staff_positions *= direction_number if quality_string is None: # TODO: handle double-augmented named intervals return pitchtools.NamedInterval(self.number) named_interval = pitchtools.NamedInterval( quality_string, staff_positions) return named_interval named_interval_class_number = staff_positions % 7 numbered_interval_class_number = abs(self.number) % 12 if named_interval_class_number == 0: if numbered_interval_class_number == 9: quality_string = 'diminished' elif numbered_interval_class_number == 10: quality_string = 'minor' elif numbered_interval_class_number == 11: quality_string = 'major' elif numbered_interval_class_number == 0: quality_string = 'augmented' elif named_interval_class_number == 1: if numbered_interval_class_number == 11: quality_string = 'diminished' elif numbered_interval_class_number == 0: quality_string = 'perfect' elif numbered_interval_class_number == 1: quality_string = 'augmented' elif named_interval_class_number == 2: if numbered_interval_class_number == 0: quality_string = 'diminished' elif numbered_interval_class_number == 1: quality_string = 'minor' elif numbered_interval_class_number == 2: quality_string = 'major' elif numbered_interval_class_number == 3: quality_string = 'augmented' elif named_interval_class_number == 3: if numbered_interval_class_number == 2: quality_string = 'diminished' elif numbered_interval_class_number == 3: quality_string = 'minor' elif numbered_interval_class_number == 4: quality_string = 'major' elif numbered_interval_class_number == 5: quality_string = 'augmented' elif named_interval_class_number == 4: if numbered_interval_class_number == 4: quality_string = 'diminished' elif 
numbered_interval_class_number == 5: quality_string = 'perfect' elif numbered_interval_class_number == 6: quality_string = 'augmented' elif named_interval_class_number == 5: if numbered_interval_class_number == 6: quality_string = 'diminished' elif numbered_interval_class_number == 7: quality_string = 'perfect' elif numbered_interval_class_number == 8: quality_string = 'augmented' elif named_interval_class_number == 6: if numbered_interval_class_number == 7: quality_string = 'diminished' elif numbered_interval_class_number == 8: quality_string = 'minor' elif numbered_interval_class_number == 9: quality_string = 'major' elif numbered_interval_class_number == 10: quality_string = 'augmented' elif named_interval_class_number == 7: if numbered_interval_class_number == 9: quality_string = 'diminished' elif numbered_interval_class_number == 10: quality_string = 'minor' elif numbered_interval_class_number == 11: quality_string = 'major' elif numbered_interval_class_number == 0: quality_string = 'augmented' elif named_interval_class_number == 8: if numbered_interval_class_number == 11: quality_string = 'diminished' elif numbered_interval_class_number == 0: quality_string = 'perfect' elif numbered_interval_class_number == 1: quality_string = 'augmented' if not direction_number == 0: staff_positions *= direction_number named_interval = pitchtools.NamedInterval( quality_string, staff_positions, ) return named_interval
gpl-3.0
iamkingmaker/zipline
tests/risk/answer_key.py
39
11989
# # Copyright 2014 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import hashlib import os import numpy as np import pandas as pd import pytz import xlrd import requests from six.moves import map def col_letter_to_index(col_letter): # Only supports single letter, # but answer key doesn't need multi-letter, yet. index = 0 for i, char in enumerate(reversed(col_letter)): index += ((ord(char) - 65) + 1) * pow(26, i) return index DIR = os.path.dirname(os.path.realpath(__file__)) ANSWER_KEY_CHECKSUMS_PATH = os.path.join(DIR, 'risk-answer-key-checksums') ANSWER_KEY_CHECKSUMS = open(ANSWER_KEY_CHECKSUMS_PATH, 'r').read().splitlines() ANSWER_KEY_FILENAME = 'risk-answer-key.xlsx' ANSWER_KEY_PATH = os.path.join(DIR, ANSWER_KEY_FILENAME) ANSWER_KEY_BUCKET_NAME = 'zipline-test_data' ANSWER_KEY_DL_TEMPLATE = """ https://s3.amazonaws.com/zipline-test-data/risk/{md5}/risk-answer-key.xlsx """.strip() LATEST_ANSWER_KEY_URL = ANSWER_KEY_DL_TEMPLATE.format( md5=ANSWER_KEY_CHECKSUMS[-1]) def answer_key_signature(): with open(ANSWER_KEY_PATH, 'rb') as f: md5 = hashlib.md5() buf = f.read(1024) md5.update(buf) while buf != b"": buf = f.read(1024) md5.update(buf) return md5.hexdigest() def ensure_latest_answer_key(): """ Get the latest answer key from a publically available location. Logic for determining what and when to download is as such: - If there is no local spreadsheet file, then get the lastest answer key, as defined by the last row in the checksum file. 
- If there is a local spreadsheet file: -- If the spreadsheet's checksum is in the checksum file: --- If the spreadsheet's checksum does not match the latest, then grab the the latest checksum and replace the local checksum file. --- If the spreadsheet's checksum matches the latest, then skip download, and use the local spreadsheet as a cached copy. -- If the spreadsheet's checksum is not in the checksum file, then leave the local file alone, assuming that the local xls's md5 is not in the list due to local modifications during development. It is possible that md5's could collide, if that is ever case, we should then find an alternative naming scheme. The spreadsheet answer sheet is not kept in SCM, as every edit would increase the repo size by the file size, since it is treated as a binary. """ answer_key_dl_checksum = None local_answer_key_exists = os.path.exists(ANSWER_KEY_PATH) if local_answer_key_exists: local_hash = answer_key_signature() if local_hash in ANSWER_KEY_CHECKSUMS: # Assume previously downloaded version. # Check for latest. if local_hash != ANSWER_KEY_CHECKSUMS[-1]: # More recent checksum, download answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1] else: # Assume local copy that is being developed on answer_key_dl_checksum = None else: answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1] if answer_key_dl_checksum: res = requests.get( ANSWER_KEY_DL_TEMPLATE.format(md5=answer_key_dl_checksum)) with open(ANSWER_KEY_PATH, 'wb') as f: f.write(res.content) # Get latest answer key on load. ensure_latest_answer_key() class DataIndex(object): """ Coordinates for the spreadsheet, using the values as seen in the notebook. The python-excel libraries use 0 index, while the spreadsheet in a GUI uses a 1 index. 
""" def __init__(self, sheet_name, col, row_start, row_end, value_type='float'): self.sheet_name = sheet_name self.col = col self.row_start = row_start self.row_end = row_end self.value_type = value_type @property def col_index(self): return col_letter_to_index(self.col) - 1 @property def row_start_index(self): return self.row_start - 1 @property def row_end_index(self): return self.row_end - 1 def __str__(self): return "'{sheet_name}'!{col}{row_start}:{col}{row_end}".format( sheet_name=self.sheet_name, col=self.col, row_start=self.row_start, row_end=self.row_end ) class AnswerKey(object): INDEXES = { 'RETURNS': DataIndex('Sim Period', 'D', 4, 255), 'BENCHMARK': { 'Dates': DataIndex('s_p', 'A', 4, 254, value_type='date'), 'Returns': DataIndex('s_p', 'H', 4, 254) }, # Below matches the inconsistent capitalization in spreadsheet 'BENCHMARK_PERIOD_RETURNS': { 'Monthly': DataIndex('s_p', 'R', 8, 19), '3-Month': DataIndex('s_p', 'S', 10, 19), '6-month': DataIndex('s_p', 'T', 13, 19), 'year': DataIndex('s_p', 'U', 19, 19), }, 'BENCHMARK_PERIOD_VOLATILITY': { 'Monthly': DataIndex('s_p', 'V', 8, 19), '3-Month': DataIndex('s_p', 'W', 10, 19), '6-month': DataIndex('s_p', 'X', 13, 19), 'year': DataIndex('s_p', 'Y', 19, 19), }, 'ALGORITHM_PERIOD_RETURNS': { 'Monthly': DataIndex('Sim Period', 'Z', 23, 34), '3-Month': DataIndex('Sim Period', 'AA', 25, 34), '6-month': DataIndex('Sim Period', 'AB', 28, 34), 'year': DataIndex('Sim Period', 'AC', 34, 34), }, 'ALGORITHM_PERIOD_VOLATILITY': { 'Monthly': DataIndex('Sim Period', 'AH', 23, 34), '3-Month': DataIndex('Sim Period', 'AI', 25, 34), '6-month': DataIndex('Sim Period', 'AJ', 28, 34), 'year': DataIndex('Sim Period', 'AK', 34, 34), }, 'ALGORITHM_PERIOD_SHARPE': { 'Monthly': DataIndex('Sim Period', 'AL', 23, 34), '3-Month': DataIndex('Sim Period', 'AM', 25, 34), '6-month': DataIndex('Sim Period', 'AN', 28, 34), 'year': DataIndex('Sim Period', 'AO', 34, 34), }, 'ALGORITHM_PERIOD_BETA': { 'Monthly': DataIndex('Sim Period', 'AP', 23, 
34), '3-Month': DataIndex('Sim Period', 'AQ', 25, 34), '6-month': DataIndex('Sim Period', 'AR', 28, 34), 'year': DataIndex('Sim Period', 'AS', 34, 34), }, 'ALGORITHM_PERIOD_ALPHA': { 'Monthly': DataIndex('Sim Period', 'AT', 23, 34), '3-Month': DataIndex('Sim Period', 'AU', 25, 34), '6-month': DataIndex('Sim Period', 'AV', 28, 34), 'year': DataIndex('Sim Period', 'AW', 34, 34), }, 'ALGORITHM_PERIOD_BENCHMARK_VARIANCE': { 'Monthly': DataIndex('Sim Period', 'BJ', 23, 34), '3-Month': DataIndex('Sim Period', 'BK', 25, 34), '6-month': DataIndex('Sim Period', 'BL', 28, 34), 'year': DataIndex('Sim Period', 'BM', 34, 34), }, 'ALGORITHM_PERIOD_COVARIANCE': { 'Monthly': DataIndex('Sim Period', 'BF', 23, 34), '3-Month': DataIndex('Sim Period', 'BG', 25, 34), '6-month': DataIndex('Sim Period', 'BH', 28, 34), 'year': DataIndex('Sim Period', 'BI', 34, 34), }, 'ALGORITHM_PERIOD_DOWNSIDE_RISK': { 'Monthly': DataIndex('Sim Period', 'BN', 23, 34), '3-Month': DataIndex('Sim Period', 'BO', 25, 34), '6-month': DataIndex('Sim Period', 'BP', 28, 34), 'year': DataIndex('Sim Period', 'BQ', 34, 34), }, 'ALGORITHM_PERIOD_SORTINO': { 'Monthly': DataIndex('Sim Period', 'BR', 23, 34), '3-Month': DataIndex('Sim Period', 'BS', 25, 34), '6-month': DataIndex('Sim Period', 'BT', 28, 34), 'year': DataIndex('Sim Period', 'BU', 34, 34), }, 'ALGORITHM_RETURN_VALUES': DataIndex( 'Sim Cumulative', 'D', 4, 254), 'ALGORITHM_CUMULATIVE_VOLATILITY': DataIndex( 'Sim Cumulative', 'P', 4, 254), 'ALGORITHM_CUMULATIVE_SHARPE': DataIndex( 'Sim Cumulative', 'R', 4, 254), 'CUMULATIVE_DOWNSIDE_RISK': DataIndex( 'Sim Cumulative', 'U', 4, 254), 'CUMULATIVE_SORTINO': DataIndex( 'Sim Cumulative', 'V', 4, 254), 'CUMULATIVE_INFORMATION': DataIndex( 'Sim Cumulative', 'AA', 4, 254), 'CUMULATIVE_BETA': DataIndex( 'Sim Cumulative', 'AD', 4, 254), 'CUMULATIVE_ALPHA': DataIndex( 'Sim Cumulative', 'AE', 4, 254), 'CUMULATIVE_MAX_DRAWDOWN': DataIndex( 'Sim Cumulative', 'AH', 4, 254), } def __init__(self): self.workbook = 
xlrd.open_workbook(ANSWER_KEY_PATH) self.sheets = {} self.sheets['Sim Period'] = self.workbook.sheet_by_name('Sim Period') self.sheets['Sim Cumulative'] = self.workbook.sheet_by_name( 'Sim Cumulative') self.sheets['s_p'] = self.workbook.sheet_by_name('s_p') for name, index in self.INDEXES.items(): if isinstance(index, dict): subvalues = {} for subkey, subindex in index.items(): subvalues[subkey] = self.get_values(subindex) setattr(self, name, subvalues) else: setattr(self, name, self.get_values(index)) def parse_date_value(self, value): return xlrd.xldate_as_tuple(value, 0) def parse_float_value(self, value): return value if value != '' else np.nan def get_raw_values(self, data_index): return self.sheets[data_index.sheet_name].col_values( data_index.col_index, data_index.row_start_index, data_index.row_end_index + 1) @property def value_type_to_value_func(self): return { 'float': self.parse_float_value, 'date': self.parse_date_value, } def get_values(self, data_index): value_parser = self.value_type_to_value_func[data_index.value_type] return [value for value in map(value_parser, self.get_raw_values(data_index))] ANSWER_KEY = AnswerKey() BENCHMARK_DATES = ANSWER_KEY.BENCHMARK['Dates'] BENCHMARK_RETURNS = ANSWER_KEY.BENCHMARK['Returns'] DATES = [datetime.datetime(*x, tzinfo=pytz.UTC) for x in BENCHMARK_DATES] BENCHMARK = pd.Series(dict(zip(DATES, BENCHMARK_RETURNS))) ALGORITHM_RETURNS = pd.Series( dict(zip(DATES, ANSWER_KEY.ALGORITHM_RETURN_VALUES))) RETURNS_DATA = pd.DataFrame({'Benchmark Returns': BENCHMARK, 'Algorithm Returns': ALGORITHM_RETURNS}) RISK_CUMULATIVE = pd.DataFrame({ 'volatility': pd.Series(dict(zip( DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_VOLATILITY))), 'sharpe': pd.Series(dict(zip( DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_SHARPE))), 'downside_risk': pd.Series(dict(zip( DATES, ANSWER_KEY.CUMULATIVE_DOWNSIDE_RISK))), 'sortino': pd.Series(dict(zip( DATES, ANSWER_KEY.CUMULATIVE_SORTINO))), 'information': pd.Series(dict(zip( DATES, 
ANSWER_KEY.CUMULATIVE_INFORMATION))), 'alpha': pd.Series(dict(zip( DATES, ANSWER_KEY.CUMULATIVE_ALPHA))), 'beta': pd.Series(dict(zip( DATES, ANSWER_KEY.CUMULATIVE_BETA))), 'max_drawdown': pd.Series(dict(zip( DATES, ANSWER_KEY.CUMULATIVE_MAX_DRAWDOWN))), })
apache-2.0
carljm/django
tests/extra_regress/tests.py
8
16055
from __future__ import unicode_literals import datetime from collections import OrderedDict from django.contrib.auth.models import User from django.test import TestCase from .models import Order, RevisionableModel, TestObject class ExtraRegressTests(TestCase): def setUp(self): self.u = User.objects.create_user( username="fred", password="secret", email="fred@example.com" ) def test_regression_7314_7372(self): """ Regression tests for #7314 and #7372 """ rm = RevisionableModel.objects.create( title='First Revision', when=datetime.datetime(2008, 9, 28, 10, 30, 0) ) self.assertEqual(rm.pk, rm.base.pk) rm2 = rm.new_revision() rm2.title = "Second Revision" rm.when = datetime.datetime(2008, 9, 28, 14, 25, 0) rm2.save() self.assertEqual(rm2.title, 'Second Revision') self.assertEqual(rm2.base.title, 'First Revision') self.assertNotEqual(rm2.pk, rm.pk) self.assertEqual(rm2.base.pk, rm.pk) # Queryset to match most recent revision: qs = RevisionableModel.objects.extra( where=["%(table)s.id IN (SELECT MAX(rev.id) FROM %(table)s rev GROUP BY rev.base_id)" % { 'table': RevisionableModel._meta.db_table, }] ) self.assertQuerysetEqual( qs, [('Second Revision', 'First Revision')], transform=lambda r: (r.title, r.base.title) ) # Queryset to search for string in title: qs2 = RevisionableModel.objects.filter(title__contains="Revision") self.assertQuerysetEqual( qs2, [ ('First Revision', 'First Revision'), ('Second Revision', 'First Revision'), ], transform=lambda r: (r.title, r.base.title), ordered=False ) # Following queryset should return the most recent revision: self.assertQuerysetEqual( qs & qs2, [('Second Revision', 'First Revision')], transform=lambda r: (r.title, r.base.title), ordered=False ) def test_extra_stay_tied(self): # Extra select parameters should stay tied to their corresponding # select portions. Applies when portions are updated or otherwise # moved around. 
qs = User.objects.extra( select=OrderedDict((("alpha", "%s"), ("beta", "2"), ("gamma", "%s"))), select_params=(1, 3) ) qs = qs.extra(select={"beta": 4}) qs = qs.extra(select={"alpha": "%s"}, select_params=[5]) self.assertEqual( list(qs.filter(id=self.u.id).values('alpha', 'beta', 'gamma')), [{'alpha': 5, 'beta': 4, 'gamma': 3}] ) def test_regression_7957(self): """ Regression test for #7957: Combining extra() calls should leave the corresponding parameters associated with the right extra() bit. I.e. internal dictionary must remain sorted. """ self.assertEqual( (User.objects .extra(select={"alpha": "%s"}, select_params=(1,)) .extra(select={"beta": "%s"}, select_params=(2,))[0].alpha), 1 ) self.assertEqual( (User.objects .extra(select={"beta": "%s"}, select_params=(1,)) .extra(select={"alpha": "%s"}, select_params=(2,))[0].alpha), 2 ) def test_regression_7961(self): """ Regression test for #7961: When not using a portion of an extra(...) in a query, remove any corresponding parameters from the query as well. """ self.assertEqual( list(User.objects.extra(select={"alpha": "%s"}, select_params=(-6,)) .filter(id=self.u.id).values_list('id', flat=True)), [self.u.id] ) def test_regression_8063(self): """ Regression test for #8063: limiting a query shouldn't discard any extra() bits. """ qs = User.objects.all().extra(where=['id=%s'], params=[self.u.id]) self.assertQuerysetEqual(qs, ['<User: fred>']) self.assertQuerysetEqual(qs[:1], ['<User: fred>']) def test_regression_8039(self): """ Regression test for #8039: Ordering sometimes removed relevant tables from extra(). This test is the critical case: ordering uses a table, but then removes the reference because of an optimization. The table should still be present because of the extra() call. 
""" self.assertQuerysetEqual( (Order.objects .extra(where=["username=%s"], params=["fred"], tables=["auth_user"]) .order_by('created_by')), [] ) def test_regression_8819(self): """ Regression test for #8819: Fields in the extra(select=...) list should be available to extra(order_by=...). """ self.assertQuerysetEqual( User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}).distinct(), ['<User: fred>'] ) self.assertQuerysetEqual( User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']), ['<User: fred>'] ) self.assertQuerysetEqual( User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']).distinct(), ['<User: fred>'] ) def test_dates_query(self): """ When calling the dates() method on a queryset with extra selection columns, we can (and should) ignore those columns. They don't change the result and cause incorrect SQL to be produced otherwise. """ RevisionableModel.objects.create( title='First Revision', when=datetime.datetime(2008, 9, 28, 10, 30, 0) ) self.assertSequenceEqual( RevisionableModel.objects.extra(select={"the_answer": 'id'}).datetimes('when', 'month'), [datetime.datetime(2008, 9, 1, 0, 0)], ) def test_values_with_extra(self): """ Regression test for #10256... If there is a values() clause, Extra columns are only returned if they are explicitly mentioned. 
""" obj = TestObject(first='first', second='second', third='third') obj.save() self.assertEqual( list( TestObject.objects .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) .values() ), [{ 'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first' }] ) # Extra clauses after an empty values clause are still included self.assertEqual( list( TestObject.objects .values() .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) ), [{ 'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first' }] ) # Extra columns are ignored if not mentioned in the values() clause self.assertEqual( list( TestObject.objects .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) .values('first', 'second') ), [{'second': 'second', 'first': 'first'}] ) # Extra columns after a non-empty values() clause are ignored self.assertEqual( list( TestObject.objects .values('first', 'second') .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) ), [{'second': 'second', 'first': 'first'}] ) # Extra columns can be partially returned self.assertEqual( list( TestObject.objects .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) .values('first', 'second', 'foo') ), [{'second': 'second', 'foo': 'first', 'first': 'first'}] ) # Also works if only extra columns are included self.assertEqual( list( TestObject.objects .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) .values('foo', 'whiz') ), [{'foo': 'first', 'whiz': 'third'}] ) # Values list works the same way # All columns are returned for an empty values_list() self.assertEqual( list( TestObject.objects .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) .values_list() ), [('first', 'second', 'third', obj.pk, 'first', 
'second', 'third')] ) # Extra columns after an empty values_list() are still included self.assertEqual( list( TestObject.objects .values_list() .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) ), [('first', 'second', 'third', obj.pk, 'first', 'second', 'third')] ) # Extra columns ignored completely if not mentioned in values_list() self.assertEqual( list( TestObject.objects .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) .values_list('first', 'second') ), [('first', 'second')] ) # Extra columns after a non-empty values_list() clause are ignored completely self.assertEqual( list( TestObject.objects .values_list('first', 'second') .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) ), [('first', 'second')] ) self.assertEqual( list( TestObject.objects .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) .values_list('second', flat=True) ), ['second'] ) # Only the extra columns specified in the values_list() are returned self.assertEqual( list( TestObject.objects .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) .values_list('first', 'second', 'whiz') ), [('first', 'second', 'third')] ) # ...also works if only extra columns are included self.assertEqual( list( TestObject.objects .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) .values_list('foo', 'whiz') ), [('first', 'third')] ) self.assertEqual( list( TestObject.objects .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) .values_list('whiz', flat=True) ), ['third'] ) # ... 
and values are returned in the order they are specified self.assertEqual( list( TestObject.objects .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) .values_list('whiz', 'foo') ), [('third', 'first')] ) self.assertEqual( list( TestObject.objects .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) .values_list('first', 'id') ), [('first', obj.pk)] ) self.assertEqual( list( TestObject.objects .extra(select=OrderedDict((('foo', 'first'), ('bar', 'second'), ('whiz', 'third')))) .values_list('whiz', 'first', 'bar', 'id') ), [('third', 'first', 'second', obj.pk)] ) def test_regression_10847(self): """ Regression for #10847: the list of extra columns can always be accurately evaluated. Using an inner query ensures that as_sql() is producing correct output without requiring full evaluation and execution of the inner query. """ obj = TestObject(first='first', second='second', third='third') obj.save() self.assertEqual( list(TestObject.objects.extra(select={'extra': 1}).values('pk')), [{'pk': obj.pk}] ) self.assertQuerysetEqual( TestObject.objects.filter( pk__in=TestObject.objects.extra(select={'extra': 1}).values('pk') ), ['<TestObject: TestObject: first,second,third>'] ) self.assertEqual( list(TestObject.objects.values('pk').extra(select={'extra': 1})), [{'pk': obj.pk}] ) self.assertQuerysetEqual( TestObject.objects.filter( pk__in=TestObject.objects.values('pk').extra(select={'extra': 1}) ), ['<TestObject: TestObject: first,second,third>'] ) self.assertQuerysetEqual( TestObject.objects.filter(pk=obj.pk) | TestObject.objects.extra(where=["id > %s"], params=[obj.pk]), ['<TestObject: TestObject: first,second,third>'] ) def test_regression_17877(self): """ Ensure that extra WHERE clauses get correctly ANDed, even when they contain OR operations. """ # Test Case 1: should appear in queryset. t = TestObject(first='a', second='a', third='a') t.save() # Test Case 2: should appear in queryset. 
t = TestObject(first='b', second='a', third='a') t.save() # Test Case 3: should not appear in queryset, bug case. t = TestObject(first='a', second='a', third='b') t.save() # Test Case 4: should not appear in queryset. t = TestObject(first='b', second='a', third='b') t.save() # Test Case 5: should not appear in queryset. t = TestObject(first='b', second='b', third='a') t.save() # Test Case 6: should not appear in queryset, bug case. t = TestObject(first='a', second='b', third='b') t.save() self.assertQuerysetEqual( TestObject.objects.extra( where=["first = 'a' OR second = 'a'", "third = 'a'"], ), ['<TestObject: TestObject: a,a,a>', '<TestObject: TestObject: b,a,a>'], ordered=False ) def test_extra_values_distinct_ordering(self): t1 = TestObject.objects.create(first='a', second='a', third='a') t2 = TestObject.objects.create(first='a', second='b', third='b') qs = TestObject.objects.extra( select={'second_extra': 'second'} ).values_list('id', flat=True).distinct() self.assertSequenceEqual(qs.order_by('second_extra'), [t1.pk, t2.pk]) self.assertSequenceEqual(qs.order_by('-second_extra'), [t2.pk, t1.pk]) # Note: the extra ordering must appear in select clause, so we get two # non-distinct results here (this is on purpose, see #7070). self.assertSequenceEqual(qs.order_by('-second_extra').values_list('first', flat=True), ['a', 'a'])
bsd-3-clause
facelessuser/Pywin32
lib/x32/win32comext/axdebug/util.py
11
3672
# Utility function for wrapping objects. Centralising allows me to turn
# debugging on and off for the entire package in a single spot.

import sys
import win32com.server.util
# NOTE: this import shadows the builtin Exception for the rest of the module;
# "except Exception" below therefore catches the COM server exception class.
from win32com.server.exception import Exception
import winerror
import win32api
import os

# Package-wide debug switch: set the DEBUG_AXDEBUG environment variable
# (to any value) to enable tracing and the debug dispatcher.
try:
    os.environ["DEBUG_AXDEBUG"]
    debugging = 1
except KeyError:
    debugging = 0

def trace(*args):
    """Print *args on one line, prefixed with the current thread id.

    No-op unless module-level debugging is enabled.
    """
    if not debugging:
        return
    print(str(win32api.GetCurrentThreadId()) + ":", end=' ')
    for arg in args:
        print(arg, end=' ')
    print()

# The AXDebugging implementation assumes that the returned COM pointers are in
# some cases identical. Eg, from a C++ perspective:
# p->GetSomeInterface( &p1 );
# p->GetSomeInterface( &p2 );
# p1==p2
# By default, this is _not_ true for Python.
# (Now this is only true for Document objects, and Python
# now does ensure this.
all_wrapped = {}

def _wrap_nodebug(object, iid):
    # Plain wrap -- no trace dispatcher.
    return win32com.server.util.wrap(object, iid)

def _wrap_debug(object, iid):
    # Wrap with the win32trace dispatcher so every call is logged.
    import win32com.server.policy
    dispatcher = win32com.server.policy.DispatcherWin32trace
    return win32com.server.util.wrap(object, iid, useDispatcher = dispatcher)

# Select the wrap implementation once, at import time.
if debugging:
    _wrap = _wrap_debug
else:
    _wrap = _wrap_nodebug

def _wrap_remove(object, iid = None):
    # Old - no longer used or necessary!
    return

def _dump_wrapped():
    """Debug helper: print every key in all_wrapped with its object and refcount."""
    from win32com.server.util import unwrap
    print("Wrapped items:")
    for key, items in all_wrapped.items():
        print(key, end=' ')
        try:
            ob = unwrap(key)
            print(ob, sys.getrefcount(ob))
        except:
            # Bare except is deliberate: this is a best-effort debug dump.
            print("<error>")

def RaiseNotImpl(who = None):
    """Print a stack dump (naming *who* if given), then raise E_NOTIMPL for COM."""
    if who is not None:
        print("********* Function %s Raising E_NOTIMPL  ************" % (who))
    # Print a sort-of "traceback", dumping all the frames leading to here.
    try:
        1/0
    except:
        frame = sys.exc_info()[2].tb_frame
    while frame:
        print("File: %s, Line: %d" % (frame.f_code.co_filename, frame.f_lineno))
        frame = frame.f_back
    # and raise the exception for COM
    raise Exception(scode=winerror.E_NOTIMPL)

import win32com.server.policy

class Dispatcher(win32com.server.policy.DispatcherWin32trace):
    """Verbose dispatcher: announces every Invoke and reports its outcome."""
    def __init__(self, policyClass, object):
        # NOTE(review): initialises via DispatcherTrace rather than the direct
        # base DispatcherWin32trace -- looks intentional, but worth confirming.
        win32com.server.policy.DispatcherTrace.__init__(self, policyClass, object)
        import win32traceutil # Sets up everything.
        # print "Object with win32trace dispatcher created (object=%s)" % `object`

    def _QueryInterface_(self, iid):
        rc = win32com.server.policy.DispatcherBase._QueryInterface_(self, iid)
        # if not rc:
        #     self._trace_("in _QueryInterface_ with unsupported IID %s (%s)\n" % (IIDToInterfaceName(iid),iid))
        return rc

    def _Invoke_(self, dispid, lcid, wFlags, args):
        print("In Invoke with", dispid, lcid, wFlags, args, "with object",self.policy._obj_)
        try:
            rc = win32com.server.policy.DispatcherBase._Invoke_(self, dispid, lcid, wFlags, args)
            # print "Invoke of", dispid, "returning", rc
            return rc
        except Exception:
            # COM server exception (see shadowing import at top of module):
            # report the scode/description, but do not re-raise.
            t, v, tb = sys.exc_info()
            tb = None # A cycle
            scode = v.scode
            try:
                desc = " (" + str(v.description) + ")"
            except AttributeError:
                desc = ""
            print("*** Invoke of %s raised COM exception 0x%x%s" % (dispid, scode, desc))
        except:
            # Anything else: dump the traceback and propagate.
            print("*** Invoke of %s failed:" % dispid)
            typ, val, tb = sys.exc_info()
            import traceback
            traceback.print_exception(typ, val, tb)
            raise
bsd-3-clause
ioram7/keystone-federado-pgid2013
build/paste/build/lib.linux-x86_64-2.7/paste/auth/digest.py
12
8174
# (c) 2005 Clark C. Evans # This module is part of the Python Paste Project and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php # This code was written with funding by http://prometheusresearch.com """ Digest HTTP/1.1 Authentication This module implements ``Digest`` authentication as described by RFC 2617 [1]_ . Basically, you just put this module before your application, and it takes care of requesting and handling authentication requests. This module has been tested with several common browsers "out-in-the-wild". >>> from paste.wsgilib import dump_environ >>> from paste.httpserver import serve >>> # from paste.auth.digest import digest_password, AuthDigestHandler >>> realm = 'Test Realm' >>> def authfunc(environ, realm, username): ... return digest_password(realm, username, username) >>> serve(AuthDigestHandler(dump_environ, realm, authfunc)) serving on... This code has not been audited by a security expert, please use with caution (or better yet, report security holes). At this time, this implementation does not provide for further challenges, nor does it support Authentication-Info header. It also uses md5, and an option to use sha would be a good thing. .. 
[1] http://www.faqs.org/rfcs/rfc2617.html """ from paste.httpexceptions import HTTPUnauthorized from paste.httpheaders import * try: from hashlib import md5 except ImportError: from md5 import md5 import time, random from urllib import quote as url_quote def digest_password(realm, username, password): """ construct the appropriate hashcode needed for HTTP digest """ return md5("%s:%s:%s" % (username, realm, password)).hexdigest() class AuthDigestAuthenticator(object): """ implementation of RFC 2617 - HTTP Digest Authentication """ def __init__(self, realm, authfunc): self.nonce = {} # list to prevent replay attacks self.authfunc = authfunc self.realm = realm def build_authentication(self, stale = ''): """ builds the authentication error """ nonce = md5( "%s:%s" % (time.time(), random.random())).hexdigest() opaque = md5( "%s:%s" % (time.time(), random.random())).hexdigest() self.nonce[nonce] = None parts = {'realm': self.realm, 'qop': 'auth', 'nonce': nonce, 'opaque': opaque } if stale: parts['stale'] = 'true' head = ", ".join(['%s="%s"' % (k, v) for (k, v) in parts.items()]) head = [("WWW-Authenticate", 'Digest %s' % head)] return HTTPUnauthorized(headers=head) def compute(self, ha1, username, response, method, path, nonce, nc, cnonce, qop): """ computes the authentication, raises error if unsuccessful """ if not ha1: return self.build_authentication() ha2 = md5('%s:%s' % (method, path)).hexdigest() if qop: chk = "%s:%s:%s:%s:%s:%s" % (ha1, nonce, nc, cnonce, qop, ha2) else: chk = "%s:%s:%s" % (ha1, nonce, ha2) if response != md5(chk).hexdigest(): if nonce in self.nonce: del self.nonce[nonce] return self.build_authentication() pnc = self.nonce.get(nonce,'00000000') if nc <= pnc: if nonce in self.nonce: del self.nonce[nonce] return self.build_authentication(stale = True) self.nonce[nonce] = nc return username def authenticate(self, environ): """ This function takes a WSGI environment and authenticates the request returning authenticated user or error. 
""" method = REQUEST_METHOD(environ) fullpath = url_quote(SCRIPT_NAME(environ)) + url_quote(PATH_INFO(environ)) authorization = AUTHORIZATION(environ) if not authorization: return self.build_authentication() (authmeth, auth) = authorization.split(" ", 1) if 'digest' != authmeth.lower(): return self.build_authentication() amap = {} for itm in auth.split(", "): (k,v) = [s.strip() for s in itm.split("=", 1)] amap[k] = v.replace('"', '') try: username = amap['username'] authpath = amap['uri'] nonce = amap['nonce'] realm = amap['realm'] response = amap['response'] assert authpath.split("?", 1)[0] in fullpath assert realm == self.realm qop = amap.get('qop', '') cnonce = amap.get('cnonce', '') nc = amap.get('nc', '00000000') if qop: assert 'auth' == qop assert nonce and nc except: return self.build_authentication() ha1 = self.authfunc(environ, realm, username) return self.compute(ha1, username, response, method, authpath, nonce, nc, cnonce, qop) __call__ = authenticate class AuthDigestHandler(object): """ middleware for HTTP Digest authentication (RFC 2617) This component follows the procedure below: 0. If the REMOTE_USER environment variable is already populated; then this middleware is a no-op, and the request is passed along to the application. 1. If the HTTP_AUTHORIZATION header was not provided or specifies an algorithem other than ``digest``, then a HTTPUnauthorized response is generated with the challenge. 2. If the response is malformed or or if the user's credientials do not pass muster, another HTTPUnauthorized is raised. 3. If all goes well, and the user's credintials pass; then REMOTE_USER environment variable is filled in and the AUTH_TYPE is listed as 'digest'. Parameters: ``application`` The application object is called only upon successful authentication, and can assume ``environ['REMOTE_USER']`` is set. If the ``REMOTE_USER`` is already set, this middleware is simply pass-through. 
``realm`` This is a identifier for the authority that is requesting authorization. It is shown to the user and should be unique within the domain it is being used. ``authfunc`` This is a callback function which performs the actual authentication; the signature of this callback is: authfunc(environ, realm, username) -> hashcode This module provides a 'digest_password' helper function which can help construct the hashcode; it is recommended that the hashcode is stored in a database, not the user's actual password (since you only need the hashcode). """ def __init__(self, application, realm, authfunc): self.authenticate = AuthDigestAuthenticator(realm, authfunc) self.application = application def __call__(self, environ, start_response): username = REMOTE_USER(environ) if not username: result = self.authenticate(environ) if isinstance(result, str): AUTH_TYPE.update(environ,'digest') REMOTE_USER.update(environ, result) else: return result.wsgi_application(environ, start_response) return self.application(environ, start_response) middleware = AuthDigestHandler __all__ = ['digest_password', 'AuthDigestHandler' ] def make_digest(app, global_conf, realm, authfunc, **kw): """ Grant access via digest authentication Config looks like this:: [filter:grant] use = egg:Paste#auth_digest realm=myrealm authfunc=somepackage.somemodule:somefunction """ from paste.util.import_string import eval_import import types authfunc = eval_import(authfunc) assert isinstance(authfunc, types.FunctionType), "authfunc must resolve to a function" return AuthDigestHandler(app, realm, authfunc) if "__main__" == __name__: import doctest doctest.testmod(optionflags=doctest.ELLIPSIS)
apache-2.0
pdehaye/theming-edx-platform
i18n/tests/test_validate.py
16
1119
import os, sys, logging
from unittest import TestCase
from nose.plugins.skip import SkipTest
from config import LOCALE_DIR
from execute import call


def test_po_files(root=LOCALE_DIR):
    """
    Generator test: yield one validation check per .po file under `root`.

    Each yielded tuple is ``(test_callable, po_file_path, logger)``, the
    nose generator-test protocol, so every .po file shows up as its own
    test case.
    """
    log = logging.getLogger(__name__)
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            _base, ext = os.path.splitext(name)
            if ext.lower() == '.po':
                yield validate_po_file, os.path.join(dirpath, name), log


def validate_po_file(filename, log):
    """
    Call GNU ``msgfmt -c`` on one .po file to validate its format.

    Anything msgfmt prints to stderr is logged as a warning to `log`;
    the test itself does not fail on msgfmt complaints.
    """
    # Skip this test for now because it's very noisy
    raise SkipTest()

    # Use relative paths to make output less noisy.
    rfile = os.path.relpath(filename, LOCALE_DIR)
    out, err = call(['msgfmt', '-c', rfile], working_directory=LOCALE_DIR)
    if err:  # truthiness instead of comparing against the empty string
        # Logger.warn is a deprecated alias; warning() is the real API.
        log.warning('\n' + err)
agpl-3.0
meisterkleister/erpnext
erpnext/patches/v5_1/fix_against_account.py
107
1271
from __future__ import unicode_literals
import frappe
from erpnext.accounts.doctype.gl_entry.gl_entry import update_against_account


def execute():
    """Patch: repair the 'against' field on GL Entries (and on Journal
    Entries themselves) for submitted vouchers created since 2015-05-01."""
    from_date = "2015-05-01"

    # Journal Entries: recompute against-accounts both in the GL ledger
    # and on the voucher document itself.
    for jv in frappe.get_all("Journal Entry",
            filters={"creation": (">", from_date), "docstatus": "1"}):
        # update in gl_entry
        update_against_account("Journal Entry", jv.name)
        # update in jv
        jv_doc = frappe.get_doc("Journal Entry", jv.name)
        jv_doc.set_against_account()
        jv_doc.db_update()

    # Sales Invoices: credit rows that lack a party get the customer name.
    for inv in frappe.get_all("Sales Invoice",
            filters={"creation": (">", from_date), "docstatus": "1"},
            fields=["name", "customer"]):
        frappe.db.sql("""update `tabGL Entry` set against=%s where voucher_type='Sales Invoice' and voucher_no=%s and credit > 0 and ifnull(party, '')=''""", (inv.customer, inv.name))

    # Purchase Invoices: debit rows that lack a party get the supplier name.
    for inv in frappe.get_all("Purchase Invoice",
            filters={"creation": (">", from_date), "docstatus": "1"},
            fields=["name", "supplier"]):
        frappe.db.sql("""update `tabGL Entry` set against=%s where voucher_type='Purchase Invoice' and voucher_no=%s and debit > 0 and ifnull(party, '')=''""", (inv.supplier, inv.name))
agpl-3.0
lanselin/pysal
pysal/core/IOHandlers/arcgis_dbf.py
20
6913
import pysal import os.path import pysal.core.FileIO as FileIO from pysal.weights import W from pysal.weights.util import remap_ids from warnings import warn __author__ = "Myunghwa Hwang <mhwang4@gmail.com>" __all__ = ["ArcGISDbfIO"] class ArcGISDbfIO(FileIO.FileIO): """ Opens, reads, and writes weights file objects in ArcGIS dbf format. Spatial weights objects in the ArcGIS dbf format are used in ArcGIS Spatial Statistics tools. This format is the same as the general dbf format, but the structure of the weights dbf file is fixed unlike other dbf files. This dbf format can be used with the "Generate Spatial Weights Matrix" tool, but not with the tools under the "Mapping Clusters" category. The ArcGIS dbf file is assumed to have three or four data columns. When the file has four columns, the first column is meaningless and will be ignored in PySAL during both file reading and file writing. The next three columns hold origin IDs, destinations IDs, and weight values. When the file has three columns, it is assumed that only these data columns exist in the stated order. The name for the orgin IDs column should be the name of ID variable in the original source data table. The names for the destination IDs and weight values columns are NID and WEIGHT, respectively. ArcGIS Spatial Statistics tools support only unique integer IDs. Therefore, the values for origin and destination ID columns should be integer. For the case where the IDs of a weights object are not integers, ArcGISDbfIO allows users to use internal id values corresponding to record numbers, instead of original ids. An exemplary structure of an ArcGIS dbf file is as follows: [Line 1] Field1 RECORD_ID NID WEIGHT [Line 2] 0 72 76 1 [Line 3] 0 72 79 1 [Line 4] 0 72 78 1 ... Unlike the ArcGIS text format, this format does not seem to include self-neighbors. 
References ---------- http://webhelp.esri.com/arcgisdesktop/9.3/index.cfm?TopicName=Convert_Spatial_Weights_Matrix_to_Table_(Spatial_Statistics) """ FORMATS = ['arcgis_dbf'] MODES = ['r', 'w'] def __init__(self, *args, **kwargs): self._varName = 'Unknown' args = args[:2] FileIO.FileIO.__init__(self, *args, **kwargs) self.file = pysal.open(self.dataPath, self.mode) def _set_varName(self, val): if issubclass(type(val), basestring): self._varName = val def _get_varName(self): return self._varName varName = property(fget=_get_varName, fset=_set_varName) def read(self, n=-1): self._complain_ifclosed(self.closed) return self._read() def seek(self, pos): self.file.seek(pos) self.pos = self.file.pos def _read(self): """Reads ArcGIS dbf file Returns a pysal.weights.weights.W object Examples -------- Type 'dir(w)' at the interpreter to see what methods are supported. Open an ArcGIS dbf file and read it into a pysal weights object >>> w = pysal.open(pysal.examples.get_path('arcgis_ohio.dbf'),'r','arcgis_dbf').read() Get the number of observations from the header >>> w.n 88 Get the mean number of neighbors >>> w.mean_neighbors 5.25 Get neighbor distances for a single observation >>> w[1] {2: 1.0, 11: 1.0, 6: 1.0, 7: 1.0} """ if self.pos > 0: raise StopIteration id_var = self.file.header[1] startPos = len(self.file.header) if startPos == 3: startPos = 0 elif startPos == 4: startPos = 1 else: raise ValueError("Wrong structure, a weights dbf file requires at least three data columns") self.varName = id_var id_type = int id_spec = self.file.field_spec[startPos] if id_spec[0] != 'N': raise TypeError('The data type for ids should be integer.') self.id_var = id_var weights = {} neighbors = {} for row in self.file: i, j, w = tuple(row)[startPos:] i = id_type(i) j = id_type(j) w = float(w) if i not in weights: weights[i] = [] neighbors[i] = [] weights[i].append(w) neighbors[i].append(j) self.pos = self.file.pos return W(neighbors, weights) def write(self, obj, useIdIndex=False): """ 
Parameters ---------- .write(weightsObject) accepts a weights object Returns ------ an ArcGIS dbf file write a weights object to the opened dbf file. Examples -------- >>> import tempfile, pysal, os >>> testfile = pysal.open(pysal.examples.get_path('arcgis_ohio.dbf'),'r','arcgis_dbf') >>> w = testfile.read() Create a temporary file for this example >>> f = tempfile.NamedTemporaryFile(suffix='.dbf') Reassign to new var >>> fname = f.name Close the temporary named file >>> f.close() Open the new file in write mode >>> o = pysal.open(fname,'w','arcgis_dbf') Write the Weights object into the open file >>> o.write(w) >>> o.close() Read in the newly created text file >>> wnew = pysal.open(fname,'r','arcgis_dbf').read() Compare values from old to new >>> wnew.pct_nonzero == w.pct_nonzero True Clean up temporary file created for this example >>> os.remove(fname) """ self._complain_ifclosed(self.closed) if issubclass(type(obj), W): self.file.header = [self.varName, 'NID', 'WEIGHT'] id_type = type(obj.id_order[0]) if id_type is not int and not useIdIndex: raise TypeError("ArcGIS DBF weight files support only integer IDs") if useIdIndex: id2i = obj.id2i obj = remap_ids(obj, id2i) id_spec = ('N', len(str(max(obj.id_order))), 0) self.file.field_spec = [id_spec, id_spec, ('N', 13, 6)] for id in obj.id_order: neighbors = zip(obj.neighbors[id], obj.weights[id]) for neighbor, weight in neighbors: self.file.write([id, neighbor, weight]) self.pos = self.file.pos else: raise TypeError("Expected a pysal weights object, got: %s" % ( type(obj))) def flush(self): self._complain_ifclosed(self.closed) self.file.flush() def close(self): self.file.close()
bsd-3-clause
TNosredna/CouchPotatoServer
libs/pyutil/assertutil.py
106
2753
# Copyright (c) 2003-2009 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.

"""
Tests useful in assertion checking, prints out nicely formatted messages too.
"""

from humanreadable import hr


def _format_error_message(prefix, args, kwargs):
    """
    Build the assertion-failure message shared by _assert, precondition
    and postcondition.

    Positional values are rendered as "value type", keyword values as
    "key: value type", all comma-separated; `prefix` supplies any label
    (e.g. ["precondition", ": "]) to prepend.  This replaces three
    hand-duplicated copies of the same formatting logic; keys are now
    rendered uniformly (the original hr()'d all but the first key).
    """
    parts = list(prefix)
    for index, value in enumerate(args):
        template = "%s %s" if index == 0 else ", %s %s"
        parts.append(template % (hr(value), hr(type(value))))
    # list(...) so indexing/slicing also works on Python 3 dict views.
    for index, (key, value) in enumerate(list(kwargs.items())):
        template = "%s: %s %s" if (index == 0 and not args) else ", %s: %s %s"
        parts.append(template % (key, hr(value), hr(type(value))))
    return "".join(parts)


def _assert(___cond=False, *___args, **___kwargs):
    """
    Raise AssertionError -- with the offending values and their types in
    the message -- unless ``___cond`` is true.

    Returns True when the condition holds, so calls can be chained in
    boolean expressions.  The triple-underscore parameter names avoid
    colliding with caller-supplied keyword arguments.
    """
    if ___cond:
        return True
    # Parenthesized raise (not the Python-2-only "raise E, msg" form)
    # keeps this importable on both Python 2 and 3.
    raise AssertionError(_format_error_message([], ___args, ___kwargs))


def precondition(___cond=False, *___args, **___kwargs):
    """Like _assert, but labels the failure as a violated precondition."""
    if ___cond:
        return True
    prefix = ["precondition"]
    if ___args or ___kwargs:
        prefix.append(": ")
    raise AssertionError(_format_error_message(prefix, ___args, ___kwargs))


def postcondition(___cond=False, *___args, **___kwargs):
    """Like _assert, but labels the failure as a violated postcondition."""
    if ___cond:
        return True
    prefix = ["postcondition"]
    if ___args or ___kwargs:
        prefix.append(": ")
    raise AssertionError(_format_error_message(prefix, ___args, ___kwargs))
gpl-3.0
jeffery9/mixprint_addons
base_action_rule/tests/__init__.py
178
1129
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from . import base_action_rule_test checks = [ base_action_rule_test, ] # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
liamgh/liamgreenhughes-sl4a-tf101
python/src/Lib/encodings/iso8859_13.py
593
13527
""" Python Character Mapping Codec iso8859_13 generated from 'MAPPINGS/ISO8859/8859-13.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-13', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE u'\x1c' # 
0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' # 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z u'[' 
# 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\x80' # 0x80 -> <control> u'\x81' # 0x81 -> <control> u'\x82' # 0x82 -> <control> u'\x83' # 0x83 -> <control> u'\x84' # 0x84 -> <control> u'\x85' # 0x85 -> <control> u'\x86' # 0x86 -> <control> u'\x87' # 0x87 -> <control> u'\x88' # 0x88 -> <control> u'\x89' # 0x89 -> <control> u'\x8a' # 0x8A -> <control> u'\x8b' # 0x8B -> <control> u'\x8c' # 0x8C -> <control> u'\x8d' # 0x8D -> <control> u'\x8e' # 0x8E -> <control> u'\x8f' # 0x8F -> <control> u'\x90' # 0x90 -> <control> u'\x91' # 0x91 -> <control> u'\x92' # 0x92 -> <control> u'\x93' # 0x93 -> <control> u'\x94' # 0x94 -> <control> u'\x95' # 0x95 -> <control> u'\x96' # 0x96 -> <control> u'\x97' # 0x97 -> <control> u'\x98' # 0x98 -> <control> u'\x99' # 0x99 -> <control> u'\x9a' 
# 0x9A -> <control> u'\x9b' # 0x9B -> <control> u'\x9c' # 0x9C -> <control> u'\x9d' # 0x9D -> <control> u'\x9e' # 0x9E -> <control> u'\x9f' # 0x9F -> <control> u'\xa0' # 0xA0 -> NO-BREAK SPACE u'\u201d' # 0xA1 -> RIGHT DOUBLE QUOTATION MARK u'\xa2' # 0xA2 -> CENT SIGN u'\xa3' # 0xA3 -> POUND SIGN u'\xa4' # 0xA4 -> CURRENCY SIGN u'\u201e' # 0xA5 -> DOUBLE LOW-9 QUOTATION MARK u'\xa6' # 0xA6 -> BROKEN BAR u'\xa7' # 0xA7 -> SECTION SIGN u'\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE u'\xa9' # 0xA9 -> COPYRIGHT SIGN u'\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xac' # 0xAC -> NOT SIGN u'\xad' # 0xAD -> SOFT HYPHEN u'\xae' # 0xAE -> REGISTERED SIGN u'\xc6' # 0xAF -> LATIN CAPITAL LETTER AE u'\xb0' # 0xB0 -> DEGREE SIGN u'\xb1' # 0xB1 -> PLUS-MINUS SIGN u'\xb2' # 0xB2 -> SUPERSCRIPT TWO u'\xb3' # 0xB3 -> SUPERSCRIPT THREE u'\u201c' # 0xB4 -> LEFT DOUBLE QUOTATION MARK u'\xb5' # 0xB5 -> MICRO SIGN u'\xb6' # 0xB6 -> PILCROW SIGN u'\xb7' # 0xB7 -> MIDDLE DOT u'\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE u'\xb9' # 0xB9 -> SUPERSCRIPT ONE u'\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS u'\xe6' # 0xBF -> LATIN SMALL LETTER AE u'\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK u'\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK u'\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON u'\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE u'\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK u'\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE u'\u0179' # 0xCA -> LATIN 
CAPITAL LETTER Z WITH ACUTE u'\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE u'\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA u'\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA u'\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON u'\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA u'\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE u'\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE u'\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\xd7' # 0xD7 -> MULTIPLICATION SIGN u'\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK u'\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE u'\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE u'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE u'\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German) u'\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK u'\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK u'\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON u'\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE u'\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK u'\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE u'\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE u'\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE u'\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA u'\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA u'\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON u'\u013c' # 0xEF -> LATIN SMALL 
LETTER L WITH CEDILLA u'\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE u'\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE u'\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS u'\xf7' # 0xF7 -> DIVISION SIGN u'\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK u'\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE u'\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE u'\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE u'\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON u'\u2019' # 0xFF -> RIGHT SINGLE QUOTATION MARK ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
apache-2.0
iAMr00t/opencog
examples/python/conceptual_blending/blend_with_interaction_information.py
22
9180
#! /usr/bin/env python # # blend_with_information_interaction.py # """ Example usage of Conceptual Blending API. Instantiates blender with a simple dataset stored in an AtomSpace and learns a new concept. For complete documentation on how to pass additional parameters to blender, refer to the documentation at the following link: https://github.com/opencog/opencog/tree/master/opencog/python/blending/blend-config-format.md """ __author__ = 'DongMin Kim' from opencog.atomspace import AtomSpace from opencog.utilities import initialize_opencog from opencog.type_constructors import * from opencog.logger import log from blending.blend import ConceptualBlending """ Fourth Example: - Blend with custom config. - Give focus atom manually. - Choose one link set which has largest interaction information, and connect them to new blend. - Typical 'bat' -> eat flies -> has claws -> has sonar -> live in cave -> subclass of 'winged' -> subclass of 'nocturnal' - Typical 'man' -> eat steak -> subclass of 'two-legged' -> subclass of 'fingered' - Subset of 'bat' -> funnel_eared megabat vesper -> night_stalker(Eats steak) -> Zubat(Pokemon) - Subset of 'man' -> scientist police programmer -> ancients(Lives in cave) -> vegetarian(Don't eats steak) Output dump: --------Start fourth example-------- ConnectConflictInteractionInformation: Calculating probabilities (Total: 10) ConnectConflictInteractionInformation:PROB:: 1/10 (10.0%) ConnectConflictInteractionInformation:PROB:: 4/10 (40.0%) ConnectConflictInteractionInformation:PROB:: 7/10 (70.0%) ConnectConflictInteractionInformation: Calculating interaction information (Total: 512) ConnectConflictInteractionInformation:II:: 1/512 (0.1953125%) ConnectConflictInteractionInformation:II:: 129/512 (25.1953125%) ConnectConflictInteractionInformation:II:: 257/512 (50.1953125%) ConnectConflictInteractionInformation:II:: 385/512 (75.1953125%) # Link set that has largest interaction information value: sonar, nocturnal, winged, fingered, cave, steak, 
claws, flies, two-legged, : 1.11336374283 ... # For example, an entity that has links: # -> (eat steak) # -> (eat flies) AND (has sonar) AND (has claws) AND (winged) AND (fingered) AND (two-legged) # # is more surprising than the entity that has: # -> (live_in cave) # -> (eat flies) AND (has sonar) AND (has claws) AND (winged) AND (fingered) AND (two-legged) # sonar, winged, fingered, steak, claws, flies, two-legged, : 1.0826215744 sonar, winged, fingered, cave, claws, flies, two-legged, : 1.07536005974 ... nocturnal, winged, fingered, cave, steak, claws, flies, two-legged, : 1.06969738007 sonar, nocturnal, winged, fingered, cave, claws, flies, two-legged, : 1.06638371944 ... Newly blended node: (ConceptNode "bat-man") ; [443] Links in new blended node: [(InheritanceLink (av 0 0 0) (stv 1.000000 0.000000) (ConceptNode "bat-man" (av 0 0 0) (stv 1.000000 0.000000)) ; [443] (ConceptNode "man" (av 0 0 0) (stv 1.000000 0.000000)) ; [3] ) ; [658] ... , (EvaluationLink (av 0 0 0) (stv 0.700000 0.800000) (PredicateNode "eat" (av 0 0 0) (stv 1.000000 0.000000)) ; [4] (ConceptNode "bat-man" (av 0 0 0) (stv 1.000000 0.000000)) ; [443] (ConceptNode "steak" (av 0 0 0) (stv 1.000000 0.000000)) ; [5] ) ; [655] ] """ print "--------Start fourth example--------" # Interaction Information algorithm takes very long time - about O(n*(2^n)) - # if we not give any limit. If you want to check what is going now then you can # enable below logger option. # log.use_stdout(True) # log.set_level("DEBUG") a = AtomSpace() initialize_opencog(a) high_tv = TruthValue(0.7, 0.8) low_tv = TruthValue(0.3, 0.8) # 1. 
Define a super-class information bat = ConceptNode("bat") man = ConceptNode("man") eat = PredicateNode("eat") steak = ConceptNode("steak") flies = ConceptNode("flies") has = PredicateNode("has") claws = ConceptNode("claws") sonar = ConceptNode("sonar") live_in = PredicateNode("live_in") cave = ConceptNode("cave") pokeball = ConceptNode("pokeball") two_legged = ConceptNode("two-legged") fingered = ConceptNode("fingered") winged = ConceptNode("winged") huge_size = ConceptNode("huge_size") popular = ConceptNode("popular") nocturnal = ConceptNode("nocturnal") # Bat EvaluationLink(eat, bat, flies, high_tv) EvaluationLink(eat, bat, steak, low_tv) EvaluationLink(has, bat, claws, high_tv) EvaluationLink(has, bat, sonar, high_tv) EvaluationLink(live_in, bat, cave, high_tv) EvaluationLink(live_in, bat, pokeball, low_tv) InheritanceLink(bat, winged, high_tv) InheritanceLink(bat, nocturnal, high_tv) InheritanceLink(bat, two_legged, low_tv) InheritanceLink(bat, fingered, low_tv) InheritanceLink(bat, huge_size, low_tv) InheritanceLink(bat, popular, low_tv) # Man EvaluationLink(eat, man, flies, low_tv) EvaluationLink(eat, man, steak, high_tv) EvaluationLink(has, man, claws, low_tv) EvaluationLink(has, man, sonar, low_tv) EvaluationLink(live_in, man, cave, low_tv) EvaluationLink(live_in, man, pokeball, low_tv) InheritanceLink(man, winged, low_tv) InheritanceLink(man, nocturnal, low_tv) InheritanceLink(man, two_legged, high_tv) InheritanceLink(man, fingered, high_tv) InheritanceLink(man, huge_size, low_tv) InheritanceLink(man, popular, low_tv) # 2. 
Define sub-class information funnel_eared = ConceptNode("funnel_eared") megabat = ConceptNode("megabat") vesper = ConceptNode("vesper") night_stalker = ConceptNode("night_stalker") zubat = ConceptNode("zubat") scientist = ConceptNode("scientist") police = ConceptNode("police") programmer = ConceptNode("programmer") ancients = ConceptNode("ancients") vegetarian = ConceptNode("vegetarian") InheritanceLink(funnel_eared, bat) InheritanceLink(megabat, bat) InheritanceLink(vesper, bat) InheritanceLink(night_stalker, bat) InheritanceLink(zubat, bat) InheritanceLink(scientist, man) InheritanceLink(police, man) InheritanceLink(programmer, man) InheritanceLink(ancients, man) InheritanceLink(vegetarian, man) # 3. Describe about the several bats. EvaluationLink(eat, funnel_eared, flies, high_tv) EvaluationLink(has, funnel_eared, claws, high_tv) EvaluationLink(has, funnel_eared, sonar, high_tv) EvaluationLink(live_in, funnel_eared, cave, high_tv) InheritanceLink(funnel_eared, winged, high_tv) InheritanceLink(funnel_eared, nocturnal, high_tv) EvaluationLink(eat, megabat, flies, high_tv) EvaluationLink(has, megabat, claws, high_tv) EvaluationLink(has, megabat, sonar, high_tv) EvaluationLink(live_in, megabat, cave, high_tv) InheritanceLink(megabat, winged, high_tv) InheritanceLink(megabat, nocturnal, high_tv) InheritanceLink(megabat, huge_size, high_tv) EvaluationLink(eat, vesper, flies, high_tv) EvaluationLink(has, vesper, claws, high_tv) EvaluationLink(has, vesper, sonar, high_tv) EvaluationLink(live_in, vesper, cave, high_tv) InheritanceLink(vesper, winged, high_tv) InheritanceLink(vesper, nocturnal, high_tv) InheritanceLink(vesper, popular, high_tv) # Night Stalker eats meat too. 
EvaluationLink(eat, night_stalker, flies, high_tv) EvaluationLink(eat, night_stalker, steak, high_tv) EvaluationLink(has, night_stalker, claws, high_tv) EvaluationLink(has, night_stalker, sonar, high_tv) EvaluationLink(live_in, night_stalker, cave, high_tv) InheritanceLink(night_stalker, winged, high_tv) InheritanceLink(night_stalker, nocturnal, high_tv) # The Zubat(Pokemon) lives in pokeball. EvaluationLink(eat, zubat, steak, high_tv) EvaluationLink(has, zubat, claws, high_tv) EvaluationLink(has, zubat, sonar, high_tv) EvaluationLink(live_in, zubat, pokeball, high_tv) InheritanceLink(zubat, winged, high_tv) InheritanceLink(zubat, nocturnal, high_tv) InheritanceLink(zubat, popular, high_tv) # 4. Describe about the several men. EvaluationLink(eat, scientist, steak, high_tv) InheritanceLink(scientist, two_legged, high_tv) InheritanceLink(scientist, fingered, high_tv) EvaluationLink(eat, police, steak, high_tv) InheritanceLink(police, two_legged, high_tv) InheritanceLink(police, fingered, high_tv) EvaluationLink(eat, programmer, steak, high_tv) InheritanceLink(programmer, two_legged, high_tv) InheritanceLink(programmer, fingered, high_tv) InheritanceLink(programmer, nocturnal, high_tv) EvaluationLink(live_in, ancients, cave, high_tv) InheritanceLink(ancients, two_legged, high_tv) InheritanceLink(ancients, fingered, high_tv) InheritanceLink(vegetarian, two_legged, high_tv) InheritanceLink(vegetarian, fingered, high_tv) # 5. Make custom config. InheritanceLink( ConceptNode("my-config"), ConceptNode("BLEND") ) ExecutionLink( SchemaNode("BLEND:blending-decider"), ConceptNode("my-config"), ConceptNode("DecideNull") ) ExecutionLink( SchemaNode("BLEND:link-connector"), ConceptNode("my-config"), ConceptNode("ConnectConflictInteractionInformation") ) ExecutionLink( SchemaNode("BLEND:connect-check-type"), ConceptNode("my-config"), ConceptNode("Link") ) # Start Conceptual Blending. 
result = ConceptualBlending(a).run( [ a.add_node(types.ConceptNode, "bat"), a.add_node(types.ConceptNode, "man") ], ConceptNode("my-config") ) print "Newly blended node:" print str(result[0]) + "\n" print "Links in new blended node:" print result[0].incoming
agpl-3.0
fauskanger/Pretreat
app/classes/graph/path.py
1
1741
from app.config import config
from app.pythomas import shapes as shapelib
from app.pythomas import pythomas as lib


class Path:
    """An ordered sequence of graph nodes forming a (possibly partial) path.

    A path built from a non-empty node list is considered ``complete``;
    a path built from ``None``, ``[]`` or ``[None]`` starts empty and
    incomplete.
    """

    def __init__(self, path_nodes):
        # Treat the sentinel [None] the same as "no path at all".
        path_nodes = None if path_nodes == [None] else path_nodes
        # NOTE(review): a caller-supplied list is stored by reference here
        # (existing behavior, kept for compatibility); pass a copy if the
        # caller intends to keep mutating its own list.
        self.nodes = [] if not path_nodes else path_nodes
        self.complete = False
        if path_nodes:
            self.complete = True

    def __add__(self, other):
        """Concatenate two paths.

        If this path ends on the exact node the other path starts with,
        the shared node is not duplicated in the result.
        """
        if self.last() is other.first():
            if len(other.nodes) > 1:
                return Path(self.nodes + other.nodes[1:])
            return self.copy()
        else:
            return Path(self.nodes + other.nodes)

    def add_node(self, node, index=None):
        """Append (or insert at *index*) a node.

        Returns False without modifying the path if the node is already
        present, True otherwise.
        """
        if node in self.nodes:
            return False
        if index is None:
            self.nodes.append(node)
        else:
            self.nodes.insert(index, node)
        return True

    def remove_node(self, node):
        # Delegates to the project helper; returns its success flag.
        return lib.try_remove(self.nodes, node)

    def update(self, dt):
        # No per-frame state to update; kept for the common node/path API.
        pass

    def draw(self, batch=None):
        # Paths have no visual representation of their own.
        pass

    def delete(self):
        self.nodes.clear()

    def get_edge_list(self):
        """Return the path as a list of (from_node, to_node) edge tuples."""
        nodes = self.get_node_list()
        edges = []
        for i in range(1, self.get_count()):
            edges.append((nodes[i-1], nodes[i]))
        return edges

    def first(self):
        """First node of the path, or None if the path is empty."""
        if not self.nodes:
            return None
        return self.nodes[0]

    def last(self):
        """Last node of the path, or None if the path is empty."""
        if not self.nodes:
            return None
        return self.nodes[-1]

    def has_node(self, node):
        return node in self.get_node_list()

    def get_node_list(self):
        return self.nodes

    def get_count(self):
        return len(self.nodes)

    def copy(self):
        """Return a new Path over an independent copy of the node list.

        Fix: previously this returned ``Path(self.nodes)``, which shared
        the underlying list, so mutating the "copy" (add_node/remove_node/
        delete) silently mutated the original path as well.
        """
        return Path(list(self.nodes))
gpl-2.0
jdobes/spacewalk
rel-eng/copr-fork.py
5
2000
#!/usr/bin/python
#
# Fork a copr repository after a Spacewalk release has been branched, then
# re-point every package in the fork at the release git branch.

from copr import CoprClient
import argparse
import json

# Command-line interface: an optional --force plus the source repo, the
# destination repo and the git branch the forked packages should track.
arg_parser = argparse.ArgumentParser(
    description='Fork copr nightly repo after Spacewalk release branching')
arg_parser.add_argument('--force', action='store_true', default=False,
                        help='force fork into an existing project')
arg_parser.add_argument('source_repo', nargs=1,
                        help='name of copr repo to be forked'
                             ' (e.g. @spacewalkproject/nightly-client)')
arg_parser.add_argument('destination_repo', nargs=1,
                        help='name of newly created copr repo'
                             ' (e.g. @spacewalkproject/spacewalk-2.7-client)')
arg_parser.add_argument('git_branch', nargs=1,
                        help='git branch associated with new forked repo'
                             ' (e.g. SPACEWALK-2.7)')
args = arg_parser.parse_args()

# Split "owner/project" into its two halves for the copr API calls.
dest_owner, dest_project = args.destination_repo[0].split('/', 2)

client = CoprClient.create_from_file_config()

print("Forking project: %s -> %s" % (args.source_repo[0],
                                     args.destination_repo[0]))
client.fork_project(source=args.source_repo[0],
                    username=dest_owner,
                    projectname=dest_project,
                    confirm=args.force)

# Walk every package in the freshly forked project and switch its tito
# build source over to the release branch.
packages = client.get_packages_list(ownername=dest_owner,
                                    projectname=dest_project)
for package in packages.packages_list:
    name = package.data['name']
    print("  Updating package: %s" % name)
    source_info = json.loads(package.data['source_json'])
    if not (source_info and source_info.get('git_url', None)):
        print("  ERROR: package git url is missing")
        continue
    client.edit_package_tito(package_name=name,
                             ownername=dest_owner,
                             projectname=dest_project,
                             git_url=source_info['git_url'],
                             git_dir=source_info['git_dir'],
                             git_branch=args.git_branch[0],
                             webhook_rebuild=package.data['webhook_rebuild'])
gpl-2.0
goddino/libjingle
talk/site_scons/site_tools/talk_linux.py
22
11731
# Copyright 2010 Google Inc.
# All Rights Reserved.

# Author: tschmelcher@google.com (Tristan Schmelcher)

"""Tool for helpers used in linux building process."""

# NOTE(review): Python 2 SCons tool (print statements, string-% after
# print) -- do not run under Python 3 without porting.

import os
import SCons.Defaults
import subprocess


def _OutputFromShellCommand(command):
  # Runs *command* through the shell and returns its stripped stdout.
  process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
  return process.communicate()[0].strip()


# This is a pure SCons helper function.
def _InternalBuildDebianPackage(env, debian_files, package_files,
                                output_dir=None, force_version=None):
  """Creates build rules to build a Debian package from the specified sources.

  Args:
    env: SCons Environment.
    debian_files: Array of the Debian control file sources that should be
        copied into the package source tree, e.g., changelog, control, rules,
        etc.
    package_files: An array of 2-tuples listing the files that should be
        copied into the package source tree.
        The first element is the path where the file should be placed for the
        .install control file to find it, relative to the generated debian
        package source directory.
        The second element is the file source.
    output_dir: An optional directory to place the files in. If omitted, the
        current output directory is used.
    force_version: Optional. Forces the version of the package to start with
        this version string if specified. If the last entry in the changelog
        is not for a version that starts with this then a dummy entry is
        generated with this version and a ~prerelease suffix (so that the
        final version will compare as greater).

  Return:
    A list of the targets (if any).
  """
  # Bail out gracefully on machines without the Debian toolchain.
  if 0 != subprocess.call(['which', 'dpkg-buildpackage']):
    print ('dpkg-buildpackage not installed on this system; '
           'skipping DEB build stage')
    return []
  # Read the control file and changelog file to determine the package name,
  # version, and arch that the Debian build tools will use to name the
  # generated files.
  control_file = None
  changelog_file = None
  for file in debian_files:
    if os.path.basename(file) == 'control':
      control_file = env.File(file).srcnode().abspath
    elif os.path.basename(file) == 'changelog':
      changelog_file = env.File(file).srcnode().abspath
  if not control_file:
    raise Exception('Need to have a control file')
  if not changelog_file:
    raise Exception('Need to have a changelog file')
  # Extract package metadata with awk/sed rather than parsing in Python so
  # the interpretation matches what the Debian tools themselves would see.
  source = _OutputFromShellCommand(
      "awk '/^Source:/ { print $2; }' " + control_file)
  packages = _OutputFromShellCommand(
      "awk '/^Package:/ { print $2; }' " + control_file).split('\n')
  version = _OutputFromShellCommand(
      "sed -nr '1 { s/.*\\((.*)\\).*/\\1/; p }' " + changelog_file)
  arch = _OutputFromShellCommand('dpkg --print-architecture')
  add_dummy_changelog_entry = False
  if force_version and not version.startswith(force_version):
    print ('Warning: no entry in ' + changelog_file + ' for version ' +
           force_version + ' (last is ' + version + '). A dummy entry will be ' +
           'generated. Remember to add the real changelog entry before ' +
           'releasing.')
    version = force_version + '~prerelease'
    add_dummy_changelog_entry = True
  source_dir_name = source + '_' + version + '_' + arch
  target_file_names = [ source_dir_name + '.changes' ]
  for package in packages:
    package_file_name = package + '_' + version + '_' + arch + '.deb'
    target_file_names.append(package_file_name)
  # The targets
  if output_dir:
    targets = [os.path.join(output_dir, s) for s in target_file_names]
  else:
    targets = target_file_names
  # Path to where we will construct the debian build tree.
  deb_build_tree = os.path.join(source_dir_name, 'deb_build_tree')
  # First copy the files.
  for file in package_files:
    env.Command(os.path.join(deb_build_tree, file[0]), file[1],
                SCons.Defaults.Copy('$TARGET', '$SOURCE'))
    env.Depends(targets, os.path.join(deb_build_tree, file[0]))
  # Now copy the Debian metadata sources. We have to do this all at once so
  # that we can remove the target directory before copying, because there
  # can't be any other stale files there or else dpkg-buildpackage may use
  # them and give incorrect build output.
  copied_debian_files_paths = []
  for file in debian_files:
    copied_debian_files_paths.append(os.path.join(deb_build_tree, 'debian',
                                                  os.path.basename(file)))
  # $$ escapes SCons's own $-substitution so the shell sees a single $.
  copy_commands = [
      """dir=$$(dirname $TARGET) && \
rm -Rf $$dir && \
mkdir -p $$dir && \
cp $SOURCES $$dir && \
chmod -R u+w $$dir"""
  ]
  if add_dummy_changelog_entry:
    copy_commands += [
        """debchange -c $$(dirname $TARGET)/changelog --newversion %s \
--distribution UNRELEASED \
'Developer preview build. (This entry was auto-generated.)'""" % version
    ]
  env.Command(copied_debian_files_paths, debian_files, copy_commands)
  env.Depends(targets, copied_debian_files_paths)
  # Must explicitly specify -a because otherwise cross-builds won't work.
  # Must explicitly specify -D because -a disables it.
  # Must explicitly specify fakeroot because old dpkg tools don't assume that.
  env.Command(targets, None,
              """dir=%(dir)s && \
cd $$dir && \
dpkg-buildpackage -b -uc -a%(arch)s -D -rfakeroot && \
cd $$OLDPWD && \
for file in %(targets)s; do \
mv $$dir/../$$file $$(dirname $TARGET) || exit 1; \
done""" %
              {'dir':env.Dir(deb_build_tree).path,
               'arch':arch,
               'targets':' '.join(target_file_names)})
  return targets


def BuildDebianPackage(env, debian_files, package_files, force_version=None):
  """Creates build rules to build a Debian package from the specified sources.

  This is a Hammer-ified version of _InternalBuildDebianPackage that knows to
  put the packages in the Hammer staging dir.

  Args:
    env: SCons Environment.
    debian_files: Array of the Debian control file sources that should be
        copied into the package source tree, e.g., changelog, control, rules,
        etc.
    package_files: An array of 2-tuples listing the files that should be
        copied into the package source tree.
        The first element is the path where the file should be placed for the
        .install control file to find it, relative to the generated debian
        package source directory.
        The second element is the file source.
    force_version: Optional. Forces the version of the package to start with
        this version string if specified. If the last entry in the changelog
        is not for a version that starts with this then a dummy entry is
        generated with this version and a ~prerelease suffix (so that the
        final version will compare as greater).

  Return:
    A list of the targets (if any).
  """
  # DEB packaging only makes sense when building on a Linux host.
  if not env.Bit('host_linux'):
    return []
  return _InternalBuildDebianPackage(env, debian_files, package_files,
                                     output_dir='$STAGING_DIR',
                                     force_version=force_version)


def _GetPkgConfigCommand():
  """Return the pkg-config command line to use.

  Returns:
    A string specifying the pkg-config command line to use.
  """
  # Honors the conventional PKG_CONFIG environment override.
  return os.environ.get('PKG_CONFIG') or 'pkg-config'


def _EscapePosixShellArgument(arg):
  """Escapes a shell command line argument so that it is interpreted literally.

  Args:
    arg: The shell argument to escape.

  Returns:
    The escaped string.
  """
  # Single-quote the argument; embedded single quotes become '\'' .
  return "'%s'" % arg.replace("'", "'\\''")


def _HavePackage(package):
  """Whether the given pkg-config package name is present on the build system.

  Args:
    package: The name of the package.

  Returns:
    True if the package is present, else False
  """
  return subprocess.call('%s --exists %s' % (
      _GetPkgConfigCommand(), _EscapePosixShellArgument(package)),
                         shell=True) == 0


def _GetPackageFlags(flag_type, packages):
  """Get the flags needed to compile/link against the given package(s).

  Returns the flags that are needed to compile/link against the given
  pkg-config package(s).

  Args:
    flag_type: The option to pkg-config specifying the type of flags to get.
    packages: The list of package names as strings.

  Returns:
    The flags of the requested type.

  Raises:
    subprocess.CalledProcessError: The pkg-config command failed.
  """
  pkg_config = _GetPkgConfigCommand()
  command = ' '.join([pkg_config] +
                     [_EscapePosixShellArgument(arg) for arg in
                      [flag_type] + packages])
  process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
  output = process.communicate()[0]
  if process.returncode != 0:
    raise subprocess.CalledProcessError(process.returncode, pkg_config)
  return output.strip().split(' ')


def GetPackageParams(env, packages):
  """Get the params needed to compile/link against the given package(s).

  Returns the params that are needed to compile/link against the given
  pkg-config package(s).

  Args:
    env: The current SCons environment.
    packages: The name of the package, or a list of names.

  Returns:
    A dictionary containing the params.

  Raises:
    Exception: One or more of the packages is not installed.
  """
  if not env.Bit('host_linux'):
    return {}
  # NOTE(review): SCons.Util is referenced here but only SCons.Defaults is
  # imported above; presumably importing SCons.Defaults pulls it in -- verify.
  if not SCons.Util.is_List(packages):
    packages = [packages]
  for package in packages:
    if not _HavePackage(package):
      raise Exception(('Required package \"%s\" was not found. Please install '
                       'the package that provides the \"%s.pc\" file.') %
                      (package, package))
  package_ccflags = _GetPackageFlags('--cflags', packages)
  package_libs = _GetPackageFlags('--libs', packages)
  # Split package_libs into libs, libdirs, and misc. linker flags. (In a perfect
  # world we could just leave libdirs in link_flags, but some linkers are
  # somehow confused by the different argument order.)
  libs = [flag[2:] for flag in package_libs if flag[0:2] == '-l']
  libdirs = [flag[2:] for flag in package_libs if flag[0:2] == '-L']
  link_flags = [flag for flag in package_libs if
                flag[0:2] not in ['-l', '-L']]
  return {
      'ccflags': package_ccflags,
      'libs': libs,
      'libdirs': libdirs,
      'link_flags': link_flags,
      'dependent_target_settings' : {
          'libs': libs[:],
          'libdirs': libdirs[:],
          'link_flags': link_flags[:],
      },
  }


def EnableFeatureWherePackagePresent(env, bit, cpp_flag, package):
  """Enable a feature if a required pkg-config package is present.

  Args:
    env: The current SCons environment.
    bit: The name of the Bit to enable when the package is present.
    cpp_flag: The CPP flag to enable when the package is present.
    package: The name of the package.
  """
  if not env.Bit('host_linux'):
    return
  if _HavePackage(package):
    env.SetBits(bit)
    env.Append(CPPDEFINES=[cpp_flag])
  else:
    # Python 2 print statement: the % formatting applies to the
    # parenthesized string expression before printing.
    print ('Warning: Package \"%s\" not found. Feature \"%s\" will not be '
           'built. To build with this feature, install the package that '
           'provides the \"%s.pc\" file.') % (package, bit, package)


def GetGccVersion(env):
  # Returns the gcc (or cross CXX) version as an (int, int, int) tuple.
  if env.Bit('cross_compile'):
    gcc_command = env['CXX']
  else:
    gcc_command = 'gcc'
  version_string = _OutputFromShellCommand(
      '%s --version | head -n 1 |'
      r'sed "s/.*\([0-9]\+\.[0-9]\+\.[0-9]\+\).*/\1/g"' % gcc_command)
  return tuple([int(x or '0') for x in version_string.split('.')])


def generate(env):
  # SCons tool entry point: expose the helpers as env methods on Linux.
  if env.Bit('linux'):
    env.AddMethod(EnableFeatureWherePackagePresent)
    env.AddMethod(GetPackageParams)
    env.AddMethod(BuildDebianPackage)
    env.AddMethod(GetGccVersion)


def exists(env):
  return 1  # Required by scons
bsd-3-clause
h4ck3rm1k3/OpenWrt-Firefly-SDK
staging_dir/host/lib/scons-2.3.1/SCons/Tool/JavaCommon.py
8
12755
"""SCons.Tool.JavaCommon Stuff for processing Java. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/JavaCommon.py 2014/03/02 14:18:15 garyo" import os import os.path import re java_parsing = 1 default_java_version = '1.4' if java_parsing: # Parse Java files for class names. # # This is a really cool parser from Charles Crain # that finds appropriate class names in Java source. 
# A regular expression that will find, in a java file: # newlines; # double-backslashes; # a single-line comment "//"; # single or double quotes preceeded by a backslash; # single quotes, double quotes, open or close braces, semi-colons, # periods, open or close parentheses; # floating-point numbers; # any alphanumeric token (keyword, class name, specifier); # any alphanumeric token surrounded by angle brackets (generics); # the multi-line comment begin and end tokens /* and */; # array declarations "[]". _reToken = re.compile(r'(\n|\\\\|//|\\[\'"]|[\'"\{\}\;\.\(\)]|' + r'\d*\.\d*|[A-Za-z_][\w\$\.]*|<[A-Za-z_]\w+>|' + r'/\*|\*/|\[\])') class OuterState(object): """The initial state for parsing a Java file for classes, interfaces, and anonymous inner classes.""" def __init__(self, version=default_java_version): if not version in ('1.1', '1.2', '1.3','1.4', '1.5', '1.6', '1.7', '5', '6'): msg = "Java version %s not supported" % version raise NotImplementedError(msg) self.version = version self.listClasses = [] self.listOutputs = [] self.stackBrackets = [] self.brackets = 0 self.nextAnon = 1 self.localClasses = [] self.stackAnonClassBrackets = [] self.anonStacksStack = [[0]] self.package = None def trace(self): pass def __getClassState(self): try: return self.classState except AttributeError: ret = ClassState(self) self.classState = ret return ret def __getPackageState(self): try: return self.packageState except AttributeError: ret = PackageState(self) self.packageState = ret return ret def __getAnonClassState(self): try: return self.anonState except AttributeError: self.outer_state = self ret = SkipState(1, AnonClassState(self)) self.anonState = ret return ret def __getSkipState(self): try: return self.skipState except AttributeError: ret = SkipState(1, self) self.skipState = ret return ret def __getAnonStack(self): return self.anonStacksStack[-1] def openBracket(self): self.brackets = self.brackets + 1 def closeBracket(self): self.brackets = self.brackets - 1 if 
len(self.stackBrackets) and \ self.brackets == self.stackBrackets[-1]: self.listOutputs.append('$'.join(self.listClasses)) self.localClasses.pop() self.listClasses.pop() self.anonStacksStack.pop() self.stackBrackets.pop() if len(self.stackAnonClassBrackets) and \ self.brackets == self.stackAnonClassBrackets[-1]: self.__getAnonStack().pop() self.stackAnonClassBrackets.pop() def parseToken(self, token): if token[:2] == '//': return IgnoreState('\n', self) elif token == '/*': return IgnoreState('*/', self) elif token == '{': self.openBracket() elif token == '}': self.closeBracket() elif token in [ '"', "'" ]: return IgnoreState(token, self) elif token == "new": # anonymous inner class if len(self.listClasses) > 0: return self.__getAnonClassState() return self.__getSkipState() # Skip the class name elif token in ['class', 'interface', 'enum']: if len(self.listClasses) == 0: self.nextAnon = 1 self.stackBrackets.append(self.brackets) return self.__getClassState() elif token == 'package': return self.__getPackageState() elif token == '.': # Skip the attribute, it might be named "class", in which # case we don't want to treat the following token as # an inner class name... 
return self.__getSkipState() return self def addAnonClass(self): """Add an anonymous inner class""" if self.version in ('1.1', '1.2', '1.3', '1.4'): clazz = self.listClasses[0] self.listOutputs.append('%s$%d' % (clazz, self.nextAnon)) elif self.version in ('1.5', '1.6', '1.7', '5', '6'): self.stackAnonClassBrackets.append(self.brackets) className = [] className.extend(self.listClasses) self.__getAnonStack()[-1] = self.__getAnonStack()[-1] + 1 for anon in self.__getAnonStack(): className.append(str(anon)) self.listOutputs.append('$'.join(className)) self.nextAnon = self.nextAnon + 1 self.__getAnonStack().append(0) def setPackage(self, package): self.package = package class AnonClassState(object): """A state that looks for anonymous inner classes.""" def __init__(self, old_state): # outer_state is always an instance of OuterState self.outer_state = old_state.outer_state self.old_state = old_state self.brace_level = 0 def parseToken(self, token): # This is an anonymous class if and only if the next # non-whitespace token is a bracket. Everything between # braces should be parsed as normal java code. 
if token[:2] == '//': return IgnoreState('\n', self) elif token == '/*': return IgnoreState('*/', self) elif token == '\n': return self elif token[0] == '<' and token[-1] == '>': return self elif token == '(': self.brace_level = self.brace_level + 1 return self if self.brace_level > 0: if token == 'new': # look further for anonymous inner class return SkipState(1, AnonClassState(self)) elif token in [ '"', "'" ]: return IgnoreState(token, self) elif token == ')': self.brace_level = self.brace_level - 1 return self if token == '{': self.outer_state.addAnonClass() return self.old_state.parseToken(token) class SkipState(object): """A state that will skip a specified number of tokens before reverting to the previous state.""" def __init__(self, tokens_to_skip, old_state): self.tokens_to_skip = tokens_to_skip self.old_state = old_state def parseToken(self, token): self.tokens_to_skip = self.tokens_to_skip - 1 if self.tokens_to_skip < 1: return self.old_state return self class ClassState(object): """A state we go into when we hit a class or interface keyword.""" def __init__(self, outer_state): # outer_state is always an instance of OuterState self.outer_state = outer_state def parseToken(self, token): # the next non-whitespace token should be the name of the class if token == '\n': return self # If that's an inner class which is declared in a method, it # requires an index prepended to the class-name, e.g. 
# 'Foo$1Inner' (Tigris Issue 2087) if self.outer_state.localClasses and \ self.outer_state.stackBrackets[-1] > \ self.outer_state.stackBrackets[-2]+1: locals = self.outer_state.localClasses[-1] try: idx = locals[token] locals[token] = locals[token]+1 except KeyError: locals[token] = 1 token = str(locals[token]) + token self.outer_state.localClasses.append({}) self.outer_state.listClasses.append(token) self.outer_state.anonStacksStack.append([0]) return self.outer_state class IgnoreState(object): """A state that will ignore all tokens until it gets to a specified token.""" def __init__(self, ignore_until, old_state): self.ignore_until = ignore_until self.old_state = old_state def parseToken(self, token): if self.ignore_until == token: return self.old_state return self class PackageState(object): """The state we enter when we encounter the package keyword. We assume the next token will be the package name.""" def __init__(self, outer_state): # outer_state is always an instance of OuterState self.outer_state = outer_state def parseToken(self, token): self.outer_state.setPackage(token) return self.outer_state def parse_java_file(fn, version=default_java_version): return parse_java(open(fn, 'r').read(), version) def parse_java(contents, version=default_java_version, trace=None): """Parse a .java file and return a double of package directory, plus a list of .class files that compiling that .java file will produce""" package = None initial = OuterState(version) currstate = initial for token in _reToken.findall(contents): # The regex produces a bunch of groups, but only one will # have anything in it. currstate = currstate.parseToken(token) if trace: trace(token, currstate) if initial.package: package = initial.package.replace('.', os.sep) return (package, initial.listOutputs) else: # Don't actually parse Java files for class names. 
# # We might make this a configurable option in the future if # Java-file parsing takes too long (although it shouldn't relative # to how long the Java compiler itself seems to take...). def parse_java_file(fn): """ "Parse" a .java file. This actually just splits the file name, so the assumption here is that the file name matches the public class name, and that the path to the file is the same as the package name. """ return os.path.split(file) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
gpl-2.0
eriser/picasso-graphic
tools/gyp/pylib/gyp/MSVSNew.py
225
12061
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""New implementation of Visual Studio project generation for SCons."""

# NOTE(review): Python 2-only module (__cmp__/cmp, old md5 fallback);
# porting to Python 3 requires replacing the ordering protocol.

import os
import random

import gyp.common

# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes.  In 2.6, md5 is deprecated.  Import hashlib if
# available, avoiding a deprecation warning under 2.6.  Import md5 otherwise,
# preserving 2.4 compatibility.
try:
  import hashlib
  _new_md5 = hashlib.md5
except ImportError:
  import md5
  _new_md5 = md5.new


# Initialize random number generator
random.seed()

# GUIDs for project types
ENTRY_TYPE_GUIDS = {
    'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
    'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}

#------------------------------------------------------------------------------
# Helper functions


def MakeGuid(name, seed='msvs_new'):
  """Returns a GUID for the specified target name.

  Args:
    name: Target name.
    seed: Seed for MD5 hash.
  Returns:
    A GUID-line string calculated from the name and seed.

  This generates something which looks like a GUID, but depends only on the
  name and seed.  This means the same name/seed will always generate the same
  GUID, so that projects and solutions which refer to each other can explicitly
  determine the GUID to refer to explicitly.  It also means that the GUID will
  not change when the project for a target is rebuilt.
  """
  # Calculate a MD5 signature for the seed and name.
  d = _new_md5(str(seed) + str(name)).hexdigest().upper()
  # Convert most of the signature to GUID form (discard the rest)
  guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20] + '-' +
          d[20:32] + '}')
  return guid

#------------------------------------------------------------------------------


class MSVSSolutionEntry(object):
  # Common base for folders and projects so they can be sorted together.
  def __cmp__(self, other):
    # Sort by name then guid (so things are in order on vs2008).
    return cmp((self.name, self.get_guid()),
               (other.name, other.get_guid()))


class MSVSFolder(MSVSSolutionEntry):
  """Folder in a Visual Studio project or solution."""

  def __init__(self, path, name = None, entries = None,
               guid = None, items = None):
    """Initializes the folder.

    Args:
      path: Full path to the folder.
      name: Name of the folder.
      entries: List of folder entries to nest inside this folder.  May contain
          Folder or Project objects.  May be None, if the folder is empty.
      guid: GUID to use for folder, if not None.
      items: List of solution items to include in the folder project.  May be
          None, if the folder does not directly contain items.
    """
    if name:
      self.name = name
    else:
      # Use last layer.
      self.name = os.path.basename(path)

    self.path = path
    self.guid = guid

    # Copy passed lists (or set to empty lists)
    self.entries = sorted(list(entries or []))
    self.items = list(items or [])

    self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']

  def get_guid(self):
    if self.guid is None:
      # Use consistent guids for folders (so things don't regenerate).
      self.guid = MakeGuid(self.path, seed='msvs_folder')
    return self.guid

#------------------------------------------------------------------------------


class MSVSProject(MSVSSolutionEntry):
  """Visual Studio project."""

  def __init__(self, path, name = None, dependencies = None, guid = None,
               spec = None, build_file = None, config_platform_overrides = None,
               fixpath_prefix = None):
    """Initializes the project.

    Args:
      path: Absolute path to the project file.
      name: Name of project.  If None, the name will be the same as the base
          name of the project file.
      dependencies: List of other Project objects this project is dependent
          upon, if not None.
      guid: GUID to use for project, if not None.
      spec: Dictionary specifying how to build this project.
      build_file: Filename of the .gyp file that the vcproj file comes from.
      config_platform_overrides: optional dict of configuration platforms to
          used in place of the default for this target.
      fixpath_prefix: the path used to adjust the behavior of _fixpath
    """
    self.path = path
    self.guid = guid
    self.spec = spec
    self.build_file = build_file
    # Use project filename if name not specified
    self.name = name or os.path.splitext(os.path.basename(path))[0]

    # Copy passed lists (or set to empty lists)
    self.dependencies = list(dependencies or [])

    self.entry_type_guid = ENTRY_TYPE_GUIDS['project']

    if config_platform_overrides:
      self.config_platform_overrides = config_platform_overrides
    else:
      self.config_platform_overrides = {}
    self.fixpath_prefix = fixpath_prefix
    self.msbuild_toolset = None

  def set_dependencies(self, dependencies):
    self.dependencies = list(dependencies or [])

  def get_guid(self):
    if self.guid is None:
      # Set GUID from path
      # TODO(rspangler): This is fragile.
      # 1. We can't just use the project filename sans path, since there could
      #    be multiple projects with the same base name (for example,
      #    foo/unittest.vcproj and bar/unittest.vcproj).
      # 2. The path needs to be relative to $SOURCE_ROOT, so that the project
      #    GUID is the same whether it's included from base/base.sln or
      #    foo/bar/baz/baz.sln.
      # 3. The GUID needs to be the same each time this builder is invoked, so
      #    that we don't need to rebuild the solution when the project changes.
      # 4. We should be able to handle pre-built project files by reading the
      #    GUID from the files.
      self.guid = MakeGuid(self.name)
    return self.guid

  def set_msbuild_toolset(self, msbuild_toolset):
    self.msbuild_toolset = msbuild_toolset

#------------------------------------------------------------------------------


class MSVSSolution:
  """Visual Studio solution."""

  def __init__(self, path, version, entries=None, variants=None,
               websiteProperties=True):
    """Initializes the solution.

    Args:
      path: Path to solution file.
      version: Format version to emit.
      entries: List of entries in solution.  May contain Folder or Project
          objects.  May be None, if the folder is empty.
      variants: List of build variant strings.  If none, a default list will
          be used.
      websiteProperties: Flag to decide if the website properties section
          is generated.
    """
    self.path = path
    self.websiteProperties = websiteProperties
    self.version = version

    # Copy passed lists (or set to empty lists)
    self.entries = list(entries or [])

    if variants:
      # Copy passed list
      self.variants = variants[:]
    else:
      # Use default
      self.variants = ['Debug|Win32', 'Release|Win32']
    # TODO(rspangler): Need to be able to handle a mapping of solution config
    # to project config.  Should we be able to handle variants being a dict,
    # or add a separate variant_map variable?  If it's a dict, we can't
    # guarantee the order of variants since dict keys aren't ordered.


    # TODO(rspangler): Automatically write to disk for now; should delay until
    # node-evaluation time.
    self.Write()


  def Write(self, writer=gyp.common.WriteOnDiff):
    """Writes the solution file to disk.

    Raises:
      IndexError: An entry appears multiple times.
    """
    # Walk the entry tree and collect all the folders and projects.
    all_entries = set()
    entries_to_check = self.entries[:]
    while entries_to_check:
      e = entries_to_check.pop(0)

      # If this entry has been visited, nothing to do.
      if e in all_entries:
        continue

      all_entries.add(e)

      # If this is a folder, check its entries too.
      if isinstance(e, MSVSFolder):
        entries_to_check += e.entries

    all_entries = sorted(all_entries)

    # Open file and print header
    # NOTE: explicit \r\n line endings throughout -- the .sln format is
    # CRLF-delimited regardless of the host platform.
    f = writer(self.path)
    f.write('Microsoft Visual Studio Solution File, '
            'Format Version %s\r\n' % self.version.SolutionVersion())
    f.write('# %s\r\n' % self.version.Description())

    # Project entries
    sln_root = os.path.split(self.path)[0]
    for e in all_entries:
      relative_path = gyp.common.RelativePath(e.path, sln_root)
      # msbuild does not accept an empty folder_name.
      # use '.' in case relative_path is empty.
      folder_name = relative_path.replace('/', '\\') or '.'
      f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
          e.entry_type_guid,          # Entry type GUID
          e.name,                     # Folder name
          folder_name,                # Folder name (again)
          e.get_guid(),               # Entry GUID
      ))

      # TODO(rspangler): Need a way to configure this stuff
      if self.websiteProperties:
        f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
                '\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
                '\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
                '\tEndProjectSection\r\n')

      if isinstance(e, MSVSFolder):
        if e.items:
          f.write('\tProjectSection(SolutionItems) = preProject\r\n')
          for i in e.items:
            f.write('\t\t%s = %s\r\n' % (i, i))
          f.write('\tEndProjectSection\r\n')

      if isinstance(e, MSVSProject):
        if e.dependencies:
          f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
          for d in e.dependencies:
            f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
          f.write('\tEndProjectSection\r\n')

      f.write('EndProject\r\n')

    # Global section
    f.write('Global\r\n')

    # Configurations (variants)
    f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
    for v in self.variants:
      f.write('\t\t%s = %s\r\n' % (v, v))
    f.write('\tEndGlobalSection\r\n')

    # Sort config guids for easier diffing of solution changes.
    config_guids = []
    config_guids_overrides = {}
    for e in all_entries:
      if isinstance(e, MSVSProject):
        config_guids.append(e.get_guid())
        config_guids_overrides[e.get_guid()] = e.config_platform_overrides
    config_guids.sort()

    f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
    for g in config_guids:
      for v in self.variants:
        nv = config_guids_overrides[g].get(v, v)
        # Pick which project configuration to build for this solution
        # configuration.
        f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
            g,              # Project GUID
            v,              # Solution build configuration
            nv,             # Project build config for that solution config
        ))

        # Enable project in this solution configuration.
        f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
            g,              # Project GUID
            v,              # Solution build configuration
            nv,             # Project build config for that solution config
        ))
    f.write('\tEndGlobalSection\r\n')

    # TODO(rspangler): Should be able to configure this stuff too (though I've
    # never seen this be any different)
    f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
    f.write('\t\tHideSolutionNode = FALSE\r\n')
    f.write('\tEndGlobalSection\r\n')

    # Folder mappings
    # TODO(rspangler): Should omit this section if there are no folders
    f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
    for e in all_entries:
      if not isinstance(e, MSVSFolder):
        continue        # Does not apply to projects, only folders
      for subentry in e.entries:
        f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
    f.write('\tEndGlobalSection\r\n')

    f.write('EndGlobal\r\n')

    f.close()
bsd-3-clause
rob-nn/open_gait_analytics
api/oga_api/ml/basic_cmac.py
2
6480
import oga_api.ml.cmac as cmac import numpy as np import matplotlib.pyplot as plt import oga_api.physics.cinematic as c class BasicCMAC(cmac.CMAC): def __init__(self, trajectories, pos_angles, time_frame, markers, angles, activations, output, num_iterations): self._num_iterations = num_iterations confs = [] conf = None data_set = None for marker in markers: if 'xCheckedForInput' in marker and marker['xCheckedForInput'] and 'qx'in marker: data = c.get_vectorial_velocities(trajectories[marker['index'], 0, :], time_frame) conf = cmac.SignalConfiguration(data.min(), data.max(), marker['qx'], marker['description']) if conf != None: confs.append(conf) if data_set == None: data_set = np.reshape(data, (len(data), 1)) else: data_set = np.concatenate((data_set, np.reshape(data, (len(data), 1))), axis=1) if 'yCheckedForInput' in marker and marker['yCheckedForInput'] and 'qy'in marker: data = c.get_vectorial_velocities(trajectories[marker['index'], 1, :], time_frame) conf = cmac.SignalConfiguration(data.min(), data.max(), marker['qy'], marker['description']) if conf != None: confs.append(conf) if data_set == None: data_set = np.reshape(data, (len(data), 1)) else: data_set = np.concatenate((data_set, np.reshape(data, (len(data), 1))), axis=1) if 'zCheckedForInput' in marker and marker['zCheckedForInput'] and 'qz'in marker: data = c.get_vectorial_velocities(trajectories[marker['index'], 2, :], time_frame) conf = cmac.SignalConfiguration(data.min(), data.max(), marker['qz'], marker['description']) if conf != None: confs.append(conf) if data_set == None: data_set = np.reshape(data, (len(data), 1)) else: data_set = np.concatenate((data_set, np.reshape(data, (len(data), 1))), axis=1) super(BasicCMAC, self).__init__(confs, activations) if data_set == None: raise ParameterInvalid('No data do process') if len(confs) == 0: raise ParameterInvalid('No input valid input sginal') self._data_set = data_set self._get_output_data(output, trajectories, pos_angles, time_frame) 
self._generate_data_for_training_and_test() @property def data_in(self): return self._data_in @property def data_in_test(self): return self._data_in_test @property def data_set(self): return self._data_set @property def out_data(self): return self._out_data @property def data_out(self): return self._data_out @property def data_out_test(self): return self._data_out_test def _get_output_data(self, output, trajectories, pos_angles, time_frame): if output['type'] == 0: #Marker component = 0 if output['component'] =='x': component = 0 elif output['component'] == 'y': component = 1 else: component == 2 # component == z self._out_data = trajectories[output['_id'], component, :] else: #1 Angle #import pdb; pdb.set_trace() angle = pos_angles[int(output['_id'])] origin = trajectories[int(angle['origin']), 0:3, :] component_a = trajectories[int(angle['component_a']), 0:3, :] component_b = trajectories[int(angle['component_b']), 0:3, :] if output['component'] == 'a': # angle self._out_data = c.get_angles(origin.T, component_a.T, component_b.T) else: # v - angular velocities self._out_data = c.calc_angular_velocities(origin.T, component_a.T, component_b.T, time_frame) #import pdb; pdb.set_trace() def _generate_data_for_training_and_test(self): data_in = None data_in_test = None data_out = np.array([]) data_out_test = np.array([]) for i in np.arange(self._data_set.shape[0]): if i % 2 == 0: if data_in == None: data_in = np.reshape(self._data_set[i,:], (1, self._data_set.shape[1])) else: data_in = np.concatenate((data_in, np.reshape(self._data_set[i,:], (1, self._data_set.shape[1])))) data_out = np.append(data_out, np.array([self._out_data[i]])) else: if data_in_test == None: data_in_test = np.reshape(self._data_set[i,:], (1, self._data_set.shape[1])) else: data_in_test = np.concatenate((data_in_test, np.reshape(self._data_set[i,:], (1, self._data_set.shape[1])))) data_out_test = np.append(data_out_test, np.array([self._out_data[i]])) self._data_in = data_in self._data_in_test = 
data_in_test self._data_out = data_out self._data_out_test = data_out_test def train(self): if self._num_iterations < 1: raise ParameterInvalid('Number of iterations must be greater than 1') t = cmac.Train(self, self._data_in, self._data_out, 1, self._num_iterations) t.train() self.t = t def fire_all(self, inputs): result = [] for data in inputs: result.append(self.fire(data)) return np.array(result) def fire_test(self): return self.fire_all(self._data_in_test) """ def plot_aproximation(self, time = None): real = self._data_test aproximations = self.fire_test) if time == None: t = arange(0, real.shape[0]) * (1./315.) else: t = time plt.figure() plt.plot(self.t.E) plt.figure() plt.hold(True) p1 = plt.plot(t.tolist(), real, 'b', linewidth=4) p2 = plt.plot(t.tolist(), aproximation, 'r', linewidth=2) plt.xlabel('t (sec.)', fontsize=15) plt.ylabel('Angular Velocities (rads/sec.)', fontsize=15) plt.legend(['Human Knee', 'CMAC Prediction']) plt.show() """ class ParameterInvalid(BaseException): def __init__(self, description): self._description = description @property def description(self): return self._description
mit
pdehaye/theming-edx-platform
common/lib/xmodule/xmodule/errortracker.py
74
1472
import logging import sys import traceback from collections import namedtuple log = logging.getLogger(__name__) ErrorLog = namedtuple('ErrorLog', 'tracker errors') def exc_info_to_str(exc_info): """Given some exception info, convert it into a string using the traceback.format_exception() function. """ return ''.join(traceback.format_exception(*exc_info)) def in_exception_handler(): '''Is there an active exception?''' return sys.exc_info() != (None, None, None) def make_error_tracker(): '''Return an ErrorLog (named tuple), with fields (tracker, errors), where the logger appends a tuple (message, exception_str) to the errors on every call. exception_str is in the format returned by traceback.format_exception. error_list is a simple list. If the caller modifies it, info will be lost. ''' errors = [] def error_tracker(msg): '''Log errors''' exc_str = '' if in_exception_handler(): exc_str = exc_info_to_str(sys.exc_info()) # don't display irrelevant gunicorn sync error if (('python2.7/site-packages/gunicorn/workers/sync.py' in exc_str) and ('[Errno 11] Resource temporarily unavailable' in exc_str)): exc_str = '' errors.append((msg, exc_str)) return ErrorLog(error_tracker, errors) def null_error_tracker(msg): '''A dummy error tracker that just ignores the messages''' pass
agpl-3.0
zhiyisun/linux
tools/perf/scripts/python/sched-migration.py
1910
11965
#!/usr/bin/python # # Cpu task migration overview toy # # Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com> # # perf script event handlers have been generated by perf script -g python # # This software is distributed under the terms of the GNU General # Public License ("GPL") version 2 as published by the Free Software # Foundation. import os import sys from collections import defaultdict from UserList import UserList sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from SchedGui import * threads = { 0 : "idle"} def thread_name(pid): return "%s:%d" % (threads[pid], pid) class RunqueueEventUnknown: @staticmethod def color(): return None def __repr__(self): return "unknown" class RunqueueEventSleep: @staticmethod def color(): return (0, 0, 0xff) def __init__(self, sleeper): self.sleeper = sleeper def __repr__(self): return "%s gone to sleep" % thread_name(self.sleeper) class RunqueueEventWakeup: @staticmethod def color(): return (0xff, 0xff, 0) def __init__(self, wakee): self.wakee = wakee def __repr__(self): return "%s woke up" % thread_name(self.wakee) class RunqueueEventFork: @staticmethod def color(): return (0, 0xff, 0) def __init__(self, child): self.child = child def __repr__(self): return "new forked task %s" % thread_name(self.child) class RunqueueMigrateIn: @staticmethod def color(): return (0, 0xf0, 0xff) def __init__(self, new): self.new = new def __repr__(self): return "task migrated in %s" % thread_name(self.new) class RunqueueMigrateOut: @staticmethod def color(): return (0xff, 0, 0xff) def __init__(self, old): self.old = old def __repr__(self): return "task migrated out %s" % thread_name(self.old) class RunqueueSnapshot: def __init__(self, tasks = [0], event = RunqueueEventUnknown()): self.tasks = tuple(tasks) self.event = event def sched_switch(self, prev, prev_state, next): 
event = RunqueueEventUnknown() if taskState(prev_state) == "R" and next in self.tasks \ and prev in self.tasks: return self if taskState(prev_state) != "R": event = RunqueueEventSleep(prev) next_tasks = list(self.tasks[:]) if prev in self.tasks: if taskState(prev_state) != "R": next_tasks.remove(prev) elif taskState(prev_state) == "R": next_tasks.append(prev) if next not in next_tasks: next_tasks.append(next) return RunqueueSnapshot(next_tasks, event) def migrate_out(self, old): if old not in self.tasks: return self next_tasks = [task for task in self.tasks if task != old] return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old)) def __migrate_in(self, new, event): if new in self.tasks: self.event = event return self next_tasks = self.tasks[:] + tuple([new]) return RunqueueSnapshot(next_tasks, event) def migrate_in(self, new): return self.__migrate_in(new, RunqueueMigrateIn(new)) def wake_up(self, new): return self.__migrate_in(new, RunqueueEventWakeup(new)) def wake_up_new(self, new): return self.__migrate_in(new, RunqueueEventFork(new)) def load(self): """ Provide the number of tasks on the runqueue. 
Don't count idle""" return len(self.tasks) - 1 def __repr__(self): ret = self.tasks.__repr__() ret += self.origin_tostring() return ret class TimeSlice: def __init__(self, start, prev): self.start = start self.prev = prev self.end = start # cpus that triggered the event self.event_cpus = [] if prev is not None: self.total_load = prev.total_load self.rqs = prev.rqs.copy() else: self.rqs = defaultdict(RunqueueSnapshot) self.total_load = 0 def __update_total_load(self, old_rq, new_rq): diff = new_rq.load() - old_rq.load() self.total_load += diff def sched_switch(self, ts_list, prev, prev_state, next, cpu): old_rq = self.prev.rqs[cpu] new_rq = old_rq.sched_switch(prev, prev_state, next) if old_rq is new_rq: return self.rqs[cpu] = new_rq self.__update_total_load(old_rq, new_rq) ts_list.append(self) self.event_cpus = [cpu] def migrate(self, ts_list, new, old_cpu, new_cpu): if old_cpu == new_cpu: return old_rq = self.prev.rqs[old_cpu] out_rq = old_rq.migrate_out(new) self.rqs[old_cpu] = out_rq self.__update_total_load(old_rq, out_rq) new_rq = self.prev.rqs[new_cpu] in_rq = new_rq.migrate_in(new) self.rqs[new_cpu] = in_rq self.__update_total_load(new_rq, in_rq) ts_list.append(self) if old_rq is not out_rq: self.event_cpus.append(old_cpu) self.event_cpus.append(new_cpu) def wake_up(self, ts_list, pid, cpu, fork): old_rq = self.prev.rqs[cpu] if fork: new_rq = old_rq.wake_up_new(pid) else: new_rq = old_rq.wake_up(pid) if new_rq is old_rq: return self.rqs[cpu] = new_rq self.__update_total_load(old_rq, new_rq) ts_list.append(self) self.event_cpus = [cpu] def next(self, t): self.end = t return TimeSlice(t, self) class TimeSliceList(UserList): def __init__(self, arg = []): self.data = arg def get_time_slice(self, ts): if len(self.data) == 0: slice = TimeSlice(ts, TimeSlice(-1, None)) else: slice = self.data[-1].next(ts) return slice def find_time_slice(self, ts): start = 0 end = len(self.data) found = -1 searching = True while searching: if start == end or start == end - 1: 
searching = False i = (end + start) / 2 if self.data[i].start <= ts and self.data[i].end >= ts: found = i end = i continue if self.data[i].end < ts: start = i elif self.data[i].start > ts: end = i return found def set_root_win(self, win): self.root_win = win def mouse_down(self, cpu, t): idx = self.find_time_slice(t) if idx == -1: return ts = self[idx] rq = ts.rqs[cpu] raw = "CPU: %d\n" % cpu raw += "Last event : %s\n" % rq.event.__repr__() raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000) raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6)) raw += "Load = %d\n" % rq.load() for t in rq.tasks: raw += "%s \n" % thread_name(t) self.root_win.update_summary(raw) def update_rectangle_cpu(self, slice, cpu): rq = slice.rqs[cpu] if slice.total_load != 0: load_rate = rq.load() / float(slice.total_load) else: load_rate = 0 red_power = int(0xff - (0xff * load_rate)) color = (0xff, red_power, red_power) top_color = None if cpu in slice.event_cpus: top_color = rq.event.color() self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end) def fill_zone(self, start, end): i = self.find_time_slice(start) if i == -1: return for i in xrange(i, len(self.data)): timeslice = self.data[i] if timeslice.start > end: return for cpu in timeslice.rqs: self.update_rectangle_cpu(timeslice, cpu) def interval(self): if len(self.data) == 0: return (0, 0) return (self.data[0].start, self.data[-1].end) def nr_rectangles(self): last_ts = self.data[-1] max_cpu = 0 for cpu in last_ts.rqs: if cpu > max_cpu: max_cpu = cpu return max_cpu class SchedEventProxy: def __init__(self): self.current_tsk = defaultdict(lambda : -1) self.timeslices = TimeSliceList() def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state, next_comm, next_pid, next_prio): """ Ensure the task we sched out this cpu is really the one we logged. 
Otherwise we may have missed traces """ on_cpu_task = self.current_tsk[headers.cpu] if on_cpu_task != -1 and on_cpu_task != prev_pid: print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \ (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid) threads[prev_pid] = prev_comm threads[next_pid] = next_comm self.current_tsk[headers.cpu] = next_pid ts = self.timeslices.get_time_slice(headers.ts()) ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu) def migrate(self, headers, pid, prio, orig_cpu, dest_cpu): ts = self.timeslices.get_time_slice(headers.ts()) ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu) def wake_up(self, headers, comm, pid, success, target_cpu, fork): if success == 0: return ts = self.timeslices.get_time_slice(headers.ts()) ts.wake_up(self.timeslices, pid, target_cpu, fork) def trace_begin(): global parser parser = SchedEventProxy() def trace_end(): app = wx.App(False) timeslices = parser.timeslices frame = RootFrame(timeslices, "Migration") app.MainLoop() def sched__sched_stat_runtime(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, runtime, vruntime): pass def sched__sched_stat_iowait(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, delay): pass def sched__sched_stat_sleep(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, delay): pass def sched__sched_stat_wait(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, delay): pass def sched__sched_process_fork(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, parent_comm, parent_pid, child_comm, child_pid): pass def sched__sched_process_wait(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, 
common_callchain, comm, pid, prio): pass def sched__sched_process_exit(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio): pass def sched__sched_process_free(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio): pass def sched__sched_migrate_task(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio, orig_cpu, dest_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain) parser.migrate(headers, pid, prio, orig_cpu, dest_cpu) def sched__sched_switch(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, prev_comm, prev_pid, prev_prio, prev_state, next_comm, next_pid, next_prio): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain) parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state, next_comm, next_pid, next_prio) def sched__sched_wakeup_new(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio, success, target_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain) parser.wake_up(headers, comm, pid, success, target_cpu, 1) def sched__sched_wakeup(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio, success, target_cpu): headers = EventHeaders(common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain) parser.wake_up(headers, comm, pid, success, target_cpu, 0) def sched__sched_wait_task(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid, prio): pass def sched__sched_kthread_stop_ret(event_name, context, common_cpu, common_secs, 
common_nsecs, common_pid, common_comm, common_callchain, ret): pass def sched__sched_kthread_stop(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, common_callchain, comm, pid): pass def trace_unhandled(event_name, context, event_fields_dict): pass
gpl-2.0
proxysh/Safejumper-for-Desktop
buildlinux/env32/lib/python2.7/site-packages/pyasn1/type/namedval.py
25
2380
# # This file is part of pyasn1 software. # # Copyright (c) 2005-2017, Ilya Etingof <etingof@gmail.com> # License: http://pyasn1.sf.net/license.html # # ASN.1 named integers # from pyasn1 import error __all__ = ['NamedValues'] class NamedValues(object): def __init__(self, *namedValues): self.nameToValIdx = {} self.valToNameIdx = {} self.namedValues = () automaticVal = 1 for namedValue in namedValues: if isinstance(namedValue, tuple): name, val = namedValue else: name = namedValue val = automaticVal if name in self.nameToValIdx: raise error.PyAsn1Error('Duplicate name %s' % (name,)) self.nameToValIdx[name] = val if val in self.valToNameIdx: raise error.PyAsn1Error('Duplicate value %s=%s' % (name, val)) self.valToNameIdx[val] = name self.namedValues = self.namedValues + ((name, val),) automaticVal += 1 def __repr__(self): return '%s(%s)' % (self.__class__.__name__, ', '.join([repr(x) for x in self.namedValues])) def __str__(self): return str(self.namedValues) def __eq__(self, other): return tuple(self) == tuple(other) def __ne__(self, other): return tuple(self) != tuple(other) def __lt__(self, other): return tuple(self) < tuple(other) def __le__(self, other): return tuple(self) <= tuple(other) def __gt__(self, other): return tuple(self) > tuple(other) def __ge__(self, other): return tuple(self) >= tuple(other) def __hash__(self): return hash(tuple(self)) def getName(self, value): if value in self.valToNameIdx: return self.valToNameIdx[value] def getValue(self, name): if name in self.nameToValIdx: return self.nameToValIdx[name] def __getitem__(self, i): return self.namedValues[i] def __len__(self): return len(self.namedValues) def __add__(self, namedValues): return self.__class__(*self.namedValues + namedValues) def __radd__(self, namedValues): return self.__class__(*namedValues + tuple(self)) def clone(self, *namedValues): return self.__class__(*tuple(self) + namedValues) # XXX clone/subtype?
gpl-2.0
DNX/django-e1337cms
docs/conf.py
1
7732
# -*- coding: utf-8 -*- # # e1337cms documentation build configuration file, created by # sphinx-quickstart on Fri Jun 1 09:47:07 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'e1337cms' copyright = u'2012, Denis Darii' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.0.3' # The full version, including alpha/beta/rc tags. release = '0.0.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'e1337cmsdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
#'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'e1337cms.tex', u'e1337cms Documentation', u'Denis Darii', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'e1337cms', u'e1337cms Documentation', [u'Denis Darii'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'e1337cms', u'e1337cms Documentation', u'Denis Darii', 'e1337cms', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote'
bsd-3-clause
tao12345666333/tornado-zh
tornado/curl_httpclient.py
12
22169
#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Non-blocking HTTP client implementation using pycurl.""" from __future__ import absolute_import, division, print_function, with_statement import collections import functools import logging import pycurl import threading import time from io import BytesIO from tornado import httputil from tornado import ioloop from tornado import stack_context from tornado.escape import utf8, native_str from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main curl_log = logging.getLogger('tornado.curl_httpclient') class CurlAsyncHTTPClient(AsyncHTTPClient): def initialize(self, io_loop, max_clients=10, defaults=None): super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults) self._multi = pycurl.CurlMulti() self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout) self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket) self._curls = [self._curl_create() for i in range(max_clients)] self._free_list = self._curls[:] self._requests = collections.deque() self._fds = {} self._timeout = None # libcurl has bugs that sometimes cause it to not report all # relevant file descriptors and timeouts to TIMERFUNCTION/ # SOCKETFUNCTION. Mitigate the effects of such bugs by # forcing a periodic scan of all active requests. 
self._force_timeout_callback = ioloop.PeriodicCallback( self._handle_force_timeout, 1000, io_loop=io_loop) self._force_timeout_callback.start() # Work around a bug in libcurl 7.29.0: Some fields in the curl # multi object are initialized lazily, and its destructor will # segfault if it is destroyed without having been used. Add # and remove a dummy handle to make sure everything is # initialized. dummy_curl_handle = pycurl.Curl() self._multi.add_handle(dummy_curl_handle) self._multi.remove_handle(dummy_curl_handle) def close(self): self._force_timeout_callback.stop() if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) for curl in self._curls: curl.close() self._multi.close() super(CurlAsyncHTTPClient, self).close() def fetch_impl(self, request, callback): self._requests.append((request, callback)) self._process_queue() self._set_timeout(0) def _handle_socket(self, event, fd, multi, data): """Called by libcurl when it wants to change the file descriptors it cares about. """ event_map = { pycurl.POLL_NONE: ioloop.IOLoop.NONE, pycurl.POLL_IN: ioloop.IOLoop.READ, pycurl.POLL_OUT: ioloop.IOLoop.WRITE, pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE } if event == pycurl.POLL_REMOVE: if fd in self._fds: self.io_loop.remove_handler(fd) del self._fds[fd] else: ioloop_event = event_map[event] # libcurl sometimes closes a socket and then opens a new # one using the same FD without giving us a POLL_NONE in # between. This is a problem with the epoll IOLoop, # because the kernel can tell when a socket is closed and # removes it from the epoll automatically, causing future # update_handler calls to fail. Since we can't tell when # this has happened, always use remove and re-add # instead of update. 
if fd in self._fds: self.io_loop.remove_handler(fd) self.io_loop.add_handler(fd, self._handle_events, ioloop_event) self._fds[fd] = ioloop_event def _set_timeout(self, msecs): """Called by libcurl to schedule a timeout.""" if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = self.io_loop.add_timeout( self.io_loop.time() + msecs / 1000.0, self._handle_timeout) def _handle_events(self, fd, events): """Called by IOLoop when there is activity on one of our file descriptors. """ action = 0 if events & ioloop.IOLoop.READ: action |= pycurl.CSELECT_IN if events & ioloop.IOLoop.WRITE: action |= pycurl.CSELECT_OUT while True: try: ret, num_handles = self._multi.socket_action(fd, action) except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests() def _handle_timeout(self): """Called by IOLoop when the requested timeout has passed.""" with stack_context.NullContext(): self._timeout = None while True: try: ret, num_handles = self._multi.socket_action( pycurl.SOCKET_TIMEOUT, 0) except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests() # In theory, we shouldn't have to do this because curl will # call _set_timeout whenever the timeout changes. However, # sometimes after _handle_timeout we will need to reschedule # immediately even though nothing has changed from curl's # perspective. This is because when socket_action is # called with SOCKET_TIMEOUT, libcurl decides internally which # timeouts need to be processed by using a monotonic clock # (where available) while tornado uses python's time.time() # to decide when timeouts have occurred. When those clocks # disagree on elapsed time (as they will whenever there is an # NTP adjustment), tornado might call _handle_timeout before # libcurl is ready. After each timeout, resync the scheduled # timeout with libcurl's current state. 
new_timeout = self._multi.timeout() if new_timeout >= 0: self._set_timeout(new_timeout) def _handle_force_timeout(self): """Called by IOLoop periodically to ask libcurl to process any events it may have forgotten about. """ with stack_context.NullContext(): while True: try: ret, num_handles = self._multi.socket_all() except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests() def _finish_pending_requests(self): """Process any requests that were completed by the last call to multi.socket_action. """ while True: num_q, ok_list, err_list = self._multi.info_read() for curl in ok_list: self._finish(curl) for curl, errnum, errmsg in err_list: self._finish(curl, errnum, errmsg) if num_q == 0: break self._process_queue() def _process_queue(self): with stack_context.NullContext(): while True: started = 0 while self._free_list and self._requests: started += 1 curl = self._free_list.pop() (request, callback) = self._requests.popleft() curl.info = { "headers": httputil.HTTPHeaders(), "buffer": BytesIO(), "request": request, "callback": callback, "curl_start_time": time.time(), } try: self._curl_setup_request( curl, request, curl.info["buffer"], curl.info["headers"]) except Exception as e: # If there was an error in setup, pass it on # to the callback. Note that allowing the # error to escape here will appear to work # most of the time since we are still in the # caller's original stack frame, but when # _process_queue() is called from # _finish_pending_requests the exceptions have # nowhere to go. 
callback(HTTPResponse( request=request, code=599, error=e)) else: self._multi.add_handle(curl) if not started: break def _finish(self, curl, curl_error=None, curl_message=None): info = curl.info curl.info = None self._multi.remove_handle(curl) self._free_list.append(curl) buffer = info["buffer"] if curl_error: error = CurlError(curl_error, curl_message) code = error.code effective_url = None buffer.close() buffer = None else: error = None code = curl.getinfo(pycurl.HTTP_CODE) effective_url = curl.getinfo(pycurl.EFFECTIVE_URL) buffer.seek(0) # the various curl timings are documented at # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html time_info = dict( queue=info["curl_start_time"] - info["request"].start_time, namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME), connect=curl.getinfo(pycurl.CONNECT_TIME), pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME), starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME), total=curl.getinfo(pycurl.TOTAL_TIME), redirect=curl.getinfo(pycurl.REDIRECT_TIME), ) try: info["callback"](HTTPResponse( request=info["request"], code=code, headers=info["headers"], buffer=buffer, effective_url=effective_url, error=error, reason=info['headers'].get("X-Http-Reason", None), request_time=time.time() - info["curl_start_time"], time_info=time_info)) except Exception: self.handle_callback_exception(info["callback"]) def handle_callback_exception(self, callback): self.io_loop.handle_callback_exception(callback) def _curl_create(self): curl = pycurl.Curl() if curl_log.isEnabledFor(logging.DEBUG): curl.setopt(pycurl.VERBOSE, 1) curl.setopt(pycurl.DEBUGFUNCTION, self._curl_debug) return curl def _curl_setup_request(self, curl, request, buffer, headers): curl.setopt(pycurl.URL, native_str(request.url)) # libcurl's magic "Expect: 100-continue" behavior causes delays # with servers that don't support it (which include, among others, # Google's OpenID endpoint). 
Additionally, this behavior has # a bug in conjunction with the curl_multi_socket_action API # (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976), # which increases the delays. It's more trouble than it's worth, # so just turn off the feature (yes, setting Expect: to an empty # value is the official way to disable this) if "Expect" not in request.headers: request.headers["Expect"] = "" # libcurl adds Pragma: no-cache by default; disable that too if "Pragma" not in request.headers: request.headers["Pragma"] = "" curl.setopt(pycurl.HTTPHEADER, ["%s: %s" % (native_str(k), native_str(v)) for k, v in request.headers.get_all()]) curl.setopt(pycurl.HEADERFUNCTION, functools.partial(self._curl_header_callback, headers, request.header_callback)) if request.streaming_callback: def write_function(chunk): self.io_loop.add_callback(request.streaming_callback, chunk) else: write_function = buffer.write if bytes is str: # py2 curl.setopt(pycurl.WRITEFUNCTION, write_function) else: # py3 # Upstream pycurl doesn't support py3, but ubuntu 12.10 includes # a fork/port. That version has a bug in which it passes unicode # strings instead of bytes to the WRITEFUNCTION. This means that # if you use a WRITEFUNCTION (which tornado always does), you cannot # download arbitrary binary data. This needs to be fixed in the # ported pycurl package, but in the meantime this lambda will # make it work for downloading (utf8) text. 
curl.setopt(pycurl.WRITEFUNCTION, lambda s: write_function(utf8(s))) curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects) curl.setopt(pycurl.MAXREDIRS, request.max_redirects) curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout)) curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout)) if request.user_agent: curl.setopt(pycurl.USERAGENT, native_str(request.user_agent)) else: curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)") if request.network_interface: curl.setopt(pycurl.INTERFACE, request.network_interface) if request.decompress_response: curl.setopt(pycurl.ENCODING, "gzip,deflate") else: curl.setopt(pycurl.ENCODING, "none") if request.proxy_host and request.proxy_port: curl.setopt(pycurl.PROXY, request.proxy_host) curl.setopt(pycurl.PROXYPORT, request.proxy_port) if request.proxy_username: credentials = '%s:%s' % (request.proxy_username, request.proxy_password) curl.setopt(pycurl.PROXYUSERPWD, credentials) else: curl.setopt(pycurl.PROXY, '') curl.unsetopt(pycurl.PROXYUSERPWD) if request.validate_cert: curl.setopt(pycurl.SSL_VERIFYPEER, 1) curl.setopt(pycurl.SSL_VERIFYHOST, 2) else: curl.setopt(pycurl.SSL_VERIFYPEER, 0) curl.setopt(pycurl.SSL_VERIFYHOST, 0) if request.ca_certs is not None: curl.setopt(pycurl.CAINFO, request.ca_certs) else: # There is no way to restore pycurl.CAINFO to its default value # (Using unsetopt makes it reject all certificates). # I don't see any way to read the default value from python so it # can be restored later. We'll have to just leave CAINFO untouched # if no ca_certs file was specified, and require that if any # request uses a custom ca_certs file, they all must. pass if request.allow_ipv6 is False: # Curl behaves reasonably when DNS resolution gives an ipv6 address # that we can't reach, so allow ipv6 unless the user asks to disable. 
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4) else: curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER) # Set the request method through curl's irritating interface which makes # up names for almost every single method curl_options = { "GET": pycurl.HTTPGET, "POST": pycurl.POST, "PUT": pycurl.UPLOAD, "HEAD": pycurl.NOBODY, } custom_methods = set(["DELETE", "OPTIONS", "PATCH"]) for o in curl_options.values(): curl.setopt(o, False) if request.method in curl_options: curl.unsetopt(pycurl.CUSTOMREQUEST) curl.setopt(curl_options[request.method], True) elif request.allow_nonstandard_methods or request.method in custom_methods: curl.setopt(pycurl.CUSTOMREQUEST, request.method) else: raise KeyError('unknown method ' + request.method) body_expected = request.method in ("POST", "PATCH", "PUT") body_present = request.body is not None if not request.allow_nonstandard_methods: # Some HTTP methods nearly always have bodies while others # almost never do. Fail in this case unless the user has # opted out of sanity checks with allow_nonstandard_methods. if ((body_expected and not body_present) or (body_present and not body_expected)): raise ValueError( 'Body must %sbe None for method %s (unless ' 'allow_nonstandard_methods is true)' % ('not ' if body_expected else '', request.method)) if body_expected or body_present: if request.method == "GET": # Even with `allow_nonstandard_methods` we disallow # GET with a body (because libcurl doesn't allow it # unless we use CUSTOMREQUEST). While the spec doesn't # forbid clients from sending a body, it arguably # disallows the server from doing anything with them. 
raise ValueError('Body must be None for GET request') request_buffer = BytesIO(utf8(request.body or '')) def ioctl(cmd): if cmd == curl.IOCMD_RESTARTREAD: request_buffer.seek(0) curl.setopt(pycurl.READFUNCTION, request_buffer.read) curl.setopt(pycurl.IOCTLFUNCTION, ioctl) if request.method == "POST": curl.setopt(pycurl.POSTFIELDSIZE, len(request.body or '')) else: curl.setopt(pycurl.UPLOAD, True) curl.setopt(pycurl.INFILESIZE, len(request.body or '')) if request.auth_username is not None: userpwd = "%s:%s" % (request.auth_username, request.auth_password or '') if request.auth_mode is None or request.auth_mode == "basic": curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC) elif request.auth_mode == "digest": curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST) else: raise ValueError("Unsupported auth_mode %s" % request.auth_mode) curl.setopt(pycurl.USERPWD, native_str(userpwd)) curl_log.debug("%s %s (username: %r)", request.method, request.url, request.auth_username) else: curl.unsetopt(pycurl.USERPWD) curl_log.debug("%s %s", request.method, request.url) if request.client_cert is not None: curl.setopt(pycurl.SSLCERT, request.client_cert) if request.client_key is not None: curl.setopt(pycurl.SSLKEY, request.client_key) if request.ssl_options is not None: raise ValueError("ssl_options not supported in curl_httpclient") if threading.activeCount() > 1: # libcurl/pycurl is not thread-safe by default. When multiple threads # are used, signals should be disabled. This has the side effect # of disabling DNS timeouts in some environments (when libcurl is # not linked against ares), so we don't do it when there is only one # thread. Applications that use many short-lived threads may need # to set NOSIGNAL manually in a prepare_curl_callback since # there may not be any other threads running at the time we call # threading.activeCount. 
curl.setopt(pycurl.NOSIGNAL, 1) if request.prepare_curl_callback is not None: request.prepare_curl_callback(curl) def _curl_header_callback(self, headers, header_callback, header_line): header_line = native_str(header_line) if header_callback is not None: self.io_loop.add_callback(header_callback, header_line) # header_line as returned by curl includes the end-of-line characters. # whitespace at the start should be preserved to allow multi-line headers header_line = header_line.rstrip() if header_line.startswith("HTTP/"): headers.clear() try: (__, __, reason) = httputil.parse_response_start_line(header_line) header_line = "X-Http-Reason: %s" % reason except httputil.HTTPInputError: return if not header_line: return headers.parse_line(header_line) def _curl_debug(self, debug_type, debug_msg): debug_types = ('I', '<', '>', '<', '>') if debug_type == 0: curl_log.debug('%s', debug_msg.strip()) elif debug_type in (1, 2): for line in debug_msg.splitlines(): curl_log.debug('%s %s', debug_types[debug_type], line) elif debug_type == 4: curl_log.debug('%s %r', debug_types[debug_type], debug_msg) class CurlError(HTTPError): def __init__(self, errno, message): HTTPError.__init__(self, 599, message) self.errno = errno if __name__ == "__main__": AsyncHTTPClient.configure(CurlAsyncHTTPClient) main()
mit
ytechie/coreclr
src/pal/automation/tests.py
154
1078
import sys
import getopt
import os
import subprocess
import shutil
import logging as log


def RunPalTests(fullbuilddirpath, workspace):
    """Run the PAL test suite via runpaltests.sh.

    :param fullbuilddirpath: path to the build output directory; test
        output is written to <fullbuilddirpath>/PalTestOutput.
    :param workspace: root of the checkout containing the ProjectK tree
        with runpaltests.sh.
    :return: the shell's exit code (0 on success, non-zero on failure).
    """
    # Build the command once instead of duplicating the long string for
    # both the log line and the subprocess call (they could drift apart).
    command = (workspace + "/ProjectK/NDP/clr/src/pal/tests/palsuite/runpaltests.sh "
               + fullbuilddirpath + " " + fullbuilddirpath + "/PalTestOutput")

    # print() with a single argument is valid in both Python 2 and 3;
    # the original used Python-2-only print statements.
    print("\n==================================================\n")
    print("Running PAL Tests.")
    print("\n==================================================\n")
    print("Running: " + command)
    print("\n==================================================\n")
    # Flush so the banner is emitted before the child process's output
    # when stdout is block-buffered (e.g. under CI log capture).
    sys.stdout.flush()

    returncode = subprocess.call(command, shell=True)

    if returncode != 0:
        # Fixed garbled wording of the original message
        # ("there were errors failed with exit code").
        print("ERROR: PAL tests failed with exit code " + str(returncode))

    return returncode


def RunTests(platform, fullbuilddirpath, workspace):
    """Run the platform-appropriate test suites.

    Only Linux currently has tests to run (the PAL suite); every other
    platform is a no-op that reports success.

    :param platform: lower-case platform name, e.g. "linux".
    :param fullbuilddirpath: build output directory (passed through).
    :param workspace: workspace root (passed through).
    :return: 0 on success, otherwise the failing suite's exit code.
    """
    returncode = 0

    if platform == "linux":
        # Execute PAL tests
        returncode = RunPalTests(fullbuilddirpath, workspace)

    return returncode
mit
quoideneuf/selenium
py/selenium/webdriver/chrome/options.py
80
5289
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import base64
import os

from selenium.webdriver.common.desired_capabilities import DesiredCapabilities


class Options(object):
    """Collects Chrome/Chromium-specific launch options (binary location,
    command-line arguments, extensions, experimental options, debugger
    address) and renders them into a capabilities dictionary."""

    def __init__(self):
        self._binary_location = ''
        self._arguments = []
        self._extension_files = []
        self._extensions = []
        self._experimental_options = {}
        self._debugger_address = None

    @property
    def binary_location(self):
        """
        Returns the location of the binary otherwise an empty string
        """
        return self._binary_location

    @binary_location.setter
    def binary_location(self, value):
        """
        Allows you to set where the chromium binary lives

        :Args:
         - value: path to the Chromium binary
        """
        self._binary_location = value

    @property
    def debugger_address(self):
        """
        Returns the address of the remote devtools instance
        """
        return self._debugger_address

    @debugger_address.setter
    def debugger_address(self, value):
        """
        Allows you to set the address of the remote devtools instance
        that the ChromeDriver instance will try to connect to during an
        active wait.

        :Args:
         - value: address of remote devtools instance if any (hostname[:port])
        """
        self._debugger_address = value

    @property
    def arguments(self):
        """
        Returns a list of arguments needed for the browser
        """
        return self._arguments

    def add_argument(self, argument):
        """
        Adds an argument to the list

        :Args:
         - Sets the arguments
        """
        if argument:
            self._arguments.append(argument)
        else:
            raise ValueError("argument can not be null")

    @property
    def extensions(self):
        """
        Returns a list of encoded extensions that will be loaded into chrome
        """
        encoded_extensions = []
        for ext in self._extension_files:
            # Use a context manager so the file handle is closed even if
            # reading or encoding raises (the original opened/closed the
            # file manually and leaked the handle on error).
            with open(ext, 'rb') as file_:
                # Should not use base64.encodestring() which inserts newlines
                # every 76 characters (per RFC 1521). Chromedriver has to
                # remove those unnecessary newlines before decoding, causing
                # performance hit.
                encoded_extensions.append(
                    base64.b64encode(file_.read()).decode('UTF-8'))
        return encoded_extensions + self._extensions

    def add_extension(self, extension):
        """
        Adds the path to the extension to a list that will be used to
        extract it to the ChromeDriver

        :Args:
         - extension: path to the \\*.crx file
        """
        if extension:
            if os.path.exists(extension):
                self._extension_files.append(extension)
            else:
                raise IOError("Path to the extension doesn't exist")
        else:
            raise ValueError("argument can not be null")

    def add_encoded_extension(self, extension):
        """
        Adds Base64 encoded string with extension data to a list that will
        be used to extract it to the ChromeDriver

        :Args:
         - extension: Base64 encoded string with extension data
        """
        if extension:
            self._extensions.append(extension)
        else:
            raise ValueError("argument can not be null")

    @property
    def experimental_options(self):
        """
        Returns a dictionary of experimental options for chrome.
        """
        return self._experimental_options

    def add_experimental_option(self, name, value):
        """
        Adds an experimental option which is passed to chrome.

        Args:
          name: The experimental option name.
          value: The option value.
        """
        self._experimental_options[name] = value

    def to_capabilities(self):
        """
        Creates a capabilities with all the options that have been set and
        returns a dictionary with everything
        """
        # Start from the stock Chrome capabilities and fold our options
        # into the "chromeOptions" sub-dictionary that ChromeDriver reads.
        chrome = DesiredCapabilities.CHROME.copy()

        chrome_options = self.experimental_options.copy()
        chrome_options["extensions"] = self.extensions
        if self.binary_location:
            chrome_options["binary"] = self.binary_location
        chrome_options["args"] = self.arguments
        if self.debugger_address:
            chrome_options["debuggerAddress"] = self.debugger_address

        chrome["chromeOptions"] = chrome_options

        return chrome
apache-2.0
emi420/sotsiaal
app/urls.py
1
2985
# URL configuration for the app: maps request-path regexes to view names
# in app.views (legacy Django string-based view references).
from django.conf.urls import patterns, include, url
import settings

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

# NOTE(review): django.conf.urls.defaults and direct_to_template are the
# old (pre-1.4) import locations; they duplicate the imports above and are
# presumably kept for compatibility with this project's Django version.
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template

# Admin site plus static/media file serving (django.views.static.serve is
# the development-only static file server).
urlpatterns = patterns('',
    (r'^admin/', include(admin.site.urls)),
    (r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
    (r'^static/(.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_URL})
)

# Application views. Order matters: the catch-all patterns at the bottom
# ('^(.*)/(.*)/', '^(.*)/', '^(.*)') only match URLs not claimed above.
urlpatterns += patterns('app.views',
    (r'^$','index'),
    (r'^legal/$', 'legal'),
    (r'^contact/$', 'contact'),
    (r'^search/$', 'search'),
    (r'^tag/$', 'tag'),
    (r'^cache_flush/$', 'cache_flush'),
    # Both with and without trailing slash resolve to the same view.
    (r'^new_story/$', 'new_story'),
    (r'^new_story$', 'new_story'),
    (r'^login/$', 'login'),
    (r'^signup/$', 'signup'),
    (r'^edit_profile/$', 'edit_profile'),
    (r'^view_profile/(.*)/$', 'view_profile'),
    (r'^pass_recovery/$', 'pass_recovery'),
    (r'^pass_recovery$', 'pass_recovery'),
    (r'^add_user/$', 'add_user'),
    (r'^do_login/$', 'do_login'),
    (r'^logout/$', 'logout'),
    (r'^save_profile/$', 'save_profile'),
    (r'^save_pass/$', 'save_pass'),
    (r'^add_story/$', 'add_story'),
    (r'^delete_story/(.*)/$', 'delete_story'),
    (r'^add_message/$', 'add_message'),
    (r'^send_contact_msg/$', 'send_contact_msg'),
    (r'^send_recovery_pass/$', 'send_recovery_pass'),
    (r'^send_activation_mail/(.*)/$', 'send_activation_mail'),
    (r'^send_deletion_mail/(.*)/$', 'send_deletion_mail'),
    (r'^activate_account/(.*)/$', 'activate_account'),
    (r'^delete_account/(.*)/$', 'delete_account'),
    (r'^account_message/$', 'account_message'),
    (r'^story_img/(.*)/$', 'story_img'),
    (r'^story_original_img/(.*)/$', 'story_original_img'),
    (r'^msg_img/(.*)/$', 'msg_img'),
    # NOTE(review): msg_original_img maps to 'msg_img' (not a dedicated
    # original-size view) — looks intentional but verify against app.views.
    (r'^msg_original_img/(.*)/$', 'msg_img'),
    (r'^msg_original_img_old/(.*)/$', 'msg_img_old'),
    (r'^user_img/(.*)/(.*)/$', 'user_img'),
    (r'^user_img/(.*)/$', 'user_img'),
    (r'^update_karma/$', 'update_karma'),
    (r'^ajax/add_friend/$', 'add_friend'),
    (r'^ajax/remove_friend/$', 'remove_friend'),
    #(r'^ajax/story_wall/$', 'story_wall'),
    (r'^ajax/story_followers/$', 'story_followers'),
    (r'^ajax/more_story_messages/$', 'more_story_messages'),
    (r'^ajax/delete_avatar/$', 'delete_avatar'),
    (r'^ajax/delete_bg/$', 'delete_bg'),
    (r'^ajax/delete_banner/$', 'delete_banner'),
    (r'^ajax/delete_message/$', 'delete_message'),
    (r'^ajax/delete_reply/$', 'delete_reply'),
    (r'^ajax/vote_msg/$', 'vote_msg'),
    (r'^ajax/vote_reply/$', 'vote_reply'),
    (r'^ajax/vote_story/$', 'vote_story'),
    (r'^print_story/(.*)/$', 'print_story'),
    (r'^popular_users/$', 'popular_users'),
    (r'^invalid_story/$', 'invalid_story'),
    ('^(.*)/(.*)/', 'story'), # category/story
    ('^(.*)/', 'index'), # category
    ('^(.*)', 'view_profile'), # user profile
)
gpl-3.0
pantheon-systems/libcloud
libcloud/storage/drivers/azure_blobs.py
30
37231
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import with_statement import base64 import os import binascii from xml.etree.ElementTree import Element, SubElement from libcloud.utils.py3 import PY3 from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlquote from libcloud.utils.py3 import tostring from libcloud.utils.py3 import b from libcloud.utils.xml import fixxpath from libcloud.utils.files import read_in_chunks from libcloud.common.types import LibcloudError from libcloud.common.azure import AzureConnection from libcloud.storage.base import Object, Container, StorageDriver from libcloud.storage.types import ContainerIsNotEmptyError from libcloud.storage.types import ContainerAlreadyExistsError from libcloud.storage.types import InvalidContainerNameError from libcloud.storage.types import ContainerDoesNotExistError from libcloud.storage.types import ObjectDoesNotExistError from libcloud.storage.types import ObjectHashMismatchError if PY3: from io import FileIO as file # Desired number of items in each response inside a paginated request RESPONSES_PER_REQUEST = 100 # As per the Azure documentation, if the upload file size is less than # 64MB, we can upload it in a single request. 
However, in real life azure # servers seem to disconnect randomly after around 5 MB or 200s of upload. # So, it is better that for file sizes greater than 4MB, we upload it in # chunks. # Also, with large sizes, if we use a lease, the lease will timeout after # 60 seconds, but the upload might still be in progress. This can be # handled in code, but if we use chunked uploads, the lease renewal will # happen automatically. AZURE_BLOCK_MAX_SIZE = 4 * 1024 * 1024 # Azure block blocks must be maximum 4MB # Azure page blobs must be aligned in 512 byte boundaries (4MB fits that) AZURE_CHUNK_SIZE = 4 * 1024 * 1024 # Azure page blob must be aligned in 512 byte boundaries AZURE_PAGE_CHUNK_SIZE = 512 # The time period (in seconds) for which a lease must be obtained. # If set as -1, we get an infinite lease, but that is a bad idea. If # after getting an infinite lease, there was an issue in releasing the # lease, the object will remain 'locked' forever, unless the lease is # released using the lease_id (which is not exposed to the user) AZURE_LEASE_PERIOD = 60 AZURE_STORAGE_HOST_SUFFIX = 'blob.core.windows.net' class AzureBlobLease(object): """ A class to help in leasing an azure blob and renewing the lease """ def __init__(self, driver, object_path, use_lease): """ :param driver: The Azure storage driver that is being used :type driver: :class:`AzureStorageDriver` :param object_path: The path of the object we need to lease :type object_path: ``str`` :param use_lease: Indicates if we must take a lease or not :type use_lease: ``bool`` """ self.object_path = object_path self.driver = driver self.use_lease = use_lease self.lease_id = None self.params = {'comp': 'lease'} def renew(self): """ Renew the lease if it is older than a predefined time period """ if self.lease_id is None: return headers = {'x-ms-lease-action': 'renew', 'x-ms-lease-id': self.lease_id, 'x-ms-lease-duration': '60'} response = self.driver.connection.request(self.object_path, headers=headers, 
params=self.params, method='PUT') if response.status != httplib.OK: raise LibcloudError('Unable to obtain lease', driver=self) def update_headers(self, headers): """ Update the lease id in the headers """ if self.lease_id: headers['x-ms-lease-id'] = self.lease_id def __enter__(self): if not self.use_lease: return self headers = {'x-ms-lease-action': 'acquire', 'x-ms-lease-duration': '60'} response = self.driver.connection.request(self.object_path, headers=headers, params=self.params, method='PUT') if response.status == httplib.NOT_FOUND: return self elif response.status != httplib.CREATED: raise LibcloudError('Unable to obtain lease', driver=self) self.lease_id = response.headers['x-ms-lease-id'] return self def __exit__(self, type, value, traceback): if self.lease_id is None: return headers = {'x-ms-lease-action': 'release', 'x-ms-lease-id': self.lease_id} response = self.driver.connection.request(self.object_path, headers=headers, params=self.params, method='PUT') if response.status != httplib.OK: raise LibcloudError('Unable to release lease', driver=self) class AzureBlobsConnection(AzureConnection): """ Represents a single connection to Azure Blobs """ class AzureBlobsStorageDriver(StorageDriver): name = 'Microsoft Azure (blobs)' website = 'http://windows.azure.com/' connectionCls = AzureBlobsConnection hash_type = 'md5' supports_chunked_encoding = False ex_blob_type = 'BlockBlob' def __init__(self, key, secret=None, secure=True, host=None, port=None, **kwargs): self._host_argument_set = bool(host) # B64decode() this key and keep it, so that we don't have to do # so for every request. 
Minor performance improvement secret = base64.b64decode(b(secret)) super(AzureBlobsStorageDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, port=port, **kwargs) def _ex_connection_class_kwargs(self): result = {} # host argument has precedence if not self._host_argument_set: result['host'] = '%s.%s' % (self.key, AZURE_STORAGE_HOST_SUFFIX) return result def _xml_to_container(self, node): """ Converts a container XML node to a container instance :param node: XML info of the container :type node: :class:`xml.etree.ElementTree.Element` :return: A container instance :rtype: :class:`Container` """ name = node.findtext(fixxpath(xpath='Name')) props = node.find(fixxpath(xpath='Properties')) metadata = node.find(fixxpath(xpath='Metadata')) extra = { 'url': node.findtext(fixxpath(xpath='Url')), 'last_modified': node.findtext(fixxpath(xpath='Last-Modified')), 'etag': props.findtext(fixxpath(xpath='Etag')), 'lease': { 'status': props.findtext(fixxpath(xpath='LeaseStatus')), 'state': props.findtext(fixxpath(xpath='LeaseState')), 'duration': props.findtext(fixxpath(xpath='LeaseDuration')), }, 'meta_data': {} } for meta in metadata.getchildren(): extra['meta_data'][meta.tag] = meta.text return Container(name=name, extra=extra, driver=self) def _response_to_container(self, container_name, response): """ Converts a HTTP response to a container instance :param container_name: Name of the container :type container_name: ``str`` :param response: HTTP Response :type node: L{} :return: A container instance :rtype: :class:`Container` """ headers = response.headers extra = { 'url': 'http://%s%s' % (response.connection.host, response.connection.action), 'etag': headers['etag'], 'last_modified': headers['last-modified'], 'lease': { 'status': headers.get('x-ms-lease-status', None), 'state': headers.get('x-ms-lease-state', None), 'duration': headers.get('x-ms-lease-duration', None), }, 'meta_data': {} } for key, value in response.headers.items(): if 
key.startswith('x-ms-meta-'): key = key.split('x-ms-meta-')[1] extra['meta_data'][key] = value return Container(name=container_name, extra=extra, driver=self) def _xml_to_object(self, container, blob): """ Converts a BLOB XML node to an object instance :param container: Instance of the container holding the blob :type: :class:`Container` :param blob: XML info of the blob :type blob: L{} :return: An object instance :rtype: :class:`Object` """ name = blob.findtext(fixxpath(xpath='Name')) props = blob.find(fixxpath(xpath='Properties')) metadata = blob.find(fixxpath(xpath='Metadata')) etag = props.findtext(fixxpath(xpath='Etag')) size = int(props.findtext(fixxpath(xpath='Content-Length'))) extra = { 'content_type': props.findtext(fixxpath(xpath='Content-Type')), 'etag': etag, 'md5_hash': props.findtext(fixxpath(xpath='Content-MD5')), 'last_modified': props.findtext(fixxpath(xpath='Last-Modified')), 'url': blob.findtext(fixxpath(xpath='Url')), 'hash': props.findtext(fixxpath(xpath='Etag')), 'lease': { 'status': props.findtext(fixxpath(xpath='LeaseStatus')), 'state': props.findtext(fixxpath(xpath='LeaseState')), 'duration': props.findtext(fixxpath(xpath='LeaseDuration')), }, 'content_encoding': props.findtext(fixxpath( xpath='Content-Encoding')), 'content_language': props.findtext(fixxpath( xpath='Content-Language')), 'blob_type': props.findtext(fixxpath(xpath='BlobType')) } if extra['md5_hash']: value = binascii.hexlify(base64.b64decode(b(extra['md5_hash']))) value = value.decode('ascii') extra['md5_hash'] = value meta_data = {} for meta in metadata.getchildren(): meta_data[meta.tag] = meta.text return Object(name=name, size=size, hash=etag, meta_data=meta_data, extra=extra, container=container, driver=self) def _response_to_object(self, object_name, container, response): """ Converts a HTTP response to an object (from headers) :param object_name: Name of the object :type object_name: ``str`` :param container: Instance of the container holding the blob :type: 
:class:`Container` :param response: HTTP Response :type node: L{} :return: An object instance :rtype: :class:`Object` """ headers = response.headers size = int(headers['content-length']) etag = headers['etag'] extra = { 'url': 'http://%s%s' % (response.connection.host, response.connection.action), 'etag': etag, 'md5_hash': headers.get('content-md5', None), 'content_type': headers.get('content-type', None), 'content_language': headers.get('content-language', None), 'content_encoding': headers.get('content-encoding', None), 'last_modified': headers['last-modified'], 'lease': { 'status': headers.get('x-ms-lease-status', None), 'state': headers.get('x-ms-lease-state', None), 'duration': headers.get('x-ms-lease-duration', None), }, 'blob_type': headers['x-ms-blob-type'] } if extra['md5_hash']: value = binascii.hexlify(base64.b64decode(b(extra['md5_hash']))) value = value.decode('ascii') extra['md5_hash'] = value meta_data = {} for key, value in response.headers.items(): if key.startswith('x-ms-meta-'): key = key.split('x-ms-meta-')[1] meta_data[key] = value return Object(name=object_name, size=size, hash=etag, extra=extra, meta_data=meta_data, container=container, driver=self) def iterate_containers(self): """ @inherits: :class:`StorageDriver.iterate_containers` """ params = {'comp': 'list', 'maxresults': RESPONSES_PER_REQUEST, 'include': 'metadata'} while True: response = self.connection.request('/', params) if response.status != httplib.OK: raise LibcloudError('Unexpected status code: %s' % (response.status), driver=self) body = response.parse_body() containers = body.find(fixxpath(xpath='Containers')) containers = containers.findall(fixxpath(xpath='Container')) for container in containers: yield self._xml_to_container(container) params['marker'] = body.findtext('NextMarker') if not params['marker']: break def iterate_container_objects(self, container): """ @inherits: :class:`StorageDriver.iterate_container_objects` """ params = {'restype': 'container', 'comp': 
'list', 'maxresults': RESPONSES_PER_REQUEST, 'include': 'metadata'} container_path = self._get_container_path(container) while True: response = self.connection.request(container_path, params=params) if response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value=None, driver=self, container_name=container.name) elif response.status != httplib.OK: raise LibcloudError('Unexpected status code: %s' % (response.status), driver=self) body = response.parse_body() blobs = body.find(fixxpath(xpath='Blobs')) blobs = blobs.findall(fixxpath(xpath='Blob')) for blob in blobs: yield self._xml_to_object(container, blob) params['marker'] = body.findtext('NextMarker') if not params['marker']: break def get_container(self, container_name): """ @inherits: :class:`StorageDriver.get_container` """ params = {'restype': 'container'} container_path = '/%s' % (container_name) response = self.connection.request(container_path, params=params, method='HEAD') if response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError('Container %s does not exist' % (container_name), driver=self, container_name=container_name) elif response.status != httplib.OK: raise LibcloudError('Unexpected status code: %s' % (response.status), driver=self) return self._response_to_container(container_name, response) def get_object(self, container_name, object_name): """ @inherits: :class:`StorageDriver.get_object` """ container = self.get_container(container_name=container_name) object_path = self._get_object_path(container, object_name) response = self.connection.request(object_path, method='HEAD') if response.status == httplib.OK: obj = self._response_to_object(object_name, container, response) return obj raise ObjectDoesNotExistError(value=None, driver=self, object_name=object_name) def _get_container_path(self, container): """ Return a container path :param container: Container instance :type container: :class:`Container` :return: A path for this container. 
:rtype: ``str`` """ return '/%s' % (container.name) def _get_object_path(self, container, object_name): """ Return an object's CDN path. :param container: Container instance :type container: :class:`Container` :param object_name: Object name :type object_name: :class:`str` :return: A path for this object. :rtype: ``str`` """ container_url = self._get_container_path(container) object_name_cleaned = urlquote(object_name) object_path = '%s/%s' % (container_url, object_name_cleaned) return object_path def create_container(self, container_name): """ @inherits: :class:`StorageDriver.create_container` """ params = {'restype': 'container'} container_path = '/%s' % (container_name) response = self.connection.request(container_path, params=params, method='PUT') if response.status == httplib.CREATED: return self._response_to_container(container_name, response) elif response.status == httplib.CONFLICT: raise ContainerAlreadyExistsError( value='Container with this name already exists. The name must ' 'be unique among all the containers in the system', container_name=container_name, driver=self) elif response.status == httplib.BAD_REQUEST: raise InvalidContainerNameError(value='Container name contains ' + 'invalid characters.', container_name=container_name, driver=self) raise LibcloudError('Unexpected status code: %s' % (response.status), driver=self) def delete_container(self, container): """ @inherits: :class:`StorageDriver.delete_container` """ # Azure does not check if the container is empty. 
So, we will do # a check to ensure that the behaviour is similar to other drivers for obj in container.iterate_objects(): raise ContainerIsNotEmptyError( value='Container must be empty before it can be deleted.', container_name=container.name, driver=self) params = {'restype': 'container'} container_path = self._get_container_path(container) # Note: All the objects in the container must be deleted first response = self.connection.request(container_path, params=params, method='DELETE') if response.status == httplib.ACCEPTED: return True elif response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value=None, driver=self, container_name=container.name) return False def download_object(self, obj, destination_path, overwrite_existing=False, delete_on_failure=True): """ @inherits: :class:`StorageDriver.download_object` """ obj_path = self._get_object_path(obj.container, obj.name) response = self.connection.request(obj_path, raw=True, data=None) return self._get_object(obj=obj, callback=self._save_object, response=response, callback_kwargs={ 'obj': obj, 'response': response.response, 'destination_path': destination_path, 'overwrite_existing': overwrite_existing, 'delete_on_failure': delete_on_failure}, success_status_code=httplib.OK) def download_object_as_stream(self, obj, chunk_size=None): """ @inherits: :class:`StorageDriver.download_object_as_stream` """ obj_path = self._get_object_path(obj.container, obj.name) response = self.connection.request(obj_path, raw=True, data=None) return self._get_object(obj=obj, callback=read_in_chunks, response=response, callback_kwargs={'iterator': response.response, 'chunk_size': chunk_size}, success_status_code=httplib.OK) def _upload_in_chunks(self, response, data, iterator, object_path, blob_type, lease, calculate_hash=True): """ Uploads data from an interator in fixed sized chunks to S3 :param response: Response object from the initial POST request :type response: :class:`RawResponse` :param data: Any data from the 
initial POST request :type data: ``str`` :param iterator: The generator for fetching the upload data :type iterator: ``generator`` :param object_path: The path of the object to which we are uploading :type object_name: ``str`` :param blob_type: The blob type being uploaded :type blob_type: ``str`` :param lease: The lease object to be used for renewal :type lease: :class:`AzureBlobLease` :keyword calculate_hash: Indicates if we must calculate the data hash :type calculate_hash: ``bool`` :return: A tuple of (status, checksum, bytes transferred) :rtype: ``tuple`` """ # Get the upload id from the response xml if response.status != httplib.CREATED: raise LibcloudError('Error initializing upload. Code: %d' % (response.status), driver=self) data_hash = None if calculate_hash: data_hash = self._get_hash_function() bytes_transferred = 0 count = 1 chunks = [] headers = {} lease.update_headers(headers) if blob_type == 'BlockBlob': params = {'comp': 'block'} else: params = {'comp': 'page'} # Read the input data in chunk sizes suitable for AWS for data in read_in_chunks(iterator, AZURE_CHUNK_SIZE): data = b(data) content_length = len(data) offset = bytes_transferred bytes_transferred += content_length if calculate_hash: data_hash.update(data) chunk_hash = self._get_hash_function() chunk_hash.update(data) chunk_hash = base64.b64encode(b(chunk_hash.digest())) headers['Content-MD5'] = chunk_hash.decode('utf-8') headers['Content-Length'] = content_length if blob_type == 'BlockBlob': # Block id can be any unique string that is base64 encoded # A 10 digit number can hold the max value of 50000 blocks # that are allowed for azure block_id = base64.b64encode(b('%10d' % (count))) block_id = block_id.decode('utf-8') params['blockid'] = block_id # Keep this data for a later commit chunks.append(block_id) else: headers['x-ms-page-write'] = 'update' headers['x-ms-range'] = 'bytes=%d-%d' % \ (offset, (bytes_transferred - 1)) # Renew lease before updating lease.renew() resp = 
self.connection.request(object_path, method='PUT', data=data, headers=headers, params=params) if resp.status != httplib.CREATED: resp.parse_error() raise LibcloudError('Error uploading chunk %d. Code: %d' % (count, resp.status), driver=self) count += 1 if calculate_hash: data_hash = data_hash.hexdigest() if blob_type == 'BlockBlob': self._commit_blocks(object_path, chunks, lease) # The Azure service does not return a hash immediately for # chunked uploads. It takes some time for the data to get synced response.headers['content-md5'] = None return (True, data_hash, bytes_transferred) def _commit_blocks(self, object_path, chunks, lease): """ Makes a final commit of the data. :param object_path: Server side object path. :type object_path: ``str`` :param upload_id: A list of (chunk_number, chunk_hash) tuples. :type upload_id: ``list`` """ root = Element('BlockList') for block_id in chunks: part = SubElement(root, 'Uncommitted') part.text = str(block_id) data = tostring(root) params = {'comp': 'blocklist'} headers = {} lease.update_headers(headers) lease.renew() response = self.connection.request(object_path, data=data, params=params, headers=headers, method='PUT') if response.status != httplib.CREATED: raise LibcloudError('Error in blocklist commit', driver=self) def _check_values(self, blob_type, object_size): """ Checks if extension arguments are valid :param blob_type: The blob type that is being uploaded :type blob_type: ``str`` :param object_size: The (max) size of the object being uploaded :type object_size: ``int`` """ if blob_type not in ['BlockBlob', 'PageBlob']: raise LibcloudError('Invalid blob type', driver=self) if blob_type == 'PageBlob': if not object_size: raise LibcloudError('Max blob size is mandatory for page blob', driver=self) if object_size % AZURE_PAGE_CHUNK_SIZE: raise LibcloudError('Max blob size is not aligned to ' 'page boundary', driver=self) def upload_object(self, file_path, container, object_name, extra=None, verify_hash=True, 
ex_blob_type=None, ex_use_lease=False): """ Upload an object currently located on a disk. @inherits: :class:`StorageDriver.upload_object` :param ex_blob_type: Storage class :type ex_blob_type: ``str`` :param ex_use_lease: Indicates if we must take a lease before upload :type ex_use_lease: ``bool`` """ if ex_blob_type is None: ex_blob_type = self.ex_blob_type # Get the size of the file file_size = os.stat(file_path).st_size # The presumed size of the object object_size = file_size self._check_values(ex_blob_type, file_size) with file(file_path, 'rb') as file_handle: iterator = iter(file_handle) # If size is greater than 64MB or type is Page, upload in chunks if ex_blob_type == 'PageBlob' or file_size > AZURE_BLOCK_MAX_SIZE: # For chunked upload of block blobs, the initial size must # be 0. if ex_blob_type == 'BlockBlob': object_size = None object_path = self._get_object_path(container, object_name) upload_func = self._upload_in_chunks upload_func_kwargs = {'iterator': iterator, 'object_path': object_path, 'blob_type': ex_blob_type, 'lease': None} else: upload_func = self._stream_data upload_func_kwargs = {'iterator': iterator, 'chunked': False, 'calculate_hash': verify_hash} return self._put_object(container=container, object_name=object_name, object_size=object_size, upload_func=upload_func, upload_func_kwargs=upload_func_kwargs, file_path=file_path, extra=extra, verify_hash=verify_hash, blob_type=ex_blob_type, use_lease=ex_use_lease) def upload_object_via_stream(self, iterator, container, object_name, verify_hash=False, extra=None, ex_use_lease=False, ex_blob_type=None, ex_page_blob_size=None): """ @inherits: :class:`StorageDriver.upload_object_via_stream` :param ex_blob_type: Storage class :type ex_blob_type: ``str`` :param ex_page_blob_size: The maximum size to which the page blob can grow to :type ex_page_blob_size: ``int`` :param ex_use_lease: Indicates if we must take a lease before upload :type ex_use_lease: ``bool`` """ if ex_blob_type is None: ex_blob_type 
= self.ex_blob_type self._check_values(ex_blob_type, ex_page_blob_size) object_path = self._get_object_path(container, object_name) upload_func = self._upload_in_chunks upload_func_kwargs = {'iterator': iterator, 'object_path': object_path, 'blob_type': ex_blob_type, 'lease': None} return self._put_object(container=container, object_name=object_name, object_size=ex_page_blob_size, upload_func=upload_func, upload_func_kwargs=upload_func_kwargs, extra=extra, verify_hash=verify_hash, blob_type=ex_blob_type, use_lease=ex_use_lease) def delete_object(self, obj): """ @inherits: :class:`StorageDriver.delete_object` """ object_path = self._get_object_path(obj.container, obj.name) response = self.connection.request(object_path, method='DELETE') if response.status == httplib.ACCEPTED: return True elif response.status == httplib.NOT_FOUND: raise ObjectDoesNotExistError(value=None, driver=self, object_name=obj.name) return False def _update_metadata(self, headers, meta_data): """ Update the given metadata in the headers :param headers: The headers dictionary to be updated :type headers: ``dict`` :param meta_data: Metadata key value pairs :type meta_data: ``dict`` """ for key, value in list(meta_data.items()): key = 'x-ms-meta-%s' % (key) headers[key] = value def _prepare_upload_headers(self, object_name, object_size, extra, meta_data, blob_type): """ Prepare headers for uploading an object :param object_name: The full name of the object being updated :type object_name: ``str`` :param object_size: The size of the object. 
In case of PageBlobs, this indicates the maximum size the blob can grow to :type object_size: ``int`` :param extra: Extra control data for the upload :type extra: ``dict`` :param meta_data: Metadata key value pairs :type meta_data: ``dict`` :param blob_type: Page or Block blob type :type blob_type: ``str`` """ headers = {} if blob_type is None: blob_type = self.ex_blob_type headers['x-ms-blob-type'] = blob_type self._update_metadata(headers, meta_data) if object_size is not None: headers['Content-Length'] = object_size if blob_type == 'PageBlob': headers['Content-Length'] = 0 headers['x-ms-blob-content-length'] = object_size return headers def _put_object(self, container, object_name, object_size, upload_func, upload_func_kwargs, file_path=None, extra=None, verify_hash=True, blob_type=None, use_lease=False): """ Control function that does the real job of uploading data to a blob """ extra = extra or {} meta_data = extra.get('meta_data', {}) content_type = extra.get('content_type', None) headers = self._prepare_upload_headers(object_name, object_size, extra, meta_data, blob_type) object_path = self._get_object_path(container, object_name) # Get a lease if required and do the operations with AzureBlobLease(self, object_path, use_lease) as lease: if 'lease' in upload_func_kwargs: upload_func_kwargs['lease'] = lease lease.update_headers(headers) iterator = iter('') result_dict = self._upload_object(object_name, content_type, upload_func, upload_func_kwargs, object_path, headers=headers, file_path=file_path, iterator=iterator) response = result_dict['response'] bytes_transferred = result_dict['bytes_transferred'] data_hash = result_dict['data_hash'] headers = response.headers response = response.response if response.status != httplib.CREATED: raise LibcloudError( 'Unexpected status code, status_code=%s' % (response.status), driver=self) server_hash = headers['content-md5'] if server_hash: server_hash = binascii.hexlify(base64.b64decode(b(server_hash))) server_hash = 
server_hash.decode('utf-8') else: # TODO: HACK - We could poll the object for a while and get # the hash pass if (verify_hash and server_hash and data_hash != server_hash): raise ObjectHashMismatchError( value='MD5 hash checksum does not match', object_name=object_name, driver=self) return Object(name=object_name, size=bytes_transferred, hash=headers['etag'], extra=None, meta_data=meta_data, container=container, driver=self) def ex_set_object_metadata(self, obj, meta_data): """ Set metadata for an object :param obj: The blob object :type obj: :class:`Object` :param meta_data: Metadata key value pairs :type meta_data: ``dict`` """ object_path = self._get_object_path(obj.container, obj.name) params = {'comp': 'metadata'} headers = {} self._update_metadata(headers, meta_data) response = self.connection.request(object_path, method='PUT', params=params, headers=headers) if response.status != httplib.OK: response.parse_error('Setting metadata')
apache-2.0
lmazuel/azure-sdk-for-python
azure-mgmt-authorization/azure/mgmt/authorization/models/role_assignment_create_parameters.py
1
1625
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class RoleAssignmentCreateParameters(Model): """Role assignment create parameters. :param role_definition_id: The role definition ID used in the role assignment. :type role_definition_id: str :param principal_id: The principal ID assigned to the role. This maps to the ID inside the Active Directory. It can point to a user, service principal, or security group. :type principal_id: str :param can_delegate: The delgation flag used for creating a role assignment :type can_delegate: bool """ _attribute_map = { 'role_definition_id': {'key': 'properties.roleDefinitionId', 'type': 'str'}, 'principal_id': {'key': 'properties.principalId', 'type': 'str'}, 'can_delegate': {'key': 'properties.canDelegate', 'type': 'bool'}, } def __init__(self, **kwargs): super(RoleAssignmentCreateParameters, self).__init__(**kwargs) self.role_definition_id = kwargs.get('role_definition_id', None) self.principal_id = kwargs.get('principal_id', None) self.can_delegate = kwargs.get('can_delegate', None)
mit
astrofrog/numpy
doc/sphinxext/numpydoc.py
47
5680
""" ======== numpydoc ======== Sphinx extension that handles docstrings in the Numpy standard format. [1] It will: - Convert Parameters etc. sections to field lists. - Convert See Also section to a See also entry. - Renumber references. - Extract the signature from the docstring, if it can't be determined otherwise. .. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt """ import sphinx if sphinx.__version__ < '1.0.1': raise RuntimeError("Sphinx 1.0.1 or newer is required") import os, re, pydoc from docscrape_sphinx import get_doc_object, SphinxDocString from sphinx.util.compat import Directive import inspect def mangle_docstrings(app, what, name, obj, options, lines, reference_offset=[0]): cfg = dict(use_plots=app.config.numpydoc_use_plots, show_class_members=app.config.numpydoc_show_class_members) if what == 'module': # Strip top title title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', re.I|re.S) lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n") else: doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg) lines[:] = unicode(doc).split(u"\n") if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ obj.__name__: if hasattr(obj, '__module__'): v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__)) else: v = dict(full_name=obj.__name__) lines += [u'', u'.. htmlonly::', ''] lines += [u' %s' % x for x in (app.config.numpydoc_edit_link % v).split("\n")] # replace reference numbers so that there are no duplicates references = [] for line in lines: line = line.strip() m = re.match(ur'^.. 
\[([a-z0-9_.-])\]', line, re.I) if m: references.append(m.group(1)) # start renaming from the longest string, to avoid overwriting parts references.sort(key=lambda x: -len(x)) if references: for i, line in enumerate(lines): for r in references: if re.match(ur'^\d+$', r): new_r = u"R%d" % (reference_offset[0] + int(r)) else: new_r = u"%s%d" % (r, reference_offset[0]) lines[i] = lines[i].replace(u'[%s]_' % r, u'[%s]_' % new_r) lines[i] = lines[i].replace(u'.. [%s]' % r, u'.. [%s]' % new_r) reference_offset[0] += len(references) def mangle_signature(app, what, name, obj, options, sig, retann): # Do not try to inspect classes that don't define `__init__` if (inspect.isclass(obj) and (not hasattr(obj, '__init__') or 'initializes x; see ' in pydoc.getdoc(obj.__init__))): return '', '' if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return if not hasattr(obj, '__doc__'): return doc = SphinxDocString(pydoc.getdoc(obj)) if doc['Signature']: sig = re.sub(u"^[^(]*", u"", doc['Signature']) return sig, u'' def setup(app, get_doc_object_=get_doc_object): global get_doc_object get_doc_object = get_doc_object_ app.connect('autodoc-process-docstring', mangle_docstrings) app.connect('autodoc-process-signature', mangle_signature) app.add_config_value('numpydoc_edit_link', None, False) app.add_config_value('numpydoc_use_plots', None, False) app.add_config_value('numpydoc_show_class_members', True, True) # Extra mangling domains app.add_domain(NumpyPythonDomain) app.add_domain(NumpyCDomain) #------------------------------------------------------------------------------ # Docstring-mangling domains #------------------------------------------------------------------------------ from docutils.statemachine import ViewList from sphinx.domains.c import CDomain from sphinx.domains.python import PythonDomain class ManglingDomainBase(object): directive_mangling_map = {} def __init__(self, *a, **kw): super(ManglingDomainBase, self).__init__(*a, **kw) 
self.wrap_mangling_directives() def wrap_mangling_directives(self): for name, objtype in self.directive_mangling_map.items(): self.directives[name] = wrap_mangling_directive( self.directives[name], objtype) class NumpyPythonDomain(ManglingDomainBase, PythonDomain): name = 'np' directive_mangling_map = { 'function': 'function', 'class': 'class', 'exception': 'class', 'method': 'function', 'classmethod': 'function', 'staticmethod': 'function', 'attribute': 'attribute', } class NumpyCDomain(ManglingDomainBase, CDomain): name = 'np-c' directive_mangling_map = { 'function': 'function', 'member': 'attribute', 'macro': 'function', 'type': 'class', 'var': 'object', } def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): env = self.state.document.settings.env name = None if self.arguments: m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) name = m.group(2).strip() if not name: name = self.arguments[0] lines = list(self.content) mangle_docstrings(env.app, objtype, name, None, None, lines) self.content = ViewList(lines, self.content.parent) return base_directive.run(self) return directive
bsd-3-clause
noroutine/ansible
test/units/parsing/utils/test_jsonify.py
119
1499
# -*- coding: utf-8 -*- # (c) 2016, James Cammarata <jimi@sngx.net> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.compat.tests import unittest from ansible.parsing.utils.jsonify import jsonify class TestJsonify(unittest.TestCase): def test_jsonify_simple(self): self.assertEqual(jsonify(dict(a=1, b=2, c=3)), '{"a": 1, "b": 2, "c": 3}') def test_jsonify_simple_format(self): res = jsonify(dict(a=1, b=2, c=3), format=True) cleaned = "".join([x.strip() for x in res.splitlines()]) self.assertEqual(cleaned, '{"a": 1,"b": 2,"c": 3}') def test_jsonify_unicode(self): self.assertEqual(jsonify(dict(toshio=u'くらとみ')), u'{"toshio": "くらとみ"}') def test_jsonify_empty(self): self.assertEqual(jsonify(None), '{}')
gpl-3.0
pombredanne/pattern
pattern/server/cherrypy/cherrypy/lib/reprconf.py
36
14521
"""Generic configuration system using unrepr. Configuration data may be supplied as a Python dictionary, as a filename, or as an open file object. When you supply a filename or file, Python's builtin ConfigParser is used (with some extensions). Namespaces ---------- Configuration keys are separated into namespaces by the first "." in the key. The only key that cannot exist in a namespace is the "environment" entry. This special entry 'imports' other config entries from a template stored in the Config.environments dict. You can define your own namespaces to be called when new config is merged by adding a named handler to Config.namespaces. The name can be any string, and the handler must be either a callable or a context manager. """ try: # Python 3.0+ from configparser import ConfigParser except ImportError: from ConfigParser import ConfigParser try: set except NameError: from sets import Set as set try: basestring except NameError: basestring = str try: # Python 3 import builtins except ImportError: # Python 2 import __builtin__ as builtins import operator as _operator import sys def as_dict(config): """Return a dict from 'config' whether it is a dict, file, or filename.""" if isinstance(config, basestring): config = Parser().dict_from_file(config) elif hasattr(config, 'read'): config = Parser().dict_from_file(config) return config class NamespaceSet(dict): """A dict of config namespace names and handlers. Each config entry should begin with a namespace name; the corresponding namespace handler will be called once for each config entry in that namespace, and will be passed two arguments: the config key (with the namespace removed) and the config value. Namespace handlers may be any Python callable; they may also be Python 2.5-style 'context managers', in which case their __enter__ method should return a callable to be used as the handler. See cherrypy.tools (the Toolbox class) for an example. 
""" def __call__(self, config): """Iterate through config and pass it to each namespace handler. config A flat dict, where keys use dots to separate namespaces, and values are arbitrary. The first name in each config key is used to look up the corresponding namespace handler. For example, a config entry of {'tools.gzip.on': v} will call the 'tools' namespace handler with the args: ('gzip.on', v) """ # Separate the given config into namespaces ns_confs = {} for k in config: if "." in k: ns, name = k.split(".", 1) bucket = ns_confs.setdefault(ns, {}) bucket[name] = config[k] # I chose __enter__ and __exit__ so someday this could be # rewritten using Python 2.5's 'with' statement: # for ns, handler in self.iteritems(): # with handler as callable: # for k, v in ns_confs.get(ns, {}).iteritems(): # callable(k, v) for ns, handler in self.items(): exit = getattr(handler, "__exit__", None) if exit: callable = handler.__enter__() no_exc = True try: try: for k, v in ns_confs.get(ns, {}).items(): callable(k, v) except: # The exceptional case is handled here no_exc = False if exit is None: raise if not exit(*sys.exc_info()): raise # The exception is swallowed if exit() returns true finally: # The normal and non-local-goto cases are handled here if no_exc and exit: exit(None, None, None) else: for k, v in ns_confs.get(ns, {}).items(): handler(k, v) def __repr__(self): return "%s.%s(%s)" % (self.__module__, self.__class__.__name__, dict.__repr__(self)) def __copy__(self): newobj = self.__class__() newobj.update(self) return newobj copy = __copy__ class Config(dict): """A dict-like set of configuration data, with defaults and namespaces. May take a file, filename, or dict. 
""" defaults = {} environments = {} namespaces = NamespaceSet() def __init__(self, file=None, **kwargs): self.reset() if file is not None: self.update(file) if kwargs: self.update(kwargs) def reset(self): """Reset self to default values.""" self.clear() dict.update(self, self.defaults) def update(self, config): """Update self from a dict, file or filename.""" if isinstance(config, basestring): # Filename config = Parser().dict_from_file(config) elif hasattr(config, 'read'): # Open file object config = Parser().dict_from_file(config) else: config = config.copy() self._apply(config) def _apply(self, config): """Update self from a dict.""" which_env = config.get('environment') if which_env: env = self.environments[which_env] for k in env: if k not in config: config[k] = env[k] dict.update(self, config) self.namespaces(config) def __setitem__(self, k, v): dict.__setitem__(self, k, v) self.namespaces({k: v}) class Parser(ConfigParser): """Sub-class of ConfigParser that keeps the case of options and that raises an exception if the file cannot be read. """ def optionxform(self, optionstr): return optionstr def read(self, filenames): if isinstance(filenames, basestring): filenames = [filenames] for filename in filenames: # try: # fp = open(filename) # except IOError: # continue fp = open(filename) try: self._read(fp, filename) finally: fp.close() def as_dict(self, raw=False, vars=None): """Convert an INI file to a dictionary""" # Load INI file into a dict result = {} for section in self.sections(): if section not in result: result[section] = {} for option in self.options(section): value = self.get(section, option, raw=raw, vars=vars) try: value = unrepr(value) except Exception: x = sys.exc_info()[1] msg = ("Config error in section: %r, option: %r, " "value: %r. Config values must be valid Python." 
% (section, option, value)) raise ValueError(msg, x.__class__.__name__, x.args) result[section][option] = value return result def dict_from_file(self, file): if hasattr(file, 'read'): self.readfp(file) else: self.read(file) return self.as_dict() # public domain "unrepr" implementation, found on the web and then improved. class _Builder2: def build(self, o): m = getattr(self, 'build_' + o.__class__.__name__, None) if m is None: raise TypeError("unrepr does not recognize %s" % repr(o.__class__.__name__)) return m(o) def astnode(self, s): """Return a Python2 ast Node compiled from a string.""" try: import compiler except ImportError: # Fallback to eval when compiler package is not available, # e.g. IronPython 1.0. return eval(s) p = compiler.parse("__tempvalue__ = " + s) return p.getChildren()[1].getChildren()[0].getChildren()[1] def build_Subscript(self, o): expr, flags, subs = o.getChildren() expr = self.build(expr) subs = self.build(subs) return expr[subs] def build_CallFunc(self, o): children = map(self.build, o.getChildren()) callee = children.pop(0) kwargs = children.pop() or {} starargs = children.pop() or () args = tuple(children) + tuple(starargs) return callee(*args, **kwargs) def build_List(self, o): return map(self.build, o.getChildren()) def build_Const(self, o): return o.value def build_Dict(self, o): d = {} i = iter(map(self.build, o.getChildren())) for el in i: d[el] = i.next() return d def build_Tuple(self, o): return tuple(self.build_List(o)) def build_Name(self, o): name = o.name if name == 'None': return None if name == 'True': return True if name == 'False': return False # See if the Name is a package or module. If it is, import it. try: return modules(name) except ImportError: pass # See if the Name is in builtins. 
try: return getattr(builtins, name) except AttributeError: pass raise TypeError("unrepr could not resolve the name %s" % repr(name)) def build_Add(self, o): left, right = map(self.build, o.getChildren()) return left + right def build_Mul(self, o): left, right = map(self.build, o.getChildren()) return left * right def build_Getattr(self, o): parent = self.build(o.expr) return getattr(parent, o.attrname) def build_NoneType(self, o): return None def build_UnarySub(self, o): return -self.build(o.getChildren()[0]) def build_UnaryAdd(self, o): return self.build(o.getChildren()[0]) class _Builder3: def build(self, o): m = getattr(self, 'build_' + o.__class__.__name__, None) if m is None: raise TypeError("unrepr does not recognize %s" % repr(o.__class__.__name__)) return m(o) def astnode(self, s): """Return a Python3 ast Node compiled from a string.""" try: import ast except ImportError: # Fallback to eval when ast package is not available, # e.g. IronPython 1.0. return eval(s) p = ast.parse("__tempvalue__ = " + s) return p.body[0].value def build_Subscript(self, o): return self.build(o.value)[self.build(o.slice)] def build_Index(self, o): return self.build(o.value) def build_Call(self, o): callee = self.build(o.func) if o.args is None: args = () else: args = tuple([self.build(a) for a in o.args]) if o.starargs is None: starargs = () else: starargs = self.build(o.starargs) if o.kwargs is None: kwargs = {} else: kwargs = self.build(o.kwargs) return callee(*(args + starargs), **kwargs) def build_List(self, o): return list(map(self.build, o.elts)) def build_Str(self, o): return o.s def build_Num(self, o): return o.n def build_Dict(self, o): return dict([(self.build(k), self.build(v)) for k, v in zip(o.keys, o.values)]) def build_Tuple(self, o): return tuple(self.build_List(o)) def build_Name(self, o): name = o.id if name == 'None': return None if name == 'True': return True if name == 'False': return False # See if the Name is a package or module. If it is, import it. 
try: return modules(name) except ImportError: pass # See if the Name is in builtins. try: import builtins return getattr(builtins, name) except AttributeError: pass raise TypeError("unrepr could not resolve the name %s" % repr(name)) def build_UnaryOp(self, o): op, operand = map(self.build, [o.op, o.operand]) return op(operand) def build_BinOp(self, o): left, op, right = map(self.build, [o.left, o.op, o.right]) return op(left, right) def build_Add(self, o): return _operator.add def build_Mult(self, o): return _operator.mul def build_USub(self, o): return _operator.neg def build_Attribute(self, o): parent = self.build(o.value) return getattr(parent, o.attr) def build_NoneType(self, o): return None def unrepr(s): """Return a Python object compiled from a string.""" if not s: return s if sys.version_info < (3, 0): b = _Builder2() else: b = _Builder3() obj = b.astnode(s) return b.build(obj) def modules(modulePath): """Load a module and retrieve a reference to that module.""" try: mod = sys.modules[modulePath] if mod is None: raise KeyError() except KeyError: __import__(modulePath) mod = sys.modules[modulePath] return mod def attributes(full_attribute_name): """Load a module and retrieve an attribute of that module.""" # Parse out the path, module, and attribute last_dot = full_attribute_name.rfind(".") attr_name = full_attribute_name[last_dot + 1:] mod_path = full_attribute_name[:last_dot] mod = modules(mod_path) # Let an AttributeError propagate outward. try: attr = getattr(mod, attr_name) except AttributeError: raise AttributeError("'%s' object has no attribute '%s'" % (mod_path, attr_name)) # Return a reference to the attribute. return attr
bsd-3-clause
FescueFungiShare/hydroshare
hs_core/tests/api/native/test_get_citation.py
2
5860
from unittest import TestCase from datetime import date from hs_core.hydroshare import resource from django.contrib.auth.models import Group, User from hs_core.models import GenericResource, Creator from hs_core import hydroshare from hs_core.testing import MockIRODSTestCaseMixin class TestGetCitation(MockIRODSTestCaseMixin, TestCase): def setUp(self): super(TestGetCitation, self).setUp() self.group, _ = Group.objects.get_or_create(name='Hydroshare Author') self.user = hydroshare.create_account( 'user1@nowhere.com', username='user1', first_name='Creator_FirstName', last_name='Creator_LastName', superuser=False, groups=[] ) self.res = hydroshare.create_resource( resource_type='GenericResource', owner=self.user, title='Generic resource', keywords=['kw1', 'kw2'] ) def tearDown(self): super(TestGetCitation, self).tearDown() User.objects.all().delete() Group.objects.all().delete() GenericResource.objects.all().delete() Creator.objects.all().delete() def test_one_author(self): citation = self.res.get_citation() hs_identifier = self.res.metadata.identifiers.all().filter(name="hydroShareIdentifier")[0] hs_url = hs_identifier.url hs_date = str(date.today().year) correct_citation = 'Creator_LastName, C. ({}). Generic resource, HydroShare, {}'\ .format(hs_date, hs_url) self.assertEqual(citation, correct_citation) def test_two_authors_no_comma(self): # add a creator element resource.create_metadata_element(self.res.short_id, 'creator', name='John Smith') citation = self.res.get_citation() hs_identifier = self.res.metadata.identifiers.all().filter(name="hydroShareIdentifier")[0] hs_url = hs_identifier.url hs_date = str(date.today().year) correct_citation = 'Creator_LastName, C., ' \ 'J. Smith ({}). 
Generic resource, HydroShare, {}'.format(hs_date, hs_url) self.assertEqual(citation, correct_citation) def test_two_authors_comma(self): # add a creator element resource.create_metadata_element(self.res.short_id, 'creator', name='Smith, John') citation = self.res.get_citation() hs_identifier = self.res.metadata.identifiers.all().filter(name="hydroShareIdentifier")[0] hs_url = hs_identifier.url hs_date = str(date.today().year) correct_citation = 'Creator_LastName, C., ' \ 'J. Smith ({}). Generic resource, HydroShare, {}'.format(hs_date, hs_url) self.assertEqual(citation, correct_citation) def test_two_authors_multiple_first_and_last_names_comma(self): # add a creator element resource.create_metadata_element(self.res.short_id, 'creator', name='Smith William, John Mason Jingle') citation = self.res.get_citation() hs_identifier = self.res.metadata.identifiers.all().filter(name="hydroShareIdentifier")[0] hs_url = hs_identifier.url hs_date = str(date.today().year) correct_citation = 'Creator_LastName, C., ' \ 'J. M. J. Smith William ' \ '({}). Generic resource, HydroShare, {}'.format(hs_date, hs_url) self.assertEqual(citation, correct_citation) def test_two_authors_multiple_first_and_last_names_no_comma(self): # add a creator element resource.create_metadata_element(self.res.short_id, 'creator', name='John Mason Jingle Smith William') citation = self.res.get_citation() hs_identifier = self.res.metadata.identifiers.all().filter(name="hydroShareIdentifier")[0] hs_url = hs_identifier.url hs_date = str(date.today().year) correct_citation = 'Creator_LastName, C., ' \ 'J. M. J. S. William ' \ '({}). Generic resource, HydroShare, {}'.format(hs_date, hs_url) self.assertEqual(citation, correct_citation) def test_two_authors_and_organization(self): # add a creator element resource.create_metadata_element(self.res.short_id, 'creator', name='Smith William, John Mason Jingle') resource.create_metadata_element(self.res.short_id, 'creator', organization='U.S. 
Geological Survey') citation = self.res.get_citation() hs_identifier = self.res.metadata.identifiers.all().filter(name="hydroShareIdentifier")[0] hs_url = hs_identifier.url hs_date = str(date.today().year) correct_citation = 'Creator_LastName, C., ' \ 'J. M. J. Smith William, ' \ 'U.S. Geological Survey ' \ '({}). Generic resource, HydroShare, {}'.format(hs_date, hs_url) self.assertEqual(citation, correct_citation) def test_parse_citation_name(self): name = "John Morley Smith" parsed_name = self.res.parse_citation_name(name, first_author=True) self.assertEqual(parsed_name, 'Smith, J. M., ') name = "John Morley Smith" parsed_name = self.res.parse_citation_name(name) self.assertEqual(parsed_name, 'J. M. Smith, ') name = "Smith Tanner, John Morley" parsed_name = self.res.parse_citation_name(name, first_author=True) self.assertEqual(parsed_name, 'Smith Tanner, J. M., ') name = "Smith Tanner, John Morley" parsed_name = self.res.parse_citation_name(name) self.assertEqual(parsed_name, 'J. M. Smith Tanner, ')
bsd-3-clause
spbguru/repo1
nupic/encoders/geospatial_coordinate.py
3
4081
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2014, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import math import numpy from pyproj import Proj from nupic.encoders.coordinate import CoordinateEncoder PROJ = Proj(init="epsg:3785") # Spherical Mercator # From http://spatialreference.org/ref/epsg/popular-visualisation-crs-mercator/ PROJ_RANGE=(20037508.3428, 19971868.8804) # in meters class GeospatialCoordinateEncoder(CoordinateEncoder): """ Given a GPS coordinate and a speed reading, the Geospatial Coordinate Encoder returns an SDR representation of that position. """ def __init__(self, scale, timestep, w=21, n=1000, name=None, verbosity=0): """ See `nupic.encoders.base.Encoder` for more information. 
@param scale (int) Scale of the map, as measured by distance between two coordinates (in meters per dimensional unit) @param timestep (int) Time between readings (in seconds) """ super(GeospatialCoordinateEncoder, self).__init__(w=w, n=n, name=name, verbosity=verbosity) self.scale = scale self.timestep = timestep def getDescription(self): """See `nupic.encoders.base.Encoder` for more information.""" return [('longitude', 0), ('latitude', 1), ('speed', 2)] def encodeIntoArray(self, inputData, output): """ See `nupic.encoders.base.Encoder` for more information. @param inputData (tuple) Contains longitude (float), latitude (float), speed (float) @param output (numpy.array) Stores encoded SDR in this numpy array """ (longitude, latitude, speed) = inputData coordinate = self.coordinateForPosition(longitude, latitude) radius = self.radiusForSpeed(speed) super(GeospatialCoordinateEncoder, self).encodeIntoArray( (coordinate, radius), output) def coordinateForPosition(self, longitude, latitude): """ Returns coordinate for given GPS position. @param longitude (float) Longitude of position @param latitude (float) Latitude of position @return (numpy.array) Coordinate that the given GPS position maps to """ coordinate = numpy.array(PROJ(longitude, latitude)) coordinate = coordinate / self.scale return coordinate.astype(int) def radiusForSpeed(self, speed): """ Returns radius for given speed. Tries to get the encodings of consecutive readings to be adjacent with some overlap. @param speed (float) Speed (in meters per second) @return (int) Radius for given speed """ overlap = 1.5 coordinatesPerTimestep = speed * self.timestep / self.scale radius = int(round(float(coordinatesPerTimestep) / 2 * overlap)) minRadius = int(math.ceil((math.sqrt(self.w) - 1) / 2)) return max(radius, minRadius) def dump(self): print "GeospatialCoordinateEncoder:" print " w: %d" % self.w print " n: %d" % self.n
gpl-3.0
lucidmotifs/newtopia
newtopia/ntgame/models/effect.py
1
3504
# python modules from enum import Enum # django modules from django.db import models # nt modules from .province import Province from .infrastructure import Building # meta from ntmeta.models import Entity class Effect(models.Model): """ The core component of province change """ """ e.g. Peasant Growth - would signify that applying this effect, with a given magnitude would impact how fast peasants grow per turn.""" name = models.CharField(max_length=40, unique=False) """ The entity that generated the effect """ entity = models.ForeignKey(Entity, on_delete=models.CASCADE, null=False, blank=False) """ Code used to identify the effect, like a key. HASH? """ tag = models.CharField(max_length=40, unique=True) def __str__(self): return self.name class Instance(models.Model): """ An instance of an effect that can be applied to a building or spell. """ class EffectType(Enum): DELAYED = 1 IMMEDIATE = 2 OVER_TIME = 3 NEXT_TURN = 4 """ The related effect """ effect = models.ForeignKey(Effect, on_delete=models.CASCADE, null=False, blank=False) """ Determines the type of application produced """ effect_type = models.IntegerField( choices=EffectType.__members__.items(), default=EffectType.IMMEDIATE) """ How long effect persists. Ignore when `effect_type` is immediate and determines when the delayed effect pops when `effect_type` is DELAYED. Measured in ntdays """ duration = models.IntegerField(default=1) """ Size of the effect. Set to 100 if using raw value. """ magnitude = models.FloatField(default=0.0) """ Raw value increase/decrease will be converted to a percentage if used with a subentity, such as a growth rate. When Provided, magnitude will only be applied to the raw_value. 
Exception: can be used as minimum value if base_is_min == True """ base_value = models.IntegerField(default=None) """ When True, magnitude works as usual, and base_value is only applied if the resulting Application value would be less than the base_value """ base_is_min = models.BooleanField(default=False) """ Denotes negative or positive version of effect """ is_negative = models.BooleanField(default=False) def apply(self, province): app = Application() app.instance = self app.province = province def __str__(self): return "{} with mag. {}".format(self.effect.name, self.magnitude) EffectType = Instance.EffectType class Application(models.Model): """ Used to apply effects to provinces """ instance = models.ForeignKey(Instance, on_delete=models.CASCADE, null=True, blank=True) applied_to = models.ForeignKey( Province, on_delete=models.CASCADE, null=False, blank=False, related_name='to') applied_by = models.ForeignKey( Province, on_delete=models.CASCADE, null=False, blank=False, related_name='by') """ Type of effect; alters how the effect is applied. """ # Round the effect was applied (ntdate) applied_on = models.IntegerField() # Round the effect expires (ntdate) (NULL permanent, immediate) expires_at = models.IntegerField(default=None) # Round the effect is applied (ntdate) # (NULL immediate, 0 every tick till expires) applies_at = models.IntegerField(default=None)
gpl-3.0
nurmd2/nurmd
openerp/addons/base/module/wizard/base_export_language.py
43
2692
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import base64 import contextlib import cStringIO from openerp import tools from openerp.osv import fields,osv from openerp.tools.translate import _ from openerp.tools.misc import get_iso_codes NEW_LANG_KEY = '__new__' class base_language_export(osv.osv_memory): _name = "base.language.export" def _get_languages(self, cr, uid, context): lang_obj = self.pool.get('res.lang') ids = lang_obj.search(cr, uid, [('translatable', '=', True)]) langs = lang_obj.browse(cr, uid, ids) return [(NEW_LANG_KEY, _('New Language (Empty translation template)'))] + [(lang.code, lang.name) for lang in langs] _columns = { 'name': fields.char('File Name', readonly=True), 'lang': fields.selection(_get_languages, 'Language', required=True), 'format': fields.selection([('csv','CSV File'), ('po','PO File'), ('tgz', 'TGZ Archive')], 'File Format', required=True), 'modules': fields.many2many('ir.module.module', 'rel_modules_langexport', 'wiz_id', 'module_id', 'Apps To Export', domain=[('state','=','installed')]), 'data': fields.binary('File', readonly=True), 'state': fields.selection([('choose', 'choose'), # choose language ('get', 'get')]) # get the file } _defaults = { 'state': 'choose', 'lang': NEW_LANG_KEY, 'format': 'csv', } def act_getfile(self, cr, uid, ids, context=None): this = self.browse(cr, uid, ids, context=context)[0] lang = this.lang if this.lang != NEW_LANG_KEY else False mods = sorted(map(lambda m: m.name, this.modules)) or ['all'] with contextlib.closing(cStringIO.StringIO()) as buf: tools.trans_export(lang, mods, buf, this.format, cr) out = base64.encodestring(buf.getvalue()) filename = 'new' if lang: filename = get_iso_codes(lang) elif len(mods) == 1: filename = mods[0] extension = this.format if not lang and extension == 'po': extension = 'pot' name = "%s.%s" % (filename, extension) this.write({ 'state': 'get', 'data': out, 'name': name }) return { 'type': 
'ir.actions.act_window', 'res_model': 'base.language.export', 'view_mode': 'form', 'view_type': 'form', 'res_id': this.id, 'views': [(False, 'form')], 'target': 'new', }
gpl-3.0
ogenstad/ansible
lib/ansible/modules/cloud/amazon/ec2_vol.py
33
20090
#!/usr/bin/python # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: ec2_vol short_description: create and attach a volume, return volume id and device map description: - creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto. version_added: "1.1" options: instance: description: - instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach. name: description: - volume Name tag if you wish to attach an existing volume (requires instance) version_added: "1.6" id: description: - volume id if you wish to attach an existing volume (requires instance) or remove an existing volume version_added: "1.6" volume_size: description: - size of volume (in GB) to create. volume_type: description: - Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS), st1 (Throughput Optimized HDD), sc1 (Cold HDD). "Standard" is the old EBS default and continues to remain the Ansible default for backwards compatibility. default: standard version_added: "1.9" iops: description: - the provisioned IOPs you want to associate with this volume (integer). default: 100 version_added: "1.3" encrypted: description: - Enable encryption at rest for this volume. default: 'no' version_added: "1.8" kms_key_id: description: - Specify the id of the KMS key to use. version_added: "2.3" device_name: description: - device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows. 
delete_on_termination: description: - When set to "yes", the volume will be deleted upon instance termination. type: bool default: 'no' version_added: "2.1" zone: description: - zone in which to create the volume, if unset uses the zone the instance is in (if set) aliases: ['aws_zone', 'ec2_zone'] snapshot: description: - snapshot ID on which to base the volume version_added: "1.5" validate_certs: description: - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. type: bool default: 'yes' version_added: "1.5" state: description: - whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8). default: present choices: ['absent', 'present', 'list'] version_added: "1.6" tags: description: - tag:value pairs to add to the volume after creation default: {} version_added: "2.3" author: "Lester Wade (@lwade)" extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Simple attachment action - ec2_vol: instance: XXXXXX volume_size: 5 device_name: sdd # Example using custom iops params - ec2_vol: instance: XXXXXX volume_size: 5 iops: 100 device_name: sdd # Example using snapshot id - ec2_vol: instance: XXXXXX snapshot: "{{ snapshot }}" # Playbook example combined with instance launch - ec2: keypair: "{{ keypair }}" image: "{{ image }}" wait: yes count: 3 register: ec2 - ec2_vol: instance: "{{ item.id }}" volume_size: 5 with_items: "{{ ec2.instances }}" register: ec2_vol # Example: Launch an instance and then add a volume if not already attached # * Volume will be created with the given name if not already created. # * Nothing will happen if the volume is already attached. 
# * Requires Ansible 2.0 - ec2: keypair: "{{ keypair }}" image: "{{ image }}" zone: YYYYYY id: my_instance wait: yes count: 1 register: ec2 - ec2_vol: instance: "{{ item.id }}" name: my_existing_volume_Name_tag device_name: /dev/xvdf with_items: "{{ ec2.instances }}" register: ec2_vol # Remove a volume - ec2_vol: id: vol-XXXXXXXX state: absent # Detach a volume (since 1.9) - ec2_vol: id: vol-XXXXXXXX instance: None # List volumes for an instance - ec2_vol: instance: i-XXXXXX state: list # Create new volume using SSD storage - ec2_vol: instance: XXXXXX volume_size: 50 volume_type: gp2 device_name: /dev/xvdf # Attach an existing volume to instance. The volume will be deleted upon instance termination. - ec2_vol: instance: XXXXXX id: XXXXXX device_name: /dev/sdf delete_on_termination: yes ''' RETURN = ''' device: description: device name of attached volume returned: when success type: string sample: "/def/sdf" volume_id: description: the id of volume returned: when success type: string sample: "vol-35b333d9" volume_type: description: the volume type returned: when success type: string sample: "standard" volume: description: a dictionary containing detailed attributes of the volume returned: when success type: string sample: { "attachment_set": { "attach_time": "2015-10-23T00:22:29.000Z", "deleteOnTermination": "false", "device": "/dev/sdf", "instance_id": "i-8356263c", "status": "attached" }, "create_time": "2015-10-21T14:36:08.870Z", "encrypted": false, "id": "vol-35b333d9", "iops": null, "size": 1, "snapshot_id": "", "status": "in-use", "tags": { "env": "dev" }, "type": "standard", "zone": "us-east-1b" } ''' import time from distutils.version import LooseVersion try: import boto import boto.ec2 import boto.exception from boto.exception import BotoServerError from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping except ImportError: pass # Taken care of by ec2.HAS_BOTO from ansible.module_utils.basic import AnsibleModule from 
ansible.module_utils.ec2 import (HAS_BOTO, AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info) def get_volume(module, ec2): name = module.params.get('name') id = module.params.get('id') zone = module.params.get('zone') filters = {} volume_ids = None # If no name or id supplied, just try volume creation based on module parameters if id is None and name is None: return None if zone: filters['availability_zone'] = zone if name: filters = {'tag:Name': name} if id: volume_ids = [id] try: vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters) except boto.exception.BotoServerError as e: module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) if not vols: if id: msg = "Could not find the volume with id: %s" % id if name: msg += (" and name: %s" % name) module.fail_json(msg=msg) else: return None if len(vols) > 1: module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name) return vols[0] def get_volumes(module, ec2): instance = module.params.get('instance') try: if not instance: vols = ec2.get_all_volumes() else: vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance}) except boto.exception.BotoServerError as e: module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) return vols def delete_volume(module, ec2): volume_id = module.params['id'] try: ec2.delete_volume(volume_id) module.exit_json(changed=True) except boto.exception.EC2ResponseError as ec2_error: if ec2_error.code == 'InvalidVolume.NotFound': module.exit_json(changed=False) module.fail_json(msg=ec2_error.message) def boto_supports_volume_encryption(): """ Check if Boto library supports encryption of EBS volumes (added in 2.29.0) Returns: True if boto library has the named param as an argument on the request_spot_instances method, else False """ return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0') def boto_supports_kms_key_id(): """ Check if Boto library supports kms_key_ids 
(added in 2.39.0) Returns: True if version is equal to or higher then the version needed, else False """ return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.39.0') def create_volume(module, ec2, zone): changed = False name = module.params.get('name') iops = module.params.get('iops') encrypted = module.params.get('encrypted') kms_key_id = module.params.get('kms_key_id') volume_size = module.params.get('volume_size') volume_type = module.params.get('volume_type') snapshot = module.params.get('snapshot') tags = module.params.get('tags') # If custom iops is defined we use volume_type "io1" rather than the default of "standard" if iops: volume_type = 'io1' volume = get_volume(module, ec2) if volume is None: try: if boto_supports_volume_encryption(): if kms_key_id is not None: volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted, kms_key_id) else: volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted) changed = True else: volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops) changed = True while volume.status != 'available': time.sleep(3) volume.update() if name: tags["Name"] = name if tags: ec2.create_tags([volume.id], tags) except boto.exception.BotoServerError as e: module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) return volume, changed def attach_volume(module, ec2, volume, instance): device_name = module.params.get('device_name') delete_on_termination = module.params.get('delete_on_termination') changed = False # If device_name isn't set, make a choice based on best practices here: # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html # In future this needs to be more dynamic but combining block device mapping best practices # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. 
For me ;) # Use password data attribute to tell whether the instance is Windows or Linux if device_name is None: try: if not ec2.get_password_data(instance.id): device_name = '/dev/sdf' else: device_name = '/dev/xvdf' except boto.exception.BotoServerError as e: module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) if volume.attachment_state() is not None: adata = volume.attach_data if adata.instance_id != instance.id: module.fail_json(msg="Volume %s is already attached to another instance: %s" % (volume.id, adata.instance_id)) else: # Volume is already attached to right instance changed = modify_dot_attribute(module, ec2, instance, device_name) else: try: volume.attach(instance.id, device_name) while volume.attachment_state() != 'attached': time.sleep(3) volume.update() changed = True except boto.exception.BotoServerError as e: module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) modify_dot_attribute(module, ec2, instance, device_name) return volume, changed def modify_dot_attribute(module, ec2, instance, device_name): """ Modify delete_on_termination attribute """ delete_on_termination = module.params.get('delete_on_termination') changed = False try: instance.update() dot = instance.block_device_mapping[device_name].delete_on_termination except boto.exception.BotoServerError as e: module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) if delete_on_termination != dot: try: bdt = BlockDeviceType(delete_on_termination=delete_on_termination) bdm = BlockDeviceMapping() bdm[device_name] = bdt ec2.modify_instance_attribute(instance_id=instance.id, attribute='blockDeviceMapping', value=bdm) while instance.block_device_mapping[device_name].delete_on_termination != delete_on_termination: time.sleep(3) instance.update() changed = True except boto.exception.BotoServerError as e: module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) return changed def detach_volume(module, ec2, volume): changed = False if volume.attachment_state() is 
not None: adata = volume.attach_data volume.detach() while volume.attachment_state() is not None: time.sleep(3) volume.update() changed = True return volume, changed def get_volume_info(volume, state): # If we're just listing volumes then do nothing, else get the latest update for the volume if state != 'list': volume.update() volume_info = {} attachment = volume.attach_data volume_info = { 'create_time': volume.create_time, 'encrypted': volume.encrypted, 'id': volume.id, 'iops': volume.iops, 'size': volume.size, 'snapshot_id': volume.snapshot_id, 'status': volume.status, 'type': volume.type, 'zone': volume.zone, 'attachment_set': { 'attach_time': attachment.attach_time, 'device': attachment.device, 'instance_id': attachment.instance_id, 'status': attachment.status }, 'tags': volume.tags } if hasattr(attachment, 'deleteOnTermination'): volume_info['attachment_set']['deleteOnTermination'] = attachment.deleteOnTermination return volume_info def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( instance=dict(), id=dict(), name=dict(), volume_size=dict(), volume_type=dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'), iops=dict(), encrypted=dict(type='bool', default=False), kms_key_id=dict(), device_name=dict(), delete_on_termination=dict(type='bool', default=False), zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), snapshot=dict(), state=dict(choices=['absent', 'present', 'list'], default='present'), tags=dict(type='dict', default={}) ) ) module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') id = module.params.get('id') name = module.params.get('name') instance = module.params.get('instance') volume_size = module.params.get('volume_size') encrypted = module.params.get('encrypted') kms_key_id = module.params.get('kms_key_id') device_name = module.params.get('device_name') zone = module.params.get('zone') snapshot = 
module.params.get('snapshot') state = module.params.get('state') tags = module.params.get('tags') # Ensure we have the zone or can get the zone if instance is None and zone is None and state == 'present': module.fail_json(msg="You must specify either instance or zone") # Set volume detach flag if instance == 'None' or instance == '': instance = None detach_vol_flag = True else: detach_vol_flag = False # Set changed flag changed = False region, ec2_url, aws_connect_params = get_aws_connection_info(module) if region: try: ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params) except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") if state == 'list': returned_volumes = [] vols = get_volumes(module, ec2) for v in vols: attachment = v.attach_data returned_volumes.append(get_volume_info(v, state)) module.exit_json(changed=False, volumes=returned_volumes) if encrypted and not boto_supports_volume_encryption(): module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes") if kms_key_id is not None and not boto_supports_kms_key_id(): module.fail_json(msg="You must use boto >= v2.39.0 to use kms_key_id") # Here we need to get the zone info for the instance. This covers situation where # instance is specified but zone isn't. # Useful for playbooks chaining instance launch with volume create + attach and where the # zone doesn't matter to the user. inst = None if instance: try: reservation = ec2.get_all_instances(instance_ids=instance) except BotoServerError as e: module.fail_json(msg=e.message) inst = reservation[0].instances[0] zone = inst.placement # Check if there is a volume already mounted there. 
if device_name: if device_name in inst.block_device_mapping: module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance), volume_id=inst.block_device_mapping[device_name].volume_id, device=device_name, changed=False) # Delaying the checks until after the instance check allows us to get volume ids for existing volumes # without needing to pass an unused volume_size if not volume_size and not (id or name or snapshot): module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot") if volume_size and id: module.fail_json(msg="Cannot specify volume_size together with id") if state == 'present': volume, changed = create_volume(module, ec2, zone) if detach_vol_flag: volume, changed = detach_volume(module, ec2, volume) elif inst is not None: volume, changed = attach_volume(module, ec2, volume, inst) # Add device, volume_id and volume_type parameters separately to maintain backward compatibility volume_info = get_volume_info(volume, state) # deleteOnTermination is not correctly reflected on attachment if module.params.get('delete_on_termination'): for attempt in range(0, 8): if volume_info['attachment_set'].get('deleteOnTermination') == 'true': break time.sleep(5) volume = ec2.get_all_volumes(volume_ids=volume.id)[0] volume_info = get_volume_info(volume, state) module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'], volume_id=volume_info['id'], volume_type=volume_info['type']) elif state == 'absent': delete_volume(module, ec2) if __name__ == '__main__': main()
gpl-3.0
Knewton/lentil
lentil/viztools.py
2
9752
""" Module for visualizing skill embeddings @author Siddharth Reddy <sgr45@cornell.edu> """ import logging import matplotlib from matplotlib import pyplot as plt import numpy as np from . import models _logger = logging.getLogger(__name__) def plot_embedding( model, timestep=-1, show_students=True, show_assessments=True, show_lessons=None, show_prereqs=None, show_concepts=None, show_student_ids=False, show_assessment_ids=False, show_lesson_ids=False, show_concept_ids=False, id_padding_x=0.01, id_padding_y=0.01, alpha=0.5, size=20, title='', show_legend=True, force_invariant_axis_limits=True, axis_limit_padding=0.1, show_pass_rates=False, x_axis_limits=None, y_axis_limits=None): """ Plot students, assessments, lessons, and prereqs in a two-dimensional skill embedding Students, assessments, prereqs = points Lessons = vectors See nb/toy_examples.ipynb for example invocations :param EmbeddingModel model: A skill embedding model :param int timestep: A timestep. By default, timestep=-1 => latest snapshot :param float id_padding_x: Padding between object and id along x-axis :param float id_padding_y: Padding between object and id along y-axis :param float alpha: Alpha level for scatterplot points' color :param int size: Size of scatterplot points :param str|None title: Title of plot :param bool show_legend: True => show legend in upper left corner False => do not show legend :param bool force_invariant_axis_limits: True => plot will have same axes limits regardless of timestep, False => plot may have different axes limits depending on timestep :param float axis_limit_padding: Padding for axis limits (to prevent points from being stuck at the edges of the plot) :param bool show_pass_rates: True => color assessments by pass rate, False => don't color assessments :param list[int,int]|None x_axis_limits: [x_min, x_max] :param list[int,int]|None y_axis_limits: [y_min, y_max] """ if model.embedding_dimension != 2: raise ValueError('Invalid embedding dimension!') if timestep<-1 
or timestep>=model.history.duration(): raise ValueError('Invalid timestep!') if size<=0: raise ValueError('Invalid scatterplot point size!') if axis_limit_padding<0: raise ValueError('Invalid axis limit padding!') if show_lessons is None: show_lessons = model.using_lessons if show_prereqs is None: show_prereqs = model.using_prereqs if show_lessons and not model.using_lessons: raise ValueError( 'Cannot show lessons because model does not use lessons!') if show_prereqs and not model.using_prereqs: raise ValueError( 'Cannot show prereqs because model does not use prereqs!') if show_concepts and not model.using_graph_prior: raise ValueError( 'Cannot show concepts because model does not use a graph prior!') if show_student_ids and not show_students: raise ValueError('Cannot show student_ids without students!') if show_assessment_ids and not show_assessments: raise ValueError('Cannot show assessment_ids without assessments!') if show_lesson_ids and not show_lessons and not show_prereqs: raise ValueError('Cannot show lesson_ids without lessons and/or prereqs!') if show_pass_rates and not show_assessments: raise ValueError('Cannot show pass rates without assessments!') if show_concept_ids and not show_concepts: raise ValueError('Cannot show concept_ids without concepts!') if show_pass_rates and model.history.num_students() > 1: _logger.warning('Showing pass rates for more than one student!') _, ax = plt.subplots() if show_students: student_embeddings_x = model.student_embeddings[:, 0, timestep] student_embeddings_y = model.student_embeddings[:, 1, timestep] ax.scatter( student_embeddings_x, student_embeddings_y, alpha=alpha, marker='o', s=size, label='student') if show_student_ids: for student_id in model.history.iter_students(): student_idx = model.history.idx_of_student_id(student_id) student_x = student_embeddings_x[student_idx] student_y = student_embeddings_y[student_idx] student_id_x = student_x + id_padding_x student_id_y = student_y + id_padding_y 
ax.annotate(student_id, xy=( student_x, student_y), xytext=( student_id_x, student_id_y)) if show_assessments: assessment_embeddings_x = model.assessment_embeddings[:, 0] assessment_embeddings_y = model.assessment_embeddings[:, 1] if show_pass_rates: num_assessments = model.history.num_assessments() pass_rates = [model.history.assessment_pass_rate( model.history.id_of_assessment_idx( i), timestep if timestep!=-1 else None) for i in xrange( num_assessments)] ax.scatter( assessment_embeddings_x, assessment_embeddings_y, c=pass_rates, alpha=alpha, marker='s', s=size, label='assessment', cmap=matplotlib.cm.cool) else: ax.scatter( assessment_embeddings_x, assessment_embeddings_y, alpha=alpha, marker='s', s=size, label='assessment') if show_assessment_ids: for assessment_id in model.history.iter_assessments(): assessment_idx = model.history.idx_of_assessment_id(assessment_id) assessment_x = assessment_embeddings_x[assessment_idx] assessment_y = assessment_embeddings_y[assessment_idx] assessment_id_x = assessment_x + id_padding_x assessment_id_y = assessment_y + id_padding_y ax.annotate(assessment_id, xy=( assessment_x, assessment_y), xytext=( assessment_id_x, assessment_id_y)) if show_concepts: concept_embeddings_x = model.concept_embeddings[:, 0] concept_embeddings_y = model.concept_embeddings[:, 1] ax.scatter( concept_embeddings_x, concept_embeddings_y, alpha=alpha, marker='^', s=size, label='concept') if show_concept_ids: for concept_id, concept_idx in model.graph.idx_of_concept_id.iteritems(): concept_x = concept_embeddings_x[concept_idx] concept_y = concept_embeddings_y[concept_idx] concept_id_x = concept_x + id_padding_x concept_id_y = concept_y + id_padding_y ax.annotate(concept_id, xy=( concept_x, concept_y), xytext=( concept_id_x, concept_id_y)) if show_lessons: if model.using_prereqs and show_prereqs: prereq_embeddings_x = model.prereq_embeddings[:, 0] prereq_embeddings_y = model.prereq_embeddings[:, 1] else: prereq_embeddings_x = prereq_embeddings_y = [0] * ( 
model.history.num_lessons()) lesson_embeddings_x = model.lesson_embeddings[:, 0] lesson_embeddings_y = model.lesson_embeddings[:, 1] ax.quiver( prereq_embeddings_x, prereq_embeddings_y, lesson_embeddings_x, lesson_embeddings_y, pivot='tail') if show_lesson_ids: for lesson_id in model.history.iter_lessons(): lesson_idx = model.history.idx_of_lesson_id(lesson_id) lesson_x = prereq_embeddings_x[lesson_idx] if model.using_prereqs else 0 lesson_y = prereq_embeddings_y[lesson_idx] if model.using_prereqs else 0 lesson_id_x = lesson_x + id_padding_x lesson_id_y = lesson_y + id_padding_y ax.annotate(lesson_id, xy=( lesson_x, lesson_y), xytext=( lesson_id_x, lesson_id_y)) if show_legend: ax.legend(loc='upper left') if force_invariant_axis_limits: x = [] y = [] if show_students: x += np.unique(model.student_embeddings[:, 0, :]).tolist() y += np.unique(model.student_embeddings[:, 1, :]).tolist() if show_assessments: x += np.unique(model.assessment_embeddings[:, 0]).tolist() y += np.unique(model.assessment_embeddings[:, 1]).tolist() if show_lessons: x += np.unique(model.lesson_embeddings[:, 0] + ( model.prereq_embeddings[:, 0] if show_prereqs else 0)).tolist() y += np.unique(model.lesson_embeddings[:, 1] + ( model.prereq_embeddings[:, 1] if show_prereqs else 0)).tolist() if show_concepts: x += np.unique(model.concept_embeddings[:, 0]).tolist() y += np.unique(model.concept_embeddings[:, 1]).tolist() ax.set_xlim([min(x)-axis_limit_padding, max(x)+axis_limit_padding]) ax.set_ylim([min(y)-axis_limit_padding, max(y)+axis_limit_padding]) if x_axis_limits is not None: ax.set_xlim(x_axis_limits) if y_axis_limits is not None: ax.set_ylim(y_axis_limits) if title is None: title = 'Latent Skill Space' ax.set_title(title) ax.set_xlabel('Skill 1') ax.set_ylabel('Skill 2') plt.show()
apache-2.0
pidydx/grr
grr/lib/log_test.py
1
2267
#!/usr/bin/env python
"""Tests for logging classes."""

import logging
import time

from werkzeug import wrappers as werkzeug_wrappers

from grr.gui import wsgiapp
from grr.lib import flags
from grr.lib import log
from grr.lib import stats
from grr.lib import test_lib
from grr.lib import utils
from grr.proto import jobs_pb2


class ApplicationLoggerTests(test_lib.GRRBaseTest):
  """Store tests."""

  def Log(self, msg, *args):
    # Stand-in for logging.info: accumulate the formatted message so the
    # assertions below can inspect what would have been logged.
    self.log += (msg % args) if args else msg

  def setUp(self):
    super(ApplicationLoggerTests, self).setUp()

    self.l = log.GrrApplicationLogger()
    self.log = ""
    # Capture logging.info output for the duration of each test.
    self.log_stubber = utils.Stubber(logging, "info", self.Log)
    self.log_stubber.Start()

  def tearDown(self):
    super(ApplicationLoggerTests, self).tearDown()
    self.log_stubber.Stop()

  def testGetEventId(self):
    # Event ids must be long enough to be unique, with or without an
    # explicit microsecond timestamp argument.
    self.assertGreater(
        len(self.l.GetNewEventId()), 20, "Invalid event ID generated")
    self.assertGreater(
        len(self.l.GetNewEventId(int(time.time() * 1e6))), 20,
        "Invalid event ID generated")

  def testLogHttpAdminUIAccess(self):
    stats.STATS.RegisterCounterMetric("grr_gin_request_count")

    request = wsgiapp.HttpRequest({
        "wsgi.url_scheme": "http",
        "SERVER_NAME": "foo.bar",
        "SERVER_PORT": "1234"
    })
    request.user = "testuser"

    response = werkzeug_wrappers.Response(
        status=202,
        headers={"X-GRR-Reason": "foo/test1234",
                 "X-API-Method": "TestMethod"})

    self.l.LogHttpAdminUIAccess(request, response)
    # The X-GRR-Reason header value must end up in the logged line.
    self.assertIn("foo/test1234", self.log)

  def testLogHttpFrontendAccess(self):
    request = self._GenHttpRequestProto()

    self.l.LogHttpFrontendAccess(request)
    # The (still percent-encoded) URL must end up in the logged line.
    self.assertIn("/test?omg=11%45x%20%20", self.log)

  def _GenHttpRequestProto(self):
    """Create a valid request object."""
    request = jobs_pb2.HttpRequest()
    request.source_ip = "127.0.0.1"
    request.user_agent = "Firefox or something"
    request.url = "http://test.com/test?omg=11%45x%20%20"
    request.user = "anonymous"
    request.timestamp = int(time.time() * 1e6)
    request.size = 1000
    return request


def main(argv):
  test_lib.main(argv=argv)


if __name__ == "__main__":
  flags.StartMain(main)
apache-2.0
pgmillon/ansible
lib/ansible/modules/network/f5/bigip_sys_db.py
38
11001
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2016, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}

DOCUMENTATION = r'''
---
module: bigip_sys_db
short_description: Manage BIG-IP system database variables
description:
  - Manage BIG-IP system database variables
version_added: 2.2
options:
  key:
    description:
      - The database variable to manipulate.
    type: str
    required: True
  state:
    description:
      - The state of the variable on the system. When C(present), guarantees
        that an existing variable is set to C(value). When C(reset) sets the
        variable back to the default value. At least one of value and state
        C(reset) are required.
    type: str
    choices:
      - present
      - reset
    default: present
  value:
    description:
      - The value to set the key to. At least one of value and state C(reset)
        are required.
    type: str
notes:
  - Requires BIG-IP version 12.0.0 or greater
extends_documentation_fragment: f5
author:
  - Tim Rupp (@caphrim007)
'''

EXAMPLES = r'''
- name: Set the boot.quiet DB variable on the BIG-IP
  bigip_sys_db:
    key: boot.quiet
    value: disable
    provider:
      user: admin
      password: secret
      server: lb.mydomain.com
  delegate_to: localhost

- name: Disable the initial setup screen
  bigip_sys_db:
    key: setup.run
    value: false
    provider:
      user: admin
      password: secret
      server: lb.mydomain.com
  delegate_to: localhost

- name: Reset the initial setup screen
  bigip_sys_db:
    key: setup.run
    state: reset
    provider:
      user: admin
      password: secret
      server: lb.mydomain.com
  delegate_to: localhost
'''

RETURN = r'''
name:
  description: The key in the system database that was specified
  returned: changed and success
  type: str
  sample: setup.run
default_value:
  description: The default value of the key
  returned: changed and success
  type: str
  sample: true
value:
  description: The value that you set the key to
  returned: changed and success
  type: str
  sample: false
'''

from ansible.module_utils.basic import AnsibleModule

try:
    # In-tree development layout.
    from library.module_utils.network.f5.bigip import F5RestClient
    from library.module_utils.network.f5.common import F5ModuleError
    from library.module_utils.network.f5.common import AnsibleF5Parameters
    from library.module_utils.network.f5.common import f5_argument_spec
except ImportError:
    # Installed Ansible layout.
    from ansible.module_utils.network.f5.bigip import F5RestClient
    from ansible.module_utils.network.f5.common import F5ModuleError
    from ansible.module_utils.network.f5.common import AnsibleF5Parameters
    from ansible.module_utils.network.f5.common import f5_argument_spec


class Parameters(AnsibleF5Parameters):
    """Shared parameter definitions for this module.

    The class-level lists below drive the generic F5 parameter machinery:
    how REST attribute names map to module names, which attributes are sent
    to the device, which are diffed for idempotency, and which are reported
    back to the user.
    """

    # REST API attribute name -> module parameter name.
    api_map = {
        'defaultValue': 'default_value'
    }

    # Attributes included in the payload sent to the device.
    api_attributes = [
        'value',
    ]

    # Attributes compared between want/have to decide if an update is needed.
    updatables = [
        'value',
    ]

    # Attributes included in the module's result.
    returnables = [
        'name',
        'value',
        'default_value',
    ]


class ApiParameters(Parameters):
    # Parameters as read back from the device.
    pass


class ModuleParameters(Parameters):
    # Parameters as supplied by the playbook/user.
    pass


class Changes(Parameters):
    def to_return(self):
        """Flatten the returnable attributes into a plain result dict.

        NOTE(review): the blanket `except Exception: pass` silently drops
        all returnables if any single attribute access raises — confirm
        this best-effort behavior is intended before tightening it.
        """
        result = {}
        try:
            for returnable in self.returnables:
                change = getattr(self, returnable)
                if isinstance(change, dict):
                    result.update(change)
                else:
                    result[returnable] = change
            result = self._filter_params(result)
        except Exception:
            pass
        return result


class UsableChanges(Changes):
    # Changes in the form sent to the device.
    pass


class ReportableChanges(Changes):
    # Changes in the form reported back to the user.
    pass


class Difference(object):
    """Computes per-parameter differences between want and have."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        # Prefer a dedicated property (e.g. `value` below); otherwise fall
        # back to a plain inequality comparison.
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            # `have` does not carry this attribute; treat `want` as the diff.
            return attr1

    @property
    def value(self):
        # For state=reset, the target value is the device's default value
        # (only a diff when the current value differs from that default).
        if self.want.state == 'reset':
            if str(self.have.value) != str(self.have.default_value):
                return self.have.default_value
        if self.want.value != self.have.value:
            return self.want.value


class ModuleManager(object):
    """Drives the present/reset workflow against the device REST API."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.pop('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _announce_deprecations(self, result):
        # Surface any deprecation warnings queued in the result.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def _update_changed_options(self):
        # Diff want vs have; returns True (and populates self.changes)
        # when at least one updatable attribute differs.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            # Always report the key name and default alongside the change.
            changed['name'] = self.want.key
            changed['default_value'] = self.have.default_value
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def exec_module(self):
        """Entry point: dispatch on state and assemble the module result."""
        changed = False
        result = dict()
        state = self.want.state

        if state == "present":
            changed = self.present()
        elif state == "reset":
            changed = self.reset()

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def present(self):
        # "exists" here means "the key already holds the desired value"
        # (see exists() below), so no update is needed in that case.
        if self.exists():
            return False
        else:
            return self.update()

    def reset(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.reset_on_device()
        # Re-point want at the default value so the post-reset verification
        # via exists() compares against it.
        self.want.update({'key': self.want.key})
        self.want.update({'value': self.have.default_value})
        if self.exists():
            return True
        else:
            raise F5ModuleError(
                "Failed to reset the DB variable"
            )

    def update(self):
        if self.want.value is None:
            raise F5ModuleError(
                "When setting a key, a value must be supplied"
            )
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exists(self):
        """Return True when the DB key currently holds the wanted value."""
        uri = "https://{0}:{1}/mgmt/tm/sys/db/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.key
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        # NOTE(review): assumes the GET response always contains 'value';
        # an unknown key would surface as a KeyError here rather than a
        # clean module error — confirm against F5RestClient behavior.
        if str(response['value']) == str(self.want.value):
            return True
        return False

    def read_current_from_device(self):
        """Fetch the key's current state from the device as ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/sys/db/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.key
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)

    def update_on_device(self):
        """PATCH the wanted value onto the device."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/sys/db/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.key
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def reset_on_device(self):
        """PATCH the key back to its device-reported default value."""
        uri = "https://{0}:{1}/mgmt/tm/sys/db/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            self.want.key
        )
        params = dict(
            value=self.have.default_value
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)


class ArgumentSpec(object):
    """Builds the AnsibleModule argument spec (module args + common F5 args)."""

    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            key=dict(required=True),
            state=dict(
                default='present',
                choices=['present', 'reset']
            ),
            value=dict()
        )
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)


def main():
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    try:
        mm = ModuleManager(module=module)
        results = mm.exec_module()
        module.exit_json(**results)
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))


if __name__ == '__main__':
    main()
gpl-3.0
rkvsraman/gitinspector
gitinspector/timeline.py
47
8918
# coding: utf-8
#
# Copyright © 2012-2013 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.

from __future__ import print_function
from __future__ import unicode_literals
from localization import N_
from outputable import Outputable
import datetime
import format
import gravatar
import terminal
import textwrap

class TimelineData:
	"""Aggregates per-author insertion/deletion counts into per-period
	(week or month) buckets, plus per-period totals."""

	def __init__(self, changes, useweeks):
		authordateinfo_list = sorted(changes.get_authordateinfo_list().items())
		self.changes = changes
		# (author, period) -> change-info entry with .insertions/.deletions
		self.entries = {}
		# period -> (insertions, deletions, insertions + deletions)
		self.total_changes_by_period = {}
		self.useweeks = useweeks

		for i in authordateinfo_list:
			key = None

			if useweeks:
				# Period key "YYYYWnn" from the ISO calendar week of the date.
				yearweek = datetime.date(int(i[0][0][0:4]), int(i[0][0][5:7]), int(i[0][0][8:10])).isocalendar()
				key = (i[0][1], str(yearweek[0]) + "W" + "{0:02d}".format(yearweek[1]))
			else:
				# Period key "YYYY-MM" (first 7 chars of the ISO date).
				key = (i[0][1], i[0][0][0:7])

			if self.entries.get(key, None) == None:
				self.entries[key] = i[1]
			else:
				# Merge multiple raw entries that fall into the same period.
				self.entries[key].insertions += i[1].insertions
				self.entries[key].deletions += i[1].deletions

		for period in self.get_periods():
			total_insertions = 0
			total_deletions = 0

			for author in self.get_authors():
				entry = self.entries.get((author[0], period), None)
				if entry != None:
					total_insertions += entry.insertions
					total_deletions += entry.deletions

			self.total_changes_by_period[period] = (total_insertions, total_deletions,
			                                        total_insertions + total_deletions)

	def get_periods(self):
		# Sorted, de-duplicated list of all period keys.
		return sorted(set([i[1] for i in self.entries]))

	def get_total_changes_in_period(self, period):
		return self.total_changes_by_period[period]

	def get_authors(self):
		# Sorted, de-duplicated (author name, latest email) pairs.
		return sorted(set([(i[0][0], self.changes.get_latest_email_by_author(i[0][0])) for i in self.entries.items()]))

	def get_author_signs_in_period(self, author, period, multiplier):
		"""Return (num '+' signs, num '-' signs) for an author in a period,
		proportional to their share of the period's total changes."""
		authorinfo = self.entries.get((author, period), None)
		total = float(self.total_changes_by_period[period][2])

		if authorinfo:
			i = multiplier * (self.entries[(author, period)].insertions / total)
			j = multiplier * (self.entries[(author, period)].deletions / total)
			return (int(i), int(j))
		else:
			return (0, 0)

	def get_multiplier(self, period, max_width):
		"""Find the smallest multiplier (in 0.25 steps) at which some author's
		bar in this period would exceed max_width characters."""
		multiplier = 0

		while True:
			for i in self.entries:
				entry = self.entries.get(i)

				if period == i[1]:
					changes_in_period = float(self.total_changes_by_period[i[1]][2])
					if multiplier * (entry.insertions + entry.deletions) / changes_in_period > max_width:
						return multiplier

			multiplier += 0.25

	def is_author_in_period(self, period, author):
		return self.entries.get((author, period), None) != None

	def is_author_in_periods(self, periods, author):
		# True if the author has an entry in at least one of the periods.
		for period in periods:
			if self.is_author_in_period(period, author):
				return True
		return False

TIMELINE_INFO_TEXT = N_("The following history timeline has been gathered from the repository")
MODIFIED_ROWS_TEXT = N_("Modified Rows:")

def __output_row__text__(timeline_data, periods, names):
	# Render one group of period columns as a plain-text table.
	print("\n" + terminal.__bold__ + terminal.ljust(_("Author"), 20), end=" ")

	for period in periods:
		print(terminal.rjust(period, 10), end=" ")

	print(terminal.__normal__)

	for name in names:
		if timeline_data.is_author_in_periods(periods, name[0]):
			print(terminal.ljust(name[0], 20)[0:20 - terminal.get_excess_column_count(name[0])], end=" ")

			for period in periods:
				multiplier = timeline_data.get_multiplier(period, 9)
				signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
				signs_str = (signs[1] * "-" + signs[0] * "+")
				# "." marks a period where the author was active but their
				# share was too small to produce any sign characters.
				print (("." if timeline_data.is_author_in_period(period, name[0]) and
				        len(signs_str) == 0 else signs_str).rjust(10), end=" ")
			print("")

	print(terminal.__bold__ + terminal.ljust(_(MODIFIED_ROWS_TEXT), 20) + terminal.__normal__, end=" ")

	for period in periods:
		total_changes = str(timeline_data.get_total_changes_in_period(period)[2])
		if hasattr(total_changes, 'decode'):
			# Python 2: decode byte strings before printing.
			total_changes = total_changes.decode("utf-8", "replace")
		print(terminal.rjust(total_changes, 10), end=" ")

	print("")

def __output_row__html__(timeline_data, periods, names):
	# Render one group of period columns as an HTML table.
	timeline_xml = "<table class=\"git full\"><thead><tr><th>" + _("Author") + "</th>"

	for period in periods:
		timeline_xml += "<th>" + str(period) + "</th>"

	timeline_xml += "</tr></thead><tbody>"
	i = 0

	for name in names:
		if timeline_data.is_author_in_periods(periods, name[0]):
			timeline_xml += "<tr" + (" class=\"odd\">" if i % 2 == 1 else ">")

			if format.get_selected() == "html":
				timeline_xml += "<td><img src=\"{0}\"/>{1}</td>".format(gravatar.get_url(name[1]), name[0])
			else:
				timeline_xml += "<td>" + name[0] + "</td>"

			for period in periods:
				multiplier = timeline_data.get_multiplier(period, 18)
				signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
				signs_str = (signs[1] * "<div class=\"remove\">&nbsp;</div>" +
				             signs[0] * "<div class=\"insert\">&nbsp;</div>")

				timeline_xml += "<td>" + ("." if timeline_data.is_author_in_period(period, name[0]) and
				                          len(signs_str) == 0 else signs_str)
				timeline_xml += "</td>"

			timeline_xml += "</tr>"
			i = i + 1

	timeline_xml += "<tfoot><tr><td><strong>" + _(MODIFIED_ROWS_TEXT) + "</strong></td>"

	for period in periods:
		total_changes = timeline_data.get_total_changes_in_period(period)
		timeline_xml += "<td>" + str(total_changes[2]) + "</td>"

	timeline_xml += "</tr></tfoot></tbody></table>"
	print(timeline_xml)

class Timeline(Outputable):
	"""Outputable that renders the per-period timeline in text, HTML or XML."""

	def __init__(self, changes, useweeks):
		self.changes = changes
		self.useweeks = useweeks
		Outputable.__init__(self)

	def output_text(self):
		if self.changes.get_commits():
			print("\n" + textwrap.fill(_(TIMELINE_INFO_TEXT) + ":", width=terminal.get_size()[0]))

			timeline_data = TimelineData(self.changes, self.useweeks)
			periods = timeline_data.get_periods()
			names = timeline_data.get_authors()
			(width, _unused) = terminal.get_size()
			# Each period column takes 11 chars; 21 chars reserved for names.
			max_periods_per_row = int((width - 21) / 11)

			for i in range(0, len(periods), max_periods_per_row):
				__output_row__text__(timeline_data, periods[i:i+max_periods_per_row], names)

	def output_html(self):
		if self.changes.get_commits():
			timeline_data = TimelineData(self.changes, self.useweeks)
			periods = timeline_data.get_periods()
			names = timeline_data.get_authors()
			max_periods_per_row = 8

			timeline_xml = "<div><div id=\"timeline\" class=\"box\">"
			timeline_xml += "<p>" + _(TIMELINE_INFO_TEXT) + ".</p>"
			print(timeline_xml)

			for i in range(0, len(periods), max_periods_per_row):
				__output_row__html__(timeline_data, periods[i:i+max_periods_per_row], names)

			timeline_xml = "</div></div>"
			print(timeline_xml)

	def output_xml(self):
		if self.changes.get_commits():
			message_xml = "\t\t<message>" + _(TIMELINE_INFO_TEXT) + "</message>\n"
			timeline_xml = ""
			periods_xml = "\t\t<periods length=\"{0}\">\n".format("week" if self.useweeks else "month")

			timeline_data = TimelineData(self.changes, self.useweeks)
			periods = timeline_data.get_periods()
			names = timeline_data.get_authors()

			for period in periods:
				name_xml = "\t\t\t\t<name>" + str(period) + "</name>\n"
				authors_xml = "\t\t\t\t<authors>\n"

				for name in names:
					if timeline_data.is_author_in_period(period, name[0]):
						multiplier = timeline_data.get_multiplier(period, 24)
						signs = timeline_data.get_author_signs_in_period(name[0], period, multiplier)
						signs_str = (signs[1] * "-" + signs[0] * "+")

						if len(signs_str) == 0:
							signs_str = "."

						authors_xml += "\t\t\t\t\t<author>\n\t\t\t\t\t\t<name>" + name[0] + "</name>\n"
						authors_xml += "\t\t\t\t\t\t<gravatar>" + gravatar.get_url(name[1]) + "</gravatar>\n"
						authors_xml += "\t\t\t\t\t\t<work>" + signs_str + "</work>\n\t\t\t\t\t</author>\n"

				authors_xml += "\t\t\t\t</authors>\n"
				modified_rows_xml = "\t\t\t\t<modified_rows>" + \
				                    str(timeline_data.get_total_changes_in_period(period)[2]) + "</modified_rows>\n"
				timeline_xml += "\t\t\t<period>\n" + name_xml + authors_xml + modified_rows_xml + "\t\t\t</period>\n"

			print("\t<timeline>\n" + message_xml + periods_xml + timeline_xml + "\t\t</periods>\n\t</timeline>")
gpl-3.0
suutari/shoop
shuup/simple_supplier/models.py
1
3252
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.conf import settings
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from enumfields import EnumIntegerField

from shuup.core.fields import MoneyValueField, QuantityField
from shuup.core.suppliers.enums import StockAdjustmentType
from shuup.utils.properties import PriceProperty


def _get_currency():
    # Imported lazily to avoid a circular import with shuup.core.models.
    from shuup.core.models import Shop
    # Single-shop installations use the only shop's currency; multi-shop
    # installations fall back to the configured home currency.
    if not settings.SHUUP_ENABLE_MULTIPLE_SHOPS:
        return Shop.objects.first().currency
    return settings.SHUUP_HOME_CURRENCY


def _get_prices_include_tax():
    # Imported lazily to avoid a circular import with shuup.core.models.
    from shuup.core.models import Shop
    # Single-shop installations use the only shop's taxfulness; multi-shop
    # installations default to tax-exclusive prices.
    if not settings.SHUUP_ENABLE_MULTIPLE_SHOPS:
        return Shop.objects.first().prices_include_tax
    return False


class StockAdjustment(models.Model):
    # A single stock change event (inventory correction or restock) for a
    # (product, supplier) pair.
    product = models.ForeignKey("shuup.Product", related_name="+", on_delete=models.CASCADE, verbose_name=_("product"))
    supplier = models.ForeignKey("shuup.Supplier", on_delete=models.CASCADE, verbose_name=_("supplier"))
    created_on = models.DateTimeField(auto_now_add=True, editable=False, db_index=True, verbose_name=_("created on"))
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.PROTECT, verbose_name=_("created by"))
    # Signed quantity change applied by this adjustment.
    delta = QuantityField(default=0, verbose_name=_("delta"))
    purchase_price_value = MoneyValueField(default=0)
    # Wraps purchase_price_value together with currency/taxfulness below.
    purchase_price = PriceProperty("purchase_price_value", "currency", "includes_tax")
    type = EnumIntegerField(
        StockAdjustmentType, db_index=True, default=StockAdjustmentType.INVENTORY, verbose_name=_("type"))

    @cached_property
    def currency(self):
        return _get_currency()

    @cached_property
    def includes_tax(self):
        return _get_prices_include_tax()


class StockCount(models.Model):
    # Cached stock state per (product, supplier); all fields are
    # non-editable because they are maintained by the supplier module.
    alert_limit = QuantityField(default=0, editable=False, verbose_name=_("alert limit"))
    product = models.ForeignKey(
        "shuup.Product", related_name="+", editable=False, on_delete=models.CASCADE, verbose_name=_("product"))
    supplier = models.ForeignKey(
        "shuup.Supplier", editable=False, on_delete=models.CASCADE, verbose_name=_("supplier"))
    logical_count = QuantityField(default=0, editable=False, verbose_name=_("logical count"))
    physical_count = QuantityField(default=0, editable=False, verbose_name=_("physical count"))
    stock_value_value = MoneyValueField(default=0)
    stock_value = PriceProperty("stock_value_value", "currency", "includes_tax")
    # Backed by the stock_unit_price_value property below.
    stock_unit_price = PriceProperty("stock_unit_price_value", "currency", "includes_tax")

    class Meta:
        unique_together = [("product", "supplier")]

    @cached_property
    def currency(self):
        return _get_currency()

    @cached_property
    def includes_tax(self):
        return _get_prices_include_tax()

    @property
    def stock_unit_price_value(self):
        # Average unit value; 0 when there is no logical stock (avoids
        # division by zero).
        return (self.stock_value_value / self.logical_count if self.logical_count else 0)
agpl-3.0
andaag/scikit-learn
sklearn/naive_bayes.py
128
28358
# -*- coding: utf-8 -*- """ The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These are supervised learning methods based on applying Bayes' theorem with strong (naive) feature independence assumptions. """ # Author: Vincent Michel <vincent.michel@inria.fr> # Minor fixes by Fabian Pedregosa # Amit Aides <amitibo@tx.technion.ac.il> # Yehuda Finkelstein <yehudaf@tx.technion.ac.il> # Lars Buitinck <L.J.Buitinck@uva.nl> # Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # (parts based on earlier work by Mathieu Blondel) # # License: BSD 3 clause from abc import ABCMeta, abstractmethod import numpy as np from scipy.sparse import issparse from .base import BaseEstimator, ClassifierMixin from .preprocessing import binarize from .preprocessing import LabelBinarizer from .preprocessing import label_binarize from .utils import check_X_y, check_array from .utils.extmath import safe_sparse_dot, logsumexp from .utils.multiclass import _check_partial_fit_first_call from .utils.fixes import in1d from .utils.validation import check_is_fitted from .externals import six __all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB'] class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)): """Abstract base class for naive Bayes estimators""" @abstractmethod def _joint_log_likelihood(self, X): """Compute the unnormalized posterior log probability of X I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of shape [n_classes, n_samples]. Input is passed to _joint_log_likelihood as-is by predict, predict_proba and predict_log_proba. """ def predict(self, X): """ Perform classification on an array of test vectors X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] Predicted target values for X """ jll = self._joint_log_likelihood(X) return self.classes_[np.argmax(jll, axis=1)] def predict_log_proba(self, X): """ Return log-probability estimates for the test vector X. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array-like, shape = [n_samples, n_classes] Returns the log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ jll = self._joint_log_likelihood(X) # normalize by P(x) = P(f_1, ..., f_n) log_prob_x = logsumexp(jll, axis=1) return jll - np.atleast_2d(log_prob_x).T def predict_proba(self, X): """ Return probability estimates for the test vector X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array-like, shape = [n_samples, n_classes] Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ return np.exp(self.predict_log_proba(X)) class GaussianNB(BaseNB): """ Gaussian Naive Bayes (GaussianNB) Can perform online updates to model parameters via `partial_fit` method. For details on algorithm used to update feature means and variance online, see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Read more in the :ref:`User Guide <gaussian_naive_bayes>`. Attributes ---------- class_prior_ : array, shape (n_classes,) probability of each class. class_count_ : array, shape (n_classes,) number of training samples observed in each class. 
theta_ : array, shape (n_classes, n_features) mean of each feature per class sigma_ : array, shape (n_classes, n_features) variance of each feature per class Examples -------- >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> Y = np.array([1, 1, 1, 2, 2, 2]) >>> from sklearn.naive_bayes import GaussianNB >>> clf = GaussianNB() >>> clf.fit(X, Y) GaussianNB() >>> print(clf.predict([[-0.8, -1]])) [1] >>> clf_pf = GaussianNB() >>> clf_pf.partial_fit(X, Y, np.unique(Y)) GaussianNB() >>> print(clf_pf.predict([[-0.8, -1]])) [1] """ def fit(self, X, y, sample_weight=None): """Fit Gaussian Naive Bayes according to X, y Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. """ X, y = check_X_y(X, y) return self._partial_fit(X, y, np.unique(y), _refit=True, sample_weight=sample_weight) @staticmethod def _update_mean_variance(n_past, mu, var, X, sample_weight=None): """Compute online update of Gaussian mean and variance. Given starting sample count, mean, and variance, a new set of points X, and optionally sample weights, return the updated mean and variance. (NB - each dimension (column) in X is treated as independent -- you get variance, not covariance). Can take scalar mean and variance, or vector mean and variance to simultaneously update a number of independent Gaussians. See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Parameters ---------- n_past : int Number of samples represented in old mean and variance. 
If sample weights were given, this should contain the sum of sample weights represented in old mean and variance. mu : array-like, shape (number of Gaussians,) Means for Gaussians in original set. var : array-like, shape (number of Gaussians,) Variances for Gaussians in original set. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- total_mu : array-like, shape (number of Gaussians,) Updated mean for each Gaussian over the combined set. total_var : array-like, shape (number of Gaussians,) Updated variance for each Gaussian over the combined set. """ if X.shape[0] == 0: return mu, var # Compute (potentially weighted) mean and variance of new datapoints if sample_weight is not None: n_new = float(sample_weight.sum()) new_mu = np.average(X, axis=0, weights=sample_weight / n_new) new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight / n_new) else: n_new = X.shape[0] new_var = np.var(X, axis=0) new_mu = np.mean(X, axis=0) if n_past == 0: return new_mu, new_var n_total = float(n_past + n_new) # Combine mean of old and new data, taking into consideration # (weighted) number of observations total_mu = (n_new * new_mu + n_past * mu) / n_total # Combine variance of old and new data, taking into consideration # (weighted) number of observations. This is achieved by combining # the sum-of-squared-differences (ssd) old_ssd = n_past * var new_ssd = n_new * new_var total_ssd = (old_ssd + new_ssd + (n_past / float(n_new * n_total)) * (n_new * mu - n_new * new_mu) ** 2) total_var = total_ssd / n_total return total_mu, total_var def partial_fit(self, X, y, classes=None, sample_weight=None): """Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once. 
This method has some performance and numerical stability overhead, hence it is better to call partial_fit on chunks of data that are as large as possible (as long as fitting in the memory budget) to hide the overhead. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. classes : array-like, shape (n_classes,) List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. """ return self._partial_fit(X, y, classes, _refit=False, sample_weight=sample_weight) def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None): """Actual implementation of Gaussian NB fitting. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. classes : array-like, shape (n_classes,) List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. _refit: bool If true, act as though this were the first time we called _partial_fit (ie, throw away any past fitting and start over). sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. 
""" X, y = check_X_y(X, y) epsilon = 1e-9 if _refit: self.classes_ = None if _check_partial_fit_first_call(self, classes): # This is the first call to partial_fit: # initialize various cumulative counters n_features = X.shape[1] n_classes = len(self.classes_) self.theta_ = np.zeros((n_classes, n_features)) self.sigma_ = np.zeros((n_classes, n_features)) self.class_prior_ = np.zeros(n_classes) self.class_count_ = np.zeros(n_classes) else: if X.shape[1] != self.theta_.shape[1]: msg = "Number of features %d does not match previous data %d." raise ValueError(msg % (X.shape[1], self.theta_.shape[1])) # Put epsilon back in each time self.sigma_[:, :] -= epsilon classes = self.classes_ unique_y = np.unique(y) unique_y_in_classes = in1d(unique_y, classes) if not np.all(unique_y_in_classes): raise ValueError("The target label(s) %s in y do not exist in the " "initial classes %s" % (y[~unique_y_in_classes], classes)) for y_i in unique_y: i = classes.searchsorted(y_i) X_i = X[y == y_i, :] if sample_weight is not None: sw_i = sample_weight[y == y_i] N_i = sw_i.sum() else: sw_i = None N_i = X_i.shape[0] new_theta, new_sigma = self._update_mean_variance( self.class_count_[i], self.theta_[i, :], self.sigma_[i, :], X_i, sw_i) self.theta_[i, :] = new_theta self.sigma_[i, :] = new_sigma self.class_count_[i] += N_i self.sigma_[:, :] += epsilon self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_) return self def _joint_log_likelihood(self, X): check_is_fitted(self, "classes_") X = check_array(X) joint_log_likelihood = [] for i in range(np.size(self.classes_)): jointi = np.log(self.class_prior_[i]) n_ij = - 0.5 * np.sum(np.log(2. 
* np.pi * self.sigma_[i, :])) n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) / (self.sigma_[i, :]), 1) joint_log_likelihood.append(jointi + n_ij) joint_log_likelihood = np.array(joint_log_likelihood).T return joint_log_likelihood class BaseDiscreteNB(BaseNB): """Abstract base class for naive Bayes on discrete/categorical data Any estimator based on this class should provide: __init__ _joint_log_likelihood(X) as per BaseNB """ def _update_class_log_prior(self, class_prior=None): n_classes = len(self.classes_) if class_prior is not None: if len(class_prior) != n_classes: raise ValueError("Number of priors must match number of" " classes.") self.class_log_prior_ = np.log(class_prior) elif self.fit_prior: # empirical prior, with sample_weight taken into account self.class_log_prior_ = (np.log(self.class_count_) - np.log(self.class_count_.sum())) else: self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes) def partial_fit(self, X, y, classes=None, sample_weight=None): """Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once. This method has some performance overhead hence it is better to call partial_fit on chunks of data that are as large as possible (as long as fitting in the memory budget) to hide the overhead. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. classes : array-like, shape = [n_classes] List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. sample_weight : array-like, shape = [n_samples], optional Weights applied to individual samples (1. 
for unweighted). Returns ------- self : object Returns self. """ X = check_array(X, accept_sparse='csr', dtype=np.float64) _, n_features = X.shape if _check_partial_fit_first_call(self, classes): # This is the first call to partial_fit: # initialize various cumulative counters n_effective_classes = len(classes) if len(classes) > 1 else 2 self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64) self.feature_count_ = np.zeros((n_effective_classes, n_features), dtype=np.float64) elif n_features != self.coef_.shape[1]: msg = "Number of features %d does not match previous data %d." raise ValueError(msg % (n_features, self.coef_.shape[-1])) Y = label_binarize(y, classes=self.classes_) if Y.shape[1] == 1: Y = np.concatenate((1 - Y, Y), axis=1) n_samples, n_classes = Y.shape if X.shape[0] != Y.shape[0]: msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible." raise ValueError(msg % (X.shape[0], y.shape[0])) # label_binarize() returns arrays with dtype=np.int64. # We convert it to np.float64 to support sample_weight consistently Y = Y.astype(np.float64) if sample_weight is not None: Y *= check_array(sample_weight).T class_prior = self.class_prior # Count raw events from data before updating the class log prior # and feature log probas self._count(X, Y) # XXX: OPTIM: we could introduce a public finalization method to # be called by the user explicitly just once after several consecutive # calls to partial_fit and prior any call to predict[_[log_]proba] # to avoid computing the smooth log probas at each call to partial fit self._update_feature_log_prob() self._update_class_log_prior(class_prior=class_prior) return self def fit(self, X, y, sample_weight=None): """Fit Naive Bayes classifier according to X, y Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. 
sample_weight : array-like, shape = [n_samples], optional Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. """ X, y = check_X_y(X, y, 'csr') _, n_features = X.shape labelbin = LabelBinarizer() Y = labelbin.fit_transform(y) self.classes_ = labelbin.classes_ if Y.shape[1] == 1: Y = np.concatenate((1 - Y, Y), axis=1) # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64. # We convert it to np.float64 to support sample_weight consistently; # this means we also don't have to cast X to floating point Y = Y.astype(np.float64) if sample_weight is not None: Y *= check_array(sample_weight).T class_prior = self.class_prior # Count raw events from data before updating the class log prior # and feature log probas n_effective_classes = Y.shape[1] self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64) self.feature_count_ = np.zeros((n_effective_classes, n_features), dtype=np.float64) self._count(X, Y) self._update_feature_log_prob() self._update_class_log_prior(class_prior=class_prior) return self # XXX The following is a stopgap measure; we need to set the dimensions # of class_log_prior_ and feature_log_prob_ correctly. def _get_coef(self): return (self.feature_log_prob_[1:] if len(self.classes_) == 2 else self.feature_log_prob_) def _get_intercept(self): return (self.class_log_prior_[1:] if len(self.classes_) == 2 else self.class_log_prior_) coef_ = property(_get_coef) intercept_ = property(_get_intercept) class MultinomialNB(BaseDiscreteNB): """ Naive Bayes classifier for multinomial models The multinomial Naive Bayes classifier is suitable for classification with discrete features (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work. Read more in the :ref:`User Guide <multinomial_naive_bayes>`. 
Parameters ---------- alpha : float, optional (default=1.0) Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing). fit_prior : boolean Whether to learn class prior probabilities or not. If false, a uniform prior will be used. class_prior : array-like, size (n_classes,) Prior probabilities of the classes. If specified the priors are not adjusted according to the data. Attributes ---------- class_log_prior_ : array, shape (n_classes, ) Smoothed empirical log probability for each class. intercept_ : property Mirrors ``class_log_prior_`` for interpreting MultinomialNB as a linear model. feature_log_prob_ : array, shape (n_classes, n_features) Empirical log probability of features given a class, ``P(x_i|y)``. coef_ : property Mirrors ``feature_log_prob_`` for interpreting MultinomialNB as a linear model. class_count_ : array, shape (n_classes,) Number of samples encountered for each class during fitting. This value is weighted by the sample weight when provided. feature_count_ : array, shape (n_classes, n_features) Number of samples encountered for each (class, feature) during fitting. This value is weighted by the sample weight when provided. Examples -------- >>> import numpy as np >>> X = np.random.randint(5, size=(6, 100)) >>> y = np.array([1, 2, 3, 4, 5, 6]) >>> from sklearn.naive_bayes import MultinomialNB >>> clf = MultinomialNB() >>> clf.fit(X, y) MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True) >>> print(clf.predict(X[2])) [3] Notes ----- For the rationale behind the names `coef_` and `intercept_`, i.e. naive Bayes as a linear classifier, see J. Rennie et al. (2003), Tackling the poor assumptions of naive Bayes text classifiers, ICML. References ---------- C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 234-265. 
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html """ def __init__(self, alpha=1.0, fit_prior=True, class_prior=None): self.alpha = alpha self.fit_prior = fit_prior self.class_prior = class_prior def _count(self, X, Y): """Count and smooth feature occurrences.""" if np.any((X.data if issparse(X) else X) < 0): raise ValueError("Input X must be non-negative") self.feature_count_ += safe_sparse_dot(Y.T, X) self.class_count_ += Y.sum(axis=0) def _update_feature_log_prob(self): """Apply smoothing to raw counts and recompute log probabilities""" smoothed_fc = self.feature_count_ + self.alpha smoothed_cc = smoothed_fc.sum(axis=1) self.feature_log_prob_ = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))) def _joint_log_likelihood(self, X): """Calculate the posterior log probability of the samples X""" check_is_fitted(self, "classes_") X = check_array(X, accept_sparse='csr') return (safe_sparse_dot(X, self.feature_log_prob_.T) + self.class_log_prior_) class BernoulliNB(BaseDiscreteNB): """Naive Bayes classifier for multivariate Bernoulli models. Like MultinomialNB, this classifier is suitable for discrete data. The difference is that while MultinomialNB works with occurrence counts, BernoulliNB is designed for binary/boolean features. Read more in the :ref:`User Guide <bernoulli_naive_bayes>`. Parameters ---------- alpha : float, optional (default=1.0) Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing). binarize : float or None, optional Threshold for binarizing (mapping to booleans) of sample features. If None, input is presumed to already consist of binary vectors. fit_prior : boolean Whether to learn class prior probabilities or not. If false, a uniform prior will be used. class_prior : array-like, size=[n_classes,] Prior probabilities of the classes. If specified the priors are not adjusted according to the data. 
Attributes ---------- class_log_prior_ : array, shape = [n_classes] Log probability of each class (smoothed). feature_log_prob_ : array, shape = [n_classes, n_features] Empirical log probability of features given a class, P(x_i|y). class_count_ : array, shape = [n_classes] Number of samples encountered for each class during fitting. This value is weighted by the sample weight when provided. feature_count_ : array, shape = [n_classes, n_features] Number of samples encountered for each (class, feature) during fitting. This value is weighted by the sample weight when provided. Examples -------- >>> import numpy as np >>> X = np.random.randint(2, size=(6, 100)) >>> Y = np.array([1, 2, 3, 4, 4, 5]) >>> from sklearn.naive_bayes import BernoulliNB >>> clf = BernoulliNB() >>> clf.fit(X, Y) BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True) >>> print(clf.predict(X[2])) [3] References ---------- C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 234-265. http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html A. McCallum and K. Nigam (1998). A comparison of event models for naive Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for Text Categorization, pp. 41-48. V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS). 
""" def __init__(self, alpha=1.0, binarize=.0, fit_prior=True, class_prior=None): self.alpha = alpha self.binarize = binarize self.fit_prior = fit_prior self.class_prior = class_prior def _count(self, X, Y): """Count and smooth feature occurrences.""" if self.binarize is not None: X = binarize(X, threshold=self.binarize) self.feature_count_ += safe_sparse_dot(Y.T, X) self.class_count_ += Y.sum(axis=0) def _update_feature_log_prob(self): """Apply smoothing to raw counts and recompute log probabilities""" smoothed_fc = self.feature_count_ + self.alpha smoothed_cc = self.class_count_ + self.alpha * 2 self.feature_log_prob_ = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))) def _joint_log_likelihood(self, X): """Calculate the posterior log probability of the samples X""" check_is_fitted(self, "classes_") X = check_array(X, accept_sparse='csr') if self.binarize is not None: X = binarize(X, threshold=self.binarize) n_classes, n_features = self.feature_log_prob_.shape n_samples, n_features_X = X.shape if n_features_X != n_features: raise ValueError("Expected input with %d features, got %d instead" % (n_features, n_features_X)) neg_prob = np.log(1 - np.exp(self.feature_log_prob_)) # Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T) jll += self.class_log_prior_ + neg_prob.sum(axis=1) return jll
bsd-3-clause
Yas3r/Empire
lib/modules/privesc/powerup/find_dllhijack.py
10
3098
from lib.common import helpers


class Module:
    """Empire module wrapper for PowerUp's Invoke-FindDLLHijack.

    Exposes the module metadata/options that the Empire menu layer reads and
    builds the final PowerShell script in generate().
    """

    def __init__(self, mainMenu, params=None):
        # Metadata consumed by the Empire UI / agent handler layers.
        self.info = {
            'Name': 'Invoke-FindDLLHijack',

            'Author': ['@harmj0y'],

            'Description': ('Finds generic .DLL hijacking opportunities.'),

            'Background' : True,

            'OutputExtension' : None,

            'NeedsAdmin' : False,

            'OpsecSafe' : True,

            'MinPSVersion' : '2',

            'Comments': [
                'https://github.com/PowerShellEmpire/PowerTools/tree/master/PowerUp'
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'ExcludeWindows' : {
                'Description'   :   "Switch. Exclude paths from C:\Windows\* instead of just C:\Windows\System32\*",
                'Required'      :   False,
                'Value'         :   ''
            },
            'ExcludeProgramFiles' : {
                'Description'   :   "Switch. Exclude paths from C:\Program Files\* and C:\Program Files (x86)\*",
                'Required'      :   False,
                'Value'         :   ''
            },
            'ExcludeOwned' : {
                'Description'   :   "Switch. Exclude processes the current user owns.",
                'Required'      :   False,
                'Value'         :   ''
            }
        }

        # save off a copy of the mainMenu object to access external
        # functionality like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # BUGFIX: `params=[]` was a shared mutable default argument; use
        # None as the sentinel and substitute an empty list per call.
        for param in (params or []):
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        """Build and return the PowerShell payload for this module.

        Reads the Invoke-FindDLLHijack source script, appends the invocation
        line plus any switch/value options the operator set, and returns the
        resulting script text. Returns "" if the source file cannot be read.
        """
        # read in the common module source code
        moduleSource = self.mainMenu.installPath + "/data/module_source/privesc/powerup/Invoke-FindDLLHijack.ps1"

        # BUGFIX: the original used a bare `except:` (which hides *any*
        # error, not just a missing file) and leaked the file handle when
        # read() failed. Narrow the exception and use a context manager.
        try:
            with open(moduleSource, 'r') as f:
                moduleCode = f.read()
        except (IOError, OSError):
            # print() form works on both Python 2 and 3 (the original used
            # the py2-only print statement).
            print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
            return ""

        script = moduleCode

        # build the dump command with whatever options we want
        script += "Invoke-FindDLLHijack"

        # .items() instead of py2-only .iteritems(); identical behavior here.
        for option, values in self.options.items():
            if option.lower() != "agent":
                if values['Value'] and values['Value'] != '':
                    if values['Value'].lower() == "true":
                        # if we're just adding a switch
                        script += " -" + str(option)
                    else:
                        script += " -" + str(option) + " " + str(values['Value'])

        return script
bsd-3-clause
trishnaguha/ansible
lib/ansible/modules/network/f5/bigip_ssl_ocsp.py
14
24105
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: (c) 2018, F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: bigip_ssl_ocsp short_description: Manage OCSP configurations on BIG-IP description: - Manage OCSP configurations on BIG-IP. version_added: 2.8 options: name: description: - Specifies the name of the OCSP certificate validator. required: True cache_error_timeout: description: - Specifies the lifetime of an error response in the cache, in seconds. proxy_server_pool: description: - Specifies the proxy server pool the BIG-IP system uses to fetch the OCSP response. - This involves creating a pool with proxy-servers. - Use this option when either the OCSP responder cannot be reached on any of BIG-IP system's interfaces or one or more servers can proxy an HTTP request to an external server and fetch the response. cache_timeout: description: - Specifies the lifetime of the OCSP response in the cache, in seconds. clock_skew: description: - Specifies the tolerable absolute difference in the clocks of the responder and the BIG-IP system, in seconds. connections_limit: description: - Specifies the maximum number of connections per second allowed for the OCSP certificate validator. dns_resolver: description: - Specifies the internal DNS resolver the BIG-IP system uses to fetch the OCSP response. - This involves specifying one or more DNS servers in the DNS resolver configuration. - Use this option when either there is a DNS server that can do the name-resolution of the OCSP responders or the OCSP responder can be reached on one of BIG-IP system's interfaces. route_domain: description: - Specifies the route domain for fetching an OCSP response using HTTP forward proxy. 
hash_algorithm: description: - Specifies a hash algorithm used to sign an OCSP request. choices: - sha256 - sha1 certificate: description: - Specifies a certificate used to sign an OCSP request. key: description: - Specifies a key used to sign an OCSP request. passphrase: description: - Specifies a passphrase used to sign an OCSP request. status_age: description: - Specifies the maximum allowed lag time that the BIG-IP system accepts for the 'thisUpdate' time in the OCSP response. strict_responder_checking: description: - Specifies whether the responder's certificate is checked for an OCSP signing extension. type: bool connection_timeout: description: - Specifies the time interval that the BIG-IP system waits for before ending the connection to the OCSP responder, in seconds. trusted_responders: description: - Specifies the certificates used for validating the OCSP response when the responder's certificate has been omitted from the response. responder_url: description: - Specifies the absolute URL that overrides the OCSP responder URL obtained from the certificate's AIA extensions. This should be an HTTP-based URL. update_password: description: - C(always) will allow to update passwords if the user chooses to do so. C(on_create) will only set the password for newly created OCSP validators. default: always choices: - always - on_create partition: description: - Device partition to manage resources on. default: Common version_added: 2.5 state: description: - When C(present), ensures that the resource exists. - When C(absent), ensures that the resource does not exist. default: present choices: - present - absent extends_documentation_fragment: f5 notes: - Requires BIG-IP >= 13.x. 
author: - Tim Rupp (@caphrim007) ''' EXAMPLES = r''' - name: Create a OCSP validator bigip_ssl_ocsp: name: foo proxy_server_pool: validators-pool provider: password: secret server: lb.mydomain.com user: admin delegate_to: localhost ''' RETURN = r''' cache_error_timeout: description: The new Response Caching Error Timeout value. returned: changed type: int sample: 3600 cache_timeout: description: The new Response Caching Timeout value. returned: changed type: str sample: indefinite clock_skew: description: The new Response Validation Clock Skew value. returned: changed type: int sample: 300 connections_limit: description: The new Concurrent Connections Limit value. returned: changed type: int sample: 50 dns_resolver: description: The new DNS Resolver value. returned: changed type: str sample: /Common/resolver1 route_domain: description: The new Route Domain value. returned: changed type: str sample: /Common/0 hash_algorithm: description: The new Request Signing Hash Algorithm value. returned: changed type: str sample: sha256 certificate: description: The new Request Signing Certificate value. returned: changed type: str sample: /Common/cert1 key: description: The new Request Signing Key value. returned: changed type: str sample: /Common/key1 proxy_server_pool: description: The new Proxy Server Pool value. returned: changed type: str sample: /Common/pool1 responder_url: description: The new Connection Responder URL value. returned: changed type: str sample: "http://responder.site.com" status_age: description: The new Response Validation Status Age value. returned: changed type: int sample: 0 strict_responder_checking: description: The new Response Validation Strict Responder Certificate Checking value. returned: changed type: bool sample: yes connection_timeout: description: The new Connection Timeout value. returned: changed type: int sample: 8 trusted_responders: description: The new Response Validation Trusted Responders value. 
returned: changed type: int sample: /Common/default ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback from distutils.version import LooseVersion try: from library.module_utils.network.f5.bigip import F5RestClient from library.module_utils.network.f5.common import F5ModuleError from library.module_utils.network.f5.common import AnsibleF5Parameters from library.module_utils.network.f5.common import cleanup_tokens from library.module_utils.network.f5.common import fq_name from library.module_utils.network.f5.common import f5_argument_spec from library.module_utils.network.f5.common import exit_json from library.module_utils.network.f5.common import fail_json from library.module_utils.network.f5.common import transform_name from library.module_utils.network.f5.common import flatten_boolean from library.module_utils.network.f5.icontrol import tmos_version except ImportError: from ansible.module_utils.network.f5.bigip import F5RestClient from ansible.module_utils.network.f5.common import F5ModuleError from ansible.module_utils.network.f5.common import AnsibleF5Parameters from ansible.module_utils.network.f5.common import cleanup_tokens from ansible.module_utils.network.f5.common import fq_name from ansible.module_utils.network.f5.common import f5_argument_spec from ansible.module_utils.network.f5.common import exit_json from ansible.module_utils.network.f5.common import fail_json from ansible.module_utils.network.f5.common import transform_name from ansible.module_utils.network.f5.common import flatten_boolean from ansible.module_utils.network.f5.icontrol import tmos_version class Parameters(AnsibleF5Parameters): api_map = { 'cacheErrorTimeout': 'cache_error_timeout', 'cacheTimeout': 'cache_timeout', 'clockSkew': 'clock_skew', 'concurrentConnectionsLimit': 'connections_limit', 'dnsResolver': 'dns_resolver', 'proxyServerPool': 'proxy_server_pool', 'responderUrl': 'responder_url', 'routeDomain': 'route_domain', 
'signHash': 'hash_algorithm', 'signerCert': 'certificate', 'signerKey': 'key', 'signerKeyPassphrase': 'passphrase', 'statusAge': 'status_age', 'strictRespCertCheck': 'strict_responder_checking', 'timeout': 'connection_timeout', 'trustedResponders': 'trusted_responders', } api_attributes = [ 'cacheErrorTimeout', 'cacheTimeout', 'clockSkew', 'concurrentConnectionsLimit', 'dnsResolver', 'routeDomain', 'proxyServerPool', 'responderUrl', 'signHash', 'signerCert', 'signerKey', 'signerKeyPassphrase', 'statusAge', 'strictRespCertCheck', 'timeout', 'trustedResponders', ] returnables = [ 'cache_error_timeout', 'cache_timeout', 'clock_skew', 'connections_limit', 'dns_resolver', 'route_domain', 'hash_algorithm', 'certificate', 'key', 'passphrase', 'proxy_server_pool', 'responder_url', 'status_age', 'strict_responder_checking', 'connection_timeout', 'trusted_responders', ] updatables = [ 'cache_error_timeout', 'cache_timeout', 'clock_skew', 'connections_limit', 'dns_resolver', 'route_domain', 'hash_algorithm', 'certificate', 'key', 'passphrase', 'proxy_server_pool', 'responder_url', 'status_age', 'strict_responder_checking', 'connection_timeout', 'trusted_responders', ] @property def strict_responder_checking(self): return flatten_boolean(self._values['strict_responder_checking']) @property def cache_timeout(self): if self._values['cache_timeout'] is None: return None try: return int(self._values['cache_timeout']) except ValueError: return self._values['cache_timeout'] class ApiParameters(Parameters): pass class ModuleParameters(Parameters): @property def route_domain(self): if self._values['route_domain'] is None: return None result = fq_name(self.partition, self._values['route_domain']) return result @property def dns_resolver(self): if self._values['dns_resolver'] is None: return None result = fq_name(self.partition, self._values['dns_resolver']) return result @property def proxy_server_pool(self): if self._values['proxy_server_pool'] is None: return None result = 
fq_name(self.partition, self._values['proxy_server_pool']) return result @property def responder_url(self): if self._values['responder_url'] is None: return None if self._values['responder_url'] in ['', 'none']: return '' return self._values['responder_url'] @property def certificate(self): if self._values['certificate'] is None: return None if self._values['certificate'] in ['', 'none']: return '' result = fq_name(self.partition, self._values['certificate']) return result @property def key(self): if self._values['key'] is None: return None if self._values['key'] in ['', 'none']: return '' result = fq_name(self.partition, self._values['key']) return result @property def trusted_responders(self): if self._values['trusted_responders'] is None: return None if self._values['trusted_responders'] in ['', 'none']: return '' result = fq_name(self.partition, self._values['trusted_responders']) return result class Changes(Parameters): def to_return(self): result = {} try: for returnable in self.returnables: result[returnable] = getattr(self, returnable) result = self._filter_params(result) except Exception: pass return result class UsableChanges(Changes): @property def strict_responder_checking(self): if self._values['strict_responder_checking'] == 'yes': return 'enabled' elif self._values['strict_responder_checking'] == 'no': return 'disabled' class ReportableChanges(Changes): @property def strict_responder_checking(self): result = flatten_boolean(self._values['strict_responder_checking']) return result @property def passphrase(self): return None class Difference(object): def __init__(self, want, have=None): self.want = want self.have = have def compare(self, param): try: result = getattr(self, param) return result except AttributeError: return self.__default(param) def __default(self, param): attr1 = getattr(self.want, param) try: attr2 = getattr(self.have, param) if attr1 != attr2: return attr1 except AttributeError: return attr1 @property def responder_url(self): if 
self.want.responder_url is None: return None if self.want.responder_url == '' and self.have.responder_url is None: return None if self.want.responder_url != self.have.responder_url: return self.want.responder_url @property def certificate(self): if self.want.certificate is None: return None if self.want.certificate == '' and self.have.certificate is None: return None if self.want.certificate != self.have.certificate: return self.want.certificate @property def key(self): if self.want.key is None: return None if self.want.key == '' and self.have.key is None: return None if self.want.key != self.have.key: return self.want.key @property def trusted_responders(self): if self.want.trusted_responders is None: return None if self.want.trusted_responders == '' and self.have.trusted_responders is None: return None if self.want.trusted_responders != self.have.trusted_responders: return self.want.trusted_responders class ModuleManager(object): def __init__(self, *args, **kwargs): self.module = kwargs.get('module', None) self.client = kwargs.get('client', None) self.want = ModuleParameters(params=self.module.params) self.have = ApiParameters() self.changes = UsableChanges() def _set_changed_options(self): changed = {} for key in Parameters.returnables: if getattr(self.want, key) is not None: changed[key] = getattr(self.want, key) if changed: self.changes = UsableChanges(params=changed) def _update_changed_options(self): diff = Difference(self.want, self.have) updatables = Parameters.updatables changed = dict() for k in updatables: change = diff.compare(k) if change is None: continue else: if isinstance(change, dict): changed.update(change) else: changed[k] = change if changed: self.changes = UsableChanges(params=changed) return True return False def should_update(self): result = self._update_changed_options() if result: return True return False def exec_module(self): tmos = tmos_version(self.client) if LooseVersion(tmos) < LooseVersion('13.0.0'): raise F5ModuleError( "BIG-IP 
v13 or greater is required to use this module." ) changed = False result = dict() state = self.want.state if state == "present": changed = self.present() elif state == "absent": changed = self.absent() reportable = ReportableChanges(params=self.changes.to_return()) changes = reportable.to_return() result.update(**changes) result.update(dict(changed=changed)) self._announce_deprecations(result) return result def _announce_deprecations(self, result): warnings = result.pop('__warnings', []) for warning in warnings: self.client.module.deprecate( msg=warning['msg'], version=warning['version'] ) def present(self): if self.exists(): return self.update() else: return self.create() def exists(self): uri = "https://{0}:{1}/mgmt/tm/sys/crypto/cert-validator/ocsp/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError: return False if resp.status == 404 or 'code' in response and response['code'] == 404: return False return True def update(self): self.have = self.read_current_from_device() if self.want.update_password == 'always': self.want.update({'passphrase': self.want.passphrase}) else: if self.want.passphrase: del self.want._values['passphrase'] if not self.should_update(): return False # these two params are mutually exclusive, and so one must be zeroed # out so that the other can be set. 
This zeros the non-specified values # out so that the PATCH can happen if self.want.dns_resolver: self.changes.update({'proxy_server_pool': ''}) if self.want.proxy_server_pool: self.changes.update({'dns_resolver': ''}) if self.module.check_mode: return True self.update_on_device() return True def remove(self): if self.module.check_mode: return True self.remove_from_device() if self.exists(): raise F5ModuleError("Failed to delete the resource.") return True def create(self): self._set_changed_options() if self.module.check_mode: return True self.create_on_device() return True def create_on_device(self): params = self.changes.api_params() params['name'] = self.want.name params['partition'] = self.want.partition uri = "https://{0}:{1}/mgmt/tm/sys/crypto/cert-validator/ocsp/".format( self.client.provider['server'], self.client.provider['server_port'] ) resp = self.client.api.post(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] in [400, 403]: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def update_on_device(self): params = self.changes.api_params() uri = "https://{0}:{1}/mgmt/tm/sys/crypto/cert-validator/ocsp/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.patch(uri, json=params) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) def absent(self): if self.exists(): return self.remove() return False def remove_from_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/crypto/cert-validator/ocsp/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) 
response = self.client.api.delete(uri) if response.status == 200: return True raise F5ModuleError(response.content) def read_current_from_device(self): uri = "https://{0}:{1}/mgmt/tm/sys/crypto/cert-validator/ocsp/{2}".format( self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.name) ) resp = self.client.api.get(uri) try: response = resp.json() except ValueError as ex: raise F5ModuleError(str(ex)) if 'code' in response and response['code'] == 400: if 'message' in response: raise F5ModuleError(response['message']) else: raise F5ModuleError(resp.content) return ApiParameters(params=response) class ArgumentSpec(object): def __init__(self): self.supports_check_mode = True argument_spec = dict( name=dict(required=True), cache_error_timeout=dict(type='int'), proxy_server_pool=dict(), cache_timeout=dict(), clock_skew=dict(type='int'), connections_limit=dict(type='int'), dns_resolver=dict(), route_domain=dict(), hash_algorithm=dict( choices=['sha256', 'sha1'] ), certificate=dict(), key=dict(), passphrase=dict(no_log=True), status_age=dict(type='int'), strict_responder_checking=dict(type='bool'), connection_timeout=dict(type='int'), trusted_responders=dict(), responder_url=dict(), update_password=dict( default='always', choices=['always', 'on_create'] ), state=dict( default='present', choices=['present', 'absent'] ), partition=dict( default='Common', fallback=(env_fallback, ['F5_PARTITION']) ) ) self.argument_spec = {} self.argument_spec.update(f5_argument_spec) self.argument_spec.update(argument_spec) self.mutually_exclusive = [ ['dns_resolver', 'proxy_server_pool'] ] self.required_together = [ ['certificate', 'key'] ] def main(): spec = ArgumentSpec() module = AnsibleModule( argument_spec=spec.argument_spec, supports_check_mode=spec.supports_check_mode, mutually_exclusive=spec.mutually_exclusive, required_together=spec.required_together, ) client = F5RestClient(**module.params) try: mm = 
ModuleManager(module=module, client=client) results = mm.exec_module() cleanup_tokens(client) exit_json(module, results, client) except F5ModuleError as ex: cleanup_tokens(client) fail_json(module, ex, client) if __name__ == '__main__': main()
gpl-3.0
Acidburn0zzz/servo
tests/wpt/web-platform-tests/tools/third_party/html5lib/html5lib/serializer.py
45
15746
from __future__ import absolute_import, division, unicode_literals from six import text_type import re from codecs import register_error, xmlcharrefreplace_errors from .constants import voidElements, booleanAttributes, spaceCharacters from .constants import rcdataElements, entities, xmlEntities from . import treewalkers, _utils from xml.sax.saxutils import escape _quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`" _quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]") _quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n" "\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15" "\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" "\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000" "\u2001\u2002\u2003\u2004\u2005\u2006\u2007" "\u2008\u2009\u200a\u2028\u2029\u202f\u205f" "\u3000]") _encode_entity_map = {} _is_ucs4 = len("\U0010FFFF") == 1 for k, v in list(entities.items()): # skip multi-character entities if ((_is_ucs4 and len(v) > 1) or (not _is_ucs4 and len(v) > 2)): continue if v != "&": if len(v) == 2: v = _utils.surrogatePairToCodepoint(v) else: v = ord(v) if v not in _encode_entity_map or k.islower(): # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc. 
_encode_entity_map[v] = k def htmlentityreplace_errors(exc): if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)): res = [] codepoints = [] skip = False for i, c in enumerate(exc.object[exc.start:exc.end]): if skip: skip = False continue index = i + exc.start if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]): codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2]) skip = True else: codepoint = ord(c) codepoints.append(codepoint) for cp in codepoints: e = _encode_entity_map.get(cp) if e: res.append("&") res.append(e) if not e.endswith(";"): res.append(";") else: res.append("&#x%s;" % (hex(cp)[2:])) return ("".join(res), exc.end) else: return xmlcharrefreplace_errors(exc) register_error("htmlentityreplace", htmlentityreplace_errors) def serialize(input, tree="etree", encoding=None, **serializer_opts): """Serializes the input token stream using the specified treewalker :arg input: the token stream to serialize :arg tree: the treewalker to use :arg encoding: the encoding to use :arg serializer_opts: any options to pass to the :py:class:`html5lib.serializer.HTMLSerializer` that gets created :returns: the tree serialized as a string Example: >>> from html5lib.html5parser import parse >>> from html5lib.serializer import serialize >>> token_stream = parse('<html><body><p>Hi!</p></body></html>') >>> serialize(token_stream, omit_optional_tags=False) '<html><head></head><body><p>Hi!</p></body></html>' """ # XXX: Should we cache this? 
walker = treewalkers.getTreeWalker(tree) s = HTMLSerializer(**serializer_opts) return s.render(walker(input), encoding) class HTMLSerializer(object): # attribute quoting options quote_attr_values = "legacy" # be secure by default quote_char = '"' use_best_quote_char = True # tag syntax options omit_optional_tags = True minimize_boolean_attributes = True use_trailing_solidus = False space_before_trailing_solidus = True # escaping options escape_lt_in_attrs = False escape_rcdata = False resolve_entities = True # miscellaneous options alphabetical_attributes = False inject_meta_charset = True strip_whitespace = False sanitize = False options = ("quote_attr_values", "quote_char", "use_best_quote_char", "omit_optional_tags", "minimize_boolean_attributes", "use_trailing_solidus", "space_before_trailing_solidus", "escape_lt_in_attrs", "escape_rcdata", "resolve_entities", "alphabetical_attributes", "inject_meta_charset", "strip_whitespace", "sanitize") def __init__(self, **kwargs): """Initialize HTMLSerializer :arg inject_meta_charset: Whether or not to inject the meta charset. Defaults to ``True``. :arg quote_attr_values: Whether to quote attribute values that don't require quoting per legacy browser behavior (``"legacy"``), when required by the standard (``"spec"``), or always (``"always"``). Defaults to ``"legacy"``. :arg quote_char: Use given quote character for attribute quoting. Defaults to ``"`` which will use double quotes unless attribute value contains a double quote, in which case single quotes are used. :arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute values. Defaults to ``False``. :arg escape_rcdata: Whether to escape characters that need to be escaped within normal elements within rcdata elements such as style. Defaults to ``False``. :arg resolve_entities: Whether to resolve named character entities that appear in the source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos; are unaffected by this setting. 
Defaults to ``True``. :arg strip_whitespace: Whether to remove semantically meaningless whitespace. (This compresses all whitespace to a single space except within ``pre``.) Defaults to ``False``. :arg minimize_boolean_attributes: Shortens boolean attributes to give just the attribute value, for example:: <input disabled="disabled"> becomes:: <input disabled> Defaults to ``True``. :arg use_trailing_solidus: Includes a close-tag slash at the end of the start tag of void elements (empty elements whose end tag is forbidden). E.g. ``<hr/>``. Defaults to ``False``. :arg space_before_trailing_solidus: Places a space immediately before the closing slash in a tag using a trailing solidus. E.g. ``<hr />``. Requires ``use_trailing_solidus=True``. Defaults to ``True``. :arg sanitize: Strip all unsafe or unknown constructs from output. See :py:class:`html5lib.filters.sanitizer.Filter`. Defaults to ``False``. :arg omit_optional_tags: Omit start/end tags that are optional. Defaults to ``True``. :arg alphabetical_attributes: Reorder attributes to be in alphabetical order. Defaults to ``False``. 
""" unexpected_args = frozenset(kwargs) - frozenset(self.options) if len(unexpected_args) > 0: raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args))) if 'quote_char' in kwargs: self.use_best_quote_char = False for attr in self.options: setattr(self, attr, kwargs.get(attr, getattr(self, attr))) self.errors = [] self.strict = False def encode(self, string): assert(isinstance(string, text_type)) if self.encoding: return string.encode(self.encoding, "htmlentityreplace") else: return string def encodeStrict(self, string): assert(isinstance(string, text_type)) if self.encoding: return string.encode(self.encoding, "strict") else: return string def serialize(self, treewalker, encoding=None): # pylint:disable=too-many-nested-blocks self.encoding = encoding in_cdata = False self.errors = [] if encoding and self.inject_meta_charset: from .filters.inject_meta_charset import Filter treewalker = Filter(treewalker, encoding) # Alphabetical attributes is here under the assumption that none of # the later filters add or change order of attributes; it needs to be # before the sanitizer so escaped elements come out correctly if self.alphabetical_attributes: from .filters.alphabeticalattributes import Filter treewalker = Filter(treewalker) # WhitespaceFilter should be used before OptionalTagFilter # for maximum efficiently of this latter filter if self.strip_whitespace: from .filters.whitespace import Filter treewalker = Filter(treewalker) if self.sanitize: from .filters.sanitizer import Filter treewalker = Filter(treewalker) if self.omit_optional_tags: from .filters.optionaltags import Filter treewalker = Filter(treewalker) for token in treewalker: type = token["type"] if type == "Doctype": doctype = "<!DOCTYPE %s" % token["name"] if token["publicId"]: doctype += ' PUBLIC "%s"' % token["publicId"] elif token["systemId"]: doctype += " SYSTEM" if token["systemId"]: if token["systemId"].find('"') >= 0: if token["systemId"].find("'") >= 0: 
self.serializeError("System identifer contains both single and double quote characters") quote_char = "'" else: quote_char = '"' doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char) doctype += ">" yield self.encodeStrict(doctype) elif type in ("Characters", "SpaceCharacters"): if type == "SpaceCharacters" or in_cdata: if in_cdata and token["data"].find("</") >= 0: self.serializeError("Unexpected </ in CDATA") yield self.encode(token["data"]) else: yield self.encode(escape(token["data"])) elif type in ("StartTag", "EmptyTag"): name = token["name"] yield self.encodeStrict("<%s" % name) if name in rcdataElements and not self.escape_rcdata: in_cdata = True elif in_cdata: self.serializeError("Unexpected child element of a CDATA element") for (_, attr_name), attr_value in token["data"].items(): # TODO: Add namespace support here k = attr_name v = attr_value yield self.encodeStrict(' ') yield self.encodeStrict(k) if not self.minimize_boolean_attributes or \ (k not in booleanAttributes.get(name, tuple()) and k not in booleanAttributes.get("", tuple())): yield self.encodeStrict("=") if self.quote_attr_values == "always" or len(v) == 0: quote_attr = True elif self.quote_attr_values == "spec": quote_attr = _quoteAttributeSpec.search(v) is not None elif self.quote_attr_values == "legacy": quote_attr = _quoteAttributeLegacy.search(v) is not None else: raise ValueError("quote_attr_values must be one of: " "'always', 'spec', or 'legacy'") v = v.replace("&", "&amp;") if self.escape_lt_in_attrs: v = v.replace("<", "&lt;") if quote_attr: quote_char = self.quote_char if self.use_best_quote_char: if "'" in v and '"' not in v: quote_char = '"' elif '"' in v and "'" not in v: quote_char = "'" if quote_char == "'": v = v.replace("'", "&#39;") else: v = v.replace('"', "&quot;") yield self.encodeStrict(quote_char) yield self.encode(v) yield self.encodeStrict(quote_char) else: yield self.encode(v) if name in voidElements and self.use_trailing_solidus: if 
self.space_before_trailing_solidus: yield self.encodeStrict(" /") else: yield self.encodeStrict("/") yield self.encode(">") elif type == "EndTag": name = token["name"] if name in rcdataElements: in_cdata = False elif in_cdata: self.serializeError("Unexpected child element of a CDATA element") yield self.encodeStrict("</%s>" % name) elif type == "Comment": data = token["data"] if data.find("--") >= 0: self.serializeError("Comment contains --") yield self.encodeStrict("<!--%s-->" % token["data"]) elif type == "Entity": name = token["name"] key = name + ";" if key not in entities: self.serializeError("Entity %s not recognized" % name) if self.resolve_entities and key not in xmlEntities: data = entities[key] else: data = "&%s;" % name yield self.encodeStrict(data) else: self.serializeError(token["data"]) def render(self, treewalker, encoding=None): """Serializes the stream from the treewalker into a string :arg treewalker: the treewalker to serialize :arg encoding: the string encoding to use :returns: the serialized tree Example: >>> from html5lib import parse, getTreeWalker >>> from html5lib.serializer import HTMLSerializer >>> token_stream = parse('<html><body>Hi!</body></html>') >>> walker = getTreeWalker('etree') >>> serializer = HTMLSerializer(omit_optional_tags=False) >>> serializer.render(walker(token_stream)) '<html><head></head><body>Hi!</body></html>' """ if encoding: return b"".join(list(self.serialize(treewalker, encoding))) else: return "".join(list(self.serialize(treewalker))) def serializeError(self, data="XXX ERROR MESSAGE NEEDED"): # XXX The idea is to make data mandatory. self.errors.append(data) if self.strict: raise SerializeError class SerializeError(Exception): """Error in serialized tree""" pass
mpl-2.0
BjoernT/python-openstackclient
openstackclient/identity/v3/unscoped_saml.py
2
2818
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Identity v3 unscoped SAML auth action implementations. The first step of federated auth is to fetch an unscoped token. From there, the user can list domains and projects they are allowed to access, and request a scoped token.""" import logging from cliff import lister from openstackclient.common import exceptions from openstackclient.common import utils UNSCOPED_AUTH_PLUGINS = ['v3unscopedsaml', 'v3unscopedadfs', 'v3oidc'] def auth_with_unscoped_saml(func): """Check the unscoped federated context""" def _decorated(self, parsed_args): auth_plugin_name = self.app.client_manager.auth_plugin_name if auth_plugin_name in UNSCOPED_AUTH_PLUGINS: return func(self, parsed_args) else: msg = ('This command requires the use of an unscoped SAML ' 'authentication plugin. 
Please use argument ' '--os-auth-type with one of the following ' 'plugins: ' + ', '.join(UNSCOPED_AUTH_PLUGINS)) raise exceptions.CommandError(msg) return _decorated class ListAccessibleDomains(lister.Lister): """List accessible domains""" log = logging.getLogger(__name__ + '.ListAccessibleDomains') @auth_with_unscoped_saml @utils.log_method(log) def take_action(self, parsed_args): columns = ('ID', 'Enabled', 'Name', 'Description') identity_client = self.app.client_manager.identity data = identity_client.federation.domains.list() return (columns, (utils.get_item_properties( s, columns, formatters={}, ) for s in data)) class ListAccessibleProjects(lister.Lister): """List accessible projects""" log = logging.getLogger(__name__ + '.ListAccessibleProjects') @auth_with_unscoped_saml @utils.log_method(log) def take_action(self, parsed_args): columns = ('ID', 'Domain ID', 'Enabled', 'Name') identity_client = self.app.client_manager.identity data = identity_client.federation.projects.list() return (columns, (utils.get_item_properties( s, columns, formatters={}, ) for s in data))
apache-2.0
pitunti/alfaPitunti
plugin.video.alfa/channels/seriesblanco.py
1
13145
# -*- coding: utf-8 -*- import re import urlparse from channels import filtertools from channelselector import get_thumb from core import httptools from core import scrapertoolsV2 from core import servertools from core.item import Item from platformcode import config, logger from channels import autoplay HOST = "https://seriesblanco.com/" IDIOMAS = {'es': 'Español', 'en': 'Inglés', 'la': 'Latino', 'vo': 'VO', 'vos': 'VOS', 'vosi': 'VOSI', 'otro': 'OVOS'} list_idiomas = IDIOMAS.values() list_language = ['default'] CALIDADES = ['SD', 'HDiTunes', 'Micro-HD-720p', 'Micro-HD-1080p', '1080p', '720p'] list_quality = CALIDADES list_servers = ['streamix', 'powvideo', 'streamcloud', 'openload', 'flashx', 'streamplay', 'nowvideo', 'gamovideo', 'kingvid', 'vidabc' ] def mainlist(item): logger.info() thumb_series = get_thumb("channels_tvshow.png") thumb_series_az = get_thumb("channels_tvshow_az.png") thumb_buscar = get_thumb("search.png") itemlist = list() autoplay.init(item.channel, list_servers, list_quality) itemlist.append(Item(channel=item.channel, title="Listado alfabético", action="series_listado_alfabetico", thumbnail=thumb_series_az)) itemlist.append(Item(channel=item.channel, title="Todas las series", action="series", url=urlparse.urljoin(HOST, "listado/"), thumbnail=thumb_series)) itemlist.append( Item(channel=item.channel, title="Capítulos estrenados recientemente", action="home_section", extra="Series Online : Capítulos estrenados recientemente", url=HOST, thumbnail=thumb_series)) itemlist.append(Item(channel=item.channel, title="Series más vistas", action="series", extra="Series Más vistas", url=urlparse.urljoin(HOST, "listado-visto/"), thumbnail=thumb_series)) itemlist.append(Item(channel=item.channel, title="Últimas fichas creadas", action="series", url=urlparse.urljoin(HOST, "fichas_creadas/"), thumbnail=thumb_series)) itemlist.append(Item(channel=item.channel, title="Series por género", action="generos", url=HOST, thumbnail=thumb_series)) itemlist.append( 
Item(channel=item.channel, title="Buscar...", action="search", url=urlparse.urljoin(HOST, "finder.php"), thumbnail=thumb_buscar)) itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES) autoplay.show_option(item.channel, itemlist) return itemlist def home_section(item): logger.info("section = %s" % item.extra) pattern = "['\"]panel-title['\"]>[^/]*%s(.*?)(?:panel-title|\Z)" % item.extra # logger.debug("pattern = %s" % pattern) data = httptools.downloadpage(item.url).data result = re.search(pattern, data, re.MULTILINE | re.DOTALL) if result: # logger.debug("found section: {0}".format(result.group(1))) item.extra = 1 return extract_series_from_data(item, result.group(1)) logger.debug("No match") return [] def extract_series_from_data(item, data): itemlist = [] episode_pattern = re.compile('/capitulo-([0-9]+)/') shows = re.findall("<a.+?href=['\"](?P<url>/serie[^'\"]+)[^<]*<img[^>]*src=['\"](?P<img>http[^'\"]+).*?" "(?:alt|title)=['\"](?P<name>[^'\"]+)", data) for url, img, name in shows: try: name.decode('utf-8') except UnicodeError: name = unicode(name, "iso-8859-1", errors="replace").encode("utf-8") # logger.debug("Show found: %s -> %s (%s)" % (name, url, img)) if not episode_pattern.search(url): action = "episodios" else: action = "findvideos" context1=[filtertools.context(item, list_idiomas, CALIDADES), autoplay.context] itemlist.append(item.clone(title=name, url=urlparse.urljoin(HOST, url), action=action, show=name, thumbnail=img, context=context1)) more_pages = re.search('pagina=([0-9]+)">>>', data) if more_pages: # logger.debug("Adding next page item") itemlist.append(item.clone(title="Siguiente >>", extra=item.extra + 1)) if item.extra > 1: # logger.debug("Adding previous page item") itemlist.append(item.clone(title="<< Anterior", extra=item.extra - 1)) return itemlist def series(item): logger.info() if not hasattr(item, 'extra') or not isinstance(item.extra, int): item.extra = 1 if '?' in item.url: merger = '&' else: merger = '?' 
page_url = "%s%spagina=%s" % (item.url, merger, item.extra) logger.info("url = %s" % page_url) data = scrapertoolsV2.decodeHtmlentities(httptools.downloadpage(page_url).data) return extract_series_from_data(item, data) def series_listado_alfabetico(item): logger.info() return [item.clone(action="series", title=letra, url=urlparse.urljoin(HOST, "listado-%s/" % letra)) for letra in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"] def generos(item): logger.info() data = httptools.downloadpage(item.url).data result = re.findall("href=['\"](?P<url>/listado/[^'\"]+)['\"][^/]+/i>\s*(?P<genero>[^<]+)", data) return [item.clone(action="series", title=genero, url=urlparse.urljoin(item.url, url)) for url, genero in result] def newest(categoria): logger.info("categoria: %s" % categoria) itemlist = [] try: if categoria == 'series': itemlist = home_section(Item(extra=CAPITULOS_DE_ESTRENO_STR, url=HOST)) # Se captura la excepción, para no interrumpir al canal novedades si un canal falla except: import sys for line in sys.exc_info(): logger.error("%s" % line) return [] return itemlist def search(item, texto): logger.info("%s" % texto) texto = texto.replace(" ", "+") itemlist = [] try: post = "query=%s" % texto data = httptools.downloadpage(item.url, post=post).data data = re.sub(r"\n|\r|\t|\s{2}", "", data) shows = re.findall("<a href=['\"](?P<url>/serie[^'\"]+)['\"].*?<img src=['\"](?P<img>[^'\"]+)['\"].*?" 
"id=['\"]q2[1\"] name=['\"]q2['\"] value=['\"](?P<title>.*?)['\"]", data) for url, img, title in shows: itemlist.append(item.clone(title=title, url=urlparse.urljoin(HOST, url), action="episodios", show=title, thumbnail=img, context=filtertools.context(item, list_idiomas, CALIDADES))) # Se captura la excepción, para no interrumpir al buscador global si un canal falla except: import sys for line in sys.exc_info(): logger.error("%s" % line) return itemlist def episodios(item): logger.info("%s - %s" % (item.title, item.url)) itemlist = [] # Descarga la página data = httptools.downloadpage(item.url).data fanart = scrapertoolsV2.find_single_match(data, "background-image[^'\"]+['\"]([^'\"]+)") plot = scrapertoolsV2.find_single_match(data, "id=['\"]profile2['\"]>\s*(.*?)\s*</div>") # logger.debug("fanart: %s" % fanart) # logger.debug("plot: %s" % plot) episodes = re.findall("<tr.*?href=['\"](?P<url>[^'\"]+).+?>(?P<title>.+?)</a>.*?<td>(?P<flags>.*?)</td>", data, re.MULTILINE | re.DOTALL) for url, title, flags in episodes: title = re.sub("<span[^>]+>", "", title).replace("</span>", "") idiomas = " ".join(["[%s]" % IDIOMAS.get(language, "OVOS") for language in re.findall("banderas/([^\.]+)", flags, re.MULTILINE)]) filter_lang = idiomas.replace("[", "").replace("]", "").split(" ") display_title = "%s - %s %s" % (item.show, title, idiomas) # logger.debug("Episode found %s: %s" % (display_title, urlparse.urljoin(HOST, url))) itemlist.append(item.clone(title=display_title, url=urlparse.urljoin(HOST, url), action="findvideos", plot=plot, fanart=fanart, language=filter_lang)) itemlist = filtertools.get_links(itemlist, item, list_idiomas, CALIDADES) if config.get_videolibrary_support() and len(itemlist) > 0: itemlist.append( item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios")) return itemlist def parse_videos(item, type_str, data): video_patterns_str = [ 
'<tr.+?<span>(?P<date>.+?)</span>.*?banderas/(?P<language>[^\.]+).+?href="(?P<link>[^"]+).+?servidores/' '(?P<server>[^\.]+).*?</td>.*?<td>.*?<span>(?P<uploader>.+?)</span>.*?<span>(?P<quality>.*?)</span>', '<tr.+?banderas/(?P<language>[^\.]+).+?<td[^>]*>(?P<date>.+?)</td>.+?href=[\'"](?P<link>[^\'"]+)' '.+?servidores/(?P<server>[^\.]+).*?</td>.*?<td[^>]*>.*?<a[^>]+>(?P<uploader>.+?)</a>.*?</td>.*?<td[^>]*>' '(?P<quality>.*?)</td>.*?</tr>' ] for v_pat_str in video_patterns_str: v_patt_iter = re.compile(v_pat_str, re.MULTILINE | re.DOTALL).finditer(data) itemlist = [] for vMatch in v_patt_iter: v_fields = vMatch.groupdict() quality = v_fields.get("quality") # FIX para veces que añaden el idioma en los comentarios regex = re.compile('sub-inglés-?', re.I) quality = regex.sub("", quality) # quality = re.sub(r"sub-inglés-?", "", quality, flags=re.IGNORECASE) if not quality: quality = "SD" # FIX para los guiones en la calidad y no tener que añadir otra opción en la lista de calidades if quality.startswith("MicroHD"): regex = re.compile('microhd', re.I) quality = regex.sub("Micro-HD-", quality) # quality = re.sub(r"microhd", "Micro-HD-", quality, flags=re.IGNORECASE) server = v_fields.get("server") title = "%s en %s [%s] [%s] (%s: %s)" % (type_str, v_fields.get("server"), IDIOMAS.get(v_fields.get("language"), "OVOS"), quality, v_fields.get("uploader"), v_fields.get("date")) itemlist.append( item.clone(title=title, fulltitle=item.title, url=urlparse.urljoin(HOST, v_fields.get("link")), action="play", language=IDIOMAS.get(v_fields.get("language"), "OVOS"), quality=quality, server= server)) if len(itemlist) > 0: return itemlist return [] def extract_videos_section(data): return re.findall("panel-title[^>]*>\s*([VvDd].+?)</div>[^<]*</div>[^<]*</div>", data, re.MULTILINE | re.DOTALL) def findvideos(item): logger.info("%s = %s" % (item.show, item.url)) # Descarga la página data = httptools.downloadpage(item.url).data # logger.info(data) online = extract_videos_section(data) 
try: filtro_enlaces = config.get_setting("filterlinks", item.channel) except: filtro_enlaces = 2 list_links = [] if filtro_enlaces != 0: list_links.extend(parse_videos(item, "Ver", online[-2])) if filtro_enlaces != 1: list_links.extend(parse_videos(item, "Descargar", online[-1])) list_links = filtertools.get_links(list_links, item, list_idiomas, CALIDADES) for i in range(len(list_links)): a=list_links[i].title b=a.lstrip('Ver en') c=b.split('[') d=c[0].rstrip( ) d=d.lstrip( ) list_links[i].server=d autoplay.start(list_links, item) return list_links def play(item): logger.info("%s - %s = %s" % (item.show, item.title, item.url)) if item.url.startswith(HOST): data = httptools.downloadpage(item.url).data ajax_link = re.findall("loadEnlace\((\d+),(\d+),(\d+),(\d+)\)", data) ajax_data = "" for serie, temp, cap, linkID in ajax_link: # logger.debug( # "Ajax link request: Serie = %s - Temp = %s - Cap = %s - Link = %s" % (serie, temp, cap, linkID)) ajax_data += httptools.downloadpage( HOST + '/ajax/load_enlace.php?serie=' + serie + '&temp=' + temp + '&cap=' + cap + '&id=' + linkID).data if ajax_data: data = ajax_data patron = "window.location.href\s*=\s*[\"']([^\"']+)'" url = scrapertoolsV2.find_single_match(data, patron) else: url = item.url itemlist = servertools.find_video_items(data=url) titulo = scrapertoolsV2.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$") if titulo: titulo += " [%s]" % item.language for videoitem in itemlist: if titulo: videoitem.title = titulo else: videoitem.title = item.title videoitem.channel = item.channel return itemlist
gpl-3.0
joisig/grit-i18n
grit/pseudo.py
62
4072
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. '''Pseudotranslation support. Our pseudotranslations are based on the P-language, which is a simple vowel-extending language. Examples of P: - "hello" becomes "hepellopo" - "howdie" becomes "hopowdiepie" - "because" becomes "bepecaupause" (but in our implementation we don't handle the silent e at the end so it actually would return "bepecaupausepe" The P-language has the excellent quality of increasing the length of text by around 30-50% which is great for pseudotranslations, to stress test any GUI layouts etc. To make the pseudotranslations more obviously "not a translation" and to make them exercise any code that deals with encodings, we also transform all English vowels into equivalent vowels with diacriticals on them (rings, acutes, diaresis, and circumflex), and we write the "p" in the P-language as a Hebrew character Qof. It looks sort of like a latin character "p" but it is outside the latin-1 character set which will stress character encoding bugs. ''' from grit import lazy_re from grit import tclib # An RFC language code for the P pseudolanguage. PSEUDO_LANG = 'x-P-pseudo' # Hebrew character Qof. It looks kind of like a 'p' but is outside # the latin-1 character set which is good for our purposes. # TODO(joi) For now using P instead of Qof, because of some bugs it used. Find # a better solution, i.e. one that introduces a non-latin1 character into the # pseudotranslation. #_QOF = u'\u05e7' _QOF = u'P' # How we map each vowel. 
_VOWELS = { u'a' : u'\u00e5', # a with ring u'e' : u'\u00e9', # e acute u'i' : u'\u00ef', # i diaresis u'o' : u'\u00f4', # o circumflex u'u' : u'\u00fc', # u diaresis u'y' : u'\u00fd', # y acute u'A' : u'\u00c5', # A with ring u'E' : u'\u00c9', # E acute u'I' : u'\u00cf', # I diaresis u'O' : u'\u00d4', # O circumflex u'U' : u'\u00dc', # U diaresis u'Y' : u'\u00dd', # Y acute } # Matches vowels and P _PSUB_RE = lazy_re.compile("(%s)" % '|'.join(_VOWELS.keys() + ['P'])) # Pseudotranslations previously created. This is important for performance # reasons, especially since we routinely pseudotranslate the whole project # several or many different times for each build. _existing_translations = {} def MapVowels(str, also_p = False): '''Returns a copy of 'str' where characters that exist as keys in _VOWELS have been replaced with the corresponding value. If also_p is true, this function will also change capital P characters into a Hebrew character Qof. ''' def Repl(match): if match.group() == 'p': if also_p: return _QOF else: return 'p' else: return _VOWELS[match.group()] return _PSUB_RE.sub(Repl, str) def PseudoString(str): '''Returns a pseudotranslation of the provided string, in our enhanced P-language.''' if str in _existing_translations: return _existing_translations[str] outstr = u'' ix = 0 while ix < len(str): if str[ix] not in _VOWELS.keys(): outstr += str[ix] ix += 1 else: # We want to treat consecutive vowels as one composite vowel. This is not # always accurate e.g. in composite words but good enough. consecutive_vowels = u'' while ix < len(str) and str[ix] in _VOWELS.keys(): consecutive_vowels += str[ix] ix += 1 changed_vowels = MapVowels(consecutive_vowels) outstr += changed_vowels outstr += _QOF outstr += changed_vowels _existing_translations[str] = outstr return outstr def PseudoMessage(message): '''Returns a pseudotranslation of the provided message. 
Args: message: tclib.Message() Return: tclib.Translation() ''' transl = tclib.Translation() for part in message.GetContent(): if isinstance(part, tclib.Placeholder): transl.AppendPlaceholder(part) else: transl.AppendText(PseudoString(part)) return transl
bsd-2-clause
JoyMonteiro/CliMT
lib/examples/basic_radiation.py
4
1313
#!/usr/bin/env python # # Set up realistic tropical temperature and moisture profiles # and compute radiative fluxes # from numpy import * import climt #--- instantiate radiation module r = climt.radiation(scheme='cam3') #--- initialise T,q # Surface temperature Ts = 273.15 + 30. # Strospheric temp Tst = 273.15 - 80. # Surface pressure ps = 1000. # Equispaced pressure levels p = ( arange(r.nlev)+ 0.5 )/r.nlev * ps # Return moist adiabat with 70% rel hum (T,q) = climt.thermodyn.moistadiabat(p, Ts, Tst, 1.) cldf = q*0. clwp = q*0. cldf[len(cldf)/3] = 0.5 clwp[len(cldf)/3] = 100. #--- compute radiative fluxes and heating rates r(p=p, ps=ps, T=T, Ts=Ts, q=q, cldf=cldf, clwp=clwp) if 'SwToa' in r.State.keys(): print r['SwToa'],r['LwToa'],r['SwSrf'],r['LwSrf'] print r['SwToaCf'],r['LwToaCf'],(r['solin']-r['SwToa'])/r['solin'] #--- print out results lwflx = r['lwflx'] swflx = r['swflx'] lwhr = r['lwhr'] swhr = r['swhr'] q = r['q'] T = r['T'] z=climt.thermodyn.z(p, T, p0=ps)/1000. print "lev z p T q lwflx lwhr swflx swhr " for i in range(r['nlev']): print "%3i %6.1f %6.1f %6.1f %6.2f %10.2f %6.2f %10.2f %6.2f" % \ (i, z[i], p[i], T[i]-273.15, q[i], lwflx[i], lwhr[i], swflx[i], swhr[i])
bsd-3-clause
benchisell/photostream-bc
flask/lib/python2.7/site-packages/pip-1.5.6-py2.7.egg/pip/exceptions.py
398
1086
"""Exceptions used throughout package""" class PipError(Exception): """Base pip exception""" class InstallationError(PipError): """General exception during installation""" class UninstallationError(PipError): """General exception during uninstallation""" class DistributionNotFound(InstallationError): """Raised when a distribution cannot be found to satisfy a requirement""" class BestVersionAlreadyInstalled(PipError): """Raised when the most up-to-date version of a package is already installed. """ class BadCommand(PipError): """Raised when virtualenv or a command is not found""" class CommandError(PipError): """Raised when there is an error in command-line arguments""" class PreviousBuildDirError(PipError): """Raised when there's a previous conflicting build directory""" class HashMismatch(InstallationError): """Distribution file hash values don't match.""" class InvalidWheelFilename(InstallationError): """Invalid wheel filename.""" class UnsupportedWheel(InstallationError): """Unsupported wheel."""
bsd-3-clause
TechAtNYU/feedback-service
feedback.py
1
3796
import requests import secrets import smtplib headers = { 'content-type': 'application/vnd.api+json', 'accept': 'application/*, text/*', 'authorization': 'Bearer ' + secrets.tnyu_api_key } def get_emails(event_id, event_data, eboard_members, attendees): res = requests.get('https://api.tnyu.org/v3/events/' + event_id + '?include=attendees', headers=headers, verify=False) if r.status_code != 200: return r = res.json() event_data.append(r['data']) for post in r['included']: if post['attributes'].get('contact'): if post['attributes']['roles']: eboard_members.append(post) else: attendees.append(post) def send_emails(event_data, survey_link, eboard_members, attendees): server = smtplib.SMTP('smtp.gmail.com', 587) server.starttls() server.login(secrets.tnyu_email, secrets.tnyu_email_password) for i, member in enumerate(eboard_members): msg = "\r\n".join([ 'Hi ' + eboard_members[i]['attributes']['name'] + '!\n\n' + 'Thanks for coming out! We are constantly looking to improve ' + 'on our events, and we would really appreciate it if you ' + 'could take two minutes out of your day to fill out our' + 'feedback form. We\'d love to know how we could do better: ' + survey_link + '?rsvpId=' + eboard_members[i]['id'], '', 'Filling the form out will give us an idea of how everything ' + 'went and if there was something you really liked about the ' + 'event or something you did not like.\n', 'Feel free to email feedback@techatnyu.org if you have ' + 'other questions or concerns.', '', 'Thank you,', 'Tech@NYU team' ]) try: server.sendmail(secrets.tnyu_email, eboard_members[i][ 'attributes']['contact']['email'], msg) except UnicodeEncodeError: continue for i, attendee in enumerate(attendees): msg = "\r\n".join([ "From: " + secrets.tnyu_email, "To: " + attendees[j]['attributes']['contact']['email'], "Subject: Thank you for coming to Tech@NYU's " + event_data[0]['attributes']['title'], '', 'Hi ' + attendees[j]['attributes']['name'] + '!\n\n' + 'Thanks for coming out! 
We are constantly looking to improve ' + 'on our events, and we would really appreciate it if you could ' + ' take two minutes out of your day to fill out our feedback ' + 'form. We\'d love to know how we could do better: ' + survey_link + '?rsvpId=' + attendees[j]['id'], '', 'Filling the form out will give us an idea of how everything ' + 'went and if there was something you really liked about the ' + 'event or something you did not like.\n', 'Feel free to email feedback@techatnyu.org if you have other ' + 'questions or concerns.', '', 'Thank you,', 'Tech@NYU team' ]) try: server.sendmail(secrets.tnyu_email, attendees[j][ 'attributes']['contact']['email'], msg) except UnicodeEncodeError: continue server.quit() def main(): event_id = '5644e5e37af46de029dfb9f9' eboard_members = [] attendees = [] event_data = [] survey_link = 'https://techatnyu.typeform.com/to/ElE6F5' print attendees[0] get_emails(event_id, event_data, eboard_members, attendees) send_emails(event_data, survey_link, eboard_members, attendees) main()
mit
RTS2/rts2
python/rts2/progressbar.py
3
1665
# Prints to console pretty progress bar # # (C) 2016 Petr Kubanek <petr@kubanek.net> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. import sys import fcntl import termios import struct import time import atexit def show_cursor(): sys.stderr.write('\x1b[?;25;h') sys.stderr.flush() atexit.register(show_cursor) try: COLS = struct.unpack('hh', fcntl.ioctl(sys.stderr, termios.TIOCGWINSZ, '1234'))[1] except IOError as io: # outputing to file.. COLS = 0 def progress(current, total): if COLS <= 0: return prefix = '{0:5} / {1}'.format(current, total) bar_start = ' [' bar_end = ']' bar_size = COLS - len(prefix + bar_start + bar_end) if current >= total: amount = bar_size remain = 0 else: amount = int(current / (total / float(bar_size))) remain = bar_size - amount bar = '\x1b[7m' + ' ' * amount + '\x1b[0m' + ' ' * remain sys.stderr.write('\x1b[?;25;l' + prefix + bar_start + bar + bar_end + '\r') if current >= total: sys.stderr.write('\n\x1b[?;25;h') sys.stderr.flush()
lgpl-3.0
bsmr-eve/Pyfa
gui/fitCommands/guiToggleModuleState.py
2
1208
import wx from service.fit import Fit import gui.mainFrame from gui import globalEvents as GE from .calc.fitChangeState import FitChangeStatesCommand class GuiModuleStateChangeCommand(wx.Command): def __init__(self, fitID, baseMod, modules, click): # todo: instead of modules, needs to be positions. Dead objects are a thing wx.Command.__init__(self, True, "Module State Change") self.mainFrame = gui.mainFrame.MainFrame.getInstance() self.sFit = Fit.getInstance() self.fitID = fitID self.baseMod = baseMod self.modules = modules self.click = click self.internal_history = wx.CommandProcessor() def Do(self): if self.internal_history.Submit(FitChangeStatesCommand(self.fitID, self.baseMod, self.modules, self.click)): self.sFit.recalc(self.fitID) wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=self.fitID)) return True return False def Undo(self): for _ in self.internal_history.Commands: self.internal_history.Undo() self.sFit.recalc(self.fitID) wx.PostEvent(self.mainFrame, GE.FitChanged(fitID=self.fitID)) return True
gpl-3.0
drybjed/ansible-modules-extras
notification/mqtt.py
101
4848
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, 2014, Jan-Piet Mens <jpmens () gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # DOCUMENTATION = ''' --- module: mqtt short_description: Publish a message on an MQTT topic for the IoT version_added: "1.2" description: - Publish a message on an MQTT topic. options: server: description: - MQTT broker address/name required: false default: localhost port: description: - MQTT broker port number required: false default: 1883 username: description: - Username to authenticate against the broker. required: false password: description: - Password for C(username) to authenticate against the broker. required: false client_id: description: - MQTT client identifier required: false default: hostname + pid topic: description: - MQTT topic name required: true default: null payload: description: - Payload. The special string C("None") may be used to send a NULL (i.e. empty) payload which is useful to simply notify with the I(topic) or to clear previously retained messages. required: true default: null qos: description: - QoS (Quality of Service) required: false default: 0 choices: [ "0", "1", "2" ] retain: description: - Setting this flag causes the broker to retain (i.e. keep) the message so that applications that subsequently subscribe to the topic can received the last retained message immediately. 
required: false default: False # informational: requirements for nodes requirements: [ mosquitto ] notes: - This module requires a connection to an MQTT broker such as Mosquitto U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)). author: "Jan-Piet Mens (@jpmens)" ''' EXAMPLES = ''' - local_action: mqtt topic=service/ansible/{{ ansible_hostname }} payload="Hello at {{ ansible_date_time.iso8601 }}" qos=0 retain=false client_id=ans001 ''' # =========================================== # MQTT module support methods. # HAS_PAHOMQTT = True try: import socket import paho.mqtt.publish as mqtt except ImportError: HAS_PAHOMQTT = False # =========================================== # Main # def main(): module = AnsibleModule( argument_spec=dict( server = dict(default = 'localhost'), port = dict(default = 1883), topic = dict(required = True), payload = dict(required = True), client_id = dict(default = None), qos = dict(default="0", choices=["0", "1", "2"]), retain = dict(default=False, type='bool'), username = dict(default = None), password = dict(default = None), ), supports_check_mode=True ) if not HAS_PAHOMQTT: module.fail_json(msg="Paho MQTT is not installed") server = module.params.get("server", 'localhost') port = module.params.get("port", 1883) topic = module.params.get("topic") payload = module.params.get("payload") client_id = module.params.get("client_id", '') qos = int(module.params.get("qos", 0)) retain = module.params.get("retain") username = module.params.get("username", None) password = module.params.get("password", None) if client_id is None: client_id = "%s_%s" % (socket.getfqdn(), os.getpid()) if payload and payload == 'None': payload = None auth=None if username is not None: auth = { 'username' : username, 'password' : password } try: rc = mqtt.single(topic, payload, qos=qos, retain=retain, client_id=client_id, hostname=server, port=port, auth=auth) except Exception, e: module.fail_json(msg="unable to 
publish to MQTT broker %s" % (e)) module.exit_json(changed=False, topic=topic) # import module snippets from ansible.module_utils.basic import * main()
gpl-3.0
andybak/ansible-modules-extras
messaging/rabbitmq_parameter.py
104
4614
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Chatham Financial <oss@chathamfinancial.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: rabbitmq_parameter short_description: Adds or removes parameters to RabbitMQ description: - Manage dynamic, cluster-wide parameters for RabbitMQ version_added: "1.1" author: '"Chris Hoffman (@chrishoffman)"' options: component: description: - Name of the component of which the parameter is being set required: true default: null name: description: - Name of the parameter being set required: true default: null value: description: - Value of the parameter, as a JSON term required: false default: null vhost: description: - vhost to apply access privileges. 
required: false default: / node: description: - erlang node name of the rabbit we wish to configure required: false default: rabbit version_added: "1.2" state: description: - Specify if user is to be added or removed required: false default: present choices: [ 'present', 'absent'] ''' EXAMPLES = """ # Set the federation parameter 'local_username' to a value of 'guest' (in quotes) - rabbitmq_parameter: component=federation name=local-username value='"guest"' state=present """ class RabbitMqParameter(object): def __init__(self, module, component, name, value, vhost, node): self.module = module self.component = component self.name = name self.value = value self.vhost = vhost self.node = node self._value = None self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) def _exec(self, args, run_in_check_mode=False): if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): cmd = [self._rabbitmqctl, '-q', '-n', self.node] rc, out, err = self.module.run_command(cmd + args, check_rc=True) return out.splitlines() return list() def get(self): parameters = self._exec(['list_parameters', '-p', self.vhost], True) for param_item in parameters: component, name, value = param_item.split('\t') if component == self.component and name == self.name: self._value = value return True return False def set(self): self._exec(['set_parameter', '-p', self.vhost, self.component, self.name, self.value]) def delete(self): self._exec(['clear_parameter', '-p', self.vhost, self.component, self.name]) def has_modifications(self): return self.value != self._value def main(): arg_spec = dict( component=dict(required=True), name=dict(required=True), value=dict(default=None), vhost=dict(default='/'), state=dict(default='present', choices=['present', 'absent']), node=dict(default='rabbit') ) module = AnsibleModule( argument_spec=arg_spec, supports_check_mode=True ) component = module.params['component'] name = module.params['name'] value = module.params['value'] vhost = 
module.params['vhost'] state = module.params['state'] node = module.params['node'] rabbitmq_parameter = RabbitMqParameter(module, component, name, value, vhost, node) changed = False if rabbitmq_parameter.get(): if state == 'absent': rabbitmq_parameter.delete() changed = True else: if rabbitmq_parameter.has_modifications(): rabbitmq_parameter.set() changed = True elif state == 'present': rabbitmq_parameter.set() changed = True module.exit_json(changed=changed, component=component, name=name, vhost=vhost, state=state) # import module snippets from ansible.module_utils.basic import * main()
gpl-3.0
arjunasuresh3/Mypykoans
python 2/runner/path_to_enlightenment.py
20
4759
#!/usr/bin/env python # -*- coding: utf-8 -*- # The path to enlightenment starts with the following: import unittest from koans.about_asserts import AboutAsserts from koans.about_none import AboutNone from koans.about_lists import AboutLists from koans.about_list_assignments import AboutListAssignments from koans.about_dictionaries import AboutDictionaries from koans.about_strings import AboutStrings from koans.about_tuples import AboutTuples from koans.about_methods import AboutMethods from koans.about_control_statements import AboutControlStatements from koans.about_true_and_false import AboutTrueAndFalse from koans.about_sets import AboutSets from koans.about_triangle_project import AboutTriangleProject from koans.about_exceptions import AboutExceptions from koans.about_triangle_project2 import AboutTriangleProject2 from koans.about_iteration import AboutIteration from koans.about_generators import AboutGenerators from koans.about_lambdas import AboutLambdas from koans.about_scoring_project import AboutScoringProject from koans.about_classes import AboutClasses from koans.about_new_style_classes import AboutNewStyleClasses from koans.about_with_statements import AboutWithStatements from koans.about_monkey_patching import AboutMonkeyPatching from koans.about_dice_project import AboutDiceProject from koans.about_method_bindings import AboutMethodBindings from koans.about_decorating_with_functions import AboutDecoratingWithFunctions from koans.about_decorating_with_classes import AboutDecoratingWithClasses from koans.about_inheritance import AboutInheritance from koans.about_multiple_inheritance import AboutMultipleInheritance from koans.about_regex import AboutRegex from koans.about_scope import AboutScope from koans.about_modules import AboutModules from koans.about_packages import AboutPackages from koans.about_class_attributes import AboutClassAttributes from koans.about_attribute_access import AboutAttributeAccess from koans.about_deleting_objects import 
AboutDeletingObjects from koans.about_proxy_object_project import * from koans.about_extra_credit import AboutExtraCredit def koans(): loader = unittest.TestLoader() suite = unittest.TestSuite() loader.sortTestMethodsUsing = None suite.addTests(loader.loadTestsFromTestCase(AboutAsserts)) suite.addTests(loader.loadTestsFromTestCase(AboutNone)) suite.addTests(loader.loadTestsFromTestCase(AboutLists)) suite.addTests(loader.loadTestsFromTestCase(AboutListAssignments)) suite.addTests(loader.loadTestsFromTestCase(AboutDictionaries)) suite.addTests(loader.loadTestsFromTestCase(AboutStrings)) suite.addTests(loader.loadTestsFromTestCase(AboutTuples)) suite.addTests(loader.loadTestsFromTestCase(AboutMethods)) suite.addTests(loader.loadTestsFromTestCase(AboutControlStatements)) suite.addTests(loader.loadTestsFromTestCase(AboutTrueAndFalse)) suite.addTests(loader.loadTestsFromTestCase(AboutSets)) suite.addTests(loader.loadTestsFromTestCase(AboutTriangleProject)) suite.addTests(loader.loadTestsFromTestCase(AboutExceptions)) suite.addTests(loader.loadTestsFromTestCase(AboutTriangleProject2)) suite.addTests(loader.loadTestsFromTestCase(AboutIteration)) suite.addTests(loader.loadTestsFromTestCase(AboutGenerators)) suite.addTests(loader.loadTestsFromTestCase(AboutLambdas)) suite.addTests(loader.loadTestsFromTestCase(AboutScoringProject)) suite.addTests(loader.loadTestsFromTestCase(AboutClasses)) suite.addTests(loader.loadTestsFromTestCase(AboutNewStyleClasses)) suite.addTests(loader.loadTestsFromTestCase(AboutWithStatements)) suite.addTests(loader.loadTestsFromTestCase(AboutMonkeyPatching)) suite.addTests(loader.loadTestsFromTestCase(AboutDiceProject)) suite.addTests(loader.loadTestsFromTestCase(AboutMethodBindings)) suite.addTests(loader.loadTestsFromTestCase(AboutDecoratingWithFunctions)) suite.addTests(loader.loadTestsFromTestCase(AboutDecoratingWithClasses)) suite.addTests(loader.loadTestsFromTestCase(AboutInheritance)) 
suite.addTests(loader.loadTestsFromTestCase(AboutMultipleInheritance)) suite.addTests(loader.loadTestsFromTestCase(AboutScope)) suite.addTests(loader.loadTestsFromTestCase(AboutModules)) suite.addTests(loader.loadTestsFromTestCase(AboutPackages)) suite.addTests(loader.loadTestsFromTestCase(AboutClassAttributes)) suite.addTests(loader.loadTestsFromTestCase(AboutAttributeAccess)) suite.addTests(loader.loadTestsFromTestCase(AboutDeletingObjects)) suite.addTests(loader.loadTestsFromTestCase(AboutProxyObjectProject)) suite.addTests(loader.loadTestsFromTestCase(TelevisionTest)) suite.addTests(loader.loadTestsFromTestCase(AboutExtraCredit)) return suite
mit
PaddlePaddle/Paddle
python/paddle/fluid/tests/unittests/test_pad_op.py
2
3374
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import unittest import numpy as np from op_test import OpTest import paddle.fluid.core as core import paddle.fluid as fluid from paddle.fluid import Program, program_guard class TestPadOp(OpTest): def setUp(self): self.initTestCase() self.dtype = self.get_dtype() self.op_type = "pad" self.inputs = {'X': np.random.random(self.shape).astype(self.dtype), } self.attrs = {} self.attrs['paddings'] = np.array(self.paddings).flatten() self.attrs['pad_value'] = self.pad_value self.outputs = { 'Out': np.pad(self.inputs['X'], self.paddings, mode='constant', constant_values=self.pad_value) } def get_dtype(self): return np.float64 def test_check_output(self): self.check_output() def test_check_grad_normal(self): self.check_grad(['X'], 'Out') def initTestCase(self): self.shape = (16, 16) self.paddings = [(0, 1), (2, 3)] self.pad_value = 0.0 class TestCase1(TestPadOp): def initTestCase(self): self.shape = (2, 3, 4, 5) self.paddings = [(0, 1), (2, 3), (2, 1), (1, 1)] self.pad_value = 0.5 class TestCase2(TestPadOp): def initTestCase(self): self.shape = (5, 5, 5) self.paddings = [(0, 0), (0, 0), (1, 2)] self.pad_value = 1.0 class TestCase3(TestPadOp): def initTestCase(self): self.shape = (100) self.paddings = [(0, 1)] self.pad_value = 0.9 #----------------Pad Fp16---------------- def create_test_fp16(parent): @unittest.skipIf(not 
core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestPadFp16(parent): def get_dtype(self): return np.float16 def test_check_grad_normal(self): self.check_grad(['X'], 'Out', max_relative_error=0.3) cls_name = "{0}_{1}".format(parent.__name__, "Fp16") TestPadFp16.__name__ = cls_name globals()[cls_name] = TestPadFp16 create_test_fp16(TestPadOp) create_test_fp16(TestCase1) create_test_fp16(TestCase2) create_test_fp16(TestCase3) class TestPadOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): input_data = np.random.random((2, 2)).astype("float32") def test_Variable(): fluid.layers.pad(x=input_data, paddings=[1, 1, 1, 1]) self.assertRaises(TypeError, test_Variable) data = fluid.data(name='data', shape=[4], dtype='float16') fluid.layers.pad(x=data, paddings=[0, 1]) if __name__ == '__main__': unittest.main()
apache-2.0
thurt/arangodb
3rdParty/V8-4.3.61/third_party/python_26/Lib/distutils/spawn.py
63
6991
"""distutils.spawn Provides the 'spawn()' function, a front-end to various platform- specific functions for launching another program in a sub-process. Also provides the 'find_executable()' to search the path for a given executable name. """ # This module should be kept compatible with Python 2.1. __revision__ = "$Id: spawn.py 37828 2004-11-10 22:23:15Z loewis $" import sys, os, string from distutils.errors import * from distutils import log def spawn (cmd, search_path=1, verbose=0, dry_run=0): """Run another program, specified as a command list 'cmd', in a new process. 'cmd' is just the argument list for the new process, ie. cmd[0] is the program to run and cmd[1:] are the rest of its arguments. There is no way to run a program with a name different from that of its executable. If 'search_path' is true (the default), the system's executable search path will be used to find the program; otherwise, cmd[0] must be the exact path to the executable. If 'dry_run' is true, the command will not actually be run. Raise DistutilsExecError if running the program fails in any way; just return on success. """ if os.name == 'posix': _spawn_posix(cmd, search_path, dry_run=dry_run) elif os.name == 'nt': _spawn_nt(cmd, search_path, dry_run=dry_run) elif os.name == 'os2': _spawn_os2(cmd, search_path, dry_run=dry_run) else: raise DistutilsPlatformError, \ "don't know how to spawn programs on platform '%s'" % os.name # spawn () def _nt_quote_args (args): """Quote command-line arguments for DOS/Windows conventions: just wraps every argument which contains blanks in double quotes, and returns a new argument list. """ # XXX this doesn't seem very robust to me -- but if the Windows guys # say it'll work, I guess I'll have to accept it. (What if an arg # contains quotes? What other magic characters, other than spaces, # have to be escaped? Is there an escaping mechanism other than # quoting?) 
for i in range(len(args)): if string.find(args[i], ' ') != -1: args[i] = '"%s"' % args[i] return args def _spawn_nt (cmd, search_path=1, verbose=0, dry_run=0): executable = cmd[0] cmd = _nt_quote_args(cmd) if search_path: # either we find one or it stays the same executable = find_executable(executable) or executable log.info(string.join([executable] + cmd[1:], ' ')) if not dry_run: # spawn for NT requires a full path to the .exe try: rc = os.spawnv(os.P_WAIT, executable, cmd) except OSError, exc: # this seems to happen when the command isn't found raise DistutilsExecError, \ "command '%s' failed: %s" % (cmd[0], exc[-1]) if rc != 0: # and this reflects the command running but failing raise DistutilsExecError, \ "command '%s' failed with exit status %d" % (cmd[0], rc) def _spawn_os2 (cmd, search_path=1, verbose=0, dry_run=0): executable = cmd[0] #cmd = _nt_quote_args(cmd) if search_path: # either we find one or it stays the same executable = find_executable(executable) or executable log.info(string.join([executable] + cmd[1:], ' ')) if not dry_run: # spawnv for OS/2 EMX requires a full path to the .exe try: rc = os.spawnv(os.P_WAIT, executable, cmd) except OSError, exc: # this seems to happen when the command isn't found raise DistutilsExecError, \ "command '%s' failed: %s" % (cmd[0], exc[-1]) if rc != 0: # and this reflects the command running but failing print "command '%s' failed with exit status %d" % (cmd[0], rc) raise DistutilsExecError, \ "command '%s' failed with exit status %d" % (cmd[0], rc) def _spawn_posix (cmd, search_path=1, verbose=0, dry_run=0): log.info(string.join(cmd, ' ')) if dry_run: return exec_fn = search_path and os.execvp or os.execv pid = os.fork() if pid == 0: # in the child try: #print "cmd[0] =", cmd[0] #print "cmd =", cmd exec_fn(cmd[0], cmd) except OSError, e: sys.stderr.write("unable to execute %s: %s\n" % (cmd[0], e.strerror)) os._exit(1) sys.stderr.write("unable to execute %s for unknown reasons" % cmd[0]) os._exit(1) else: # in the 
parent # Loop until the child either exits or is terminated by a signal # (ie. keep waiting if it's merely stopped) while 1: try: (pid, status) = os.waitpid(pid, 0) except OSError, exc: import errno if exc.errno == errno.EINTR: continue raise DistutilsExecError, \ "command '%s' failed: %s" % (cmd[0], exc[-1]) if os.WIFSIGNALED(status): raise DistutilsExecError, \ "command '%s' terminated by signal %d" % \ (cmd[0], os.WTERMSIG(status)) elif os.WIFEXITED(status): exit_status = os.WEXITSTATUS(status) if exit_status == 0: return # hey, it succeeded! else: raise DistutilsExecError, \ "command '%s' failed with exit status %d" % \ (cmd[0], exit_status) elif os.WIFSTOPPED(status): continue else: raise DistutilsExecError, \ "unknown error executing '%s': termination status %d" % \ (cmd[0], status) # _spawn_posix () def find_executable(executable, path=None): """Try to find 'executable' in the directories listed in 'path' (a string listing directories separated by 'os.pathsep'; defaults to os.environ['PATH']). Returns the complete filename or None if not found. """ if path is None: path = os.environ['PATH'] paths = string.split(path, os.pathsep) (base, ext) = os.path.splitext(executable) if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'): executable = executable + '.exe' if not os.path.isfile(executable): for p in paths: f = os.path.join(p, executable) if os.path.isfile(f): # the file exists, we have a shot at spawn working return f return None else: return executable # find_executable()
apache-2.0
bobbyrward/fr0st
fr0stlib/gui/utils.py
1
8708
import wx, os from functools import partial from fr0stlib.decorators import * def LoadIcon(*path): # Check for an icons dir in app base path first for development filename = os.path.join(wx.GetApp().AppBaseDir, 'icons', *path) + '.png' if not os.path.exists(filename): # Not there, check install path filename = os.path.join(wx.GetApp().IconsDir, *path) + '.png' img = wx.Image(filename, type=wx.BITMAP_TYPE_PNG) img.Rescale(16,16) return wx.BitmapFromImage(img) def Box(self, name, *a, **k): box = wx.StaticBoxSizer(wx.StaticBox(self, -1, name), k.get('orient', wx.VERTICAL)) box.AddMany(a) return box def MakeTCs(self, *a, **k): fgs = wx.FlexGridSizer(99, 2, 1, 1) tcs = {} for i, default in a: tc = NumberTextCtrl(self, **k) tc.SetFloat(default) tcs[i] = tc fgs.Add(wx.StaticText(self, -1, i.replace("_", " ").title()), 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) fgs.Add(tc, 0, wx.ALIGN_RIGHT, 5) return fgs, tcs class MyChoice(wx.Choice): def __init__(self, parent, name, d, initial): self.d = d choices = sorted(d.iteritems()) wx.Choice.__init__(self, parent, -1, choices=[k for k,_ in choices]) self.SetSelection([v for _,v in choices].index(initial)) def GetFloat(self): return self.d[self.GetStringSelection()] class SizePanel(wx.Panel): def __init__(self, parent, callback=lambda: None): self.parent = parent self.keepratio = True self.callback = callback wx.Panel.__init__(self, parent, -1) fgs, tcs = MakeTCs(self, ("width", 512.), ("height", 384.), low=0, callback=self.SizeCallback) self.__dict__.update(tcs) for i in (self.width, self.height): i.MakeIntOnly() i.low = 1 ratio = wx.CheckBox(self, -1, "Keep Ratio") ratio.SetValue(True) ratio.Bind(wx.EVT_CHECKBOX, self.OnRatio) box = Box(self, "Size", fgs, ratio) self.SetSizer(box) box.Fit(self) def GetInts(self): return [int(tc.GetFloat()) for tc in (self.width, self.height)] def UpdateSize(self, size): width, height = (float(i) for i in size) self.width.SetFloat(width) self.height.SetFloat(height) self.ratio = width / height def 
OnRatio(self, e): self.keepratio = e.GetInt() def SizeCallback(self, tc, tempsave=None): if self.keepratio: v = tc.GetFloat() tc.SetInt(v) if tc == self.width: w, h = v, v / self.ratio self.height.SetInt(h) else: w, h = v * self.ratio, v self.width.SetInt(w) else: self.ratio = float(self.width.GetFloat()) / self.height.GetFloat() self.callback() class NumberTextCtrl(wx.TextCtrl): low = None high = None @BindEvents def __init__(self, parent, low=None, high=None, callback=None): self.parent = parent # Size is set to ubuntu default (75,27), maybe make it 75x21 in win wx.TextCtrl.__init__(self,parent,-1, size=(75,27)) if (low,high) != (None,None): self.SetAllowedRange(low, high) if callback: self.callback = partial(callback, self) else: self.callback = lambda tempsave=None: None self.HasChanged = False self.SetFloat(0.0) def GetFloat(self): return float(self.GetValue() or "0") def SetFloat(self, v): v = self.Checkrange(float(v)) self._value = v string = ("%.6f" %v).rstrip("0") if string.endswith("."): string += "0" # Avoid values like '0.' or '1.' self.SetValue(string) def GetInt(self): return int(self.GetValue() or "0") def SetInt(self, v): v = self.Checkrange(int(v)) self._value = v self.SetValue(str(v)) def MakeIntOnly(self): self.SetInt(self.GetFloat()) self.SetFloat, self.GetFloat = self.SetInt, self.GetInt def SetAllowedRange(self, low=None, high=None): self.low = low self.high = high def Checkrange(self, v): if self.low is not None and v < self.low: return self.low elif self.high is not None and v > self.high: return self.high return v @Bind(wx.EVT_MOUSEWHEEL) def OnMouseWheel(self, evt): if self.SetFloat == self.SetInt: return if evt.CmdDown(): if evt.AltDown(): delta = 0.01 else: delta = 0.1 elif evt.AltDown(): delta = 0.001 else: evt.Skip() return self.SetFocus() # Makes sure OnKeyUp gets called. 
v = self._value + delta * evt.GetWheelRotation() / evt.GetWheelDelta() self.SetFloat(v) self.callback(tempsave=False) self.HasChanged = True @Bind(wx.EVT_KEY_UP) def OnKeyUp(self, e): # TODO: This code is duplicated with the one found in xformeditor. key = e.GetKeyCode() if (key == wx.WXK_CONTROL and not e.AltDown()) or ( key == wx.WXK_ALT and not e.ControlDown()): if self.HasChanged: if hasattr(self.parent, 'parent') and hasattr(self.parent.parent, 'TreePanel'): self.parent.parent.TreePanel.TempSave() self.HasChanged = False @Bind(wx.EVT_CHAR) def OnChar(self, event): key = event.GetKeyCode() if key in [wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER]: self.OnKillFocus(None) elif key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255 or key == wx.WXK_TAB: event.Skip() elif chr(key) in "0123456789.-": event.Skip() else: # not calling Skip() eats the event pass #wx.Bell() @Bind(wx.EVT_KILL_FOCUS) def OnKillFocus(self,event): # cmp done with strings because equal floats can compare differently. if str(self._value) != self.GetValue(): try: v = self.GetFloat() # Can raise ValueError except ValueError: self.SetFloat(self._value) return self.SetFloat(v) self.callback() class MultiSliderMixin(object): """Class to dynamically create and control sliders.""" _new = None _changed = False def __init__(self, *a, **k): super(MultiSliderMixin, self).__init__(*a, **k) self.sliders = {} self.Bind(wx.EVT_IDLE, self.OnIdle) def MakeSlider(self, name, init, low, high, strictrange=True): """Programatically builds stuff.""" tc = NumberTextCtrl(self, callback=self.__callback) if strictrange: tc.SetAllowedRange(low, high) slider = wx.Slider(self, -1, init*100, low*100, high*100, style=wx.SL_HORIZONTAL | wx.SL_SELRANGE ) self.sliders[name] = slider, tc slider.Bind(wx.EVT_SLIDER, partial(self.OnSlider, tc=tc)) ## slider.Bind(wx.EVT_LEFT_DOWN, self.OnSliderDown) slider.Bind(wx.EVT_LEFT_UP, self.OnSliderUp) name = name.replace("_", " ").title() return Box(self, name, tc, (slider, wx.EXPAND), 
orient=wx.HORIZONTAL) def UpdateSlider(self, name, val): slider, tc = self.sliders[name] slider.SetValue(int(val*100)) tc.SetFloat(val) def IterSliders(self): for name, (_, tc) in self.sliders.iteritems(): yield name, tc.GetFloat() def OnSlider(self, e, tc): val = e.GetInt()/100. # Make sure _new is only set when there are actual changes. if val != tc._value: self._new = True tc.SetFloat(str(val)) e.Skip() ## def OnSliderDown(self, e): ## e.Skip() def OnSliderUp(self, e): if self._changed: self.parent.TreePanel.TempSave() self._changed = False e.Skip() def OnIdle(self, e): if self._new is not None: self.UpdateFlame() self._new = None self._changed = True def __callback(self, tc, tempsave=True): self.UpdateFlame() if tempsave: self.parent.TreePanel.TempSave() def UpdateFlame(self): Abstract def UpdateView(self): Abstract
gpl-3.0
sam-m888/gramps
gramps/plugins/docgen/gtkprint.py
9
20880
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2007 Zsolt Foldvari # Copyright (C) 2008-2009 Brian G. Matherly # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """Printing interface based on Gtk.Print* classes. """ #------------------------------------------------------------------------ # # Python modules # #------------------------------------------------------------------------ from math import radians import logging #------------------------------------------------------------------------- # # GTK modules # #------------------------------------------------------------------------- import cairo try: # the Gramps-Connect server has no DISPLAY from gi.repository import GObject from gi.repository import Gtk except: pass #------------------------------------------------------------------------ # # Gramps modules # #------------------------------------------------------------------------ from gramps.gen.plug.docgen import PAPER_PORTRAIT import gramps.plugins.lib.libcairodoc as libcairodoc from gramps.gen.const import GRAMPS_LOCALE as glocale _ = glocale.translation.gettext #------------------------------------------------------------------------ # # Set up logging # #------------------------------------------------------------------------ LOG = logging.getLogger(".GtkPrint") 
#------------------------------------------------------------------------ # # Constants # #------------------------------------------------------------------------ # printer settings (might be needed to align for different platforms) PRINTER_DPI = 72.0 PRINTER_SCALE = 1.0 # the print settings to remember between print sessions PRINT_SETTINGS = None # minimum spacing around a page in print preview MARGIN = 6 # zoom modes in print preview (ZOOM_BEST_FIT, ZOOM_FIT_WIDTH, ZOOM_FREE,) = list(range(3)) #------------------------------------------------------------------------ # # Converter functions # #------------------------------------------------------------------------ def paperstyle_to_pagesetup(paper_style): """Convert a PaperStyle instance into a Gtk.PageSetup instance. @param paper_style: Gramps paper style object to convert @param type: PaperStyle @return: page_setup @rtype: Gtk.PageSetup """ # paper size names according to 'PWG Candidate Standard 5101.1-2002' # ftp://ftp.pwg.org/pub/pwg/candidates/cs-pwgmsn10-20020226-5101.1.pdf gramps_to_gtk = { "Letter": "na_letter", "Legal": "na_legal", "A0": "iso_a0", "A1": "iso_a1", "A2": "iso_a2", "A3": "iso_a3", "A4": "iso_a4", "A5": "iso_a5", "B0": "iso_b0", "B1": "iso_b1", "B2": "iso_b2", "B3": "iso_b3", "B4": "iso_b4", "B5": "iso_b5", "B6": "iso_b6", "B": "na_ledger", "C": "na_c", "D": "na_d", "E": "na_e", } # First set the paper size gramps_paper_size = paper_style.get_size() gramps_paper_name = gramps_paper_size.get_name() # All sizes not included in the translation table (even if a standard size) # are handled as custom format, because we are not intelligent enough. 
if gramps_paper_name in gramps_to_gtk: paper_size = Gtk.PaperSize.new(name=gramps_to_gtk[gramps_paper_name]) LOG.debug("Selected paper size: %s", gramps_to_gtk[gramps_paper_name]) else: if paper_style.get_orientation() == PAPER_PORTRAIT: paper_width = gramps_paper_size.get_width() * 10 paper_height = gramps_paper_size.get_height() * 10 else: paper_width = gramps_paper_size.get_height() * 10 paper_height = gramps_paper_size.get_width() * 10 paper_size = Gtk.PaperSize.new_custom("custom", "Custom Size", paper_width, paper_height, Gtk.Unit.MM) LOG.debug("Selected paper size: (%f,%f)", paper_width, paper_height) page_setup = Gtk.PageSetup() page_setup.set_paper_size(paper_size) # Set paper orientation if paper_style.get_orientation() == PAPER_PORTRAIT: page_setup.set_orientation(Gtk.PageOrientation.PORTRAIT) else: page_setup.set_orientation(Gtk.PageOrientation.LANDSCAPE) # Set paper margins page_setup.set_top_margin(paper_style.get_top_margin() * 10, Gtk.Unit.MM) page_setup.set_bottom_margin(paper_style.get_bottom_margin() * 10, Gtk.Unit.MM) page_setup.set_left_margin(paper_style.get_left_margin() * 10, Gtk.Unit.MM) page_setup.set_right_margin(paper_style.get_right_margin() * 10, Gtk.Unit.MM) return page_setup #------------------------------------------------------------------------ # # PrintPreview class # #------------------------------------------------------------------------ class PrintPreview: """Implement a dialog to show print preview. """ zoom_factors = { 0.50: '50%', 0.75: '75%', 1.00: '100%', 1.25: '125%', 1.50: '150%', 1.75: '175%', 2.00: '200%', 3.00: '300%', 4.00: '400%', } def __init__(self, operation, preview, context, parent): self._operation = operation self._preview = preview self._context = context self._parent = parent self.__build_window() self._current_page = None # Private def __build_window(self): """Build the window from Glade. 
""" from gramps.gui.glade import Glade glade_xml = Glade() self._window = glade_xml.toplevel self._window.set_transient_for(self._parent) # remember active widgets for future use self._swin = glade_xml.get_object('swin') self._drawing_area = glade_xml.get_object('drawingarea') self._first_button = glade_xml.get_object('first') self._prev_button = glade_xml.get_object('prev') self._next_button = glade_xml.get_object('next') self._last_button = glade_xml.get_object('last') self._pages_entry = glade_xml.get_object('entry') self._pages_label = glade_xml.get_object('label') self._zoom_fit_width_button = glade_xml.get_object('zoom_fit_width') self._zoom_fit_width_button.set_stock_id('gramps-zoom-fit-width') self._zoom_best_fit_button = glade_xml.get_object('zoom_best_fit') self._zoom_best_fit_button.set_stock_id('gramps-zoom-best-fit') self._zoom_in_button = glade_xml.get_object('zoom_in') self._zoom_in_button.set_stock_id('gramps-zoom-in') self._zoom_out_button = glade_xml.get_object('zoom_out') self._zoom_out_button.set_stock_id('gramps-zoom-out') # connect the signals glade_xml.connect_signals(self) self._drawing_area.connect("draw", self.on_drawingarea_draw_event) ##def create_surface(self): ##return cairo.PDFSurface(StringIO(), ##self._context.get_width(), ##self._context.get_height()) ##def get_page(self, page_no): ##"""Get the cairo surface of the given page. ##Surfaces are also cached for instant access. ##""" ##if page_no >= len(self._page_numbers): ##LOG.debug("Page number %d doesn't exist." 
% page_no) ##page_no = 0 ##if page_no not in self._page_surfaces: ##surface = self.create_surface() ##cr = cairo.Context(surface) ##if PRINTER_SCALE != 1.0: ##cr.scale(PRINTER_SCALE, PRINTER_SCALE) ##self._context.set_cairo_context(cr, PRINTER_DPI, PRINTER_DPI) ##self._preview.render_page(self._page_numbers[page_no]) ##self._page_surfaces[page_no] = surface ##return self._page_surfaces[page_no] def __set_page(self, page_no): if page_no < 0 or page_no >= self._page_no: return if self._current_page != page_no: self._drawing_area.queue_draw() self._current_page = page_no self._first_button.set_sensitive(self._current_page) self._prev_button.set_sensitive(self._current_page) self._next_button.set_sensitive(self._current_page < self._page_no - 1) self._last_button.set_sensitive(self._current_page < self._page_no - 1) self._pages_entry.set_text('%d' % (self._current_page + 1)) def __set_zoom(self, zoom): self._zoom = zoom screen_width = int(self._paper_width * self._zoom + 2 * MARGIN) screen_height = int(self._paper_height * self._zoom + 2 * MARGIN) self._drawing_area.set_size_request(screen_width, screen_height) self._drawing_area.queue_draw() self._zoom_in_button.set_sensitive(self._zoom != max(self.zoom_factors)) self._zoom_out_button.set_sensitive(self._zoom != min(self.zoom_factors)) def __zoom_in(self): zoom = [z for z in self.zoom_factors if z > self._zoom] if zoom: return min(zoom) else: return self._zoom def __zoom_out(self): zoom = [z for z in self.zoom_factors if z < self._zoom] if zoom: return max(zoom) else: return self._zoom def __zoom_fit_width(self): width, height, vsb_w, hsb_h = self.__get_view_size() zoom = width / self._paper_width if self._paper_height * zoom > height: zoom = (width - vsb_w) / self._paper_width return zoom def __zoom_best_fit(self): width, height, vsb_w, hsb_h = self.__get_view_size() zoom = min(width / self._paper_width, height / self._paper_height) return zoom def __get_view_size(self): """Get the dimensions of the scrolled window. 
""" width = self._swin.get_allocated_width() - 2 * MARGIN height = self._swin.get_allocated_height() - 2 * MARGIN if self._swin.get_shadow_type() != Gtk.ShadowType.NONE: width -= 2 * self._swin.get_style().xthickness height -= 2 * self._swin.get_style().ythickness spacing = GObject.Value() spacing.init(GObject.TYPE_INT) spacing = self._swin.style_get_property('scrollbar-spacing', spacing) if spacing: spacing = spacing.get_int() else: spacing = 0 reqmin, req = self._swin.get_vscrollbar().get_preferred_size() vsb_w = spacing + req.width reqmin, req = self._swin.get_hscrollbar().get_preferred_size() hsb_h = spacing + req.height return width, height, vsb_w, hsb_h def __end_preview(self): self._operation.end_preview() # Signal handlers def on_drawingarea_draw_event(self, drawing_area, context): cr = context #cr.rectangle(event.area) #cr.clip() # get the extents of the page and the screen paper_w = int(self._paper_width * self._zoom) paper_h = int(self._paper_height * self._zoom) width, height, vsb_w, hsb_h = self.__get_view_size() if paper_h > height: width -= vsb_w if paper_w > width: height -= hsb_h # put the paper on the middle of the window xtranslate = MARGIN if paper_w < width: xtranslate += (width - paper_w) / 2 ytranslate = MARGIN if paper_h < height: ytranslate += (height - paper_h) / 2 cr.translate(xtranslate, ytranslate) # draw an empty white page cr.set_source_rgb(1.0, 1.0, 1.0) cr.rectangle(0, 0, paper_w, paper_h) cr.fill_preserve() cr.set_source_rgb(0, 0, 0) cr.set_line_width(1) cr.stroke() if self._orientation == Gtk.PageOrientation.LANDSCAPE: cr.rotate(radians(-90)) cr.translate(-paper_h, 0) ##page_setup = self._context.get_page_setup() ##cr.translate(page_setup.get_left_margin(Gtk.Unit.POINTS), ##page_setup.get_top_margin(Gtk.Unit.POINTS)) ##cr.set_source_surface(self.get_page(0)) ##cr.paint() # draw the content of the currently selected page # Here we use dpi scaling instead of scaling the cairo context, # because it gives better result. 
In the latter case the distance # of glyphs was changing. dpi = PRINTER_DPI * self._zoom self._context.set_cairo_context(cr, dpi, dpi) self._preview.render_page(self._current_page) def on_swin_size_allocate(self, scrolledwindow, allocation): if self._zoom_mode == ZOOM_FIT_WIDTH: self.__set_zoom(self.__zoom_fit_width()) if self._zoom_mode == ZOOM_BEST_FIT: self.__set_zoom(self.__zoom_best_fit()) def on_print_clicked(self, toolbutton): pass def on_first_clicked(self, toolbutton): self.__set_page(0) def on_prev_clicked(self, toolbutton): self.__set_page(self._current_page - 1) def on_next_clicked(self, toolbutton): self.__set_page(self._current_page + 1) def on_last_clicked(self, toolbutton): self.__set_page(self._page_no - 1) def on_entry_activate(self, entry): try: new_page = int(entry.get_text()) - 1 except ValueError: new_page = self._current_page if new_page < 0 or new_page >= self._page_no: new_page = self._current_page self.__set_page(new_page) def on_zoom_fit_width_toggled(self, toggletoolbutton): if toggletoolbutton.get_active(): self._zoom_best_fit_button.set_active(False) self._zoom_mode = ZOOM_FIT_WIDTH self.__set_zoom(self.__zoom_fit_width()) else: self._zoom_mode = ZOOM_FREE def on_zoom_best_fit_toggled(self, toggletoolbutton): if toggletoolbutton.get_active(): self._zoom_fit_width_button.set_active(False) self._zoom_mode = ZOOM_BEST_FIT self.__set_zoom(self.__zoom_best_fit()) else: self._zoom_mode = ZOOM_FREE def on_zoom_in_clicked(self, toolbutton): self._zoom_fit_width_button.set_active(False) self._zoom_best_fit_button.set_active(False) self._zoom_mode = ZOOM_FREE self.__set_zoom(self.__zoom_in()) def on_zoom_out_clicked(self, toolbutton): self._zoom_fit_width_button.set_active(False) self._zoom_best_fit_button.set_active(False) self._zoom_mode = ZOOM_FREE self.__set_zoom(self.__zoom_out()) def on_window_delete_event(self, widget, event): self.__end_preview() return False def on_quit_clicked(self, toolbutton): self.__end_preview() 
self._window.destroy() # Public def start(self): # get paper/page dimensions page_setup = self._context.get_page_setup() self._paper_width = page_setup.get_paper_width(Gtk.Unit.POINTS) self._paper_height = page_setup.get_paper_height(Gtk.Unit.POINTS) self._page_width = page_setup.get_page_width(Gtk.Unit.POINTS) self._page_height = page_setup.get_page_height(Gtk.Unit.POINTS) self._orientation = page_setup.get_orientation() # get the total number of pages ##self._page_numbers = [0,] ##self._page_surfaces = {} self._page_no = self._operation.get_property('n_pages') self._pages_label.set_text(_('of %d') % self._page_no) # set zoom level and initial page number self._zoom_mode = ZOOM_FREE self.__set_zoom(1.0) self.__set_page(0) # let's the show begin... self._window.show() #------------------------------------------------------------------------ # # GtkPrint class # #------------------------------------------------------------------------ class GtkPrint(libcairodoc.CairoDoc): """Print document via GtkPrint* interface. Requires Gtk+ 2.10. """ def run(self): """Run the Gtk Print operation. """ global PRINT_SETTINGS # get a page setup from the paper style we have page_setup = paperstyle_to_pagesetup(self.paper) # set up a print operation operation = Gtk.PrintOperation() operation.set_default_page_setup(page_setup) operation.connect("begin_print", self.on_begin_print) operation.connect("draw_page", self.on_draw_page) operation.connect("paginate", self.on_paginate) operation.connect("preview", self.on_preview) # set print settings if it was stored previously if PRINT_SETTINGS is not None: operation.set_print_settings(PRINT_SETTINGS) # run print dialog while True: self.preview = None res = operation.run(Gtk.PrintOperationAction.PRINT_DIALOG, self.uistate.window) if self.preview is None: # cancel or print break # set up printing again; can't reuse PrintOperation? 
operation = Gtk.PrintOperation() operation.set_default_page_setup(page_setup) operation.connect("begin_print", self.on_begin_print) operation.connect("draw_page", self.on_draw_page) operation.connect("paginate", self.on_paginate) operation.connect("preview", self.on_preview) # set print settings if it was stored previously if PRINT_SETTINGS is not None: operation.set_print_settings(PRINT_SETTINGS) # store print settings if printing was successful if res == Gtk.PrintOperationResult.APPLY: PRINT_SETTINGS = operation.get_print_settings() def on_begin_print(self, operation, context): """Setup environment for printing. """ # get data from context here only once to save time on pagination self.page_width = round(context.get_width()) self.page_height = round(context.get_height()) self.dpi_x = context.get_dpi_x() self.dpi_y = context.get_dpi_y() def on_paginate(self, operation, context): """Paginate the whole document in chunks. """ layout = context.create_pango_layout() finished = self.paginate(layout, self.page_width, self.page_height, self.dpi_x, self.dpi_y) # update page number operation.set_n_pages(len(self._pages)) # start preview if needed if finished and self.preview: self.preview.start() return finished def on_draw_page(self, operation, context, page_nr): """Draw the requested page. """ cr = context.get_cairo_context() layout = context.create_pango_layout() width = round(context.get_width()) height = round(context.get_height()) dpi_x = context.get_dpi_x() dpi_y = context.get_dpi_y() self.draw_page(page_nr, cr, layout, width, height, dpi_x, dpi_y) def on_preview(self, operation, preview, context, parent): """Implement custom print preview functionality. 
""" ##if constfunc.win()': ##return False self.preview = PrintPreview(operation, preview, context, parent) # give a dummy cairo context to Gtk.PrintContext, # PrintPreview will update it with the real one try: width = int(round(context.get_width())) except ValueError: width = 0 try: height = int(round(context.get_height())) except ValueError: height = 0 surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height) cr = cairo.Context(surface) context.set_cairo_context(cr, PRINTER_DPI, PRINTER_DPI) return True
gpl-2.0
qwefi/nova
nova/network/driver.py
13
1391
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo.config import cfg from nova.openstack.common import importutils from nova.openstack.common import log as logging driver_opts = [ cfg.StrOpt('network_driver', default='nova.network.linux_net', help='Driver to use for network creation'), ] CONF = cfg.CONF CONF.register_opts(driver_opts) LOG = logging.getLogger(__name__) def load_network_driver(network_driver=None): if not network_driver: network_driver = CONF.network_driver if not network_driver: LOG.error(_("Network driver option required, but not specified")) sys.exit(1) LOG.info(_("Loading network driver '%s'") % network_driver) return importutils.import_module(network_driver)
apache-2.0
repotvsupertuga/tvsupertuga.repository
plugin.video.loganaddon/resources/lib/libraries/cachemeta.py
23
2531
# -*- coding: utf-8 -*- ''' Specto Add-on Copyright (C) 2015 lambda This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import re,hashlib,time try: from sqlite3 import dbapi2 as database except: from pysqlite2 import dbapi2 as database from resources.lib.libraries import control def get(function, timeout, *args): try: response = None f = repr(function) f = re.sub('.+\smethod\s|.+function\s|\sat\s.+|\sof\s.+', '', f) a = hashlib.md5() for i in args: a.update(str(i)) a = str(a.hexdigest()) except: pass try: control.makeFile(control.dataPath) dbcon = database.connect(control.cachemetaFile) dbcur = dbcon.cursor() dbcur.execute("SELECT * FROM rel_list WHERE func = '%s' AND args = '%s'" % (f, a)) match = dbcur.fetchone() response = eval(match[2].encode('utf-8')) t1 = int(match[3]) t2 = int(time.time()) update = (abs(t2 - t1) / 3600) >= int(timeout) if update == False: return response except: pass try: r = function(*args) if (r == None or r == []) and not response == None: return response elif (r == None or r == []): return r except: return try: insert = True if r['cover_url'] == '' or r['backdrop_url'] == '': insert = False r = repr(r) t = int(time.time()) dbcur.execute("CREATE TABLE IF NOT EXISTS rel_list (""func TEXT, ""args TEXT, ""response TEXT, ""added TEXT, ""UNIQUE(func, args)"");") dbcur.execute("DELETE FROM rel_list WHERE func = '%s' AND args = '%s'" % (f, a)) if insert == True: dbcur.execute("INSERT INTO 
rel_list Values (?, ?, ?, ?)", (f, a, r, t)) dbcon.commit() except: pass try: return eval(r.encode('utf-8')) except: pass
gpl-2.0
geekboxzone/lollipop_external_chromium_org
build/android/gyp/pack_arm_relocations.py
28
4068
#!/usr/bin/env python # # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Pack ARM relative relocations in a library (or copy unchanged). If --enable-packing and --configuration-name=='Release', invoke the relocation_packer tool to pack the .rel.dyn section in the given library files. This step is inserted after the libraries are stripped. Packing adds a new .android.rel.dyn section to the file and reduces the size of .rel.dyn accordingly. Currently packing only understands ARM32 shared libraries. For all other architectures --enable-packing should be set to zero. In this case the script copies files verbatim, with no attempt to pack relative relocations. Any library listed in --exclude-packing-list is also copied verbatim, irrespective of any --enable-packing setting. Typically this would be 'libchromium_android_linker.so'. """ import optparse import os import shlex import shutil import sys import tempfile from util import build_utils def PackArmLibraryRelocations(android_pack_relocations, android_objcopy, library_path, output_path): if not build_utils.IsTimeStale(output_path, [library_path]): return # Copy and add a 'NULL' .android.rel.dyn section for the packing tool. with tempfile.NamedTemporaryFile() as stream: stream.write('NULL') stream.flush() objcopy_command = [android_objcopy, '--add-section', '.android.rel.dyn=%s' % stream.name, library_path, output_path] build_utils.CheckOutput(objcopy_command) # Pack R_ARM_RELATIVE relocations. 
pack_command = [android_pack_relocations, output_path] build_utils.CheckOutput(pack_command) def CopyArmLibraryUnchanged(library_path, output_path): if not build_utils.IsTimeStale(output_path, [library_path]): return shutil.copy(library_path, output_path) def main(args): args = build_utils.ExpandFileArgs(args) parser = optparse.OptionParser() parser.add_option('--configuration-name', default='Release', help='Gyp configuration name (i.e. Debug, Release)') parser.add_option('--enable-packing', choices=['0', '1'], help=('Pack relocations if 1 and configuration name is \'Release\',' ' otherwise plain file copy')) parser.add_option('--exclude-packing-list', default='', help='Names of any libraries explicitly not packed') parser.add_option('--android-pack-relocations', help='Path to the ARM relocations packer binary') parser.add_option('--android-objcopy', help='Path to the toolchain\'s objcopy binary') parser.add_option('--stripped-libraries-dir', help='Directory for stripped libraries') parser.add_option('--packed-libraries-dir', help='Directory for packed libraries') parser.add_option('--libraries', help='List of libraries') parser.add_option('--stamp', help='Path to touch on success') options, _ = parser.parse_args(args) enable_packing = (options.enable_packing == '1' and options.configuration_name == 'Release') exclude_packing_set = set(shlex.split(options.exclude_packing_list)) libraries = build_utils.ParseGypList(options.libraries) build_utils.MakeDirectory(options.packed_libraries_dir) for library in libraries: library_path = os.path.join(options.stripped_libraries_dir, library) output_path = os.path.join(options.packed_libraries_dir, library) if enable_packing and library not in exclude_packing_set: PackArmLibraryRelocations(options.android_pack_relocations, options.android_objcopy, library_path, output_path) else: CopyArmLibraryUnchanged(library_path, output_path) if options.stamp: build_utils.Touch(options.stamp) return 0 if __name__ == '__main__': 
sys.exit(main(sys.argv[1:]))
bsd-3-clause
eLRuLL/scrapy
scrapy/http/response/text.py
1
9259
""" This module implements the TextResponse class which adds encoding handling and discovering (through HTTP headers) to base Response class. See documentation in docs/topics/request-response.rst """ from contextlib import suppress from typing import Generator from urllib.parse import urljoin import parsel from w3lib.encoding import (html_body_declared_encoding, html_to_unicode, http_content_type_encoding, resolve_encoding) from w3lib.html import strip_html5_whitespace from scrapy.http import Request from scrapy.http.response import Response from scrapy.utils.python import memoizemethod_noargs, to_unicode from scrapy.utils.response import get_base_url class TextResponse(Response): _DEFAULT_ENCODING = 'ascii' def __init__(self, *args, **kwargs): self._encoding = kwargs.pop('encoding', None) self._cached_benc = None self._cached_ubody = None self._cached_selector = None super(TextResponse, self).__init__(*args, **kwargs) def _set_url(self, url): if isinstance(url, str): self._url = to_unicode(url, self.encoding) else: super(TextResponse, self)._set_url(url) def _set_body(self, body): self._body = b'' # used by encoding detection if isinstance(body, str): if self._encoding is None: raise TypeError('Cannot convert unicode body - %s has no encoding' % type(self).__name__) self._body = body.encode(self._encoding) else: super(TextResponse, self)._set_body(body) def replace(self, *args, **kwargs): kwargs.setdefault('encoding', self.encoding) return Response.replace(self, *args, **kwargs) @property def encoding(self): return self._declared_encoding() or self._body_inferred_encoding() def _declared_encoding(self): return self._encoding or self._headers_encoding() \ or self._body_declared_encoding() def body_as_unicode(self): """Return body as unicode""" return self.text @property def text(self): """ Body as unicode """ # access self.encoding before _cached_ubody to make sure # _body_inferred_encoding is called benc = self.encoding if self._cached_ubody is None: charset = 
'charset=%s' % benc self._cached_ubody = html_to_unicode(charset, self.body)[1] return self._cached_ubody def urljoin(self, url): """Join this Response's url with a possible relative url to form an absolute interpretation of the latter.""" return urljoin(get_base_url(self), url) @memoizemethod_noargs def _headers_encoding(self): content_type = self.headers.get(b'Content-Type', b'') return http_content_type_encoding(to_unicode(content_type)) def _body_inferred_encoding(self): if self._cached_benc is None: content_type = to_unicode(self.headers.get(b'Content-Type', b'')) benc, ubody = html_to_unicode(content_type, self.body, auto_detect_fun=self._auto_detect_fun, default_encoding=self._DEFAULT_ENCODING) self._cached_benc = benc self._cached_ubody = ubody return self._cached_benc def _auto_detect_fun(self, text): for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'): try: text.decode(enc) except UnicodeError: continue return resolve_encoding(enc) @memoizemethod_noargs def _body_declared_encoding(self): return html_body_declared_encoding(self.body) @property def selector(self): from scrapy.selector import Selector if self._cached_selector is None: self._cached_selector = Selector(self) return self._cached_selector def xpath(self, query, **kwargs): return self.selector.xpath(query, **kwargs) def css(self, query): return self.selector.css(query) def follow(self, url, callback=None, method='GET', headers=None, body=None, cookies=None, meta=None, encoding=None, priority=0, dont_filter=False, errback=None, cb_kwargs=None, flags=None): # type: (...) -> Request """ Return a :class:`~.Request` instance to follow a link ``url``. It accepts the same arguments as ``Request.__init__`` method, but ``url`` can be not only an absolute URL, but also * a relative URL * a :class:`~scrapy.link.Link` object, e.g. the result of :ref:`topics-link-extractors` * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g. 
``response.css('a.my_link')[0]`` * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g. ``response.css('a::attr(href)')[0]`` or ``response.xpath('//img/@src')[0]`` See :ref:`response-follow-example` for usage examples. """ if isinstance(url, parsel.Selector): url = _url_from_selector(url) elif isinstance(url, parsel.SelectorList): raise ValueError("SelectorList is not supported") encoding = self.encoding if encoding is None else encoding return super(TextResponse, self).follow( url=url, callback=callback, method=method, headers=headers, body=body, cookies=cookies, meta=meta, encoding=encoding, priority=priority, dont_filter=dont_filter, errback=errback, cb_kwargs=cb_kwargs, flags=flags, ) def follow_all(self, urls=None, callback=None, method='GET', headers=None, body=None, cookies=None, meta=None, encoding=None, priority=0, dont_filter=False, errback=None, cb_kwargs=None, flags=None, css=None, xpath=None): # type: (...) -> Generator[Request, None, None] """ A generator that produces :class:`~.Request` instances to follow all links in ``urls``. It accepts the same arguments as the :class:`~.Request`'s ``__init__`` method, except that each ``urls`` element does not need to be an absolute URL, it can be any of the following: * a relative URL * a :class:`~scrapy.link.Link` object, e.g. the result of :ref:`topics-link-extractors` * a :class:`~scrapy.selector.Selector` object for a ``<link>`` or ``<a>`` element, e.g. ``response.css('a.my_link')[0]`` * an attribute :class:`~scrapy.selector.Selector` (not SelectorList), e.g. ``response.css('a::attr(href)')[0]`` or ``response.xpath('//img/@src')[0]`` In addition, ``css`` and ``xpath`` arguments are accepted to perform the link extraction within the ``follow_all`` method (only one of ``urls``, ``css`` and ``xpath`` is accepted). 
Note that when passing a ``SelectorList`` as argument for the ``urls`` parameter or using the ``css`` or ``xpath`` parameters, this method will not produce requests for selectors from which links cannot be obtained (for instance, anchor tags without an ``href`` attribute) """ arg_count = len(list(filter(None, (urls, css, xpath)))) if arg_count != 1: raise ValueError('Please supply exactly one of the following arguments: urls, css, xpath') if not urls: if css: urls = self.css(css) if xpath: urls = self.xpath(xpath) if isinstance(urls, parsel.SelectorList): selectors = urls urls = [] for sel in selectors: with suppress(_InvalidSelector): urls.append(_url_from_selector(sel)) return super(TextResponse, self).follow_all( urls=urls, callback=callback, method=method, headers=headers, body=body, cookies=cookies, meta=meta, encoding=encoding, priority=priority, dont_filter=dont_filter, errback=errback, cb_kwargs=cb_kwargs, flags=flags, ) class _InvalidSelector(ValueError): """ Raised when a URL cannot be obtained from a Selector """ def _url_from_selector(sel): # type: (parsel.Selector) -> str if isinstance(sel.root, str): # e.g. ::attr(href) result return strip_html5_whitespace(sel.root) if not hasattr(sel.root, 'tag'): raise _InvalidSelector("Unsupported selector: %s" % sel) if sel.root.tag not in ('a', 'link'): raise _InvalidSelector("Only <a> and <link> elements are supported; got <%s>" % sel.root.tag) href = sel.root.get('href') if href is None: raise _InvalidSelector("<%s> element has no href attribute: %s" % (sel.root.tag, sel)) return strip_html5_whitespace(href)
bsd-3-clause
dianchen96/gym
gym/envs/mujoco/mujoco_env.py
1
9674
import os from gym import error, spaces from gym.utils import seeding import numpy as np from os import path import gym import six try: import mujoco_py from mujoco_py.mjlib import mjlib except ImportError as e: raise error.DependencyNotInstalled("{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(e)) class MujocoEnv(gym.Env): """Superclass for all MuJoCo environments. """ def __init__(self, model_path, frame_skip): if model_path.startswith("/"): fullpath = model_path else: fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path) if not path.exists(fullpath): raise IOError("File %s does not exist" % fullpath) self.frame_skip = frame_skip self.model = mujoco_py.MjModel(fullpath) self.data = self.model.data self.viewer = None # self.camera2 = None # #import pdb; pdb.set_trace() # self.camera2 = mujoco_py.MjViewer(init_width=500, init_height=500) # self.camera2.start() # self.camera2.set_model(self.model) # self.camera2_setup() self.metadata = { 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': int(np.round(1.0 / self.dt)) } self.init_qpos = self.model.data.qpos.ravel().copy() self.init_qvel = self.model.data.qvel.ravel().copy() observation, _reward, done, _info = self._step(np.zeros(self.model.nu)) assert not done self.obs_dim = observation.size bounds = self.model.actuator_ctrlrange.copy() low = bounds[:, 0] high = bounds[:, 1] self.action_space = spaces.Box(low, high) high = np.inf*np.ones(self.obs_dim) low = -high self.observation_space = spaces.Box(low, high) self._seed() def _seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] # methods to override: # ---------------------------- def reset_model(self): """ Reset the robot degrees of freedom (qpos and qvel). Implement this in each subclass. 
""" raise NotImplementedError def viewer_setup(self): """ This method is called when the viewer is initialized and after every reset Optionally implement this method, if you need to tinker with camera position and so forth. """ pass # ----------------------------- def _reset(self): mjlib.mj_resetData(self.model.ptr, self.data.ptr) ob = self.reset_model() if self.viewer is not None: self.viewer.autoscale() self.viewer_setup() return ob def set_state(self, qpos, qvel): assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,) self.model.data.qpos = qpos self.model.data.qvel = qvel self.model._compute_subtree() # pylint: disable=W0212 # import pdb; pdb.set_trace() self.model.forward() @property def dt(self): return self.model.opt.timestep * self.frame_skip def do_simulation(self, ctrl, n_frames): self.model.data.ctrl = ctrl for _ in range(n_frames): self.model.step() def _render(self, mode='human', close=False): if close: if self.viewer is not None: self._get_viewer().finish() self.viewer = None return if mode == 'rgb_array': self._get_viewer().render() data, width, height = self._get_viewer().get_image() return np.fromstring(data, dtype='uint8').reshape(height, width, 3)[::-1, :, :] elif mode == 'human': self._get_viewer().loop_once() def _get_viewer(self): if self.viewer is None: self.viewer = mujoco_py.MjViewer() self.viewer.start() self.viewer.set_model(self.model) self.viewer_setup() return self.viewer def get_body_com(self, body_name): idx = self.model.body_names.index(six.b(body_name)) return self.model.data.com_subtree[idx] def get_body_comvel(self, body_name): idx = self.model.body_names.index(six.b(body_name)) return self.model.body_comvels[idx] def get_body_xmat(self, body_name): idx = self.model.body_names.index(six.b(body_name)) return self.model.data.xmat[idx].reshape((3, 3)) def state_vector(self): return np.concatenate([ self.model.data.qpos.flat, self.model.data.qvel.flat ]) class MujocoPixelEnv(MujocoEnv): def __init__( self, 
model_path, frame_skip, width=42, height=42, mode="rgb" ): if model_path.startswith("/"): fullpath = model_path else: fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path) if not path.exists(fullpath): raise IOError("File %s does not exist" % fullpath) self.frame_skip = frame_skip self.model = mujoco_py.MjModel(fullpath) self.data = self.model.data self.width = width self.height = height self.mode = mode self.viewer = None self.camera2 = None self.camera2 = mujoco_py.MjViewer(init_width=self.width, init_height=self.height) self.camera2.start() self.camera2.set_model(self.model) self.camera2_setup() self.metadata = { 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': int(np.round(1.0 / self.dt)) } self.init_qpos = self.model.data.qpos.ravel().copy() self.init_qvel = self.model.data.qvel.ravel().copy() observation, _reward, done, _info = self._step(np.zeros(self.model.nu)) assert not done self.obs_dim = observation.size bounds = self.model.actuator_ctrlrange.copy() low = bounds[:, 0] high = bounds[:, 1] self.action_space = spaces.Box(low, high) high = np.inf*np.ones(self.obs_dim) low = -high self.observation_space = spaces.Box(low, high) self._seed() def camera2_setup(self): raise NotImplementedError def _get_obs(self): camera2_output = None self.camera2.render() data, width, height = self.camera2.get_image() camera2_output = np.fromstring(data, dtype='uint8').reshape(height, width, 3)[::-1, :, :] if self.mode == "grey": camera2_output = np.mean(camera2_output, axis=2)[:, :, np.newaxis] return camera2_output class MujocoPixel2CamEnv(MujocoEnv): def __init__( self, model_path, frame_skip, width=42, height=42, mode="rgb" ): if model_path.startswith("/"): fullpath = model_path else: fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path) if not path.exists(fullpath): raise IOError("File %s does not exist" % fullpath) self.frame_skip = frame_skip self.model = mujoco_py.MjModel(fullpath) self.data = self.model.data 
self.width = width self.height = height self.mode = mode self.viewer = None self.camera2 = None self.camera2 = mujoco_py.MjViewer(init_width=self.width, init_height=self.height) self.camera2.start() self.camera2.set_model(self.model) self.camera2_setup() self.camera3 = None self.camera3 = mujoco_py.MjViewer(init_width=self.width, init_height=self.height) self.camera3.start() self.camera3.set_model(self.model) self.camera3_setup() azimuth = self.camera2.cam.azimuth self.camera3.cam.azimuth = azimuth + 180 self.metadata = { 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': int(np.round(1.0 / self.dt)) } self.init_qpos = self.model.data.qpos.ravel().copy() self.init_qvel = self.model.data.qvel.ravel().copy() observation, _reward, done, _info = self._step(np.zeros(self.model.nu)) assert not done self.obs_dim = observation.size bounds = self.model.actuator_ctrlrange.copy() low = bounds[:, 0] high = bounds[:, 1] self.action_space = spaces.Box(low, high) high = np.inf*np.ones(self.obs_dim) low = -high self.observation_space = spaces.Box(low, high) self._seed() def camera2_setup(self): raise NotImplementedError def camera3_setup(self): raise NotImplementedError def _get_obs(self): camera2_output = None self.camera2.render() data, width, height = self.camera2.get_image() camera2_output = np.fromstring(data, dtype='uint8').reshape(height, width, 3)[::-1, :, :] if self.mode == "grey": camera2_output = np.mean(camera2_output, axis=2)[:, :, np.newaxis] camera3_output = None self.camera3.render() data, width, height = self.camera3.get_image() camera3_output = np.fromstring(data, dtype='uint8').reshape(height, width, 3)[::-1, :, :] if self.mode == "grey": camera3_output = np.mean(camera3_output, axis=2)[:, :, np.newaxis] return np.concatenate([camera2_output, camera3_output], axis=2)
mit
GauravSahu/odoo
addons/fetchmail/res_config.py
437
5234
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class fetchmail_config_settings(osv.osv_memory): """ This wizard can be inherited in conjunction with 'res.config.settings', in order to define fields that configure a fetchmail server. It relies on the following convention on the object:: class my_config_settings(osv.osv_memory): _name = 'my.settings' _inherit = ['res.config.settings', 'fetchmail.config.settings'] _columns = { 'fetchmail_stuff': fields.boolean(..., fetchmail_model='my.stuff', fetchmail_name='Incoming Stuff'), } def configure_fetchmail_stuff(self, cr, uid, ids, context=None): return self.configure_fetchmail(cr, uid, 'fetchmail_stuff', context) and in the form view:: <field name="fetchmail_stuff"/> <button type="object" name="configure_fetchmail_stuff"/> The method ``get_default_fetchmail`` determines the value of all fields that start with 'fetchmail_'. It looks up fetchmail server configurations that match the given model name (``fetchmail_model``) and are active. 
The button action ``configure_fetchmail_stuff`` is caught by the object, and calls automatically the method ``configure_fetchmail``; it opens the fetchmail server configuration form for the corresponding field. """ _name = 'fetchmail.config.settings' def get_default_fetchmail(self, cr, uid, fields, context=None): """ determine the value of all fields like 'fetchmail_XXX' """ ir_model = self.pool.get('ir.model') fetchmail_server = self.pool.get('fetchmail.server') fetchmail_fields = [f for f in fields if f.startswith('fetchmail_')] res = {} for f in fetchmail_fields: model_name = self._columns[f].fetchmail_model model_id = ir_model.search(cr, uid, [('model', '=', model_name)])[0] server_ids = fetchmail_server.search(cr, uid, [('object_id', '=', model_id), ('state', '=', 'done')]) res[f] = bool(server_ids) return res def set_fetchmail(self, cr, uid, ids, context=None): """ deactivate fetchmail servers for all fields like 'fetchmail_XXX' that are False """ config = self.browse(cr, uid, ids[0], context) fetchmail_fields = [f for f in self._columns if f.startswith('fetchmail_')] # determine which models should not have active fetchmail servers, and # deactivate all active servers for those models models = [self._columns[f].fetchmail_model for f in fetchmail_fields if not config[f]] if models: fetchmail_server = self.pool.get('fetchmail.server') server_ids = fetchmail_server.search(cr, uid, [('object_id.model', 'in', models), ('state', '=', 'done')]) fetchmail_server.set_draft(cr, uid, server_ids, context) def configure_fetchmail(self, cr, uid, field, context=None): """ open the form view of the fetchmail.server to configure """ action = { 'type': 'ir.actions.act_window', 'res_model': 'fetchmail.server', 'view_mode': 'form', 'target': 'current', } model_name = self._columns[field].fetchmail_model model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', model_name)])[0] server_ids = self.pool.get('fetchmail.server').search(cr, uid, [('object_id', '=', 
model_id)]) if server_ids: action['res_id'] = server_ids[0] else: action['context'] = { 'default_name': self._columns[field].fetchmail_name, 'default_object_id': model_id, } return action def __getattr__(self, name): """ catch calls to 'configure_fetchmail_XXX' """ if name.startswith('configure_fetchmail_'): return (lambda cr, uid, ids, context=None: self.configure_fetchmail(cr, uid, name[10:], context)) return super(fetchmail_config_settings, self).__getattr__(name) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
mrknow/filmkodi
plugin.video.mrknow/mylib/third_party/pep8/lib2to3/lib2to3/fixes/fix_apply.py
315
1904
# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for apply(). This converts apply(func, v, k) into (func)(*v, **k).""" # Local imports from .. import pytree from ..pgen2 import token from .. import fixer_base from ..fixer_util import Call, Comma, parenthesize class FixApply(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< 'apply' trailer< '(' arglist< (not argument<NAME '=' any>) func=any ',' (not argument<NAME '=' any>) args=any [',' (not argument<NAME '=' any>) kwds=any] [','] > ')' > > """ def transform(self, node, results): syms = self.syms assert results func = results["func"] args = results["args"] kwds = results.get("kwds") prefix = node.prefix func = func.clone() if (func.type not in (token.NAME, syms.atom) and (func.type != syms.power or func.children[-2].type == token.DOUBLESTAR)): # Need to parenthesize func = parenthesize(func) func.prefix = "" args = args.clone() args.prefix = "" if kwds is not None: kwds = kwds.clone() kwds.prefix = "" l_newargs = [pytree.Leaf(token.STAR, u"*"), args] if kwds is not None: l_newargs.extend([Comma(), pytree.Leaf(token.DOUBLESTAR, u"**"), kwds]) l_newargs[-2].prefix = u" " # that's the ** token # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t) # can be translated into f(x, y, *t) instead of f(*(x, y) + t) #new = pytree.Node(syms.power, (func, ArgList(l_newargs))) return Call(func, l_newargs, prefix=prefix)
apache-2.0
tthtlc/dpkt
dpkt/llc.py
22
2099
import struct import dpkt, stp, ethernet class LLC(dpkt.Packet): _typesw = {} def _unpack_data(self, buf): if self.type == ethernet.ETH_TYPE_8021Q: self.tag, self.type = struct.unpack('>HH', buf[:4]) buf = buf[4:] elif self.type == ethernet.ETH_TYPE_MPLS or \ self.type == ethernet.ETH_TYPE_MPLS_MCAST: # XXX - skip labels for i in range(24): if struct.unpack('>I', buf[i:i+4])[0] & 0x0100: # MPLS_STACK_BOTTOM break self.type = ethernet.ETH_TYPE_IP buf = buf[(i + 1) * 4:] try: self.data = self._typesw[self.type](buf) setattr(self, self.data.__class__.__name__.lower(), self.data) except (KeyError, dpkt.UnpackError): self.data = buf def unpack(self, buf): self.data = buf if self.data.startswith('\xaa\xaa'): # SNAP self.type = struct.unpack('>H', self.data[6:8])[0] self._unpack_data(self.data[8:]) else: # non-SNAP dsap = ord(self.data[0]) if dsap == 0x06: # SAP_IP self.data = self.ip = self._typesw[ethernet.ETH_TYPE_IP](self.data[3:]) elif dsap == 0x10 or dsap == 0xe0: # SAP_NETWARE{1,2} self.data = self.ipx = self._typesw[ethernet.ETH_TYPE_IPX](self.data[3:]) elif dsap == 0x42: # SAP_STP self.data = self.stp = stp.STP(self.data[3:]) if __name__ == '__main__': import unittest class LLCTestCase(unittest.TestCase): def test_llc(self): s = '\xaa\xaa\x03\x00\x00\x00\x08\x00\x45\x00\x00\x28\x07\x27\x40\x00\x80\x06\x1d\x39\x8d\xd4\x37\x3d\x3f\xf5\xd1\x69\xc0\x5f\x01\xbb\xb2\xd6\xef\x23\x38\x2b\x4f\x08\x50\x10\x42\x04\xac\x17\x00\x00' import ip llc_pkt = LLC(s) ip_pkt = ip.IP(llc_pkt.data) self.failUnless(llc_pkt.type == ethernet.ETH_TYPE_IP) self.failUnless(ip_pkt.dst == '\x3f\xf5\xd1\x69') unittest.main()
bsd-3-clause
Curahelper/Cura
cura/Settings/UserChangesModel.py
4
4859
from UM.Qt.ListModel import ListModel from PyQt5.QtCore import pyqtSlot, Qt from UM.Application import Application from cura.Settings.ExtruderManager import ExtruderManager from UM.Settings.ContainerRegistry import ContainerRegistry from UM.i18n import i18nCatalog from UM.Settings.SettingFunction import SettingFunction from collections import OrderedDict import os class UserChangesModel(ListModel): KeyRole = Qt.UserRole + 1 LabelRole = Qt.UserRole + 2 ExtruderRole = Qt.UserRole + 3 OriginalValueRole = Qt.UserRole + 4 UserValueRole = Qt.UserRole + 6 CategoryRole = Qt.UserRole + 7 def __init__(self, parent = None): super().__init__(parent = parent) self.addRoleName(self.KeyRole, "key") self.addRoleName(self.LabelRole, "label") self.addRoleName(self.ExtruderRole, "extruder") self.addRoleName(self.OriginalValueRole, "original_value") self.addRoleName(self.UserValueRole, "user_value") self.addRoleName(self.CategoryRole, "category") self._i18n_catalog = None self._update() @pyqtSlot() def forceUpdate(self): self._update() def _update(self): item_dict = OrderedDict() item_list = [] global_stack = Application.getInstance().getGlobalContainerStack() if not global_stack: return stacks = ExtruderManager.getInstance().getActiveGlobalAndExtruderStacks() # Check if the definition container has a translation file and ensure it's loaded. definition = global_stack.getBottom() definition_suffix = ContainerRegistry.getMimeTypeForContainer(type(definition)).preferredSuffix catalog = i18nCatalog(os.path.basename(definition.getId() + "." + definition_suffix)) if catalog.hasTranslationLoaded(): self._i18n_catalog = catalog for file_name in definition.getInheritedFiles(): catalog = i18nCatalog(os.path.basename(file_name)) if catalog.hasTranslationLoaded(): self._i18n_catalog = catalog for stack in stacks: # Make a list of all containers in the stack. 
containers = [] latest_stack = stack while latest_stack: containers.extend(latest_stack.getContainers()) latest_stack = latest_stack.getNextStack() # Drop the user container. user_changes = containers.pop(0) for setting_key in user_changes.getAllKeys(): original_value = None # Find the category of the instance by moving up until we find a category. category = user_changes.getInstance(setting_key).definition while category.type != "category": category = category.parent # Handle translation (and fallback if we weren't able to find any translation files. if self._i18n_catalog: category_label = self._i18n_catalog.i18nc(category.key + " label", category.label) else: category_label = category.label if self._i18n_catalog: label = self._i18n_catalog.i18nc(setting_key + " label", stack.getProperty(setting_key, "label")) else: label = stack.getProperty(setting_key, "label") for container in containers: if stack == global_stack: resolve = global_stack.getProperty(setting_key, "resolve") if resolve is not None: original_value = resolve break original_value = container.getProperty(setting_key, "value") # If a value is a function, ensure it's called with the stack it's in. if isinstance(original_value, SettingFunction): original_value = original_value(stack) if original_value is not None: break item_to_add = {"key": setting_key, "label": label, "user_value": str(user_changes.getProperty(setting_key, "value")), "original_value": str(original_value), "extruder": "", "category": category_label} if stack != global_stack: item_to_add["extruder"] = stack.getName() if category_label not in item_dict: item_dict[category_label] = [] item_dict[category_label].append(item_to_add) for each_item_list in item_dict.values(): item_list += each_item_list self.setItems(item_list)
agpl-3.0
N3da/incubator-airflow
airflow/operators/generic_transfer.py
46
2863
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from airflow.models import BaseOperator from airflow.utils.decorators import apply_defaults from airflow.hooks.base_hook import BaseHook class GenericTransfer(BaseOperator): """ Moves data from a connection to another, assuming that they both provide the required methods in their respective hooks. The source hook needs to expose a `get_records` method, and the destination a `insert_rows` method. This is mean to be used on small-ish datasets that fit in memory. 
:param sql: SQL query to execute against the source database :type sql: str :param destination_table: target table :type destination_table: str :param source_conn_id: source connection :type source_conn_id: str :param destination_conn_id: source connection :type destination_conn_id: str :param preoperator: sql statement or list of statements to be executed prior to loading the data :type preoperator: str or list of str """ template_fields = ('sql', 'destination_table', 'preoperator') template_ext = ('.sql', '.hql',) ui_color = '#b0f07c' @apply_defaults def __init__( self, sql, destination_table, source_conn_id, destination_conn_id, preoperator=None, *args, **kwargs): super(GenericTransfer, self).__init__(*args, **kwargs) self.sql = sql self.destination_table = destination_table self.source_conn_id = source_conn_id self.destination_conn_id = destination_conn_id self.preoperator = preoperator def execute(self, context): source_hook = BaseHook.get_hook(self.source_conn_id) logging.info("Extracting data from {}".format(self.source_conn_id)) logging.info("Executing: \n" + self.sql) results = source_hook.get_records(self.sql) destination_hook = BaseHook.get_hook(self.destination_conn_id) if self.preoperator: logging.info("Running preoperator") logging.info(self.preoperator) destination_hook.run(self.preoperator) logging.info("Inserting rows into {}".format(self.destination_conn_id)) destination_hook.insert_rows(table=self.destination_table, rows=results)
apache-2.0
chenlian2015/skia_from_google
tools/skp/page_sets/skia_jsfiddlebigcar_desktop.py
2
1282
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=W0401,W0614 from telemetry.page import page as page_module from telemetry.page import page_set as page_set_module class SkiaBuildbotDesktopPage(page_module.Page): def __init__(self, url, page_set): super(SkiaBuildbotDesktopPage, self).__init__( url=url, page_set=page_set, credentials_path='data/credentials.json') self.user_agent_type = 'desktop' self.archive_data_file = 'data/skia_jsfiddlebigcar_desktop.json' def RunNavigateSteps(self, action_runner): action_runner.NavigateToPage(self) action_runner.Wait(5) class SkiaJsfiddlebigcarDesktopPageSet(page_set_module.PageSet): """ Pages designed to represent the median, not highly optimized web """ def __init__(self): super(SkiaJsfiddlebigcarDesktopPageSet, self).__init__( user_agent_type='desktop', archive_data_file='data/skia_jsfiddlebigcar_desktop.json') urls_list = [ # Why: Page from Chromium's silk test cases 'http://jsfiddle.net/vBQHH/3/embedded/result/', ] for url in urls_list: self.AddPage(SkiaBuildbotDesktopPage(url, self))
bsd-3-clause
michaelb42/ethersex
contrib/transgraph/transform.py
17
5467
import sys import random import re from xml.dom.ext.reader import Sax2 from xml.dom.ext import Print from xml.dom.NodeFilter import NodeFilter scripts = [] deleteNodes = [] def handle_text_node(node): text = node.firstChild.nodeValue.strip() if text.find("%%") != -1: pre = text[0:text.find("%%")] rest = text[text.find("%%"):] last = rest[2:].find("%%") if last == -1: return cmd = rest[2:last+2] post = rest[last+4:] if len(cmd) == 0: return if len(pre): node.setAttribute("pre", pre) if len(post): node.setAttribute("post", post) id = "textNode" + str(random.randint(0,65354)) node.setAttribute("id", id) scripts.append("%s|write %s"%(cmd,id)) node.firstChild.nodeValue = pre + "__" + post if text.find("{{") != -1: myscripts = text.replace("{{", "").split("}}") for i in myscripts: if len(i): scripts.append(i) deleteNodes.append(node) if text.find("##") != -1: ui_element = re.match("^.*##(.*)##.*$", text) if len(ui_element.groups()) == 0: return ui_element = ui_element.groups()[0].strip() ui_element = ui_element.split(":", 1) if len(ui_element) >= 2: handle_ui_element(node,ui_element) deleteNodes.append(node) def handle_ui_element(node, ui): ui_type = ui[0].strip() ui_id = ui[1].strip() if ui_type == "led": r = 0.5 led = doc.createElement("circle") led.setAttribute("cx", str(float(node.getAttribute("x")) + r)) led.setAttribute("cy", str(float(node.getAttribute("y")) - r)) led.setAttribute("r", str(r)) led.setAttribute("fill", "red") led.setAttribute("id", ui_id) node.parentNode.appendChild(led) def generate_script(scripts): txt = '''var vars = new Object(); function ltrim(a) { for(var k = 0; k < a.length && isWhitespace(a.charAt(k)); k++); return a.substr(k, a.length); } function rtrim(a) { for(var j=a.length-1; j>=0 && isWhitespace(a.charAt(j)) ; j--) ; return a.substr(0,j+1); } function trim(a) { return ltrim(rtrim(a)); } function isWhitespace(charToCheck) { var whitespaceChars = " \\t\\n\\r\\f"; return (whitespaceChars.indexOf(charToCheck) != -1); } 
String.prototype.strip = function () { return trim(this); } function pipe(stdin, stdout) { var prog; if (stdout.length == 0) return; if (stdout.indexOf("|") != -1) { prog = stdout.substr(0, stdout.indexOf("|")); stdout = stdout.substr(stdout.indexOf("|")+1).strip(); } else { prog = stdout; stdout = ""; } var func, prog; if (prog.strip().indexOf(" ") != -1) { func = prog.strip().substr(0,prog.indexOf(" ")).strip(); args = prog.strip().substr(prog.indexOf(" ") + 1).strip(); } else { func = prog.strip(); args = ""; } eval(func + "('" + args + "', '" + stdin + "', '" + stdout +"')"); } function echo(args, stdin, stdout) { pipe(args, stdout); } function store(args, stdin, stdout) { vars[args.strip()] = stdin; } function get(args, stdin, stdout) { if (vars[args.strip()] != undefined) pipe(vars[args.strip()], stdout); else pipe("", stdout); } function warn(args, stdin, stdout) {alert(stdin);} function periodic(args, stdin, stdout) { timeout = parseFloat(args); setInterval('pipe("", "'+stdout+'")', timeout * 1000); eval('pipe("", "'+stdout+'")'); } function cut(args, stdin, stdout) { var a = args.split(" "); var input; stdin = stdin.replace(/\s+/g, " "); if (a.length > 1) input = stdin.split(a[1]); else input = stdin.split(" "); if (input.length < parseInt(a[0]) - 1) pipe("", stdout); else pipe(input[parseInt(a[0]) - 1], stdout); } function sub(args, stdin, stdout) { var re = args.replace("^#|#$", "").strip().split('#'); pipe(stdin.replace(re[0], re[1]),stdout); } function ecmd(args, stdin, stdout) { var url = "/ecmd?" 
+ args; var handler = function(request, data) { pipe(request.responseText.strip(), data); } ArrAjax.aufruf(url, handler, "GET", stdout); } function bool_attr(args, stdin, stdout) { args = args.split(" "); if (args.length < 4) return; obj = document.getElementById(args[0]); if (!obj) return; if (parseInt(stdin)) obj.setAttribute(args[1], args[2]); else obj.setAttribute(args[1], args[3]); } function to_bool(args,stdin,stdout) { var a = stdin.strip().toLowerCase(); if (a == "on" || a == "true") pipe("1", stdout); else pipe("0", stdout); } function test (args,stdin,stdout) { if (stdin.strip().length == 0) return; if (eval(stdin + args)) pipe("1", stdout); else pipe("0", stdout); } function write(args, stdin, stdout) { var obj = document.getElementById(args); if (obj) { if (obj.hasAttribute("pre")) stdin = obj.getAttribute("pre") + stdin; if (obj.hasAttribute("post")) stdin = stdin + obj.getAttribute("post"); obj.firstChild.nodeValue = stdin; } } ''' for script in scripts: txt += "pipe('', '%s');\n"%(script) return txt # create Reader object reader = Sax2.Reader() # parse the document doc = reader.fromStream(sys.stdin) walker = doc.createTreeWalker(doc.documentElement, NodeFilter.SHOW_ELEMENT, None, 0) while 1: if walker.currentNode.tagName == "text": handle_text_node(walker.currentNode) next = walker.nextNode() if next is None: break for node in deleteNodes: node.parentNode.removeChild(node) # Add the scripting node scr = doc.createElement("script") scr.setAttribute("xlink:href","scr.js"); doc.documentElement.appendChild(scr) scr = doc.createElement("script") scrtext = doc.createTextNode(generate_script(scripts)) scr.appendChild(scrtext) doc.documentElement.appendChild(scr) Print(doc, sys.stdout)
gpl-3.0
seem-sky/rt-thread
tools/codeblocks.py
53
4627
# # File : codeblocks.py # This file is part of RT-Thread RTOS # COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Change Logs: # Date Author Notes # 2015-01-20 Bernard Add copyright information # import os import sys import string import building import xml.etree.ElementTree as etree from xml.etree.ElementTree import SubElement from utils import _make_path_relative from utils import xml_indent fs_encoding = sys.getfilesystemencoding() def CB_AddHeadFiles(program, elem, project_path): building.source_ext = [] building.source_ext = ["h"] for item in program: building.walk_children(item) building.source_list.sort() # print building.source_list for f in building.source_list: path = _make_path_relative(project_path, f) Unit = SubElement(elem, 'Unit') Unit.set('filename', path.decode(fs_encoding)) def CB_AddCFiles(ProjectFiles, parent, gname, files, project_path): for f in files: fn = f.rfile() name = fn.name path = os.path.dirname(fn.abspath) path = _make_path_relative(project_path, path) path = os.path.join(path, name) Unit = SubElement(parent, 'Unit') Unit.set('filename', path.decode(fs_encoding)) Option = SubElement(Unit, 'Option') Option.set('compilerVar', "CC") def CBProject(target, script, program): project_path = os.path.dirname(os.path.abspath(target)) if 
os.path.isfile('template.cbp'): tree = etree.parse('template.cbp') else: tree = etree.parse(os.path.join(os.path.dirname(__file__), 'template.cbp')) root = tree.getroot() out = file(target, 'wb') out.write('<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>\n') ProjectFiles = [] # SECTION 1. add "*.c|*.h" files group for elem in tree.iter(tag='Project'): # print elem.tag, elem.attrib break # add c files for group in script: group_xml = CB_AddCFiles(ProjectFiles, elem, group['name'], group['src'], project_path) # add h files CB_AddHeadFiles(program, elem, project_path) # SECTION 2. # write head include path if building.Env.has_key('CPPPATH'): cpp_path = building.Env['CPPPATH'] paths = set() for path in cpp_path: inc = _make_path_relative(project_path, os.path.normpath(path)) paths.add(inc) #.replace('\\', '/') paths = [i for i in paths] paths.sort() # write include path, definitions for elem in tree.iter(tag='Compiler'): break for path in paths: Add = SubElement(elem, 'Add') Add.set('directory', path) for macro in building.Env.get('CPPDEFINES', []): Add = SubElement(elem, 'Add') Add.set('option', "-D"+macro) # write link flags ''' # write lib dependence if building.Env.has_key('LIBS'): for elem in tree.iter(tag='Tool'): if elem.attrib['Name'] == 'VCLinkerTool': break libs_with_extention = [i+'.lib' for i in building.Env['LIBS']] libs = ' '.join(libs_with_extention) elem.set('AdditionalDependencies', libs) # write lib include path if building.Env.has_key('LIBPATH'): lib_path = building.Env['LIBPATH'] paths = set() for path in lib_path: inc = _make_path_relative(project_path, os.path.normpath(path)) paths.add(inc) #.replace('\\', '/') paths = [i for i in paths] paths.sort() lib_paths = ';'.join(paths) elem.set('AdditionalLibraryDirectories', lib_paths) ''' xml_indent(root) out.write(etree.tostring(root, encoding='utf-8')) out.close()
gpl-2.0
Djabbz/wakatime
wakatime/packages/pygments_py3/pygments/lexers/installers.py
72
12866
# -*- coding: utf-8 -*- """ pygments.lexers.installers ~~~~~~~~~~~~~~~~~~~~~~~~~~ Lexers for installer/packager DSLs and formats. :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, using, this, default from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Punctuation, Generic, Number, Whitespace __all__ = ['NSISLexer', 'RPMSpecLexer', 'SourcesListLexer', 'DebianControlLexer'] class NSISLexer(RegexLexer): """ For `NSIS <http://nsis.sourceforge.net/>`_ scripts. .. versionadded:: 1.6 """ name = 'NSIS' aliases = ['nsis', 'nsi', 'nsh'] filenames = ['*.nsi', '*.nsh'] mimetypes = ['text/x-nsis'] flags = re.IGNORECASE tokens = { 'root': [ (r'[;#].*\n', Comment), (r"'.*?'", String.Single), (r'"', String.Double, 'str_double'), (r'`', String.Backtick, 'str_backtick'), include('macro'), include('interpol'), include('basic'), (r'\$\{[a-z_|][\w|]*\}', Keyword.Pseudo), (r'/[a-z_]\w*', Name.Attribute), ('.', Text), ], 'basic': [ (r'(\n)(Function)(\s+)([._a-z][.\w]*)\b', bygroups(Text, Keyword, Text, Name.Function)), (r'\b([_a-z]\w*)(::)([a-z][a-z0-9]*)\b', bygroups(Keyword.Namespace, Punctuation, Name.Function)), (r'\b([_a-z]\w*)(:)', bygroups(Name.Label, Punctuation)), (r'(\b[ULS]|\B)([!<>=]?=|\<\>?|\>)\B', Operator), (r'[|+-]', Operator), (r'\\', Punctuation), (r'\b(Abort|Add(?:BrandingImage|Size)|' r'Allow(?:RootDirInstall|SkipFiles)|AutoCloseWindow|' r'BG(?:Font|Gradient)|BrandingText|BringToFront|Call(?:InstDLL)?|' r'(?:Sub)?Caption|ChangeUI|CheckBitmap|ClearErrors|CompletedText|' r'ComponentText|CopyFiles|CRCCheck|' r'Create(?:Directory|Font|Shortcut)|Delete(?:INI(?:Sec|Str)|' r'Reg(?:Key|Value))?|DetailPrint|DetailsButtonText|' r'Dir(?:Show|Text|Var|Verify)|(?:Disabled|Enabled)Bitmap|' r'EnableWindow|EnumReg(?:Key|Value)|Exch|Exec(?:Shell|Wait)?|' r'ExpandEnvStrings|File(?:BufSize|Close|ErrorText|Open|' 
r'Read(?:Byte)?|Seek|Write(?:Byte)?)?|' r'Find(?:Close|First|Next|Window)|FlushINI|Function(?:End)?|' r'Get(?:CurInstType|CurrentAddress|DlgItem|DLLVersion(?:Local)?|' r'ErrorLevel|FileTime(?:Local)?|FullPathName|FunctionAddress|' r'InstDirError|LabelAddress|TempFileName)|' r'Goto|HideWindow|Icon|' r'If(?:Abort|Errors|FileExists|RebootFlag|Silent)|' r'InitPluginsDir|Install(?:ButtonText|Colors|Dir(?:RegKey)?)|' r'Inst(?:ProgressFlags|Type(?:[GS]etText)?)|Int(?:CmpU?|Fmt|Op)|' r'IsWindow|LangString(?:UP)?|' r'License(?:BkColor|Data|ForceSelection|LangString|Text)|' r'LoadLanguageFile|LockWindow|Log(?:Set|Text)|MessageBox|' r'MiscButtonText|Name|Nop|OutFile|(?:Uninst)?Page(?:Ex(?:End)?)?|' r'PluginDir|Pop|Push|Quit|Read(?:(?:Env|INI|Reg)Str|RegDWORD)|' r'Reboot|(?:Un)?RegDLL|Rename|RequestExecutionLevel|ReserveFile|' r'Return|RMDir|SearchPath|Section(?:Divider|End|' r'(?:(?:Get|Set)(?:Flags|InstTypes|Size|Text))|Group(?:End)?|In)?|' r'SendMessage|Set(?:AutoClose|BrandingImage|Compress(?:ionLevel|' r'or(?:DictSize)?)?|CtlColors|CurInstType|DatablockOptimize|' r'DateSave|Details(?:Print|View)|Error(?:s|Level)|FileAttributes|' r'Font|OutPath|Overwrite|PluginUnload|RebootFlag|ShellVarContext|' r'Silent|StaticBkColor)|' r'Show(?:(?:I|Uni)nstDetails|Window)|Silent(?:Un)?Install|Sleep|' r'SpaceTexts|Str(?:CmpS?|Cpy|Len)|SubSection(?:End)?|' r'Uninstall(?:ButtonText|(?:Sub)?Caption|EXEName|Icon|Text)|' r'UninstPage|Var|VI(?:AddVersionKey|ProductVersion)|WindowIcon|' r'Write(?:INIStr|Reg(:?Bin|DWORD|(?:Expand)?Str)|Uninstaller)|' r'XPStyle)\b', Keyword), (r'\b(CUR|END|(?:FILE_ATTRIBUTE_)?' 
r'(?:ARCHIVE|HIDDEN|NORMAL|OFFLINE|READONLY|SYSTEM|TEMPORARY)|' r'HK(CC|CR|CU|DD|LM|PD|U)|' r'HKEY_(?:CLASSES_ROOT|CURRENT_(?:CONFIG|USER)|DYN_DATA|' r'LOCAL_MACHINE|PERFORMANCE_DATA|USERS)|' r'ID(?:ABORT|CANCEL|IGNORE|NO|OK|RETRY|YES)|' r'MB_(?:ABORTRETRYIGNORE|DEFBUTTON[1-4]|' r'ICON(?:EXCLAMATION|INFORMATION|QUESTION|STOP)|' r'OK(?:CANCEL)?|RETRYCANCEL|RIGHT|SETFOREGROUND|TOPMOST|USERICON|' r'YESNO(?:CANCEL)?)|SET|SHCTX|' r'SW_(?:HIDE|SHOW(?:MAXIMIZED|MINIMIZED|NORMAL))|' r'admin|all|auto|both|bottom|bzip2|checkbox|colored|current|false|' r'force|hide|highest|if(?:diff|newer)|lastused|leave|left|' r'listonly|lzma|nevershow|none|normal|off|on|pop|push|' r'radiobuttons|right|show|silent|silentlog|smooth|textonly|top|' r'true|try|user|zlib)\b', Name.Constant), ], 'macro': [ (r'\!(addincludedir(?:dir)?|addplugindir|appendfile|cd|define|' r'delfilefile|echo(?:message)?|else|endif|error|execute|' r'if(?:macro)?n?(?:def)?|include|insertmacro|macro(?:end)?|packhdr|' r'search(?:parse|replace)|system|tempfilesymbol|undef|verbose|' r'warning)\b', Comment.Preproc), ], 'interpol': [ (r'\$(R?[0-9])', Name.Builtin.Pseudo), # registers (r'\$(ADMINTOOLS|APPDATA|CDBURN_AREA|COOKIES|COMMONFILES(?:32|64)|' r'DESKTOP|DOCUMENTS|EXE(?:DIR|FILE|PATH)|FAVORITES|FONTS|HISTORY|' r'HWNDPARENT|INTERNET_CACHE|LOCALAPPDATA|MUSIC|NETHOOD|PICTURES|' r'PLUGINSDIR|PRINTHOOD|PROFILE|PROGRAMFILES(?:32|64)|QUICKLAUNCH|' r'RECENT|RESOURCES(?:_LOCALIZED)?|SENDTO|SM(?:PROGRAMS|STARTUP)|' r'STARTMENU|SYSDIR|TEMP(?:LATES)?|VIDEOS|WINDIR|\{NSISDIR\})', Name.Builtin), (r'\$(CMDLINE|INSTDIR|OUTDIR|LANGUAGE)', Name.Variable.Global), (r'\$[a-z_]\w*', Name.Variable), ], 'str_double': [ (r'"', String, '#pop'), (r'\$(\\[nrt"]|\$)', String.Escape), include('interpol'), (r'.', String.Double), ], 'str_backtick': [ (r'`', String, '#pop'), (r'\$(\\[nrt"]|\$)', String.Escape), include('interpol'), (r'.', String.Double), ], } class RPMSpecLexer(RegexLexer): """ For RPM ``.spec`` files. .. 
versionadded:: 1.6 """ name = 'RPMSpec' aliases = ['spec'] filenames = ['*.spec'] mimetypes = ['text/x-rpm-spec'] _directives = ('(?:package|prep|build|install|clean|check|pre[a-z]*|' 'post[a-z]*|trigger[a-z]*|files)') tokens = { 'root': [ (r'#.*\n', Comment), include('basic'), ], 'description': [ (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text), '#pop'), (r'\n', Text), (r'.', Text), ], 'changelog': [ (r'\*.*\n', Generic.Subheading), (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text), '#pop'), (r'\n', Text), (r'.', Text), ], 'string': [ (r'"', String.Double, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), include('interpol'), (r'.', String.Double), ], 'basic': [ include('macro'), (r'(?i)^(Name|Version|Release|Epoch|Summary|Group|License|Packager|' r'Vendor|Icon|URL|Distribution|Prefix|Patch[0-9]*|Source[0-9]*|' r'Requires\(?[a-z]*\)?|[a-z]+Req|Obsoletes|Suggests|Provides|Conflicts|' r'Build[a-z]+|[a-z]+Arch|Auto[a-z]+)(:)(.*)$', bygroups(Generic.Heading, Punctuation, using(this))), (r'^%description', Name.Decorator, 'description'), (r'^%changelog', Name.Decorator, 'changelog'), (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text)), (r'%(attr|defattr|dir|doc(?:dir)?|setup|config(?:ure)?|' r'make(?:install)|ghost|patch[0-9]+|find_lang|exclude|verify)', Keyword), include('interpol'), (r"'.*?'", String.Single), (r'"', String.Double, 'string'), (r'.', Text), ], 'macro': [ (r'%define.*\n', Comment.Preproc), (r'%\{\!\?.*%define.*\}', Comment.Preproc), (r'(%(?:if(?:n?arch)?|else(?:if)?|endif))(.*)$', bygroups(Comment.Preproc, Text)), ], 'interpol': [ (r'%\{?__[a-z_]+\}?', Name.Function), (r'%\{?_([a-z_]+dir|[a-z_]+path|prefix)\}?', Keyword.Pseudo), (r'%\{\?\w+\}', Name.Variable), (r'\$\{?RPM_[A-Z0-9_]+\}?', Name.Variable.Global), (r'%\{[a-zA-Z]\w+\}', Keyword.Constant), ] } class SourcesListLexer(RegexLexer): """ Lexer that highlights debian sources.list files. .. 
versionadded:: 0.7 """ name = 'Debian Sourcelist' aliases = ['sourceslist', 'sources.list', 'debsources'] filenames = ['sources.list'] mimetype = ['application/x-debian-sourceslist'] tokens = { 'root': [ (r'\s+', Text), (r'#.*?$', Comment), (r'^(deb(?:-src)?)(\s+)', bygroups(Keyword, Text), 'distribution') ], 'distribution': [ (r'#.*?$', Comment, '#pop'), (r'\$\(ARCH\)', Name.Variable), (r'[^\s$[]+', String), (r'\[', String.Other, 'escaped-distribution'), (r'\$', String), (r'\s+', Text, 'components') ], 'escaped-distribution': [ (r'\]', String.Other, '#pop'), (r'\$\(ARCH\)', Name.Variable), (r'[^\]$]+', String.Other), (r'\$', String.Other) ], 'components': [ (r'#.*?$', Comment, '#pop:2'), (r'$', Text, '#pop:2'), (r'\s+', Text), (r'\S+', Keyword.Pseudo), ] } def analyse_text(text): for line in text.splitlines(): line = line.strip() if line.startswith('deb ') or line.startswith('deb-src '): return True class DebianControlLexer(RegexLexer): """ Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs. .. 
versionadded:: 0.9 """ name = 'Debian Control file' aliases = ['control', 'debcontrol'] filenames = ['control'] tokens = { 'root': [ (r'^(Description)', Keyword, 'description'), (r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'), (r'^((Build-)?Depends)', Keyword, 'depends'), (r'^((?:Python-)?Version)(:\s*)(\S+)$', bygroups(Keyword, Text, Number)), (r'^((?:Installed-)?Size)(:\s*)(\S+)$', bygroups(Keyword, Text, Number)), (r'^(MD5Sum|SHA1|SHA256)(:\s*)(\S+)$', bygroups(Keyword, Text, Number)), (r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$', bygroups(Keyword, Whitespace, String)), ], 'maintainer': [ (r'<[^>]+>', Generic.Strong), (r'<[^>]+>$', Generic.Strong, '#pop'), (r',\n?', Text), (r'.', Text), ], 'description': [ (r'(.*)(Homepage)(: )(\S+)', bygroups(Text, String, Name, Name.Class)), (r':.*\n', Generic.Strong), (r' .*\n', Text), default('#pop'), ], 'depends': [ (r':\s*', Text), (r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)), (r'\(', Text, 'depend_vers'), (r',', Text), (r'\|', Operator), (r'[\s]+', Text), (r'[})]\s*$', Text, '#pop'), (r'\}', Text), (r'[^,]$', Name.Function, '#pop'), (r'([+.a-zA-Z0-9-])(\s*)', bygroups(Name.Function, Text)), (r'\[.*?\]', Name.Entity), ], 'depend_vers': [ (r'\),', Text, '#pop'), (r'\)[^,]', Text, '#pop:2'), (r'([><=]+)(\s*)([^)]+)', bygroups(Operator, Text, Number)) ] }
bsd-3-clause
soma0sd/pyNuc
ensdf/dbgen.py
1
2763
# -*- coding: utf-8 -*- """Inner Module Import""" from ensdf.genlib import files from ensdf.genlib import regexp """Python Packages""" import pickle def get_card(ident=''): data = [] file_list = files.get_all_files() prog = lambda i: (i+1)*100/len(file_list) for ix, f in enumerate(file_list): card = [] for l in f.readlines(): l = l.replace('\n', '') if l.strip() == '': if ident in card[0]: data.append(card) card = [] else: card.append(l) print("\rGet Card... [{:6.2f}%]".format(prog(ix)), end='') print() return data uq = [] def get_ground_level(): global uq card = get_card("ADOPTED LEVELS") prog = lambda i: (i+1)*100/len(card) data = {} for ic, c in enumerate(card): for ixl, l1 in enumerate(c): lv = regexp.re_level_rec(l1) if lv: key = regexp.nucid2nucpy(lv['NUCID']) if key in data.keys(): break data[key] = {} data[key]['E'] = lv['E'] data[key]['J'] = lv['J'] data[key]['T'] = lv['T'] data[key]['MODE'] = [] mods = '' for l2 in c[ixl+1:]: de = regexp.re_level_decay(l2) if regexp.re_level_rec(l2): break elif de: mods += de mode = regexp.mode_parsing(mods, key) data[key]['MODE'] = mode print("\rGet Ground level...[{:6.2f}%]".format(prog(ic)), end='') print() return data def get_nist(): import re data = {} iso = [] card = [] re_C = re.compile('^[_]+$') re_i = re.compile('^(.{3}) (.{3}) (.{3}) (.{1,18})[ ]*(.{0,13})') re_f = re.compile('[\d\.]+') f = files.get_nist_file() for l in f.readlines()[3:]: l = l.replace('\n', '') if re_C.match(l): iso.append(card) card = [] else: card.append(l) for c in iso: m1 = re_i.match(c[0]) main = m1.groups() Z = int(main[0]) symbol = main[1].strip() mass = float(re_f.match(main[3]).group(0)) if re_f.match(main[4]): na = float(re_f.match(main[4]).group(0)) else: na = 0.0 code = "{:03d}{:03d}".format(Z, int(main[2])) data[code] = {'SYM': symbol, 'M': mass, 'IS': na} for cs in c[1:]: m2 = re_i.match(cs) sub = m2.groups() mass = float(re_f.match(sub[3]).group(0)) if re_f.match(sub[4]): na = float(re_f.match(sub[4]).group(0)) else: na = 0.0 
code = "{:03d}{:03d}".format(Z, int(sub[2])) data[code] = {'SYM': symbol, 'M': mass, 'IS': na} data['000001'] = {'SYM': 'n', 'M': 1.008664916, 'IS': 0.0} return data data = 0 data = get_ground_level() nist = get_nist() f = open('nucinfo.pkl', 'wb') pickle.dump(data, f) f = open('nist.pkl', 'wb') pickle.dump(nist, f)
mit
dagnello/ansible-modules-core
cloud/openstack/os_ironic_node.py
131
12309
#!/usr/bin/python # coding: utf-8 -*- # (c) 2015, Hewlett-Packard Development Company, L.P. # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. try: import shade HAS_SHADE = True except ImportError: HAS_SHADE = False DOCUMENTATION = ''' --- module: os_ironic_node short_description: Activate/Deactivate Bare Metal Resources from OpenStack author: "Monty Taylor (@emonty)" extends_documentation_fragment: openstack version_added: "2.0" description: - Deploy to nodes controlled by Ironic. options: state: description: - Indicates desired state of the resource choices: ['present', 'absent'] default: present deploy: description: - Indicates if the resource should be deployed. Allows for deployment logic to be disengaged and control of the node power or maintenance state to be changed. choices: ['true', 'false'] default: true uuid: description: - globally unique identifier (UUID) to be given to the resource. required: false default: None ironic_url: description: - If noauth mode is utilized, this is required to be set to the endpoint URL for the Ironic API. Use with "auth" and "auth_type" settings set to None. required: false default: None config_drive: description: - A configdrive file or HTTP(S) URL that will be passed along to the node. required: false default: None instance_info: description: - Definition of the instance information which is used to deploy the node. 
This information is only required when an instance is set to present. suboptions: image_source: description: - An HTTP(S) URL where the image can be retrieved from. image_checksum: description: - The checksum of image_source. image_disk_format: description: - The type of image that has been requested to be deployed. power: description: - A setting to allow power state to be asserted allowing nodes that are not yet deployed to be powered on, and nodes that are deployed to be powered off. choices: ['present', 'absent'] default: present maintenance: description: - A setting to allow the direct control if a node is in maintenance mode. required: false default: false maintenance_reason: description: - A string expression regarding the reason a node is in a maintenance mode. required: false default: None ''' EXAMPLES = ''' # Activate a node by booting an image with a configdrive attached os_ironic_node: cloud: "openstack" uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69" state: present power: present deploy: True maintenance: False config_drive: "http://192.168.1.1/host-configdrive.iso" instance_info: image_source: "http://192.168.1.1/deploy_image.img" image_checksum: "356a6b55ecc511a20c33c946c4e678af" image_disk_format: "qcow" delegate_to: localhost ''' def _choose_id_value(module): if module.params['uuid']: return module.params['uuid'] if module.params['name']: return module.params['name'] return None # TODO(TheJulia): Change this over to use the machine patch method # in shade once it is available. 
def _prepare_instance_info_patch(instance_info): patch = [] patch.append({ 'op': 'replace', 'path': '/instance_info', 'value': instance_info }) return patch def _is_true(value): true_values = [True, 'yes', 'Yes', 'True', 'true', 'present', 'on'] if value in true_values: return True return False def _is_false(value): false_values = [False, None, 'no', 'No', 'False', 'false', 'absent', 'off'] if value in false_values: return True return False def _check_set_maintenance(module, cloud, node): if _is_true(module.params['maintenance']): if _is_false(node['maintenance']): cloud.set_machine_maintenance_state( node['uuid'], True, reason=module.params['maintenance_reason']) module.exit_json(changed=True, msg="Node has been set into " "maintenance mode") else: # User has requested maintenance state, node is already in the # desired state, checking to see if the reason has changed. if (str(node['maintenance_reason']) not in str(module.params['maintenance_reason'])): cloud.set_machine_maintenance_state( node['uuid'], True, reason=module.params['maintenance_reason']) module.exit_json(changed=True, msg="Node maintenance reason " "updated, cannot take any " "additional action.") elif _is_false(module.params['maintenance']): if node['maintenance'] is True: cloud.remove_machine_from_maintenance(node['uuid']) return True else: module.fail_json(msg="maintenance parameter was set but a valid " "the value was not recognized.") return False def _check_set_power_state(module, cloud, node): if 'power on' in str(node['power_state']): if _is_false(module.params['power']): # User has requested the node be powered off. 
cloud.set_machine_power_off(node['uuid']) module.exit_json(changed=True, msg="Power requested off") if 'power off' in str(node['power_state']): if (_is_false(module.params['power']) and _is_false(module.params['state'])): return False if (_is_false(module.params['power']) and _is_false(module.params['state'])): module.exit_json( changed=False, msg="Power for node is %s, node must be reactivated " "OR set to state absent" ) # In the event the power has been toggled on and # deployment has been requested, we need to skip this # step. if (_is_true(module.params['power']) and _is_false(module.params['deploy'])): # Node is powered down when it is not awaiting to be provisioned cloud.set_machine_power_on(node['uuid']) return True # Default False if no action has been taken. return False def main(): argument_spec = openstack_full_argument_spec( uuid=dict(required=False), name=dict(required=False), instance_info=dict(type='dict', required=False), config_drive=dict(required=False), ironic_url=dict(required=False), state=dict(required=False, default='present'), maintenance=dict(required=False), maintenance_reason=dict(required=False), power=dict(required=False, default='present'), deploy=dict(required=False, default=True), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') if (module.params['auth_type'] in [None, 'None'] and module.params['ironic_url'] is None): module.fail_json(msg="Authentication appears disabled, Please " "define an ironic_url parameter") if (module.params['ironic_url'] and module.params['auth_type'] in [None, 'None']): module.params['auth'] = dict( endpoint=module.params['ironic_url'] ) node_id = _choose_id_value(module) if not node_id: module.fail_json(msg="A uuid or name value must be defined " "to use this module.") try: cloud = shade.operator_cloud(**module.params) node = cloud.get_machine(node_id) if node is None: 
module.fail_json(msg="node not found") uuid = node['uuid'] instance_info = module.params['instance_info'] changed = False # User has reqeusted desired state to be in maintenance state. if module.params['state'] is 'maintenance': module.params['maintenance'] = True if node['provision_state'] in [ 'cleaning', 'deleting', 'wait call-back']: module.fail_json(msg="Node is in %s state, cannot act upon the " "request as the node is in a transition " "state" % node['provision_state']) # TODO(TheJulia) This is in-development code, that requires # code in the shade library that is still in development. if _check_set_maintenance(module, cloud, node): if node['provision_state'] in 'active': module.exit_json(changed=True, result="Maintenance state changed") changed = True node = cloud.get_machine(node_id) if _check_set_power_state(module, cloud, node): changed = True node = cloud.get_machine(node_id) if _is_true(module.params['state']): if _is_false(module.params['deploy']): module.exit_json( changed=changed, result="User request has explicitly disabled " "deployment logic" ) if 'active' in node['provision_state']: module.exit_json( changed=changed, result="Node already in an active state." ) if instance_info is None: module.fail_json( changed=changed, msg="When setting an instance to present, " "instance_info is a required variable.") # TODO(TheJulia): Update instance info, however info is # deployment specific. Perhaps consider adding rebuild # support, although there is a known desire to remove # rebuild support from Ironic at some point in the future. patch = _prepare_instance_info_patch(instance_info) cloud.set_node_instance_info(uuid, patch) cloud.validate_node(uuid) cloud.activate_node(uuid, module.params['config_drive']) # TODO(TheJulia): Add more error checking and a wait option. # We will need to loop, or just add the logic to shade, # although this could be a very long running process as # baremetal deployments are not a "quick" task. 
module.exit_json(changed=changed, result="node activated") elif _is_false(module.params['state']): if node['provision_state'] not in "deleted": cloud.purge_node_instance_info(uuid) cloud.deactivate_node(uuid) module.exit_json(changed=True, result="deleted") else: module.exit_json(changed=False, result="node not found") else: module.fail_json(msg="State must be present, absent, " "maintenance, off") except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * main()
gpl-3.0
seaotterman/tensorflow
tensorflow/contrib/distributions/python/ops/bijectors/sigmoid_centered_impl.py
104
1375
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SigmoidCentered bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.distributions.python.ops.bijectors import softmax_centered __all__ = [ "SigmoidCentered", ] class SigmoidCentered(softmax_centered.SoftmaxCentered): """Bijector which computes Y = g(X) = exp([X 0]) / (1 + exp(-X)). Equivalent to: `bijector.SoftmaxCentered(event_ndims=0)`. See `bijector.SoftmaxCentered` for more details. """ def __init__(self, validate_args=False, name="sigmoid_centered"): super(SigmoidCentered, self).__init__( event_ndims=0, validate_args=validate_args, name=name)
apache-2.0
jbloom/epitopefinder
scripts/epitopefinder_plotdistributioncomparison.py
1
3447
#!python """Script for plotting distributions of epitopes per site for two sets of sites. Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py. Written by Jesse Bloom.""" import os import sys import random import epitopefinder.io import epitopefinder.plot def main(): """Main body of script.""" random.seed(1) # seed random number generator in case P values are being computed if not epitopefinder.plot.PylabAvailable(): raise ImportError("Cannot import matplotlib / pylab, which are required by this script.") # output is written to out, currently set to standard out out = sys.stdout out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n") # read input file and parse arguments args = sys.argv[1 : ] if len(args) != 1: raise IOError("Script must be called with exactly one argument specifying the input file") infilename = sys.argv[1] if not os.path.isfile(infilename): raise IOError("Failed to find infile %s" % infilename) d = epitopefinder.io.ParseInfile(open(infilename)) out.write("\nRead input arguments from %s\n" % infilename) out.write('Read the following key / value pairs:\n') for (key, value) in d.iteritems(): out.write("%s %s\n" % (key, value)) plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip() epitopesbysite1_list = [] epitopesbysite2_list = [] for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]: epitopesfile = epitopefinder.io.ParseFileList(d, xf) if len(epitopesfile) != 1: raise ValueError("%s specifies more than one file" % xf) epitopesfile = epitopesfile[0] for line in open(epitopesfile).readlines()[1 : ]: if not (line.isspace() or line[0] == '#'): (site, n) = line.split(',') (site, n) = (int(site), int(n)) xlist.append(n) if not xlist: raise ValueError("%s failed to specify information for any sites" % xf) set1name = epitopefinder.io.ParseStringValue(d, 'set1name') set2name = epitopefinder.io.ParseStringValue(d, 'set2name') title = 
epitopefinder.io.ParseStringValue(d, 'title').strip() if title.upper() in ['NONE', 'FALSE']: title = None pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue') if pvalue.upper() in ['NONE', 'FALSE']: pvalue = None pvaluewithreplacement = None else: pvalue = int(pvalue) pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement') if pvalue < 1: raise ValueError("pvalue must be >= 1") if len(epitopesbysite2_list) >= len(epitopesbysite1_list): raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.") ymax = None if 'ymax' in d: ymax = epitopefinder.io.ParseFloatValue(d, 'ymax') out.write('\nNow creating the plot file %s\n' % plotfile) epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax) out.write("\nScript is complete.\n") if __name__ == '__main__': main() # run the script
gpl-3.0
dazult/EPA-2012-Residential-Exposure-SOPs
sop_calcs/forms.py
1
76603
from __future__ import absolute_import import copy import datetime from itertools import chain from urlparse import urljoin from django.conf import settings from django.forms.util import flatatt, to_current_timezone from django.utils.datastructures import MultiValueDict, MergeDict from django.utils.html import escape, conditional_escape from django.utils.translation import ugettext, ugettext_lazy from django.utils.encoding import StrAndUnicode, force_unicode from django.utils.safestring import mark_safe from django.utils import datetime_safe, formats from django import forms import json from collections import defaultdict import operator class CheckboxSelectMultipleBootstrap(forms.SelectMultiple): def __init__(self,attrs=None, choices=()): super(CheckboxSelectMultipleBootstrap, self).__init__(attrs, choices) self.choices_attrs = {} def render(self, name, value, attrs=None, choices=()): if value is None: value = [] has_id = attrs and 'id' in attrs final_attrs = self.build_attrs(attrs, name=name) output = [u'<div>'] # Normalize to strings str_values = set([force_unicode(v) for v in value]) for i, (option_value, option_label) in enumerate(chain(self.choices, choices)): # If an ID attribute was given, add a numeric index as a suffix, # so that the checkboxes don't all have the same ID attribute. 
if has_id: final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i)) label_for = u' for="%s"' % final_attrs['id'] else: label_for = '' choice_attrs = copy.copy(final_attrs) if option_value in self.choices_attrs: choice_attrs.update(self.choices_attrs[option_value]) cb = forms.CheckboxInput(choice_attrs, check_test=lambda value: value in str_values) option_value = force_unicode(option_value) rendered_cb = cb.render(name, option_value) option_label = conditional_escape(force_unicode(option_label)) output.append(u'<div><label%s class="checkbox inline">%s %s</label></div>' % (label_for, rendered_cb, option_label)) output.append(u'</div>') return mark_safe(u'\n'.join(output)) def id_for_label(self, id_): # See the comment for RadioSelect.id_for_label() if id_: id_ += '_0' return id_ class RadioFieldBootstrapRenderer(forms.widgets.RadioSelect.renderer): def render(self): """ Outputs a <ul> for this set of choice fields. If an id was given to the field, it is applied to the <ul> (each item in the list will get an id of `$id_$i`). 
""" id_ = self.attrs.get('id', None) start_tag = '<div id="%s" class="radio inline">'% id_ if id_ else '<div>' output = [start_tag] for widget in self: output.append(force_unicode(widget)) output.append('</div>') return mark_safe('\n'.join(output)) class RadioSelectBootstrap(forms.widgets.RadioSelect): renderer = RadioFieldBootstrapRenderer from sop_calcs.gardensandtrees import gardensandtrees from sop_calcs.treated_pets import treated_pets from sop_calcs.insect_repellent import insect_repellent from sop_calcs.lawnturfsop import lawnturfsop from sop_calcs.general_handler_sop import general_handler_sop from sop_calcs.paintsop import paintsop from sop_calcs.impregnated_materials import impregnated_materials from sop_calcs.outdoor_misting import outdoor_misting, outdoor_misting_handler from sop_calcs.indoor_envirnoments import indoor from sop_calcs.exposure_profile import RiskProfile class ResultsForm(forms.Form): title = "Assessment Background Information" def __init__(self,*args,**kwargs): self.input_data = kwargs.pop('_input_data',None) super(ResultsForm,self).__init__(*args,**kwargs) def inputs(self): return self.input_data def lifestage_displays(self): lifestages = {} lifestages['adult'] = "Adult (All)" lifestages['adult_general'] = "Adult (All)" lifestages['adult_female'] = "Adult Female" lifestages['adult_male'] = "Adult Male" lifestages['1_to_2'] = "1 < 2 year old" lifestages['3_to_6'] = "3 < 6 year old" lifestages['6_to_11'] = "6 < 11 year old" lifestages['11_to_16'] = "11 < 16 year old" return lifestages def results(self): try: s = json.loads(self.input_data) ss = "" RIs = defaultdict(lambda : defaultdict(list)) exposure_routes = set(s['0']['exposure_routes']) exposure_scenarios = set(s['0']['exposure_scenarios']) body_weights_adults_options = [80., 69., 86.] # kg bodyweight = {} bodyweight['adult'] = 80.#body_weights_adults_options[0] bodyweight['adult_general'] = 80. bodyweight['adult_female'] = 69. bodyweight['adult_male'] = 86. 
pop_selection = "gen" amended_RIs = {} for duration in s['0']['exposure_durations']: amended_RIs[duration] = {} for target in s['0']['target_population']: if target == 'adult_female': pop_selection = "adult_female" bodyweight['adult'] = bodyweight['adult_female'] elif target == 'adult_male': pop_selection = "adult_male" bodyweight['adult'] = bodyweight['adult_male'] else: pop_selection = "gen" bodyweight['adult'] = bodyweight['adult_general'] bodyweight['1_to_2'] = 11. bodyweight['3_to_6'] = 19. bodyweight['6_to_11'] = 32. bodyweight['11_to_16'] = 57. inhalation_rate = {} inhalation_rate['adult'] = 0.64 inhalation_rate['1_to_2'] = 0.33 inhalation_rate['3_to_6'] = 0.42 SA_BW_ratio = {'1_to_2':640., 'adult':280.} risk_profile = RiskProfile(exposure_routes) for duration in s['0']['exposure_durations']: ss += "<br>%s<br>" % duration POD = {} LOC = {} absorption = {} POD['dermal'] = s['1']['dermal_%s_%s_POD'%(duration,target)] LOC['dermal'] = s['1']['dermal_%s_%s_LOC'%(duration,target)] try: POD['dermal'] = s['1']['dermal_%s_%s_POD'%(duration,target)] LOC['dermal'] = s['1']['dermal_%s_%s_LOC'%(duration,target)] absorption['dermal'] = s['1']['dermal_absorption'] except: absorption['dermal'] = 1 try: POD['inhalation'] = s['1']['inhalation_%s_%s_POD'%(duration,target)] LOC['inhalation'] = s['1']['inhalation_%s_%s_LOC'%(duration,target)] absorption['inhalation'] = s['1']['inhalation_absorption'] except: absorption['inhalation'] = 1 try: POD['oral'] = s['1']['oral_%s_%s_POD'%(duration,target)] LOC['oral'] = s['1']['oral_%s_%s_LOC'%(duration,target)] except: pass try: POD['dietary'] = s['1']['dietary_POD'] LOC['dietary'] = s['1']['dietary_LOC'] except: pass if s['3'] != None and 'generalhandler' in exposure_scenarios: #generalhandler SOP = "General Handler" combining_dermal_inhalation = [] #application_rate[formulation][scenario][application_method][application_type] application_rate = defaultdict(lambda : defaultdict(lambda : defaultdict(dict))) for formulation in 
GeneralHandlerForm.application_rate_form_map: for scenario in GeneralHandlerForm.application_rate_form_map[formulation]: for application_method in GeneralHandlerForm.application_rate_form_map[formulation][scenario]: for application_type in GeneralHandlerForm.application_rate_form_map[formulation][scenario][application_method]: if GeneralHandlerForm.application_rate_form_map[formulation][scenario][application_method][application_type] in s['3']: application_rate[formulation][scenario][application_method][application_type] = s['3'][GeneralHandlerForm.application_rate_form_map[formulation][scenario][application_method][application_type]] else: application_rate[formulation][scenario][application_method][application_type] = 0 results = general_handler_sop(POD, LOC, bodyweight, absorption, application_rate) risk_profile.update(results, SOP, duration) if s['4'] != None and 'generalhandler' in exposure_scenarios and 'generalhandler' in exposure_scenarios: #misting - handler SOP = "General Handler" OASS_fraction_ai = s['4']['OASS_fraction_ai'] OASS_amount_of_product_in_can = s['4']['OASS_amount_of_product_in_can'] ORMS_drum_size = s['4']['ORMS_drum_size'] ORMS_dilution_rate = s['4']['ORMS_dilution_rate'] ORMS_fraction_ai = s['4']['ORMS_fraction_ai'] AB_drum_size = s['4']['AB_drum_size'] AB_dilution_rate = s['4']['AB_dilution_rate'] AB_fraction_ai = s['4']['AB_fraction_ai'] results = outdoor_misting_handler(POD, LOC, bodyweight, absorption['dermal'], absorption['inhalation'], OASS_fraction_ai, OASS_amount_of_product_in_can, ORMS_drum_size, ORMS_dilution_rate, ORMS_fraction_ai, AB_drum_size, AB_dilution_rate, AB_fraction_ai) risk_profile.update(results, SOP, duration) if s['5'] != None and 'treatedpet' in exposure_scenarios: #treatedpet SOP = "Treated Pets" ai_amounts = {} amount_applied_form_map = TreatedPetForm.amount_applied_form_map for animal in ['cat','dog']: ai_amounts[animal] = {} for size in ['small','medium','large']: ai_amounts[animal][size] = 
s['5'][TreatedPetForm.amount_applied_form_map[animal][size]]*s['5']['fraction_ai']*1000. results = treated_pets(POD, LOC, bodyweight, absorption['dermal'], ai_amounts) risk_profile.update(results, SOP, duration) if s['6'] != None and 'lawn' in exposure_scenarios: #lawn SOP = "Lawns and Turf" fraction_active_ingredient = s['6']['fraction_ai_in_pellets'] ttr = {'liquid':s['6']['liquid_ttr_conc'], 'solid':s['6']['solid_ttr_conc']} application_rate = {'liquid':s['6']['liquid_application_rate'],'solid':s['6']['solid_application_rate']} # lb ai / acre results = lawnturfsop(POD, LOC, bodyweight, absorption['dermal'], application_rate, ttr, fraction_active_ingredient) risk_profile.update(results, SOP, duration) if s['7'] != None and 'garden' in exposure_scenarios: #gardensandtrees SOP = "Gardens and Trees" dfr = {'liquid':s['7']['liquid_dfr_conc'], 'solid':s['7']['solid_dfr_conc']} application_rate = {'liquid':s['7']['liquid_application_rate'],'solid':s['7']['solid_application_rate']} # lb ai / acre results = gardensandtrees(POD, LOC, bodyweight, absorption['dermal'], application_rate, dfr) #return "Here1" risk_profile.update(results, SOP, duration) #return exposure_scenarios if s['8'] != None and 'insect' in exposure_scenarios: #insect SOP = "Insect Repellents" amount_ai = defaultdict(lambda : defaultdict(dict)) for sunscreen_status in ['without','with']: for formulation in InsectRepellentsForm.formulations: amount_ai[sunscreen_status][formulation] = s['8'][InsectRepellentsForm.amount_ai_formulations_form_map[sunscreen_status][formulation]] results = insect_repellent(POD, LOC, bodyweight, absorption['dermal'], SA_BW_ratio, amount_ai ) risk_profile.update(results, SOP, duration) if s['9'] != None and 'paint' in exposure_scenarios: #paint SOP = "Paint and Preservatives" surface_residue_concentration = s['9']['surface_residue_concentration'] fraction_of_body_exposed = PaintsAndPreservativesForm.DEFAULT_FRACTION_OF_BODY_EXPOSED#s['9']['fraction_of_body_exposed'] 
daily_material_to_skin_transfer_efficency = PaintsAndPreservativesForm.DEFAULT_DAILY_MATERIAL_TO_SKIN_TRANSFER_EFFICENCY#s['9']['daily_material_to_skin_transfer_efficency'] exposure_time = PaintsAndPreservativesForm.EXPOSURE_TIME[s['9']['indoor_or_outdoor']]#s['9']['exposure_time'] hand_to_mouth_event_freqency = PaintsAndPreservativesForm.HAND_TO_MOUTH_EVENTS_PER_HOUR[s['9']['indoor_or_outdoor']]#s['9']['hand_to_mouth_event_frequency'] results = paintsop(POD, LOC, bodyweight, absorption['dermal'], SA_BW_ratio, surface_residue_concentration, fraction_of_body_exposed, daily_material_to_skin_transfer_efficency, exposure_time, hand_to_mouth_event_freqency ) risk_profile.update(results, SOP, duration) if s['10'] != None and 'impregnated_materials' in exposure_scenarios: #impregnated_materials SOP = "Impregnated Materials" surface_residue_concentration = s['10']['surface_residue_concentration'] weight_fraction = s['10']['weight_fraction_of_active_ingredient'] material_type = s['10']['material_type'] if surface_residue_concentration is None or surface_residue_concentration == 0: surface_residue_concentration = weight_fraction*ImpregnatedMaterialsForm.MATERIAL_WEIGHT_TO_SURFACE_AREA_DENSITY[material_type] body_fraction_exposed_type = s['10']['body_fraction_exposed_type'] fraction_of_body_exposed = ImpregnatedMaterialsForm.BODY_FRACTION_EXPOSED[body_fraction_exposed_type]#s['10']['fraction_of_body_exposed'] protective_barrier_present = s['10']['protective_barrier_present'] protection_factor = ImpregnatedMaterialsForm.PROTECTION_FACTOR[protective_barrier_present] #HtM type_of_flooring = s['10']['type_of_flooring'] fraction_of_ai_transferred_to_hands = ImpregnatedMaterialsForm.FRACTION_AI_HAND_TRANSFER[type_of_flooring] hand_exposure_time = ImpregnatedMaterialsForm.FLOOR_EXPOSURE_TIME[type_of_flooring] daily_material_to_skin_transfer_efficency = ImpregnatedMaterialsForm.FRACTION_AI_HAND_TRANSFER[type_of_flooring] #daily_material_to_skin_transfer_efficency = 
ImpregnatedMaterialsForm.DEFAULT_DAILY_MATERIAL_TO_SKIN_TRANSFER_EFFICENCY indoor_or_outdoor = s['10']['indoor_or_outdoor'] object_exposure_time = ImpregnatedMaterialsForm.EXPOSURE_TIME[indoor_or_outdoor] hand_to_mouth_event_freqency = ImpregnatedMaterialsForm.HAND_TO_MOUTH_EVENTS_PER_HOUR[indoor_or_outdoor] #daily_material_to_skin_transfer_efficency = forms.FloatField(required=False,initial=0.14) #OtM FRACTION_AI_HAND_TRANSFER = {'':0., 'carpet':0.06,'hard':0.08} fraction_of_residue_on_object = ImpregnatedMaterialsForm.FRACTION_AI_HAND_TRANSFER[type_of_flooring] object_to_mouth_event_frequency = ImpregnatedMaterialsForm.OBJECT_TO_MOUTH_EVENTS_PER_HOUR[indoor_or_outdoor] results = impregnated_materials(POD, LOC, bodyweight, absorption['dermal'], SA_BW_ratio, surface_residue_concentration, fraction_of_body_exposed, daily_material_to_skin_transfer_efficency, protection_factor, fraction_of_ai_transferred_to_hands, hand_exposure_time, hand_to_mouth_event_freqency, fraction_of_residue_on_object, object_exposure_time, object_to_mouth_event_frequency) risk_profile.update(results, SOP, duration) if s['11'] != None and 'indoor' in exposure_scenarios: #indoor SOP = "Indoor" space_spray_fraction_ai = s['11']['space_spray_fraction_ai'] space_spray_amount_of_product = s['11']['space_spray_amount_of_product'] space_spray_restriction = s['11']['space_spray_restriction'] molecular_weight = s['11']['molecular_weight'] vapor_pressure = s['11']['vapor_pressure'] residues = {} residues['broadcast'] = s['11']['broadcast_residue'] residues['perimeter/spot/bedbug (coarse)'] = s['11']['coarse_residue'] residues['perimeter/spot/bedbug (pin stream)'] = s['11']['pin_stream_residue'] residues['cracks and crevices'] = s['11']['crack_and_crevice_residue'] residues['foggers'] = s['11']['foggers_residue'] residues['space sprays'] = s['11']['space_sprays_residue'] matress_residue = s['11']['matress_residue'] results = indoor(POD, LOC, bodyweight, absorption['dermal'], absorption['inhalation'], 
space_spray_fraction_ai, space_spray_amount_of_product, space_spray_restriction, molecular_weight, vapor_pressure,residues,matress_residue) risk_profile.update(results, SOP, duration) if s['12'] != None and 'misting' in exposure_scenarios: #misting SOP = "Misting" OASS_fraction_ai = s['12']['OASS_fraction_ai'] OASS_amount_of_product_in_can = s['12']['OASS_amount_of_product_in_can'] CCTM_amount_ai_in_product= s['12']['CCTM_amount_ai_in_product'] ORMS_application_rate= s['12']['ORMS_application_rate'] ORMS_dilution_rate= s['12']['ORMS_dilution_rate'] ORMS_fraction_ai= s['12']['ORMS_fraction_ai'] AB_application_rate= s['12']['AB_application_rate'] AB_dilution_rate = s['12']['AB_dilution_rate'] AB_fraction_ai = s['12']['AB_fraction_ai'] results = outdoor_misting(POD, LOC, bodyweight, absorption['dermal'], absorption['inhalation'], OASS_fraction_ai, OASS_amount_of_product_in_can, CCTM_amount_ai_in_product, ORMS_application_rate, ORMS_dilution_rate, ORMS_fraction_ai, AB_application_rate, AB_dilution_rate, AB_fraction_ai) risk_profile.update(results, SOP, duration) sorted_RIs = {} ri_id=0 for duration in risk_profile.results: sorted_RIs[duration] = {} for lifestage in risk_profile.results[duration]: lifestage_final = lifestage if pop_selection != "gen" and lifestage != 'adult': continue elif pop_selection != "gen": lifestage_final = pop_selection sorted_RIs[duration][lifestage_final] = risk_profile.results[duration][lifestage] sorted_RIs[duration][lifestage_final].sort() amended_RIs[duration][lifestage_final] = [] for l in sorted_RIs[duration][lifestage_final]: n = list(l) n.append(ri_id) ri_id+=1 amended_RIs[duration][lifestage_final].append(n) return amended_RIs except Exception as e: return e, str(e) class IngredientOverviewForm(forms.Form): calls = 0 title = "Assessment Background Information" active_ingredient = forms.CharField(required=False) #GardenAndTreesForm, InsectRellentsForm, PaintsAndPreservativesForm SCENARIOS = [('generalhandler','Handler/Applicator (all 
scenarios)'),('insect','Insect Repellents'),('treatedpet','Treated Pets'),('lawn','Lawns/Turf'),('garden','Gardens And Trees'),('paint','Paints And Preservatives'),('impregnated_materials','Impregnated Materials'), ('indoor','Indoor'),('misting','Outdoor Misting ')] exposure_scenarios = forms.MultipleChoiceField(choices=SCENARIOS, widget=CheckboxSelectMultipleBootstrap()) ROUTES = [('oral', 'Incidental Oral'), ('dermal', 'Dermal'), ('inhalation', 'Inhalation') , ('dietary', 'Granule/Pellet Ingestion')] exposure_routes = forms.MultipleChoiceField(choices=ROUTES, widget=CheckboxSelectMultipleBootstrap(), initial = ['oral','dermal','inhalation','dietary']) DURATIONS = [('short','Short-Term'),('intermediate','Intermediate-Term'),('long','Long-Term')] exposure_durations = forms.MultipleChoiceField(choices=DURATIONS , widget=CheckboxSelectMultipleBootstrap()) TARGET_POP_CHOICES = [('gen','General Population (Adults + Children)'),('adult_female','Adult (Female Only)'),('adult_male','Adult (Male Only)')] TARGET_POP_CHOICES_DICT = {} for choice in TARGET_POP_CHOICES: TARGET_POP_CHOICES_DICT[choice[0]] = choice[1] target_population = forms.MultipleChoiceField(choices=TARGET_POP_CHOICES , widget=CheckboxSelectMultipleBootstrap(),initial=['gen']) def __init__(self,*args,**kwargs): super(IngredientOverviewForm,self).__init__(*args,**kwargs) IngredientOverviewForm.calls += 1 def clean(self): cleaned_data = super(IngredientOverviewForm, self).clean() exposure_scenarios = cleaned_data.get("exposure_scenarios") exposure_routes = cleaned_data.get("exposure_routes") if exposure_routes and exposure_scenarios: if 'dermal' in exposure_routes: return cleaned_data if 'oral' in exposure_routes: if True in [scenario in exposure_scenarios for scenario in ['lawn','insect','paint','treatedpet','indoor','impregnated_materials', 'misting']]: return cleaned_data if 'inhalation' in exposure_routes: if True in [scenario in exposure_scenarios for scenario in ['indoor','misting','generalhandler']]: 
return cleaned_data if 'dietary' in exposure_routes: if True in [scenario in exposure_scenarios for scenario in ['lawn']]: return cleaned_data raise forms.ValidationError("No combinations of these routes and scenarios exist.") return cleaned_data class ToxForm(forms.Form): calls = 0 title = "Toxicological Information" POD_STUDY_CHOICES = [('',''),('route-specific','Route-specific'),('oral','Oral')] ABS_STUDY_CHOICES = [('',''), ('human-study', 'Human Study'), ('animal-study', 'Animal Study'), ('POD or LOAEL/NOAEL comparison','Estimated by POD or LOAEL/NOAEL comparison'),('in vitro study','In vitro study'),('other','Other')] TARGET_POP_CHOICES = [('gen','General Population (Adults + Children)'),('adult_female','Adult (Female Only)'),('adult_male','Adult (Male Only)')] TARGET_POP_CHOICES_DICT = {} for choice in TARGET_POP_CHOICES: TARGET_POP_CHOICES_DICT[choice[0]] = choice[1] def __init__(self,*args,**kwargs): data = kwargs.pop('data_from_step_1',None) self.data_from_step_1 = data super(ToxForm,self).__init__(*args, **kwargs) self.data_from_step_1 = self.initial['data_from_step_1'] ToxForm.calls += 1 logger.error("ToxForm __init__ calls: %s "%ToxForm.calls) if self.data_from_step_1: if 'dermal' in self.data_from_step_1['exposure_routes']: self.fields['dermal_absorption'] = forms.FloatField(required=False, initial=1, label="Dermal Absorption (0-1)",min_value=0., max_value=1.) 
self.fields['dermal_absorption_study'] = forms.ChoiceField(choices=ToxForm.ABS_STUDY_CHOICES,required=False,label="Dermal Absorption Study") self.fields['dermal_POD_study'] = forms.ChoiceField(choices=ToxForm.POD_STUDY_CHOICES,required=False,label="Dermal POD Study" ) for duration in self.data_from_step_1['exposure_durations']: if 'dermal' in self.data_from_step_1['exposure_routes']: for target in self.data_from_step_1['target_population']: self.fields['dermal_%s_%s_POD'%(duration,target)] = forms.FloatField(required=False, min_value=0.,label="%s Term Dermal POD (mg/kg/day) (%s)"%(duration.capitalize(), ToxForm.TARGET_POP_CHOICES_DICT[target]) ) self.fields['dermal_%s_%s_LOC'%(duration,target)] = forms.FloatField(required=False, initial=100, min_value=0.,label="%s Term Dermal LOC (%s)"%(duration.capitalize(), ToxForm.TARGET_POP_CHOICES_DICT[target]) ) if True in [scenario in self.data_from_step_1['exposure_scenarios'] for scenario in ['lawn','insect','paint','treatedpet','indoor','impregnated_materials','misting']] and 'oral' in self.data_from_step_1['exposure_routes']: for target in self.data_from_step_1['target_population']: self.fields['oral_%s_%s_POD'%(duration,target)] = forms.FloatField(required=False, min_value=0.,label="%s Term Oral POD (mg/kg/day) (%s)"%(duration.capitalize(), ToxForm.TARGET_POP_CHOICES_DICT[target])) self.fields['oral_%s_%s_LOC'%(duration,target)] = forms.FloatField(required=False, initial=100, min_value=0., label="%s Term Oral LOC (%s)"%(duration.capitalize(), ToxForm.TARGET_POP_CHOICES_DICT[target])) if True in [scenario in self.data_from_step_1['exposure_scenarios'] for scenario in ['indoor','misting','generalhandler']] and 'inhalation' in self.data_from_step_1['exposure_routes']: self.fields['inhalation_absorption'] = forms.FloatField(required=False, initial=1, label="Inhalation Absorption (0-1)",min_value=0., max_value=1.) 
self.fields['inhalation_absorption_study'] = forms.ChoiceField(choices=ToxForm.ABS_STUDY_CHOICES,required=False,label="Inhalation Absorption Study") self.fields['inhalation_POD_study'] = forms.ChoiceField(choices=ToxForm.POD_STUDY_CHOICES,required=False, label="Inhalation POD Study") for duration in self.data_from_step_1['exposure_durations']: if True in [scenario in self.data_from_step_1['exposure_scenarios'] for scenario in ['indoor','misting','generalhandler']] and 'inhalation' in self.data_from_step_1['exposure_routes']: for target in self.data_from_step_1['target_population']: self.fields['inhalation_%s_%s_POD'%(duration,target)] = forms.FloatField(required=False, min_value=0.,label="%s Term Inhalation POD (mg/kg/day) (%s)"%(duration.capitalize(), ToxForm.TARGET_POP_CHOICES_DICT[target])) self.fields['inhalation_%s_%s_LOC'%(duration,target)] = forms.FloatField(required=False, initial=100, min_value=0.,label="%s Term Inhalation LOC (%s)"%(duration.capitalize(), ToxForm.TARGET_POP_CHOICES_DICT[target])) if 'lawn' in self.data_from_step_1['exposure_scenarios'] and 'dietary' in self.data_from_step_1['exposure_routes']: if 'gen' in self.data_from_step_1['target_population']: self.fields['dietary_POD'] = forms.FloatField(required=False, min_value=0.,label="Dietary POD (mg/kg/day) (Children)") self.fields['dietary_LOC'] = forms.FloatField(required=False, initial=100,min_value=0., label="Dietary LOC (Children)") #assert(self.data_from_step_1, Exception(self.data_from_step_1)) #raise Exception(self.data_from_step_1) def clean(self, *args, **kwargs): cleaned_data = super(ToxForm, self).clean() for route in self.data_from_step_1['exposure_routes']: if '%s_absorption'%(route) in self.fields: absorption = cleaned_data.get('%s_absorption'%(route)) pod_study = cleaned_data.get('%s_POD_study'%(route)) if pod_study == 'route-specific' and absorption != 1: msg = u"Absorption must be 1 for route specific POD studies." 
self._errors['%s_absorption'%(route)] = self.error_class([msg]) self._errors['%s_POD_study'%(route)] = self.error_class([msg]) del cleaned_data['%s_POD_study'%(route)] if '%s_absorption'%(route) in cleaned_data: del cleaned_data['%s_absorption'%(route)] # Always return the full collection of cleaned data. return cleaned_data class GeneralHandlerForm(forms.Form): title = "General Handler Data Entry Form" application_rate = defaultdict(lambda : defaultdict(lambda : defaultdict(dict))) application_rate_units = defaultdict(lambda : defaultdict(lambda : defaultdict(dict))) application_rate_form_map = defaultdict(lambda : defaultdict(lambda : defaultdict(dict))) application_rate['Dust/Powder']['Indoor Environment']['Plunger Duster']['Broadcast; Perimeter/Spot/ Bedbug (course application)'] = 0 application_rate['Dust/Powder']['Gardens / Trees']['Plunger Duster'][''] = 0 application_rate['Dust/Powder']['Indoor Environment']['Bulb duster']['Perimeter/Spot/Bedbug; Crack and Crevice'] = 0 application_rate['Dust/Powder']['Gardens / Trees']['Bulb duster'][''] = 0 application_rate['Dust/Powder']['Indoor Environment']['Electric/power duster']['Broadcast; Perimeter/Spot/ Bedbug (course application)'] = 0 application_rate['Dust/Powder']['Gardens / Trees']['Electric/power duster'][''] = 0 application_rate['Dust/Powder']['Indoor Environment']['Hand crank duster']['Broadcast; Perimeter/Spot/ Bedbug (course application)'] = 0 application_rate['Dust/Powder']['Gardens / Trees']['Hand crank duster'][''] = 0 application_rate['Dust/Powder']['Indoor Environment']['Shaker can']['Broadcast'] = 0 application_rate['Dust/Powder']['Indoor Environment']['Shaker can']['Broadcast; Perimeter/Spot/ Bedbug (course application)'] = 0 application_rate['Dust/Powder']['Gardens / Trees']['Shaker can']['can'] = 0 application_rate['Dust/Powder']['Gardens / Trees']['Shaker can']['ft2'] = 0 application_rate['Liquid concentrate']['Indoor Environment']['Manually-pressurized handwand (w/ or w/o pin stream 
nozzle)']['Broadcast, Perimeter/Spot/ Bedbug (course application); Perimeter /Spot/ Bedbug (pinstream application); Crack and Crevice'] = 0 application_rate['Liquid concentrate']['Gardens / Trees']['Manually-pressurized handwand']['ft2'] = 0 application_rate['Liquid concentrate']['Gardens / Trees']['Manually-pressurized handwand']['gallons'] = 0 application_rate['Liquid concentrate']['Gardens / Trees']['Hose-end Sprayer']['ft2'] = 0 application_rate['Liquid concentrate']['Gardens / Trees']['Hose-end Sprayer']['gallons'] = 0 application_rate['Liquid concentrate']['Lawns / Turf']['Hose-end Sprayer'][''] = 0 application_rate['Liquid concentrate']['Lawns / Turf']['Manually-pressurized handwand'][''] = 0 application_rate['Liquid concentrate']['Gardens / Trees']['Backpack']['ft2'] = 0 application_rate['Liquid concentrate']['Gardens / Trees']['Backpack']['gallons'] = 0 application_rate['Liquid concentrate']['Gardens / Trees']['Sprinkler can']['ft2'] = 0 application_rate['Liquid concentrate']['Gardens / Trees']['Sprinkler can']['gallons'] = 0 application_rate['Liquid concentrate']['Lawns / Turf']['Sprinkler can'][''] = 0 application_rate['Ready-to-use']['Indoor Environment']['Aerosol can']['Broadcast Surface Spray'] = 0 application_rate['Ready-to-use']['Indoor Environment']['Aerosol can']['Perimeter/ Spot/ Bedbug (course application)'] = 0 application_rate['Ready-to-use']['Indoor Environment']['Aerosol can with pin stream nozzle']['Perimeter/ Spot/ Bedbug (pin stream application); Crack and Crevice'] = 0 application_rate['Ready-to-use']['Indoor Environment']['Aerosol can']['Space spray'] = 0 application_rate['Ready-to-use']['Gardens / Trees']['Aerosol can'][''] = 0 application_rate['Ready-to-use']['Lawns / Turf']['Aerosol can'][''] = 0 application_rate['Ready-to-use']['Indoor Environment']['Trigger-spray bottle']['Broadcast'] = 0 application_rate['Ready-to-use']['Indoor Environment']['Trigger-spray bottle']['Perimeter/ Spot/ Bedbug (course application)'] = 0 
    # ------------------------------------------------------------------
    # Class-level default application rates (continued from above), keyed
    # application_rate[formulation][sub-scenario][equipment][application type].
    # Every leaf is initialised to 0; __init__ later turns each leaf that
    # matches the wizard's previous-step selection into a FloatField.
    # A key of '' means the combination has a single, unnamed application type.
    # NOTE(review): key strings intentionally keep the source misspellings
    # ('Water-disersible', 'course application') -- the same literals are
    # used as choice values elsewhere in this module, so do not "fix" them
    # in isolation.
    # ------------------------------------------------------------------
    # Ready-to-use products.
    application_rate['Ready-to-use']['Insect Repellent']['Aerosol can'][''] = 0
    application_rate['Ready-to-use']['Insect Repellent']['Trigger-spray bottle'][''] = 0
    application_rate['Ready-to-use']['Gardens / Trees']['Trigger-spray bottle'][''] = 0
    application_rate['Ready-to-use']['Lawns / Turf']['Trigger-spray bottle'][''] = 0
    application_rate['Ready-to-use']['Indoor Environment']['Bait (granular, hand dispersal)'][''] = 0
    application_rate['Ready-to-use']['Gardens / Trees']['Hose-end Sprayer']['ft2'] = 0
    application_rate['Ready-to-use']['Gardens / Trees']['Hose-end Sprayer']['gallons'] = 0
    application_rate['Ready-to-use']['Lawns / Turf']['Hose-end Sprayer'][''] = 0
    # Wettable powders (plus one stray Liquid-concentrate entry kept in
    # original order).
    application_rate['Wettable powders']['Indoor Environment']['Manually-pressurized handwand (w/ or w/o pin stream nozzle)']['Broadcast, Perimeter/Spot/ Bedbug (course application); Perimeter /Spot/ Bedbug (pinstream application); Crack and Crevice'] = 0
    application_rate['Liquid concentrate']['Lawns / Turf']['Backpack'][''] = 0
    application_rate['Wettable powders']['Gardens / Trees']['Manually-pressurized handwand']['ft2'] = 0
    application_rate['Wettable powders']['Gardens / Trees']['Manually-pressurized handwand']['gallons'] = 0
    application_rate['Wettable powders']['Gardens / Trees']['Hose-end Sprayer']['ft2'] = 0
    application_rate['Wettable powders']['Gardens / Trees']['Hose-end Sprayer']['gallons'] = 0
    application_rate['Wettable powders']['Lawns / Turf']['Hose-end Sprayer'][''] = 0
    application_rate['Wettable powders']['Lawns / Turf']['Manually-pressurized handwand'][''] = 0
    application_rate['Wettable powders']['Gardens / Trees']['Backpack']['ft2'] = 0
    application_rate['Wettable powders']['Gardens / Trees']['Backpack']['gallons'] = 0
    application_rate['Wettable powders']['Gardens / Trees']['Sprinkler can']['ft2'] = 0
    application_rate['Wettable powders']['Gardens / Trees']['Sprinkler can']['gallons'] = 0
    application_rate['Wettable powders']['Lawns / Turf']['Sprinkler can'][''] = 0
    # Wettable powders in water-soluble packaging.
    application_rate['Wettable powders in water-soluble packaging']['Indoor Environment']['Manually-pressurized handwand (w/ or w/o pin stream nozzle)']['Broadcast, Perimeter/Spot/ Bedbug (course application); Perimeter /Spot/ Bedbug (pinstream application); Crack and Crevice'] = 0
    application_rate['Wettable powders']['Lawns / Turf']['Backpack'][''] = 0
    application_rate['Wettable powders in water-soluble packaging']['Gardens / Trees']['Manually-pressurized handwand']['ft2'] = 0
    application_rate['Wettable powders in water-soluble packaging']['Gardens / Trees']['Manually-pressurized handwand']['gallons'] = 0
    application_rate['Wettable powders in water-soluble packaging']['Gardens / Trees']['Hose-end Sprayer']['ft2'] = 0
    application_rate['Wettable powders in water-soluble packaging']['Gardens / Trees']['Hose-end Sprayer']['gallons'] = 0
    application_rate['Wettable powders in water-soluble packaging']['Lawns / Turf']['Hose-end Sprayer'][''] = 0
    application_rate['Wettable powders in water-soluble packaging']['Lawns / Turf']['Manually-pressurized handwand'][''] = 0
    application_rate['Wettable powders in water-soluble packaging']['Lawns / Turf']['Backpack'][''] = 0
    application_rate['Wettable powders in water-soluble packaging']['Gardens / Trees']['Sprinkler can']['ft2'] = 0
    application_rate['Wettable powders in water-soluble packaging']['Gardens / Trees']['Sprinkler can']['gallons'] = 0
    application_rate['Wettable powders in water-soluble packaging']['Gardens / Trees']['Backpack'][''] = 0
    # Water-dispersible granule / dry flowable (key spelled 'disersible'
    # throughout the data -- see NOTE above).
    application_rate['Water-disersible Granule / Dry Flowable']['Lawns / Turf']['Manually-pressurized handwand'][''] = 0
    application_rate['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Hose-end Sprayer']['ft2'] = 0
    application_rate['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Hose-end Sprayer']['gallons'] = 0
    application_rate['Water-disersible Granule / Dry Flowable']['Lawns / Turf']['Hose-end Sprayer'][''] = 0
    # NOTE(review): duplicate of the identical assignment a few lines up
    # (same key, same value 0) -- harmless but redundant.
    application_rate['Wettable powders in water-soluble packaging']['Gardens / Trees']['Backpack'][''] = 0
    application_rate['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Manually-pressurized handwand']['ft2'] = 0
    application_rate['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Manually-pressurized handwand']['gallons'] = 0
    application_rate['Water-disersible Granule / Dry Flowable']['Lawns / Turf']['Backpack'][''] = 0
    application_rate['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Sprinkler can']['ft2'] = 0
    application_rate['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Sprinkler can']['gallons'] = 0
    application_rate['Water-disersible Granule / Dry Flowable']['Lawns / Turf']['Sprinkler can'][''] = 0
    # Granules.
    application_rate['Granule']['Gardens / Trees']['Push-type rotary spreader'][''] = 0
    application_rate['Granule']['Lawns / Turf']['Push-type rotary spreader'][''] = 0
    application_rate['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Backpack']['ft2'] = 0
    application_rate['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Backpack']['gallons'] = 0
    application_rate['Granule']['Lawns / Turf']['Belly grinder'][''] = 0
    application_rate['Granule']['Gardens / Trees']['Spoon'][''] = 0
    application_rate['Granule']['Lawns / Turf']['Spoon'][''] = 0
    application_rate['Granule']['Gardens / Trees']['Cup'][''] = 0
    application_rate['Granule']['Lawns / Turf']['Cup'][''] = 0
    application_rate['Granule']['Gardens / Trees']['Hand dispersal'][''] = 0
    application_rate['Granule']['Lawns / Turf']['Hand dispersal'][''] = 0
    application_rate['Granule']['Gardens / Trees']['Shaker can']['can'] = 0
    application_rate['Granule']['Gardens / Trees']['Shaker can']['ft2'] = 0
    application_rate['Granule']['Lawns / Turf']['Shaker can'][''] = 0
    # Microencapsulated.
    application_rate['Microencapsulated']['Gardens / Trees']['Manually-pressurized handwand']['ft2'] = 0
    application_rate['Microencapsulated']['Gardens / Trees']['Manually-pressurized handwand']['gallons'] = 0
    application_rate['Microencapsulated']['Gardens / Trees']['Hose-end Sprayer']['ft2'] = 0
    application_rate['Microencapsulated']['Gardens / Trees']['Hose-end Sprayer']['gallons'] = 0
    application_rate['Microencapsulated']['Lawns / Turf']['Hose-end Sprayer'][''] = 0
    application_rate['Microencapsulated']['Lawns / Turf']['Manually-pressurized handwand'][''] = 0
    application_rate['Microencapsulated']['Gardens / Trees']['Backpack']['ft2'] = 0
    application_rate['Microencapsulated']['Gardens / Trees']['Backpack']['gallons'] = 0
    application_rate['Microencapsulated']['Lawns / Turf']['Backpack'][''] = 0
    application_rate['Microencapsulated']['Gardens / Trees']['Sprinkler can']['ft2'] = 0
    application_rate['Microencapsulated']['Gardens / Trees']['Sprinkler can']['gallons'] = 0
    application_rate['Microencapsulated']['Lawns / Turf']['Sprinkler can'][''] = 0
    # Paints / preservatives / stains.
    application_rate['Ready-to-use']['Paints / Preservatives']['Aerosol can'][''] = 0
    application_rate['Paints / Preservatives/ Stains']['Paints / Preservatives']['Airless Sprayer'][''] = 0
    application_rate['Paints / Preservatives/ Stains']['Paints / Preservatives']['Brush'][''] = 0
    application_rate['Paints / Preservatives/ Stains']['Paints / Preservatives']['Manually-pressurized handwand'][''] = 0
    application_rate['Paints / Preservatives/ Stains']['Paints / Preservatives']['Roller'][''] = 0
    # Treated pets.
    application_rate['Liquid concentrate']['Treated Pets']['Dip'][''] = 0
    application_rate['Liquid concentrate']['Treated Pets']['Sponge'][''] = 0
    application_rate['Ready-to-use']['Treated Pets']['Trigger-spray bottle'][''] = 0
    application_rate['Ready-to-use']['Treated Pets']['Aerosol can'][''] = 0
    application_rate['Ready-to-use']['Treated Pets']['Shampoo'][''] = 0
    application_rate['Ready-to-use']['Treated Pets']['Spot-on'][''] = 0
    application_rate['Ready-to-use']['Treated Pets']['Collar'][''] = 0
    application_rate['Dust/Powder']['Treated Pets']['Shaker Can'][''] = 0
application_rate_units['Dust/Powder']['Indoor Environment']['Plunger Duster']['Broadcast; Perimeter/Spot/ Bedbug (course application)'] = 'lb ai/lb dust' application_rate_units['Dust/Powder']['Gardens / Trees']['Plunger Duster'][''] = 'lb ai/ft2' application_rate_units['Dust/Powder']['Indoor Environment']['Bulb duster']['Perimeter/Spot/Bedbug; Crack and Crevice'] = 'lb ai/lb dust' application_rate_units['Dust/Powder']['Gardens / Trees']['Bulb duster'][''] = 'lb ai/ft2' application_rate_units['Dust/Powder']['Indoor Environment']['Electric/power duster']['Broadcast; Perimeter/Spot/ Bedbug (course application)'] = 'lb ai/lb dust' application_rate_units['Dust/Powder']['Gardens / Trees']['Electric/power duster'][''] = 'lb ai/ft2' application_rate_units['Dust/Powder']['Indoor Environment']['Hand crank duster']['Broadcast; Perimeter/Spot/ Bedbug (course application)'] = 'lb ai/lb dust' application_rate_units['Dust/Powder']['Gardens / Trees']['Hand crank duster'][''] = 'lb ai/ft2' application_rate_units['Dust/Powder']['Indoor Environment']['Shaker can']['Broadcast'] = 'lb ai/can' application_rate_units['Dust/Powder']['Indoor Environment']['Shaker can']['Broadcast; Perimeter/Spot/ Bedbug (course application)'] = 'lb ai/can' application_rate_units['Dust/Powder']['Gardens / Trees']['Shaker can']['can'] = 'lb ai/can' application_rate_units['Dust/Powder']['Gardens / Trees']['Shaker can']['ft2'] = 'lb ai/ft2' application_rate_units['Liquid concentrate']['Indoor Environment']['Manually-pressurized handwand (w/ or w/o pin stream nozzle)']['Broadcast, Perimeter/Spot/ Bedbug (course application); Perimeter /Spot/ Bedbug (pinstream application); Crack and Crevice'] = 'lb ai/gallon' application_rate_units['Liquid concentrate']['Gardens / Trees']['Manually-pressurized handwand']['gallons'] = 'lb ai/gallon' application_rate_units['Liquid concentrate']['Gardens / Trees']['Manually-pressurized handwand']['ft2'] = 'lb ai/ft2' application_rate_units['Liquid concentrate']['Gardens / 
Trees']['Hose-end Sprayer']['ft2'] = 'lb ai/ft2' application_rate_units['Liquid concentrate']['Gardens / Trees']['Hose-end Sprayer']['gallons'] = 'lb ai/gallon' application_rate_units['Liquid concentrate']['Lawns / Turf']['Hose-end Sprayer'][''] = 'lb ai/acre' application_rate_units['Liquid concentrate']['Lawns / Turf']['Manually-pressurized handwand'][''] = 'lb ai/gallon' application_rate_units['Liquid concentrate']['Gardens / Trees']['Backpack']['ft2'] = 'lb ai/ft2' application_rate_units['Liquid concentrate']['Gardens / Trees']['Backpack']['gallons'] = 'lb ai/gallon' application_rate_units['Liquid concentrate']['Gardens / Trees']['Sprinkler can']['ft2'] = 'lb ai/ft2' application_rate_units['Liquid concentrate']['Gardens / Trees']['Sprinkler can']['gallons'] = 'lb ai/gallon' application_rate_units['Liquid concentrate']['Lawns / Turf']['Sprinkler can'][''] = 'lb ai/ft2' application_rate_units['Ready-to-use']['Indoor Environment']['Aerosol can']['Broadcast Surface Spray'] = 'lb ai/16-oz can' application_rate_units['Ready-to-use']['Indoor Environment']['Aerosol can']['Perimeter/ Spot/ Bedbug (course application)'] = 'lb ai/16-oz can' application_rate_units['Ready-to-use']['Indoor Environment']['Aerosol can with pin stream nozzle']['Perimeter/ Spot/ Bedbug (pin stream application); Crack and Crevice'] = 'lb ai/16-oz can' application_rate_units['Ready-to-use']['Indoor Environment']['Aerosol can']['Space spray'] = 'lb ai/16-oz can' application_rate_units['Ready-to-use']['Insect Repellent']['Aerosol can'][''] = 'lb ai/can' application_rate_units['Ready-to-use']['Insect Repellent']['Trigger-spray bottle'][''] = 'lb ai/bottle' application_rate_units['Ready-to-use']['Gardens / Trees']['Aerosol can'][''] = 'lb ai/can' application_rate_units['Ready-to-use']['Lawns / Turf']['Aerosol can'][''] = 'lb ai/can' application_rate_units['Ready-to-use']['Indoor Environment']['Trigger-spray bottle']['Broadcast'] = 'lb ai/bottle' application_rate_units['Ready-to-use']['Indoor 
Environment']['Trigger-spray bottle']['Perimeter/ Spot/ Bedbug (course application)'] = 'lb ai/bottle' application_rate_units['Ready-to-use']['Gardens / Trees']['Trigger-spray bottle'][''] = 'lb ai/bottle' application_rate_units['Ready-to-use']['Lawns / Turf']['Trigger-spray bottle'][''] = 'lb ai/bottle' application_rate_units['Ready-to-use']['Indoor Environment']['Bait (granular, hand dispersal)'][''] = 'lb ai/ft2' application_rate_units['Ready-to-use']['Gardens / Trees']['Hose-end Sprayer']['ft2'] = 'lb ai/ft2' application_rate_units['Ready-to-use']['Gardens / Trees']['Hose-end Sprayer']['gallons'] = 'lb ai/gallon' application_rate_units['Ready-to-use']['Lawns / Turf']['Hose-end Sprayer'][''] = 'lb ai/acre' application_rate_units['Wettable powders']['Indoor Environment']['Manually-pressurized handwand (w/ or w/o pin stream nozzle)']['Broadcast, Perimeter/Spot/ Bedbug (course application); Perimeter /Spot/ Bedbug (pinstream application); Crack and Crevice'] = 'lb ai/gallon' application_rate_units['Liquid concentrate']['Lawns / Turf']['Backpack'][''] = 'lb ai/gallon' application_rate_units['Wettable powders']['Gardens / Trees']['Manually-pressurized handwand']['ft2'] = 'lb ai/ft2' application_rate_units['Wettable powders']['Gardens / Trees']['Manually-pressurized handwand']['gallons'] = 'lb ai/gallon' application_rate_units['Wettable powders']['Gardens / Trees']['Hose-end Sprayer']['ft2'] = 'lb ai/ft2' application_rate_units['Wettable powders']['Gardens / Trees']['Hose-end Sprayer']['gallons'] = 'lb ai/gallon' application_rate_units['Wettable powders']['Lawns / Turf']['Hose-end Sprayer'][''] = 'lb ai/acre' application_rate_units['Wettable powders']['Lawns / Turf']['Manually-pressurized handwand'][''] = 'lb ai/gallon' application_rate_units['Wettable powders']['Gardens / Trees']['Backpack']['ft2'] = 'lb ai/ft2' application_rate_units['Wettable powders']['Gardens / Trees']['Backpack']['gallons'] = 'lb ai/gallon' application_rate_units['Wettable powders']['Gardens / 
Trees']['Sprinkler can']['ft2'] = 'lb ai/ft2' application_rate_units['Wettable powders']['Gardens / Trees']['Sprinkler can']['gallons'] = 'lb ai/gallon' application_rate_units['Wettable powders']['Lawns / Turf']['Sprinkler can'][''] = 'lb ai/ft2' application_rate_units['Wettable powders in water-soluble packaging']['Indoor Environment']['Manually-pressurized handwand (w/ or w/o pin stream nozzle)']['Broadcast, Perimeter/Spot/ Bedbug (course application); Perimeter /Spot/ Bedbug (pinstream application); Crack and Crevice'] = 'lb ai/gallon' application_rate_units['Wettable powders']['Lawns / Turf']['Backpack'][''] = 'lb ai/gallon' application_rate_units['Wettable powders in water-soluble packaging']['Gardens / Trees']['Manually-pressurized handwand']['ft2'] = 'lb ai/ft2' application_rate_units['Wettable powders in water-soluble packaging']['Gardens / Trees']['Manually-pressurized handwand']['gallons'] = 'lb ai/gallon' application_rate_units['Wettable powders in water-soluble packaging']['Gardens / Trees']['Hose-end Sprayer']['ft2'] = 'lb ai/ft2' application_rate_units['Wettable powders in water-soluble packaging']['Gardens / Trees']['Hose-end Sprayer']['gallons'] = 'lb ai/gallon' application_rate_units['Wettable powders in water-soluble packaging']['Lawns / Turf']['Hose-end Sprayer'][''] = 'lb ai/acre' application_rate_units['Wettable powders in water-soluble packaging']['Lawns / Turf']['Manually-pressurized handwand'][''] = 'lb ai/gallon' application_rate_units['Wettable powders in water-soluble packaging']['Lawns / Turf']['Backpack'][''] = 'lb ai/gallon' application_rate_units['Wettable powders in water-soluble packaging']['Gardens / Trees']['Sprinkler can']['ft2'] = 'lb ai/ft2' application_rate_units['Wettable powders in water-soluble packaging']['Gardens / Trees']['Sprinkler can']['gallons'] = 'lb ai/gallon' application_rate_units['Wettable powders in water-soluble packaging']['Lawns / Turf']['Sprinkler can'][''] = 'lb ai/ft2' application_rate_units['Wettable 
powders in water-soluble packaging']['Gardens / Trees']['Backpack'][''] = 'lb ai/ft2' application_rate_units['Water-disersible Granule / Dry Flowable']['Lawns / Turf']['Manually-pressurized handwand'][''] = 'lb ai/gallon' application_rate_units['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Hose-end Sprayer']['ft2'] = 'lb ai/ft2' application_rate_units['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Hose-end Sprayer']['gallons'] = 'lb ai/gallon' application_rate_units['Water-disersible Granule / Dry Flowable']['Lawns / Turf']['Hose-end Sprayer'][''] = 'lb ai/acre' application_rate_units['Wettable powders in water-soluble packaging']['Gardens / Trees']['Backpack'][''] = 'lb ai/gallon' application_rate_units['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Manually-pressurized handwand']['ft2'] = 'lb ai/ft2' application_rate_units['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Manually-pressurized handwand']['gallons'] = 'lb ai/gallon' application_rate_units['Water-disersible Granule / Dry Flowable']['Lawns / Turf']['Backpack'][''] = 'lb ai/gallon' application_rate_units['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Sprinkler can']['ft2'] = 'lb ai/ft2' application_rate_units['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Sprinkler can']['gallons'] = 'lb ai/gallon' application_rate_units['Water-disersible Granule / Dry Flowable']['Lawns / Turf']['Sprinkler can'][''] = 'lb ai/ft2' application_rate_units['Granule']['Gardens / Trees']['Push-type rotary spreader'][''] = 'lb ai/ft2' application_rate_units['Granule']['Lawns / Turf']['Push-type rotary spreader'][''] = 'lb ai/acre' application_rate_units['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Backpack']['ft2'] = 'lb ai/ft2' application_rate_units['Water-disersible Granule / Dry Flowable']['Gardens / Trees']['Backpack']['gallons'] = 'lb ai/gallon' application_rate_units['Granule']['Lawns / Turf']['Belly 
grinder'][''] = 'lb ai/ft2' application_rate_units['Granule']['Gardens / Trees']['Spoon'][''] = 'lb ai/ft2' application_rate_units['Granule']['Lawns / Turf']['Spoon'][''] = 'lb ai/ft2' application_rate_units['Granule']['Gardens / Trees']['Cup'][''] = 'lb ai/ft2' application_rate_units['Granule']['Lawns / Turf']['Cup'][''] = 'lb ai/ft2' application_rate_units['Granule']['Gardens / Trees']['Hand dispersal'][''] = 'lb ai/ft2' application_rate_units['Granule']['Lawns / Turf']['Hand dispersal'][''] = 'lb ai/ft2' application_rate_units['Granule']['Gardens / Trees']['Shaker can']['can'] = 'lb ai/can' application_rate_units['Granule']['Gardens / Trees']['Shaker can']['ft2'] = 'lb ai/ft2' application_rate_units['Granule']['Lawns / Turf']['Shaker can'][''] = 'lb ai/ft2' application_rate_units['Microencapsulated']['Gardens / Trees']['Manually-pressurized handwand']['ft2'] = 'lb ai/ft2' application_rate_units['Microencapsulated']['Gardens / Trees']['Manually-pressurized handwand']['gallons'] = 'lb ai/gallon' application_rate_units['Microencapsulated']['Gardens / Trees']['Hose-end Sprayer']['ft2'] = 'lb ai/ft2' application_rate_units['Microencapsulated']['Gardens / Trees']['Hose-end Sprayer']['gallons'] = 'lb ai/gallon' application_rate_units['Microencapsulated']['Lawns / Turf']['Hose-end Sprayer'][''] = 'lb ai/acre' application_rate_units['Microencapsulated']['Lawns / Turf']['Manually-pressurized handwand'][''] = 'lb ai/gallon' application_rate_units['Microencapsulated']['Gardens / Trees']['Backpack']['ft2'] = 'lb ai/ft2' application_rate_units['Microencapsulated']['Gardens / Trees']['Backpack']['gallons'] = 'lb ai/gallon' application_rate_units['Microencapsulated']['Lawns / Turf']['Backpack'][''] = 'lb ai/gallon' application_rate_units['Microencapsulated']['Gardens / Trees']['Sprinkler can']['ft2'] = 'lb ai/ft2' application_rate_units['Microencapsulated']['Gardens / Trees']['Sprinkler can']['gallons'] = 'lb ai/gallon' application_rate_units['Microencapsulated']['Lawns / 
Turf']['Sprinkler can'][''] = 'lb ai/ft2' application_rate_units['Ready-to-use']['Paints / Preservatives']['Aerosol can'][''] = 'lb ai/12-oz can' application_rate_units['Paints / Preservatives/ Stains']['Paints / Preservatives']['Airless Sprayer'][''] = 'lb ai/1-gal can' application_rate_units['Paints / Preservatives/ Stains']['Paints / Preservatives']['Brush'][''] = 'lb ai/1-gal can' application_rate_units['Paints / Preservatives/ Stains']['Paints / Preservatives']['Manually-pressurized handwand'][''] = 'lb ai/1-gal can' application_rate_units['Paints / Preservatives/ Stains']['Paints / Preservatives']['Roller'][''] = 'lb ai/1-gal can' application_rate_units['Liquid concentrate']['Treated Pets']['Dip'][''] = 'lb ai/pet' application_rate_units['Liquid concentrate']['Treated Pets']['Sponge'][''] = 'lb ai/pet' application_rate_units['Ready-to-use']['Treated Pets']['Trigger-spray bottle'][''] = 'lb ai/pet' application_rate_units['Ready-to-use']['Treated Pets']['Aerosol can'][''] = 'lb ai/pet' application_rate_units['Ready-to-use']['Treated Pets']['Shampoo'][''] = 'lb ai/pet' application_rate_units['Ready-to-use']['Treated Pets']['Spot-on'][''] = 'lb ai/pet' application_rate_units['Ready-to-use']['Treated Pets']['Collar'][''] = 'lb ai/pet' application_rate_units['Dust/Powder']['Treated Pets']['Shaker Can'][''] = 'lb ai/pet' for formulation in application_rate: for scenario in application_rate[formulation]: for application_method in application_rate[formulation][scenario]: for application_type in application_rate[formulation][scenario][application_method]: application_rate_form_map[formulation][scenario][application_method][application_type] = "%s, %s, %s, %s" %(formulation, scenario, application_method, application_type ) def __init__(self,*args,**kwargs): self.data_from_general_handler_sub_scenario_step = kwargs.pop('data_from_general_handler_sub_scenario_step',None) super(GeneralHandlerForm,self).__init__(*args,**kwargs) application_rates = [] for formulation in 
GeneralHandlerForm.application_rate: if self.data_from_general_handler_sub_scenario_step: if formulation in self.data_from_general_handler_sub_scenario_step['formulations']: for scenario in GeneralHandlerForm.application_rate[formulation]: if scenario in self.data_from_general_handler_sub_scenario_step['sub_scenarios']: for application_method in GeneralHandlerForm.application_rate[formulation][scenario]: if application_method in self.data_from_general_handler_sub_scenario_step['equipment']: application_rates.append((formulation, scenario, application_method, GeneralHandlerForm.application_rate[formulation][scenario][application_method])) application_rates = sorted(application_rates, key=operator.itemgetter(1)) for formulation, scenario, application_method, application_rate in application_rates: for application_type in application_rate: self.fields[GeneralHandlerForm.application_rate_form_map[formulation][scenario][application_method][application_type]] = forms.FloatField(required=False,initial=0, label="%s [Application Rate (%s)]"%(GeneralHandlerForm.application_rate_form_map[formulation][scenario][application_method][application_type],GeneralHandlerForm.application_rate_units[formulation][scenario][application_method][application_type]),min_value=0.) 
class GeneralHandlerSubScenariosForm(forms.Form): title = "General Handler Sub Scenario Selection" SUB_SCENARIOS_CHOICES = [('Insect Repellent','Insect Repellent'),('Treated Pets','Treated Pets'),('Lawns / Turf','Lawns / Turf'),('Gardens / Trees','Gardens / Trees'),('Paints / Preservatives','Paints / Preservatives'), ('Indoor Environment','Indoor Environment'),('Misting','Misting')] sub_scenarios = forms.MultipleChoiceField(choices=SUB_SCENARIOS_CHOICES , widget=CheckboxSelectMultipleBootstrap()) FORMULATION_CHOICES = [('Dust/Powder','Dust/Powder'), ('Granule', 'Granule'),('Liquid concentrate','Liquid concentrate'), ('Microencapsulated','Microencapsulated'), ('Paints / Preservatives/ Stains','Paints / Preservatives/ Stains'), ('Ready-to-use','Ready-to-use'), ('Water-disersible Granule / Dry Flowable','Water-disersible Granule / Dry Flowable'), ('Wettable powders','Wettable powders'), ('Wettable powders in water-soluble packaging','Wettable powders in water-soluble packaging')] formulations = forms.MultipleChoiceField(choices=FORMULATION_CHOICES , widget=CheckboxSelectMultipleBootstrap(), required=False) EQUIPMENT_CHOICES = [('Aerosol can with pin stream nozzle','Aerosol can with pin stream nozzle'),('Aerosol can','Aerosol can'),('Airless Sprayer','Airless Sprayer'),('Backpack','Backpack'),('Bait (granular, hand dispersal)','Bait (granular, hand dispersal)'),('Belly grinder','Belly grinder'),('Brush','Brush'),('Bulb duster','Bulb duster'),('Collar','Collar'),('Cup','Cup'),('Dip','Dip'),('Electric/power duster','Electric/power duster'),('Hand crank duster','Hand crank duster'),('Hand dispersal','Hand dispersal'),('Hose-end Sprayer','Hose-end Sprayer'),('Manually-pressurized handwand','Manually-pressurized handwand'),('Manually-pressurized handwand (w/ or w/o pin stream nozzle)', 'Manually-pressurized handwand (w/ or w/o pin stream nozzle)'),('Plunger Duster','Plunger Duster'), ('Push-type rotary spreader', 'Push-type rotary spreader'),('Roller','Roller'),('Shaker 
can','Shaker can'),('Shampoo','Shampoo'),('Sponge','Sponge'),('Spot-on','Spot-on'),('Sprinkler can','Sprinkler can' ),('Trigger-spray bottle','Trigger-spray bottle')] equipment = forms.MultipleChoiceField(choices=EQUIPMENT_CHOICES , widget=CheckboxSelectMultipleBootstrap(), required=False) n_inputs_equipment = defaultdict(lambda : defaultdict(lambda : defaultdict(int))) n_inputs_formulation = defaultdict(lambda : defaultdict(int)) n_inputs_scenarios = defaultdict(int) for i in xrange(0, len(SUB_SCENARIOS_CHOICES)): for j in xrange(0, len(FORMULATION_CHOICES)): for k in xrange(0, len(EQUIPMENT_CHOICES)): formulation = FORMULATION_CHOICES[j][0] scenario = SUB_SCENARIOS_CHOICES[i][0] application_method = EQUIPMENT_CHOICES[k][0] try: size = len(GeneralHandlerForm.application_rate[formulation][scenario][application_method]) n_inputs_equipment[i][j][k] += size n_inputs_formulation[i][j] += size n_inputs_scenarios[i] += size except: pass def __init__(self,*args,**kwargs): super(GeneralHandlerSubScenariosForm,self).__init__(*args,**kwargs) def clean(self): cleaned_data = super(GeneralHandlerSubScenariosForm, self).clean() equipment = cleaned_data.get("equipment") formulations = cleaned_data.get("formulations") sub_scenarios = cleaned_data.get("sub_scenarios") if sub_scenarios == ['Misting']: return cleaned_data elif sub_scenarios: if formulations == [] or equipment == []: raise forms.ValidationError("Both formulations and equipment need to be selected for %s."%", ".join(sub_scenarios)) count = 0 for scenario in sub_scenarios: for formulation in formulations: for application_method in equipment: count += len(GeneralHandlerForm.application_rate[formulation][scenario][application_method]) if count == 0: raise forms.ValidationError("No scenarios available for this selection of formulations and equipment. 
Ensure at least one of the equipment choices has greater than 1 in brackets.") return cleaned_data class TreatedPetForm(forms.Form): title = "Treated Pet Data Entry Form" amount_applied_form_map = defaultdict(dict) for animal in ['cat','dog']: for size in ['small','medium','large']: amount_applied_form_map[animal][size] = "%s %s" %(size, animal) # amount_applied['Other Pet'][''] = 0 fraction_ai = forms.FloatField(required=False,initial=0,min_value=0.,max_value=1.,label ="Fraction ai in product(0-1)");#defaultdict(dict) default_pet_weights = {'cat':{},'dog':{}} #lb default_pet_weights['dog'] = {'small':10.36535946,'medium':38.16827225,'large':76.50578234} #lb default_pet_weights['cat'] = {'small':3.568299485,'medium':7.8300955,'large':16.13607146} pet_weight = default_pet_weights['dog']['medium'] #Surface Area (cm2) = ((12.3*((BW (lb)*454)^0.65)) def pet_surface_area(lb): return 12.3*((lb*454)**0.65) def __init__(self,*args,**kwargs): super(TreatedPetForm,self).__init__(*args,**kwargs) for animal in TreatedPetForm.amount_applied_form_map: for size in TreatedPetForm.amount_applied_form_map[animal]: TreatedPetForm.amount_applied_form_map[animal][size] = "%s %s" %(size, animal) self.fields[TreatedPetForm.amount_applied_form_map[animal][size]] = forms.FloatField(required=False,initial=0,min_value=0.,label = "Amount of product applied to a %s %s (g)" %(size, animal)) class LawnTurfForm(forms.Form): title = "Lawn and Turf Data Entry Form" liquid_application_rate = forms.FloatField(required=False,initial=0,min_value=0., label="Liquid Application Rate (lb ai/acre)") solid_application_rate = forms.FloatField(required=False,initial=0,min_value=0., label="Solid Application Rate (lb ai/acre)") liquid_ttr_conc = forms.FloatField(required=False,initial=0,min_value=0., label="Liquid TTR (calculated from application rate if not available) (ug/cm2)")#ORt = TTRt solid_ttr_conc = forms.FloatField(required=False,initial=0,min_value=0., label="Solid TTR (calculated from application rate 
if not available) (ug/cm2)")#ORt = TTRt fraction_ai_in_pellets = forms.FloatField(required=False,initial=0, min_value=0.,max_value=1.,label="Fraction of ai in pellets/granules (0-1)") class GardenAndTreesForm(forms.Form): title = "Garden and Trees Data Entry Form" liquid_application_rate = forms.FloatField(required=False,initial=0,min_value=0., label="Liquid Application Rate (lb ai/acre)") solid_application_rate = forms.FloatField(required=False,initial=0,min_value=0., label="Solid Application Rate (lb ai/acre)") liquid_dfr_conc = forms.FloatField(required=False,initial=0,min_value=0., label="Liquid DFR (calculated from application rate if not available) (ug/cm2)") solid_dfr_conc = forms.FloatField(required=False,initial=0,min_value=0., label="Solid DFR (calculated from application rate if not available) (ug/cm2)") class InsectRepellentsForm(forms.Form): title = "Insect Repellent Data Entry Form" formulations = ['Aerosol', 'Pump spray', 'Lotion','Towelette'] amount_ai_formulations_form_map = defaultdict(dict) for sunscreen_status in ['without','with']: for formulation in formulations: amount_ai_formulations_form_map[sunscreen_status][formulation] = "%s repellent %s sunscreen" %(formulation, sunscreen_status) def __init__(self,*args,**kwargs): super(InsectRepellentsForm,self).__init__(*args,**kwargs) for sunscreen_status in ['without','with']: for formulation in InsectRepellentsForm.formulations: self.fields[InsectRepellentsForm.amount_ai_formulations_form_map[sunscreen_status][formulation]] = forms.FloatField(required=False,initial=0, min_value=0.,max_value=1., label = "Fraction of ai in %s repellent %s sunscreen (mg ai / mg product)"%(formulation,sunscreen_status)) class PaintsAndPreservativesForm(forms.Form): title = "Paints and Preservatives Data Entry Form" surface_residue_concentration = forms.FloatField(required=False,initial=0, min_value=0., label="Surface Residue Concentration (mg ai/cm^2)") DEFAULT_FRACTION_OF_BODY_EXPOSED = 0.31 
DEFAULT_DAILY_MATERIAL_TO_SKIN_TRANSFER_EFFICENCY = 0.14 EXPOSURE_TIME = {'indoor':4., 'outdoor':1.5} HAND_TO_MOUTH_EVENTS_PER_HOUR = {'indoor':20., 'outdoor':13.9} indoor_or_outdoor = forms.ChoiceField(choices=[('indoor','Indoor'),('outdoor','Outdoor')], initial='indoor', label="Location of interest (indoor/outdoor)") class ImpregnatedMaterialsForm(forms.Form): title = "Impregnated Materials Data Entry Form" surface_residue_concentration = forms.FloatField(required=False) weight_fraction_of_active_ingredient = forms.FloatField(required=False) MATERIAL_CHOICES = [('cotton', 'Cotton'), ('light_cotton_synthetic_mix', 'Light Cotton/Synthetic Mix'), ('heavy_cotton_synthetic_mix','Heavy Cotton/Synthetic Mix'),('all_synthetics','All Synthetics'),('household_carpets','Household Carpets'),('plastic_polymers','Plastic Polymers'), ('vinyl_flooring','Vinyl Flooring')] material_type = forms.ChoiceField(choices=MATERIAL_CHOICES,required=False) MATERIAL_CHOICES_DICT = {} for choice in MATERIAL_CHOICES: MATERIAL_CHOICES_DICT[choice[0]]=choice[1] MATERIAL_WEIGHT_TO_SURFACE_AREA_DENSITY = {'cotton': 20., 'light_cotton_synthetic_mix': 10., 'heavy_cotton_synthetic_mix':24.,'all_synthetics':1.,'household_carpets':120.,'plastic_polymers':100., 'vinyl_flooring':40.} #DERMAL BODY_FRACTION_CHOICES = [('pants_jacket_shirt','Pants, Jacket, or Shirts'), ('total', 'Total Body Coverage'), ('floor', 'Mattresses, Carpets or Flooring'), ('handlers','Handlers')] BODY_FRACTION_CHOICES_DICT = {} for choice in BODY_FRACTION_CHOICES: BODY_FRACTION_CHOICES_DICT[choice[0]]=choice[1] body_fraction_exposed_type = forms.ChoiceField(choices=BODY_FRACTION_CHOICES,required=True) BODY_FRACTION_EXPOSED = {'pants_jacket_shirt':0.5, 'total':1, 'floor':0.5, 'handlers':0.11} protective_barrier_present = forms.ChoiceField(choices=[('no','No'),('yes','Yes')],required=True,initial='no', label = "Is there a potential protective barried present (such as bed sheets or other fabrics)?") PROTECTION_FACTOR = 
{'no':1,'yes':0.5} #HtM TYPE_OF_FLOORING_CHOICES = [('',''), ('carpet','Carpet or Textiles'), ('hard', 'Hard Surface or Flooring')] TYPE_OF_FLOORING_CHOICES_DICT = {} for choice in TYPE_OF_FLOORING_CHOICES: TYPE_OF_FLOORING_CHOICES_DICT[choice[0]]=choice[1] type_of_flooring = forms.ChoiceField(choices=TYPE_OF_FLOORING_CHOICES ,required=False) FRACTION_AI_HAND_TRANSFER = {'':0., 'carpet':0.06,'hard':0.08} FLOOR_EXPOSURE_TIME = {'':0., 'carpet':4.,'hard':2.} DEFAULT_FRACTION_OF_BODY_EXPOSED = 0.31 type_of_flooring = forms.ChoiceField(choices=[('',''), ('carpet','Carpet'), ('hard', 'Hard Surface')] ,required=False) DEFAULT_DAILY_MATERIAL_TO_SKIN_TRANSFER_EFFICENCY = 0.14 EXPOSURE_TIME = {'indoor':4., 'outdoor':1.5} HAND_TO_MOUTH_EVENTS_PER_HOUR = {'indoor':20., 'outdoor':13.9} indoor_or_outdoor = forms.ChoiceField(choices=[('indoor','Indoor'),('outdoor','Outdoor')], initial='indoor', label="Location of interest (indoor/outdoor)") #daily_material_to_skin_transfer_efficency = forms.FloatField(required=False,initial=0.14) #OtM FRACTION_AI_HAND_TRANSFER = {'':0., 'carpet':0.06,'hard':0.08} OBJECT_TO_MOUTH_EVENTS_PER_HOUR = {'':14.,'indoor':14., 'outdoor':8.8} class IndoorEnvironmentsForm(forms.Form): title = "Indoor Environments Data Entry Form" space_spray_fraction_ai = forms.FloatField(required=False,initial=0, min_value=0.,max_value=1.,label="Fraction of ai in Aerosol Space Sprays (0-1)") space_spray_amount_of_product = forms.FloatField(required=False,initial=0, min_value=0.,label="Amount of product in Aerosol Space Spray can (g/can)") SPACE_SPRAY_RESTRICTION_CHOICES = [('NA','Not Applicable')] + [ (t/60., "%s minutes"%t) for t in [0,5,10,15,20,30,40,60,120]] space_spray_restriction = forms.ChoiceField(choices=SPACE_SPRAY_RESTRICTION_CHOICES) molecular_weight = forms.FloatField(required=False,initial=0, min_value=0.,label="Molecular weight (g/mol)") vapor_pressure = forms.FloatField(required=False,initial=0, min_value=0.,label="Vapor pressure (mmHg)") broadcast_residue 
= forms.FloatField(required=False,initial=0, min_value=0.,label="Residue deposited on broadcast (ug/cm^2)") coarse_residue = forms.FloatField(required=False,initial=0, min_value=0.,label="Residue deposited on perimeter/spot/bedbug (coarse) (ug/cm^2)") pin_stream_residue = forms.FloatField(required=False,initial=0, min_value=0.,label="Residue deposited on perimeter/spot/bedbug (pin stream) (ug/cm^2)") crack_and_crevice_residue = forms.FloatField(required=False,initial=0, min_value=0.,label="Residue deposited on cracks and crevices (ug/cm^2)") foggers_residue = forms.FloatField(required=False,initial=0, min_value=0.,label="Residue deposited by foggers (ug/cm^2)") space_sprays_residue = forms.FloatField(required=False,initial=0, min_value=0.,label="Residue deposited by space sprays (ug/cm^2)") matress_residue = forms.FloatField(required=False,initial=0, min_value=0.,label="Residue deposited on mattress (ug/cm^2)") class OutdoorMistingForm(forms.Form): title = "Outdoor Misting Data Entry Form" #OASS OASS_fraction_ai = forms.FloatField(required=False,initial=0, min_value=0.,max_value=1.,label="Fraction of ai in Outdoor Aerosol Space Sprays (0-1)") OASS_amount_of_product_in_can = forms.FloatField(required=False,initial=0, min_value=0.,label="Amount of product in Outdoor Aerosol Space Spray can (g/can)") # CCTM CCTM_amount_ai_in_product = forms.FloatField(required=False,initial=0, min_value=0.,label="Amount ai in Candles, Coils, Torches, and/or Mats (mg ai/product)") # ORMS #product app rate on label: ORMS_application_rate = forms.FloatField(required=False,initial=0, min_value=0.,label="Application rate in Outdoor Residential Misting System(oz/1000 cu.ft.)") #else ORMS_dilution_rate = forms.FloatField(required=False,initial=0, min_value=0.,max_value=1.,label="Dilution rate in Outdoor Residential Misting System (vol product/vol total solution) (0-1)") ORMS_fraction_ai = forms.FloatField(required=False,initial=0, min_value=0.,max_value=1.,label="Fraction of ai in Outdoor 
Residential Misting System (0-1)") # AB #product app rate on label: AB_application_rate = forms.FloatField(required=False,initial=0, min_value=0.,label="Application rate in Animal Barns(oz/1000 cu.ft.)") #else AB_dilution_rate = forms.FloatField(required=False,initial=0, min_value=0.,max_value=1.,label="Dilution rate in Animal Barns (vol product/vol total solution) (0-1)") AB_fraction_ai = forms.FloatField(required=False,initial=0, min_value=0.,max_value=1.,label="Fraction of ai in Animal Barns (0-1)") class OutdoorMistingGeneralHandlerForm(forms.Form): title = "Outdoor Misting General Handler Data Entry Form" OASS_fraction_ai = forms.FloatField(required=False,initial=0, min_value=0.,max_value=1.,label="Fraction of ai in Outdoor Aerosol Space Sprays (0-1)") OASS_amount_of_product_in_can = forms.FloatField(required=False,initial=0, min_value=0.,label="Amount of product in Outdoor Aerosol Space Spray can (g/can)") # ORMS #product app rate on label: ORMS_DRUM_CHOICES = [(30,'30 gallons'), (55, '55 gallons')] ORMS_drum_size = forms.ChoiceField(choices=ORMS_DRUM_CHOICES,required=False, initial=55, label="Outdoor Residential Misting System Drum Size") ORMS_application_rate = forms.FloatField(required=False,initial=0, min_value=0.,label="Application rate in Outdoor Residential Misting System(oz/1000 cu.ft.)") #else ORMS_dilution_rate = forms.FloatField(required=False,initial=0, min_value=0.,label="Dilution rate in Outdoor Residential Misting System (vol product/vol total solution)") ORMS_fraction_ai = forms.FloatField(required=False,initial=0, min_value=0.,max_value=1.,label="Fraction of ai in Outdoor Residential Misting System (0-1)") # AB #product app rate on label: AB_DRUM_CHOICES = [(30,'30 gallons'), (55, '55 gallons'), (125, '125 gallons')] AB_drum_size = forms.ChoiceField(choices=AB_DRUM_CHOICES,required=False, initial=55, label="Animal Barn Drum Size" ) #else AB_dilution_rate = forms.FloatField(required=False,initial=0, min_value=0.,label="Dilution rate in Animal 
Barns (vol product/vol total solution)") AB_fraction_ai = forms.FloatField(required=False,initial=0, min_value=0.,max_value=1.,label="Fraction of ai in Animal Barns (0-1)")
agpl-3.0
petrus-v/odoo
addons/website_event/controllers/main.py
209
11805
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import babel.dates import time from datetime import datetime, timedelta from dateutil.relativedelta import relativedelta import werkzeug.urls from werkzeug.exceptions import NotFound from openerp import http from openerp import tools from openerp.http import request from openerp.tools.translate import _ from openerp.addons.website.models.website import slug class website_event(http.Controller): @http.route(['/event', '/event/page/<int:page>'], type='http', auth="public", website=True) def events(self, page=1, **searches): cr, uid, context = request.cr, request.uid, request.context event_obj = request.registry['event.event'] type_obj = request.registry['event.type'] country_obj = request.registry['res.country'] searches.setdefault('date', 'all') searches.setdefault('type', 'all') searches.setdefault('country', 'all') domain_search = {} def sdn(date): return date.strftime('%Y-%m-%d 23:59:59') def sd(date): return date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT) today = datetime.today() dates = [ ['all', _('Next Events'), 
[("date_end", ">", sd(today))], 0], ['today', _('Today'), [ ("date_end", ">", sd(today)), ("date_begin", "<", sdn(today))], 0], ['week', _('This Week'), [ ("date_end", ">=", sd(today + relativedelta(days=-today.weekday()))), ("date_begin", "<", sdn(today + relativedelta(days=6-today.weekday())))], 0], ['nextweek', _('Next Week'), [ ("date_end", ">=", sd(today + relativedelta(days=7-today.weekday()))), ("date_begin", "<", sdn(today + relativedelta(days=13-today.weekday())))], 0], ['month', _('This month'), [ ("date_end", ">=", sd(today.replace(day=1))), ("date_begin", "<", (today.replace(day=1) + relativedelta(months=1)).strftime('%Y-%m-%d 00:00:00'))], 0], ['nextmonth', _('Next month'), [ ("date_end", ">=", sd(today.replace(day=1) + relativedelta(months=1))), ("date_begin", "<", (today.replace(day=1) + relativedelta(months=2)).strftime('%Y-%m-%d 00:00:00'))], 0], ['old', _('Old Events'), [ ("date_end", "<", today.strftime('%Y-%m-%d 00:00:00'))], 0], ] # search domains current_date = None current_type = None current_country = None for date in dates: if searches["date"] == date[0]: domain_search["date"] = date[2] if date[0] != 'all': current_date = date[1] if searches["type"] != 'all': current_type = type_obj.browse(cr, uid, int(searches['type']), context=context) domain_search["type"] = [("type", "=", int(searches["type"]))] if searches["country"] != 'all' and searches["country"] != 'online': current_country = country_obj.browse(cr, uid, int(searches['country']), context=context) domain_search["country"] = ['|', ("country_id", "=", int(searches["country"])), ("country_id", "=", False)] elif searches["country"] == 'online': domain_search["country"] = [("country_id", "=", False)] def dom_without(without): domain = [('state', "in", ['draft','confirm','done'])] for key, search in domain_search.items(): if key != without: domain += search return domain # count by domains without self search for date in dates: if date[0] <> 'old': date[3] = event_obj.search( request.cr, 
request.uid, dom_without('date') + date[2], count=True, context=request.context) domain = dom_without('type') types = event_obj.read_group( request.cr, request.uid, domain, ["id", "type"], groupby="type", orderby="type", context=request.context) type_count = event_obj.search(request.cr, request.uid, domain, count=True, context=request.context) types.insert(0, { 'type_count': type_count, 'type': ("all", _("All Categories")) }) domain = dom_without('country') countries = event_obj.read_group( request.cr, request.uid, domain, ["id", "country_id"], groupby="country_id", orderby="country_id", context=request.context) country_id_count = event_obj.search(request.cr, request.uid, domain, count=True, context=request.context) countries.insert(0, { 'country_id_count': country_id_count, 'country_id': ("all", _("All Countries")) }) step = 10 # Number of events per page event_count = event_obj.search( request.cr, request.uid, dom_without("none"), count=True, context=request.context) pager = request.website.pager( url="/event", url_args={'date': searches.get('date'), 'type': searches.get('type'), 'country': searches.get('country')}, total=event_count, page=page, step=step, scope=5) order = 'website_published desc, date_begin' if searches.get('date','all') == 'old': order = 'website_published desc, date_begin desc' obj_ids = event_obj.search( request.cr, request.uid, dom_without("none"), limit=step, offset=pager['offset'], order=order, context=request.context) events_ids = event_obj.browse(request.cr, request.uid, obj_ids, context=request.context) values = { 'current_date': current_date, 'current_country': current_country, 'current_type': current_type, 'event_ids': events_ids, 'dates': dates, 'types': types, 'countries': countries, 'pager': pager, 'searches': searches, 'search_path': "?%s" % werkzeug.url_encode(searches), } return request.website.render("website_event.index", values) @http.route(['/event/<model("event.event"):event>/page/<path:page>'], type='http', auth="public", 
website=True) def event_page(self, event, page, **post): values = { 'event': event, 'main_object': event } if '.' not in page: page = 'website_event.%s' % page try: request.website.get_template(page) except ValueError, e: # page not found raise NotFound return request.website.render(page, values) @http.route(['/event/<model("event.event"):event>'], type='http', auth="public", website=True) def event(self, event, **post): if event.menu_id and event.menu_id.child_id: target_url = event.menu_id.child_id[0].url else: target_url = '/event/%s/register' % str(event.id) if post.get('enable_editor') == '1': target_url += '?enable_editor=1' return request.redirect(target_url); @http.route(['/event/<model("event.event"):event>/register'], type='http', auth="public", website=True) def event_register(self, event, **post): values = { 'event': event, 'main_object': event, 'range': range, } return request.website.render("website_event.event_description_full", values) @http.route('/event/add_event', type='http', auth="user", methods=['POST'], website=True) def add_event(self, event_name="New Event", **kwargs): return self._add_event(event_name, request.context, **kwargs) def _add_event(self, event_name=None, context={}, **kwargs): if not event_name: event_name = _("New Event") Event = request.registry.get('event.event') date_begin = datetime.today() + timedelta(days=(14)) vals = { 'name': event_name, 'date_begin': date_begin.strftime('%Y-%m-%d'), 'date_end': (date_begin + timedelta(days=(1))).strftime('%Y-%m-%d'), } event_id = Event.create(request.cr, request.uid, vals, context=context) event = Event.browse(request.cr, request.uid, event_id, context=context) return request.redirect("/event/%s/register?enable_editor=1" % slug(event)) def get_formated_date(self, event): context = request.context start_date = datetime.strptime(event.date_begin, tools.DEFAULT_SERVER_DATETIME_FORMAT).date() end_date = datetime.strptime(event.date_end, tools.DEFAULT_SERVER_DATETIME_FORMAT).date() month = 
babel.dates.get_month_names('abbreviated', locale=context.get('lang', 'en_US'))[start_date.month] return _('%(month)s %(start_day)s%(end_day)s') % { 'month': month, 'start_day': start_date.strftime("%e"), 'end_day': (end_date != start_date and ("-"+end_date.strftime("%e")) or "") } @http.route('/event/get_country_event_list', type='http', auth='public', website=True) def get_country_events(self ,**post): cr, uid, context, event_ids = request.cr, request.uid, request.context,[] country_obj = request.registry['res.country'] event_obj = request.registry['event.event'] country_code = request.session['geoip'].get('country_code') result = {'events':[],'country':False} if country_code: country_ids = country_obj.search(cr, uid, [('code', '=', country_code)], context=context) event_ids = event_obj.search(cr, uid, ['|', ('address_id', '=', None),('country_id.code', '=', country_code),('date_begin','>=', time.strftime('%Y-%m-%d 00:00:00')),('state', '=', 'confirm')], order="date_begin", context=context) if not event_ids: event_ids = event_obj.search(cr, uid, [('date_begin','>=', time.strftime('%Y-%m-%d 00:00:00')),('state', '=', 'confirm')], order="date_begin", context=context) for event in event_obj.browse(cr, uid, event_ids, context=context)[:6]: if country_code and event.country_id.code == country_code: result['country'] = country_obj.browse(cr, uid, country_ids[0], context=context) result['events'].append({ "date": self.get_formated_date(event), "event": event, "url": event.website_url}) return request.website.render("website_event.country_events_list",result)
agpl-3.0
iceroad/baresoil-benchmark-image-resizer
lambda/src/PIL/TgaImagePlugin.py
15
4942
# # The Python Imaging Library. # $Id$ # # TGA file handling # # History: # 95-09-01 fl created (reads 24-bit files only) # 97-01-04 fl support more TGA versions, including compressed images # 98-07-04 fl fixed orientation and alpha layer bugs # 98-09-11 fl fixed orientation for runlength decoder # # Copyright (c) Secret Labs AB 1997-98. # Copyright (c) Fredrik Lundh 1995-97. # # See the README file for information on usage and redistribution. # from . import Image, ImageFile, ImagePalette from ._binary import i8, i16le as i16, o8, o16le as o16 __version__ = "0.3" # # -------------------------------------------------------------------- # Read RGA file MODES = { # map imagetype/depth to rawmode (1, 8): "P", (3, 1): "1", (3, 8): "L", (2, 16): "BGR;5", (2, 24): "BGR", (2, 32): "BGRA", } ## # Image plugin for Targa files. class TgaImageFile(ImageFile.ImageFile): format = "TGA" format_description = "Targa" def _open(self): # process header s = self.fp.read(18) idlen = i8(s[0]) colormaptype = i8(s[1]) imagetype = i8(s[2]) depth = i8(s[16]) flags = i8(s[17]) self.size = i16(s[12:]), i16(s[14:]) # validate header fields if colormaptype not in (0, 1) or\ self.size[0] <= 0 or self.size[1] <= 0 or\ depth not in (1, 8, 16, 24, 32): raise SyntaxError("not a TGA file") # image mode if imagetype in (3, 11): self.mode = "L" if depth == 1: self.mode = "1" # ??? 
elif imagetype in (1, 9): self.mode = "P" elif imagetype in (2, 10): self.mode = "RGB" if depth == 32: self.mode = "RGBA" else: raise SyntaxError("unknown TGA mode") # orientation orientation = flags & 0x30 if orientation == 0x20: orientation = 1 elif not orientation: orientation = -1 else: raise SyntaxError("unknown TGA orientation") self.info["orientation"] = orientation if imagetype & 8: self.info["compression"] = "tga_rle" if idlen: self.info["id_section"] = self.fp.read(idlen) if colormaptype: # read palette start, size, mapdepth = i16(s[3:]), i16(s[5:]), i16(s[7:]) if mapdepth == 16: self.palette = ImagePalette.raw( "BGR;16", b"\0"*2*start + self.fp.read(2*size)) elif mapdepth == 24: self.palette = ImagePalette.raw( "BGR", b"\0"*3*start + self.fp.read(3*size)) elif mapdepth == 32: self.palette = ImagePalette.raw( "BGRA", b"\0"*4*start + self.fp.read(4*size)) # setup tile descriptor try: rawmode = MODES[(imagetype & 7, depth)] if imagetype & 8: # compressed self.tile = [("tga_rle", (0, 0)+self.size, self.fp.tell(), (rawmode, orientation, depth))] else: self.tile = [("raw", (0, 0)+self.size, self.fp.tell(), (rawmode, 0, orientation))] except KeyError: pass # cannot decode # # -------------------------------------------------------------------- # Write TGA file SAVE = { "1": ("1", 1, 0, 3), "L": ("L", 8, 0, 3), "P": ("P", 8, 1, 1), "RGB": ("BGR", 24, 0, 2), "RGBA": ("BGRA", 32, 0, 2), } def _save(im, fp, filename, check=0): try: rawmode, bits, colormaptype, imagetype = SAVE[im.mode] except KeyError: raise IOError("cannot write mode %s as TGA" % im.mode) if check: return check if colormaptype: colormapfirst, colormaplength, colormapentry = 0, 256, 24 else: colormapfirst, colormaplength, colormapentry = 0, 0, 0 if im.mode == "RGBA": flags = 8 else: flags = 0 orientation = im.info.get("orientation", -1) if orientation > 0: flags = flags | 0x20 fp.write(b"\000" + o8(colormaptype) + o8(imagetype) + o16(colormapfirst) + o16(colormaplength) + o8(colormapentry) + o16(0) 
+ o16(0) + o16(im.size[0]) + o16(im.size[1]) + o8(bits) + o8(flags)) if colormaptype: fp.write(im.im.getpalette("RGB", "BGR")) ImageFile._save( im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, orientation))]) # # -------------------------------------------------------------------- # Registry Image.register_open(TgaImageFile.format, TgaImageFile) Image.register_save(TgaImageFile.format, _save) Image.register_extension(TgaImageFile.format, ".tga")
apache-2.0
jmennen/group5
Code/buzzit/buzzit_messaging/urls.py
1
2558
__author__ = 'User' from django.conf.urls import url from . import views urlpatterns = [ url(r'^myfollowers/$', views.beingFollowedByView, name="my_followers"), url(r'^circle/new/$', views.CreateCircleView.as_view(), name="new_circle"), url(r'^circle/(?P<slug>[0-9]+)/$', views.circleDetails, name="circle_details"), url(r'^circle/(?P<circle_id>[0-9]+)/addusers/$', views.add_users_to_circle, name="add_users_to_circle"), url(r'^circle/(?P<user_id>[0-9]+)/adduser/$', views.add_user_to_circles, name="add_user_to_circles"), url(r'^circle/(?P<user_id>[0-9]+)/(?P<circle_id>[0-9]+)/removeuser/$', views.remove_user_from_circle, name="remove_user_from_circle"), url(r'^circle/(?P<slug>[0-9]+)/delete/$', views.RemoveCircleView, name="delete_circle"), url(r'^circles/$', views.CircleOverviewView.as_view(), name="circle_overview"), url(r'^follows/$', views.listfollows, name="list_follows"), url(r'^circlemessage/new/$', views.postCirclemessage, name="new_circlemessage"), url(r'^circlemessage/(?P<message_id>[0-9]+)/delete/$', views.delete_circle_message, name="delete_circlemessage"), url(r'^follow/(?P<user_id>[0-9]+)/$', views.follow, name="follow"), url(r'^unfollow/(?P<user_id>[0-9]+)/$', views.unfollow, name="unfollow"), # new since sprint 4 url(r'^are_there_new_notifications/$', views.information_about_new_directmessages, name="notification_polling"), url(r'^circle/message/(?P<slug>[0-9]+)/$', views.PostDetailsView.as_view(), name="one_circlemessage"), #url(r'^circle/message/(?P<message_id>[0-9]+)/answer$', views.answer_to_circlemessage, name="answer_circlemessage"), url(r'^circle/message/(?P<message_id>[0-9]+)/repost$', views.repost, name="repost_circlemessage"), url(r'^chat/(?P<sender_id>[a-zA-Z0-9]+)/$', views.direct_messages_details, name="chat"), url(r'^chat/(?P<username>[a-zA-Z0-9]+)/poll/json$', views.chat_polling, name="chat_polling"), url(r'^chats/$', views.direct_messages_overview, name="all_chats"), url(r'^search/user/(?P<query>[a-zA-Z0-9]+)/json$', 
views.search_user_json, name="search_user_json"), url(r'^search/theme/(?P<query>[a-zA-Z0-9]+)/json$', views.search_theme_json, name="search_theme_json"), url(r'^search/theme/(?P<theme>[a-zA-Z0-9]+)', views.showPostsToTheTheme, name="search_theme"), #new sind sprint 5 url(r'^circlemessage/new/json$', views.postCirclemessage_json, name="new_circlemessage_json"), url(r'^circlemessage/getall/json$', views.get_all_circlemessages_json, name="get_circlemessages_json"), ]
bsd-2-clause
aeklant/scipy
scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py
10
24909
import time import numpy as np from scipy.sparse.linalg import LinearOperator from .._differentiable_functions import VectorFunction from .._constraints import ( NonlinearConstraint, LinearConstraint, PreparedConstraint, strict_bounds) from .._hessian_update_strategy import BFGS from ..optimize import OptimizeResult from .._differentiable_functions import ScalarFunction from .equality_constrained_sqp import equality_constrained_sqp from .canonical_constraint import (CanonicalConstraint, initial_constraints_as_canonical) from .tr_interior_point import tr_interior_point from .report import BasicReport, SQPReport, IPReport TERMINATION_MESSAGES = { 0: "The maximum number of function evaluations is exceeded.", 1: "`gtol` termination condition is satisfied.", 2: "`xtol` termination condition is satisfied.", 3: "`callback` function requested termination." } class HessianLinearOperator(object): """Build LinearOperator from hessp""" def __init__(self, hessp, n): self.hessp = hessp self.n = n def __call__(self, x, *args): def matvec(p): return self.hessp(x, p, *args) return LinearOperator((self.n, self.n), matvec=matvec) class LagrangianHessian(object): """The Hessian of the Lagrangian as LinearOperator. The Lagrangian is computed as the objective function plus all the constraints multiplied with some numbers (Lagrange multipliers). 
""" def __init__(self, n, objective_hess, constraints_hess): self.n = n self.objective_hess = objective_hess self.constraints_hess = constraints_hess def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)): H_objective = self.objective_hess(x) H_constraints = self.constraints_hess(x, v_eq, v_ineq) def matvec(p): return H_objective.dot(p) + H_constraints.dot(p) return LinearOperator((self.n, self.n), matvec) def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints, start_time, tr_radius, constr_penalty, cg_info): state.nit += 1 state.nfev = objective.nfev state.njev = objective.ngev state.nhev = objective.nhev state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0 for c in prepared_constraints] state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0 for c in prepared_constraints] state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0 for c in prepared_constraints] if not last_iteration_failed: state.x = x state.fun = objective.f state.grad = objective.g state.v = [c.fun.v for c in prepared_constraints] state.constr = [c.fun.f for c in prepared_constraints] state.jac = [c.fun.J for c in prepared_constraints] # Compute Lagrangian Gradient state.lagrangian_grad = np.copy(state.grad) for c in prepared_constraints: state.lagrangian_grad += c.fun.J.T.dot(c.fun.v) state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf) # Compute maximum constraint violation state.constr_violation = 0 for i in range(len(prepared_constraints)): lb, ub = prepared_constraints[i].bounds c = state.constr[i] state.constr_violation = np.max([state.constr_violation, np.max(lb - c), np.max(c - ub)]) state.execution_time = time.time() - start_time state.tr_radius = tr_radius state.constr_penalty = constr_penalty state.cg_niter += cg_info["niter"] state.cg_stop_cond = cg_info["stop_cond"] return state def update_state_ip(state, x, last_iteration_failed, objective, prepared_constraints, start_time, 
tr_radius, constr_penalty, cg_info, barrier_parameter, barrier_tolerance): state = update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints, start_time, tr_radius, constr_penalty, cg_info) state.barrier_parameter = barrier_parameter state.barrier_tolerance = barrier_tolerance return state def _minimize_trustregion_constr(fun, x0, args, grad, hess, hessp, bounds, constraints, xtol=1e-8, gtol=1e-8, barrier_tol=1e-8, sparse_jacobian=None, callback=None, maxiter=1000, verbose=0, finite_diff_rel_step=None, initial_constr_penalty=1.0, initial_tr_radius=1.0, initial_barrier_parameter=0.1, initial_barrier_tolerance=0.1, factorization_method=None, disp=False): """Minimize a scalar function subject to constraints. Parameters ---------- gtol : float, optional Tolerance for termination by the norm of the Lagrangian gradient. The algorithm will terminate when both the infinity norm (i.e., max abs value) of the Lagrangian gradient and the constraint violation are smaller than ``gtol``. Default is 1e-8. xtol : float, optional Tolerance for termination by the change of the independent variable. The algorithm will terminate when ``tr_radius < xtol``, where ``tr_radius`` is the radius of the trust region used in the algorithm. Default is 1e-8. barrier_tol : float, optional Threshold on the barrier parameter for the algorithm termination. When inequality constraints are present, the algorithm will terminate only when the barrier parameter is less than `barrier_tol`. Default is 1e-8. sparse_jacobian : {bool, None}, optional Determines how to represent Jacobians of the constraints. If bool, then Jacobians of all the constraints will be converted to the corresponding format. If None (default), then Jacobians won't be converted, but the algorithm can proceed only if they all have the same format. initial_tr_radius: float, optional Initial trust radius. The trust radius gives the maximum distance between solution points in consecutive iterations. 
It reflects the trust the algorithm puts in the local approximation of the optimization problem. For an accurate local approximation the trust-region should be large and for an approximation valid only close to the current point it should be a small one. The trust radius is automatically updated throughout the optimization process, with ``initial_tr_radius`` being its initial value. Default is 1 (recommended in [1]_, p. 19). initial_constr_penalty : float, optional Initial constraints penalty parameter. The penalty parameter is used for balancing the requirements of decreasing the objective function and satisfying the constraints. It is used for defining the merit function: ``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``, where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all the constraints. The merit function is used for accepting or rejecting trial points and ``constr_penalty`` weights the two conflicting goals of reducing objective function and constraints. The penalty is automatically updated throughout the optimization process, with ``initial_constr_penalty`` being its initial value. Default is 1 (recommended in [1]_, p 19). initial_barrier_parameter, initial_barrier_tolerance: float, optional Initial barrier parameter and initial tolerance for the barrier subproblem. Both are used only when inequality constraints are present. For dealing with optimization problems ``min_x f(x)`` subject to inequality constraints ``c(x) <= 0`` the algorithm introduces slack variables, solving the problem ``min_(x,s) f(x) + barrier_parameter*sum(ln(s))`` subject to the equality constraints ``c(x) + s = 0`` instead of the original problem. This subproblem is solved for decreasing values of ``barrier_parameter`` and with decreasing tolerances for the termination, starting with ``initial_barrier_parameter`` for the barrier parameter and ``initial_barrier_tolerance`` for the barrier tolerance. 
Default is 0.1 for both values (recommended in [1]_ p. 19). Also note that ``barrier_parameter`` and ``barrier_tolerance`` are updated with the same prefactor. factorization_method : string or None, optional Method to factorize the Jacobian of the constraints. Use None (default) for the auto selection or one of: - 'NormalEquation' (requires scikit-sparse) - 'AugmentedSystem' - 'QRFactorization' - 'SVDFactorization' The methods 'NormalEquation' and 'AugmentedSystem' can be used only with sparse constraints. The projections required by the algorithm will be computed using, respectively, the the normal equation and the augmented system approaches explained in [1]_. 'NormalEquation' computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem' performs the LU factorization of an augmented system. They usually provide similar results. 'AugmentedSystem' is used by default for sparse matrices. The methods 'QRFactorization' and 'SVDFactorization' can be used only with dense constraints. They compute the required projections using, respectively, QR and SVD factorizations. The 'SVDFactorization' method can cope with Jacobian matrices with deficient row rank and will be used whenever other factorization methods fail (which may imply the conversion of sparse matrices to a dense format when required). By default, 'QRFactorization' is used for dense matrices. finite_diff_rel_step : None or array_like, optional Relative step size for the finite difference approximation. maxiter : int, optional Maximum number of algorithm iterations. Default is 1000. verbose : {0, 1, 2}, optional Level of algorithm's verbosity: * 0 (default) : work silently. * 1 : display a termination report. * 2 : display progress during iterations. * 3 : display progress during iterations (more complete report). disp : bool, optional If True (default), then `verbose` will be set to 1 if it was 0. Returns ------- `OptimizeResult` with the fields documented below. Note the following: 1. 
All values corresponding to the constraints are ordered as they were passed to the solver. And values corresponding to `bounds` constraints are put *after* other constraints. 2. All numbers of function, Jacobian or Hessian evaluations correspond to numbers of actual Python function calls. It means, for example, that if a Jacobian is estimated by finite differences, then the number of Jacobian evaluations will be zero and the number of function evaluations will be incremented by all calls during the finite difference estimation. x : ndarray, shape (n,) Solution found. optimality : float Infinity norm of the Lagrangian gradient at the solution. constr_violation : float Maximum constraint violation at the solution. fun : float Objective function at the solution. grad : ndarray, shape (n,) Gradient of the objective function at the solution. lagrangian_grad : ndarray, shape (n,) Gradient of the Lagrangian function at the solution. nit : int Total number of iterations. nfev : integer Number of the objective function evaluations. njev : integer Number of the objective function gradient evaluations. nhev : integer Number of the objective function Hessian evaluations. cg_niter : int Total number of the conjugate gradient method iterations. method : {'equality_constrained_sqp', 'tr_interior_point'} Optimization method used. constr : list of ndarray List of constraint values at the solution. jac : list of {ndarray, sparse matrix} List of the Jacobian matrices of the constraints at the solution. v : list of ndarray List of the Lagrange multipliers for the constraints at the solution. For an inequality constraint a positive multiplier means that the upper bound is active, a negative multiplier means that the lower bound is active and if a multiplier is zero it means the constraint is not active. constr_nfev : list of int Number of constraint evaluations for each of the constraints. constr_njev : list of int Number of Jacobian matrix evaluations for each of the constraints. 
constr_nhev : list of int Number of Hessian evaluations for each of the constraints. tr_radius : float Radius of the trust region at the last iteration. constr_penalty : float Penalty parameter at the last iteration, see `initial_constr_penalty`. barrier_tolerance : float Tolerance for the barrier subproblem at the last iteration. Only for problems with inequality constraints. barrier_parameter : float Barrier parameter at the last iteration. Only for problems with inequality constraints. execution_time : float Total execution time. message : str Termination message. status : {0, 1, 2, 3} Termination status: * 0 : The maximum number of function evaluations is exceeded. * 1 : `gtol` termination condition is satisfied. * 2 : `xtol` termination condition is satisfied. * 3 : `callback` function requested termination. cg_stop_cond : int Reason for CG subproblem termination at the last iteration: * 0 : CG subproblem not evaluated. * 1 : Iteration limit was reached. * 2 : Reached the trust-region boundary. * 3 : Negative curvature detected. * 4 : Tolerance was satisfied. References ---------- .. [1] Conn, A. R., Gould, N. I., & Toint, P. L. Trust region methods. 2000. Siam. pp. 19. """ x0 = np.atleast_1d(x0).astype(float) n_vars = np.size(x0) if hess is None: if callable(hessp): hess = HessianLinearOperator(hessp, n_vars) else: hess = BFGS() if disp and verbose == 0: verbose = 1 if bounds is not None: finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub, bounds.keep_feasible, n_vars) else: finite_diff_bounds = (-np.inf, np.inf) # Define Objective Function objective = ScalarFunction(fun, x0, args, grad, hess, finite_diff_rel_step, finite_diff_bounds) # Put constraints in list format when needed. if isinstance(constraints, (NonlinearConstraint, LinearConstraint)): constraints = [constraints] # Prepare constraints. 
prepared_constraints = [ PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds) for c in constraints] # Check that all constraints are either sparse or dense. n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints) if 0 < n_sparse < len(prepared_constraints): raise ValueError("All constraints must have the same kind of the " "Jacobian --- either all sparse or all dense. " "You can set the sparsity globally by setting " "`sparse_jacobian` to either True of False.") if prepared_constraints: sparse_jacobian = n_sparse > 0 if bounds is not None: if sparse_jacobian is None: sparse_jacobian = True prepared_constraints.append(PreparedConstraint(bounds, x0, sparse_jacobian)) # Concatenate initial constraints to the canonical form. c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical( n_vars, prepared_constraints, sparse_jacobian) # Prepare all canonical constraints and concatenate it into one. canonical_all = [CanonicalConstraint.from_PreparedConstraint(c) for c in prepared_constraints] if len(canonical_all) == 0: canonical = CanonicalConstraint.empty(n_vars) elif len(canonical_all) == 1: canonical = canonical_all[0] else: canonical = CanonicalConstraint.concatenate(canonical_all, sparse_jacobian) # Generate the Hessian of the Lagrangian. 
lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess) # Choose appropriate method if canonical.n_ineq == 0: method = 'equality_constrained_sqp' else: method = 'tr_interior_point' # Construct OptimizeResult state = OptimizeResult( nit=0, nfev=0, njev=0, nhev=0, cg_niter=0, cg_stop_cond=0, fun=objective.f, grad=objective.g, lagrangian_grad=np.copy(objective.g), constr=[c.fun.f for c in prepared_constraints], jac=[c.fun.J for c in prepared_constraints], constr_nfev=[0 for c in prepared_constraints], constr_njev=[0 for c in prepared_constraints], constr_nhev=[0 for c in prepared_constraints], v=[c.fun.v for c in prepared_constraints], method=method) # Start counting start_time = time.time() # Define stop criteria if method == 'equality_constrained_sqp': def stop_criteria(state, x, last_iteration_failed, optimality, constr_violation, tr_radius, constr_penalty, cg_info): state = update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints, start_time, tr_radius, constr_penalty, cg_info) if verbose == 2: BasicReport.print_iteration(state.nit, state.nfev, state.cg_niter, state.fun, state.tr_radius, state.optimality, state.constr_violation) elif verbose > 2: SQPReport.print_iteration(state.nit, state.nfev, state.cg_niter, state.fun, state.tr_radius, state.optimality, state.constr_violation, state.constr_penalty, state.cg_stop_cond) state.status = None state.niter = state.nit # Alias for callback (backward-compatibility) if callback is not None and callback(np.copy(state.x), state): state.status = 3 elif state.optimality < gtol and state.constr_violation < gtol: state.status = 1 elif state.tr_radius < xtol: state.status = 2 elif state.nit >= maxiter: state.status = 0 return state.status in (0, 1, 2, 3) elif method == 'tr_interior_point': def stop_criteria(state, x, last_iteration_failed, tr_radius, constr_penalty, cg_info, barrier_parameter, barrier_tolerance): state = update_state_ip(state, x, last_iteration_failed, objective, 
prepared_constraints, start_time, tr_radius, constr_penalty, cg_info, barrier_parameter, barrier_tolerance) if verbose == 2: BasicReport.print_iteration(state.nit, state.nfev, state.cg_niter, state.fun, state.tr_radius, state.optimality, state.constr_violation) elif verbose > 2: IPReport.print_iteration(state.nit, state.nfev, state.cg_niter, state.fun, state.tr_radius, state.optimality, state.constr_violation, state.constr_penalty, state.barrier_parameter, state.cg_stop_cond) state.status = None state.niter = state.nit # Alias for callback (backward compatibility) if callback is not None and callback(np.copy(state.x), state): state.status = 3 elif state.optimality < gtol and state.constr_violation < gtol: state.status = 1 elif (state.tr_radius < xtol and state.barrier_parameter < barrier_tol): state.status = 2 elif state.nit >= maxiter: state.status = 0 return state.status in (0, 1, 2, 3) if verbose == 2: BasicReport.print_header() elif verbose > 2: if method == 'equality_constrained_sqp': SQPReport.print_header() elif method == 'tr_interior_point': IPReport.print_header() # Call inferior function to do the optimization if method == 'equality_constrained_sqp': def fun_and_constr(x): f = objective.fun(x) c_eq, _ = canonical.fun(x) return f, c_eq def grad_and_jac(x): g = objective.grad(x) J_eq, _ = canonical.jac(x) return g, J_eq _, result = equality_constrained_sqp( fun_and_constr, grad_and_jac, lagrangian_hess, x0, objective.f, objective.g, c_eq0, J_eq0, stop_criteria, state, initial_constr_penalty, initial_tr_radius, factorization_method) elif method == 'tr_interior_point': _, result = tr_interior_point( objective.fun, objective.grad, lagrangian_hess, n_vars, canonical.n_ineq, canonical.n_eq, canonical.fun, canonical.jac, x0, objective.f, objective.g, c_ineq0, J_ineq0, c_eq0, J_eq0, stop_criteria, canonical.keep_feasible, xtol, state, initial_barrier_parameter, initial_barrier_tolerance, initial_constr_penalty, initial_tr_radius, factorization_method) # Status 3 
occurs when the callback function requests termination, # this is assumed to not be a success. result.success = True if result.status in (1, 2) else False result.message = TERMINATION_MESSAGES[result.status] # Alias (for backward compatibility with 1.1.0) result.niter = result.nit if verbose == 2: BasicReport.print_footer() elif verbose > 2: if method == 'equality_constrained_sqp': SQPReport.print_footer() elif method == 'tr_interior_point': IPReport.print_footer() if verbose >= 1: print(result.message) print("Number of iterations: {}, function evaluations: {}, " "CG iterations: {}, optimality: {:.2e}, " "constraint violation: {:.2e}, execution time: {:4.2} s." .format(result.nit, result.nfev, result.cg_niter, result.optimality, result.constr_violation, result.execution_time)) return result
bsd-3-clause
mathieulavoie/Bitcluster
web/web.py
1
5465
#from web import app from web.dao import getNodeFromAddress, getNodeInformation, getTransations, groupByAllDistribution, groupbyNode, \ groupbyAmount, groupbyDate from flask import * import re import csv import io from datetime import datetime, timedelta app = Flask(__name__) @app.route('/',methods=['POST', 'GET']) def web_root(): if request.method == 'POST': address = request.form['q'] if address.isnumeric(): return redirect(url_for('get_node_request',node_id=address)) else: pattern = re.compile("^([1-9ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz])+$") if pattern.match(address): node_id = getNodeFromAddress(address) if node_id is not None: return redirect(url_for('get_node_request',node_id=node_id)) return render_template('index.html',message="Invalid or inexistant address") return render_template('index.html') @app.route('/nodes/<int:node_id>') def get_node_request(node_id): infos = getNodeInformation(node_id) limit =100 truncated_trx_in,trx_in = trim_collection(infos['transactions']['in'],limit) truncated_trx_out,trx_out = trim_collection(infos['transactions']['out'],limit) truncated_by_node_in,infos['incomes_grouped']['by_node'] = trim_collection(infos['incomes_grouped']['by_node'],limit) truncated_by_node_out,infos['outcomes_grouped']['by_node'] = trim_collection(infos['outcomes_grouped']['by_node'],limit) truncated_by_amount_in,infos['incomes_grouped']['by_amount']['amount_usd'] = trim_collection(infos['incomes_grouped']['by_amount']['amount_usd'],limit) truncated_by_amount_out,infos['outcomes_grouped']['by_amount']['amount_usd'] = trim_collection(infos['outcomes_grouped']['by_amount']['amount_usd'],limit) infos['transactions'] = {'in': trx_in, 'out':trx_out} return render_template('node_details.html',informations=infos, truncated=(truncated_trx_in or truncated_trx_out or truncated_by_node_in or truncated_by_node_out or truncated_by_amount_in or truncated_by_amount_out)) def trim_collection(collection, limit): if len(collection) > limit: return True, 
collection[0:limit] return False, collection @app.route('/nodes/<int:node_id>/download/json/<direction>') def download_transations_json(node_id,direction): if direction not in ["in","out"]: return Response(response="Invalid direction",status=500) transactions = getTransations(node_id,direction) grouped = groupByAllDistribution(transactions,direction) response = jsonify({"transactions":transactions, "groups":grouped}) response.headers['Content-disposition'] = "attachment;filename=transactions_%d_%s.json"% (node_id, direction) return response @app.route('/nodes/<int:node_id>/download/csv/<direction>') def download_transations_csv(node_id,direction): if direction not in ["in","out"]: return Response(response="Invalid direction",status=500) output = io.StringIO() fieldnames = ['trx_date','block_id','source_n_id','destination_n_id','amount', 'amount_usd','source','destination'] writer = csv.DictWriter(output, fieldnames=fieldnames) writer.writeheader() for trx in getTransations(node_id,direction): writer.writerow(trx) return Response( output.getvalue(), mimetype="text/csv", headers={"Content-disposition":"attachment; filename=transactions_%d_%s.csv"% (node_id, direction)}) @app.route('/nodes/<int:node_id>/download/csv/<direction>/<grouping>') def download_grouped_transactions(node_id,direction,grouping): if direction not in ["in","out"]: return Response(response="Invalid direction",status=500) output = io.StringIO() transactions = getTransations(node_id,direction) writer = csv.writer(output) if grouping == "by_node": writer.writerow(['node_id','amount_usd','amount_btc','transaction_count']) for k,v in groupbyNode(transactions,direction): writer.writerow([k,v['amount_usd'],v['amount_btc'],len(v['transactions'])]) elif grouping == "by_amount": writer.writerow(['amount_usd','frequency']) for k,v in groupbyAmount(transactions)['amount_usd']: writer.writerow([k,v]) elif grouping == "by_date": date_format = '%Y-%m-%d' sorted_by_date = groupbyDate(transactions) min_date = 
datetime.strptime(sorted_by_date[0][0],date_format) max_date = datetime.strptime(sorted_by_date[-1][0],date_format) delta = max_date - min_date index = 0 writer.writerow(['date','amount_usd','amount_btc','transaction_count']) for date in [min_date + timedelta(days=x) for x in range(0,delta.days+1)]: strdate = date.strftime(date_format) k,v = sorted_by_date[index] if k == strdate: writer.writerow([k,v['amount_usd'],v['amount_btc'],len(v['transactions'])]) index +=1 else: writer.writerow([strdate,0,0,0]) else: return Response(response="Invalid grouping. Possible options : by_node , by_amount , by_date",status=500) return Response( output.getvalue(), mimetype="text/csv", headers={"Content-disposition":"attachment; filename=transactions_%d_%s_%s.csv"% (node_id, direction,grouping)})
mit
alazyer/oscar
frobshop/oscar/apps/offer/migrations/0007_auto__add_field_conditionaloffer_max_global_applications.py
17
15124
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'ConditionalOffer.max_global_applications' db.add_column('offer_conditionaloffer', 'max_global_applications', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'ConditionalOffer.max_global_applications' db.delete_column('offer_conditionaloffer', 'max_global_applications') models = { 'catalogue.attributeentity': { 'Meta': {'object_name': 'AttributeEntity'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}), 'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"}) }, 'catalogue.attributeentitytype': { 'Meta': {'object_name': 'AttributeEntityType'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}) }, 'catalogue.attributeoption': { 'Meta': {'object_name': 'AttributeOption'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'option': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'catalogue.attributeoptiongroup': { 'Meta': {'object_name': 'AttributeOptionGroup'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'catalogue.category': { 'Meta': 
{'ordering': "['full_name']", 'object_name': 'Category'}, 'depth': ('django.db.models.fields.PositiveIntegerField', [], {}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1024'}) }, 'catalogue.option': { 'Meta': {'object_name': 'Option'}, 'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'}) }, 'catalogue.product': { 'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'}, 'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', 
[], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}), 'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}), 'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}), 'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}), 'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}), 'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}), 'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}) }, 'catalogue.productattribute': { 'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'}, 'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}), 'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': 
('django.db.models.fields.CharField', [], {'max_length': '128'}), 'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}), 'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}), 'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}) }, 'catalogue.productattributevalue': { 'Meta': {'object_name': 'ProductAttributeValue'}, 'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}), 'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}), 'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}), 'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}) }, 'catalogue.productcategory': { 'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'}, 'category': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}) }, 'catalogue.productclass': { 'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}), 'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}), 'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, 'catalogue.productrecommendation': { 'Meta': {'object_name': 'ProductRecommendation'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}), 'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}) }, 'offer.benefit': { 'Meta': {'object_name': 'Benefit'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max_affected_items': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']", 'null': 'True', 'blank': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'value': 
('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}) }, 'offer.condition': { 'Meta': {'object_name': 'Condition'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']"}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'value': ('oscar.models.fields.PositiveDecimalField', [], {'max_digits': '12', 'decimal_places': '2'}) }, 'offer.conditionaloffer': { 'Meta': {'ordering': "['-priority']", 'object_name': 'ConditionalOffer'}, 'benefit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Benefit']"}), 'condition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Condition']"}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'max_global_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}), 'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'offer_type': ('django.db.models.fields.CharField', [], {'default': "'Site'", 'max_length': '128'}), 'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'redirect_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'unique': 'True', 'null': 'True'}), 'start_date': 
('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}) }, 'offer.range': { 'Meta': {'object_name': 'Range'}, 'classes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'classes'", 'blank': 'True', 'to': "orm['catalogue.ProductClass']"}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'excluded_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'excludes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'included_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Category']"}), 'included_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}), 'includes_all_products': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}) } } complete_apps = ['offer']
gpl-2.0
NCI-Cloud/horizon
openstack_dashboard/local/dashboards/project_nci/instances/workflows/create_instance.py
1
41960
# openstack_dashboard.local.dashboards.project_nci.instances.workflows.create_instance
#
# Copyright (c) 2015, NCI, Australian National University.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""NCI customisations of the Horizon "Launch Instance" workflow.

Replaces several steps of the upstream workflow (instance details, access
controls, networking, post-creation) with NCI-specific versions that add
VL image aliases, fixed/floating public IP handling, and cloud-init based
Puppet bootstrap configuration.
"""

import copy
import itertools
import json
import logging
import netaddr
import operator
import os.path
#import pdb ## DEBUG
import re
import socket
import time
import types

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables
from django.template.defaultfilters import filesizeformat

from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import workflows

from openstack_dashboard import api
from openstack_dashboard.dashboards.project.instances.workflows import create_instance as base_mod

from openstack_dashboard.local.nci import crypto as ncicrypto
from openstack_dashboard.local.nci import utils as nciutils
from openstack_dashboard.local.nci.constants import *


LOG = logging.getLogger(__name__)


class SetInstanceDetailsAction(base_mod.SetInstanceDetailsAction):
    """Instance details step action extended with "VL" image aliases."""

    Meta = nciutils.subclass_meta_type(base_mod.SetInstanceDetailsAction)

    def populate_image_id_choices(self, request, context):
        """Return the upstream image choices plus one friendly alias entry
        per unique VL image series tag (pointing at the newest timestamped
        image in that series)."""
        choices = super(SetInstanceDetailsAction, self).populate_image_id_choices(request, context)

        # Find the latest VL image for each unique series tag and add an
        # alias item to the top of the images list with a more friendly name
        # so that the user doesn't have to hunt through the entire list
        # looking for the correct image to use.
        self.vl_tags = {}
        for id, image in choices:
            if not id:
                continue
            parts = image.name.split("-")
            if parts[0] == "vl":
                if not image.is_public:
                    LOG.debug("Ignoring non-public VL image: {0}".format(image.name))
                    continue

                # VL images have the following name format:
                #   vl-<tag_base>[-<tag_variant>-...]-<timestamp>
                if len(parts) < 3:
                    LOG.warning("Invalid VL image name format: {0}".format(image.name))
                    continue

                tag = "-".join(parts[1:-1])
                if re.match(r"2[0-9]{7}", parts[-1]):
                    image._vl_ts = parts[-1]
                else:
                    LOG.warning("Invalid or missing timestamp in VL image name: {0}".format(image.name))
                    continue

                # Keep only the newest image (lexicographic timestamp
                # comparison works because the format is fixed-width).
                if (tag not in self.vl_tags) or (image._vl_ts > self.vl_tags[tag]._vl_ts):
                    self.vl_tags[tag] = image

        def clone_image(tag):
            # Build a display title from the tag, then store a shallow copy
            # of the image whose "id" is the alias ("vltag:<tag>") and whose
            # "_real_id" remembers the underlying Glance image ID.
            if "-" in tag:
                (base, variant) = tag.split("-", 1)
            else:
                base = tag
                variant = ""

            if base.startswith("centos"):
                title = "CentOS"
                base = base[6:]
            elif base.startswith("ubuntu"):
                title = "Ubuntu"
                base = base[6:]
            else:
                title = tag
                base = ""
                variant = ""

            if base:
                title += " " + base
            if variant:
                title += " " + variant

            image = copy.copy(self.vl_tags[tag])
            image._real_id = image.id
            image.id = "vltag:" + tag
            image.name = title
            self.vl_tags[tag] = image
            return image

        if self.vl_tags:
            # Insert alias entries (and separators) just below the first
            # "select an image" choice.
            choices.insert(1, ("---all", "----- All Images -----"))
            for tag in reversed(sorted(self.vl_tags.keys())):
                image = clone_image(tag)
                choices.insert(1, (image.id, image))
            choices.insert(1, ("---vl", "----- VL Images -----"))

        return choices

    def clean_name(self):
        """Strip the instance name and, when it contains a dot, require it
        to be a syntactically valid DNS FQDN."""
        if hasattr(super(SetInstanceDetailsAction, self), "clean_name"):
            val = super(SetInstanceDetailsAction, self).clean_name()
        else:
            val = self.cleaned_data.get("name")

        val = val.strip()
        if val and ("." in val):
            valid_fqdn = r"^([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)*[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?$"
            if not re.search(valid_fqdn, val):
                msg = _("The specified FQDN doesn't satisfy the requirements of a valid DNS hostname.")
                raise forms.ValidationError(msg)

        return val

    def clean_image_id(self):
        """Map separator entries to empty and VL alias tags back to the
        real Glance image ID."""
        if hasattr(super(SetInstanceDetailsAction, self), "clean_image_id"):
            val = super(SetInstanceDetailsAction, self).clean_image_id()
        else:
            val = self.cleaned_data.get("image_id")

        if val:
            if val.startswith("---"):
                val = ""
            elif val.startswith("vltag:"):
                # Convert the VL image tag back into the real image ID.
                tag = val[6:]
                if tag not in self.vl_tags:
                    msg = _("Image tag doesn't exist")
                    raise forms.ValidationError(msg)
                val = self.vl_tags[tag]._real_id

        return val

    def get_help_text(self):
        saved = self._images_cache
        try:
            # Add our VL image aliases to the image cache temporarily so
            # that they are included in the list passed to "initWithImages()"
            # in "horizon/static/horizon/js/horizon.quota.js" (via the
            # "_flavors_and_quotas.html" template).  The result will be
            # that any flavours which are too small will be disabled when
            # a given image alias is selected in the drop down.
            self._images_cache["public_images"].extend(self.vl_tags.values())
            return super(SetInstanceDetailsAction, self).get_help_text()
        finally:
            self._images_cache = saved


class SetInstanceDetails(base_mod.SetInstanceDetails):
    action_class = SetInstanceDetailsAction


class SetAccessControlsAction(base_mod.SetAccessControlsAction):
    """Access controls step action with security groups removed."""

    Meta = nciutils.subclass_meta_type(base_mod.SetAccessControlsAction)

    def __init__(self, request, context, *args, **kwargs):
        super(SetAccessControlsAction, self).__init__(request, context, *args, **kwargs)
        # Remove the security groups field since they aren't functional on
        # our new cloud.
        del self.fields["groups"]

    def populate_groups_choices(self, request, context):
        return []


class SetAccessControls(base_mod.SetAccessControls):
    action_class = SetAccessControlsAction


class FixedIPMultiWidget(forms.MultiWidget):
    """Combined select + free-text widget for choosing a fixed IP.

    The select offers predefined options ("auto", "manual", specific
    external IPs); choosing "manual" exposes the text input's value.
    """

    def __init__(self, choices, attrs=None):
        sub_widgets = (
            forms.Select(choices=choices, attrs=attrs),
            forms.TextInput(attrs=attrs),
        )
        super(FixedIPMultiWidget, self).__init__(sub_widgets, attrs)

    def has_choice(self, value):
        # Search both flat choices and grouped (optgroup) choices.
        for x in self.widgets[0].choices:
            if isinstance(x[1], (list, tuple)):
                for y in x[1]:
                    if y[0] == value:
                        return True
            elif x[0] == value:
                return True
        return False

    def decompress(self, value):
        # Split a single stored value into [select_value, text_value].
        if value is not None:
            if self.has_choice(value):
                return [value, None]
            else:
                return ["manual", value]
        else:
            return [None, None]

    def value_from_datadict(self, data, files, name):
        v = super(FixedIPMultiWidget, self).value_from_datadict(data, files, name)
        if v[0] == "manual":
            return v[1].strip()
        else:
            return v[0]


# NB: We aren't subclassing the upstream implementation of this action.
class SetNetworkAction(workflows.Action):
    """Network step action supporting per-interface network, fixed IP and
    floating IP selection, including NCI fixed public IP allocations."""

    Meta = nciutils.subclass_meta_type(base_mod.SetNetworkAction)

    @staticmethod
    def user_has_ext_net_priv(request):
        """Whether the user may attach instances to the external network."""
        return (request.user.is_superuser
                or request.user.has_perms([settings.NCI_EXTERNAL_NET_PERM]))

    def __init__(self, request, context, *args, **kwargs):
        super(SetNetworkAction, self).__init__(request, context, *args, **kwargs)

        # If the user has access to the external network then retrieve any
        # fixed public IP allocations defined for this tenant.
        all_fixed_pub_ips = netaddr.IPSet()
        self.fixed_pub_ips_pool = False
        if self.user_has_ext_net_priv(request):
            try:
                if request.user.project_name in settings.NCI_FIXED_PUBLIC_IPS:
                    for cidr in settings.NCI_FIXED_PUBLIC_IPS[request.user.project_name]:
                        if cidr == "pool":
                            self.fixed_pub_ips_pool = True
                        else:
                            all_fixed_pub_ips.add(netaddr.IPNetwork(cidr))
                elif request.user.project_name == "admin":
                    self.fixed_pub_ips_pool = True
            except (netaddr.AddrFormatError, ValueError) as e:
                LOG.exception("Error parsing fixed public IP list: {0}".format(e))
                messages.error(request, str(e))
                msg = _("Failed to load fixed public IP configuration.")
                messages.warning(request, msg)
                # Fall back to "no fixed public IPs" on a bad config.
                all_fixed_pub_ips = netaddr.IPSet()
                self.fixed_pub_ips_pool = False

        self.fixed_pub_ips_enabled = (bool(all_fixed_pub_ips) or self.fixed_pub_ips_pool)

        # Build the list of network choices.
        networks_list = self.get_networks(request)
        self.networks = dict([(x.id, x) for x in networks_list])
        network_choices = [(x.id, x.name) for x in sorted(networks_list, key=operator.attrgetter('name'))]
        network_choices.insert(0, ("", "-- Unassigned --"))

        # Build the fixed and floating IP choice lists.
        self.pub_ips = self.get_public_ips(request, all_fixed_pub_ips)

        fixed_ip_choices = [
            ("auto", "Automatic"),
            ("manual", "Manual"),
        ]
        if self.fixed_pub_ips_enabled:
            ext_fixed_ip_choices = [(str(x), str(x)) for x in self.pub_ips["fixed"]]
            if self.fixed_pub_ips_pool:
                ext_fixed_ip_choices.append(["ext_pool", "Global Allocation Pool"])
            grp_title = "External"
            if not ext_fixed_ip_choices:
                grp_title += " (none available)"
            fixed_ip_choices.append((grp_title, ext_fixed_ip_choices))
        else:
            ext_fixed_ip_choices = []

        floating_ip_choices = [(x.id, x.ip) for x in sorted(self.pub_ips["float"].itervalues(), key=lambda x: netaddr.IPAddress(x.ip))]
        floating_ip_choices.insert(0, ("", "-- None --"))

        # Create the form fields for each network interface.
        self.intf_limit = settings.NCI_VM_NETWORK_INTF_LIMIT
        if not settings.NCI_DUPLICATE_VM_NETWORK_INTF:
            # Can't have more interfaces than networks when duplicates
            # aren't permitted.
            self.intf_limit = max(1, min(self.intf_limit, len(networks_list)))

        for i in range(0, self.intf_limit):
            self.fields["eth{0:d}_network".format(i)] = forms.ChoiceField(
                label=_("Network"),
                required=(i == 0),
                choices=network_choices,
                initial="",
                help_text=_("The network that this interface should be attached to."))

            self.fields["eth{0:d}_fixed_ip".format(i)] = forms.CharField(
                widget=FixedIPMultiWidget(fixed_ip_choices),
                label=_("Fixed IP"),
                required=True,
                initial="auto",
                help_text=_("The fixed IP address to assign to this interface."))

            self.fields["eth{0:d}_floating_ip".format(i)] = forms.ChoiceField(
                label=_("Floating Public IP"),
                required=False,
                choices=floating_ip_choices,
                initial="",
                help_text=_("A floating IP address to associate with this interface."))

        # Select reasonable defaults if there is an obvious choice.  We only
        # consider external networks as an option if there aren't any floating
        # IPs available.
        external_net_ids = set([x for x, y in self.networks.iteritems() if y.get("router:external", False)])
        private_net_ids = set(self.networks.keys()) - external_net_ids

        default_priv_net = None
        if len(private_net_ids) == 1:
            default_priv_net = iter(private_net_ids).next()
        elif private_net_ids:
            # As a convention, when we setup a new tenant we create a network
            # with the same name as the tenant.
            search = [request.user.project_name]
            if request.user.project_name in ["admin", "z00"]:
                search.append("internal")
            matches = [x for x in private_net_ids if self.networks[x].name in search]
            if len(matches) == 1:
                default_priv_net = matches[0]

        if len(floating_ip_choices) > 1:
            if default_priv_net:
                self.fields["eth0_network"].initial = default_priv_net
                self.fields["eth0_floating_ip"].initial = floating_ip_choices[1][0]
        elif ext_fixed_ip_choices:
            if len(external_net_ids) == 1:
                self.fields["eth0_network"].initial = iter(external_net_ids).next()
                self.fields["eth0_fixed_ip"].initial = ext_fixed_ip_choices[0][0]
                if default_priv_net:
                    assert self.intf_limit > 1
                    self.fields["eth1_network"].initial = default_priv_net
        elif default_priv_net:
            self.fields["eth0_network"].initial = default_priv_net

        # A list of external network IDs is needed for the client side code.
        self.external_nets = ";".join(external_net_ids)

    def get_networks(self, request):
        """Return the networks this tenant may attach interfaces to,
        filtering external networks when fixed public IPs are disabled."""
        networks = []
        try:
            networks = api.neutron.network_list_for_tenant(request, request.user.project_id)
        except Exception:
            exceptions.handle(request)
            msg = _("Unable to retrieve available networks.")
            messages.warning(request, msg)

        if not self.fixed_pub_ips_enabled:
            LOG.debug("Excluding external networks")
            networks = filter(lambda x: not x.get("router:external", False), networks)

        # TODO: Workaround until we can unshare the "internal" network.
        if request.user.project_name not in ["admin", "z00"]:
            networks = filter(lambda x: x.get("router:external", False) or not x.shared, networks)

        any_ext_nets = False
        for net in networks:
            # Make sure the "name" attribute is defined.
            net.set_id_as_name_if_empty()
            any_ext_nets = any_ext_nets or net.get("router:external", False)

        if self.fixed_pub_ips_enabled and not any_ext_nets:
            LOG.debug("No external networks found - disabling fixed public IPs")
            self.fixed_pub_ips_enabled = False

        return networks

    def get_public_ips(self, request, all_fixed_pub_ips):
        """Return {"float": {id: floating_ip}, "fixed": IPSet} of public
        IPs still available to this tenant."""
        ips = {}
        try:
            # Select any unassigned floating IPs.
            floats = api.network.tenant_floating_ip_list(request)
            ips["float"] = dict([(x.id, x) for x in floats if not x.port_id])

            if self.fixed_pub_ips_enabled and all_fixed_pub_ips:
                # Take note of all floating IPs (including assigned) since they
                # can't be used as a fixed IP given that a port already exists.
                used_ips = [x.ip for x in floats]

                # Locate any fixed IPs already assigned to an external network
                # port so that we can exclude them from the list.
                for net_id, net in self.networks.iteritems():
                    if not net.get("router:external", False):
                        continue
                    LOG.debug("Getting all ports for network: {0}".format(net_id))
                    ports = api.neutron.port_list(request, tenant_id=request.user.project_id, network_id=net_id)
                    for port in ports:
                        for fip in port.fixed_ips:
                            if fip.get("ip_address"):
                                used_ips.append(fip["ip_address"])

                # Select fixed IPs allocated to the tenant that aren't in use.
                ips["fixed"] = all_fixed_pub_ips - netaddr.IPSet(used_ips)
            else:
                ips["fixed"] = []
        except Exception:
            exceptions.handle(request)
            msg = _("Failed to determine available public IPs.")
            messages.warning(request, msg)
            ips["float"] = {}
            ips["fixed"] = []

        return ips

    def clean(self):
        """Validate the per-interface network/fixed IP/floating IP fields
        together and contribute the resulting "nics" list."""
        data = super(SetNetworkAction, self).clean()

        nics = []
        used_ips = {"_float_": set()}
        try:
            for i in range(0, self.intf_limit):
                nic = {}
                field_id = "eth{0:d}_network".format(i)
                net_id = data.get(field_id)
                if net_id:
                    used_ips.setdefault(net_id, set())
                    nic["network_id"] = net_id
                    if i != len(nics):
                        msg = _("Network interfaces must be assigned consecutively.")
                        self._errors[field_id] = self.error_class([msg])
                    elif (not settings.NCI_DUPLICATE_VM_NETWORK_INTF) and (net_id in [n["network_id"] for n in nics]):
                        msg = _("Network is assigned to another interface.")
                        self._errors[field_id] = self.error_class([msg])

                    # Field level validation will have already checked that the
                    # network ID exists by virtue of being a valid choice.
                    assert net_id in self.networks
                    external = self.networks[net_id].get("router:external", False)
                else:
                    external = False

                fixed_subnet_id = None
                field_id = "eth{0:d}_fixed_ip".format(i)
                fixed_ip = data.get(field_id)
                if not fixed_ip:
                    # Value could only be undefined if field level validation
                    # failed since "required=True" for this field.
                    assert self._errors.get(field_id)
                elif fixed_ip == "auto":
                    if external:
                        msg = _("Selected option is not valid on this network.")
                        self._errors[field_id] = self.error_class([msg])
                    elif not net_id:
                        msg = _("No network selected.")
                        self._errors[field_id] = self.error_class([msg])
                elif fixed_ip == "ext_pool":
                    if external:
                        # Choice won't be available unless global allocation pool
                        # is enabled.
                        assert self.fixed_pub_ips_pool
                    else:
                        msg = _("Selected option is not available on this network.")
                        self._errors[field_id] = self.error_class([msg])
                else:
                    try:
                        fixed_ip = netaddr.IPAddress(fixed_ip)
                    except (netaddr.AddrFormatError, ValueError) as e:
                        msg = _("Not a valid IP address format.")
                        self._errors[field_id] = self.error_class([msg])
                    else:
                        if external:
                            assert self.fixed_pub_ips_enabled
                            if fixed_ip not in self.pub_ips["fixed"]:
                                msg = _("\"{0}\" is not available on this network.".format(fixed_ip))
                                self._errors[field_id] = self.error_class([msg])
                            elif fixed_ip in used_ips[net_id]:
                                msg = _("IP address is assigned to another interface.")
                                self._errors[field_id] = self.error_class([msg])
                            else:
                                nic["fixed_ip"] = fixed_ip
                                used_ips[net_id].add(fixed_ip)
                        else:
                            # Verify that there is a subnet for the selected network
                            # which contains the fixed IP address.
                            subnet_cidr = None
                            for subnet in self.networks[net_id].subnets:
                                subnet_cidr = netaddr.IPNetwork(subnet.cidr)
                                if fixed_ip in subnet_cidr:
                                    break
                            else:
                                subnet_cidr = None

                            if not subnet_cidr:
                                msg = _("IP address must be in a subnet range for the selected network.")
                                self._errors[field_id] = self.error_class([msg])
                            elif fixed_ip == subnet_cidr.network:
                                msg = _("Network address is reserved.")
                                self._errors[field_id] = self.error_class([msg])
                            elif fixed_ip == subnet_cidr.broadcast:
                                msg = _("Broadcast address is reserved.")
                                self._errors[field_id] = self.error_class([msg])
                            elif subnet.get("gateway_ip") and (fixed_ip == netaddr.IPAddress(subnet.gateway_ip)):
                                msg = _("IP address is reserved for the subnet gateway.")
                                self._errors[field_id] = self.error_class([msg])
                            else:
                                fixed_subnet_id = subnet.id

                                # Is the IP address already assigned to a port on
                                # this network?
                                LOG.debug("Getting all ports for network: {0}".format(net_id))
                                ports = api.neutron.port_list(self.request, tenant_id=self.request.user.project_id, network_id=net_id)
                                found = False
                                for port in ports:
                                    for fip in port.fixed_ips:
                                        if fip.get("ip_address") and (fixed_ip == netaddr.IPAddress(fip["ip_address"])):
                                            found = True
                                            break

                                if found:
                                    msg = _("IP address is already in use.")
                                    self._errors[field_id] = self.error_class([msg])
                                elif fixed_ip in used_ips[net_id]:
                                    msg = _("IP address is assigned to another interface.")
                                    self._errors[field_id] = self.error_class([msg])
                                else:
                                    nic["fixed_ip"] = fixed_ip
                                    used_ips[net_id].add(fixed_ip)

                field_id = "eth{0:d}_floating_ip".format(i)
                floating_ip = data.get(field_id)
                if floating_ip:
                    assert floating_ip in self.pub_ips["float"]
                    if not net_id:
                        msg = _("No network selected.")
                        self._errors[field_id] = self.error_class([msg])
                    elif external:
                        msg = _("Floating IPs cannot be used on an external network.")
                        self._errors[field_id] = self.error_class([msg])
                    elif floating_ip in used_ips["_float_"]:
                        msg = _("IP address is assigned to another interface.")
                        self._errors[field_id] = self.error_class([msg])
                    else:
                        # Check that a router connects the chosen network to
                        # the floating IP's external network.
                        float_net_id = self.pub_ips["float"][floating_ip].floating_network_id
                        LOG.debug("Looking for a route between the networks {0} and {1}".format(net_id, float_net_id))
                        ports = api.neutron.port_list(self.request, network_id=net_id, device_owner="network:router_interface")
                        found = False
                        for port in ports:
                            if fixed_subnet_id and (fixed_subnet_id not in [x.get("subnet_id") for x in port.fixed_ips]):
                                LOG.debug("Ignoring port {0} due to subnet mismatch".format(port.id))
                                continue
                            router = api.neutron.router_get(self.request, port.device_id)
                            if router.get("external_gateway_info", {}).get("network_id") == float_net_id:
                                LOG.debug("Found path to floating IP network via router: {0}".format(router.id))
                                found = True
                                break

                        if not found:
                            if self.networks[net_id].shared:
                                # The Neutron API doesn't return interface ports for routers
                                # owned by another tenant, even if that network is shared
                                # with us.  So we just have to accept the user's request.
                                LOG.warning("Unable to locate router for floating IP on shared network: {0}".format(net_id))
                            else:
                                msg = _("No router interface found that connects the selected network with the floating IP.")
                                self._errors[field_id] = self.error_class([msg])
                        else:
                            nic["floating_ip"] = floating_ip
                            used_ips["_float_"].add(floating_ip)

                if "network_id" in nic:
                    nics.append(nic)
        except Exception:
            exceptions.handle(self.request)
            msg = _("Validation failed with an unexpected error.")
            raise forms.ValidationError(msg)

        if not nics:
            msg = _("At least one network interface must be assigned.")
            raise forms.ValidationError(msg)

        if settings.NCI_DUPLICATE_VM_NETWORK_INTF:
            # See "server_create_hook_func()" for why this check is made.
            float_nets = set([n["network_id"] for n in nics if "floating_ip" in n])
            for net_id in float_nets:
                if len(filter(lambda x: x["network_id"] == net_id, nics)) > 1:
                    msg = _("Networks with a floating IP specified can only be assigned to one interface.")
                    raise forms.ValidationError(msg)

        data["nics"] = nics
        return data


# NB: We aren't subclassing the upstream implementation of this step.
class SetNetwork(workflows.Step):
    action_class = SetNetworkAction
    contributes = ("nics", "network_id")
    template_name = "project/instances/../instances_nci/_update_networks.html"

    def contribute(self, data, context):
        context = super(SetNetwork, self).contribute(data, context)
        if context["nics"]:
            # Emulate the network list set in the upstream implementation.
            context["network_id"] = [n["network_id"] for n in context["nics"]]
        return context


class BootstrapConfigAction(workflows.Action):
    """Replacement "Post-Creation" action collecting Puppet bootstrap and
    system update options for cloud-init."""

    puppet_action = forms.ChoiceField(
        label=_("Puppet Action"),
        required=True,
        choices=[x for x in PUPPET_ACTION_CHOICES if x[0] == "none"],
        initial="none",
        help_text=_("Puppet command to execute."))

    puppet_env = forms.RegexField(
        label=_("Puppet Environment"),
        required=False,
        regex=REPO_BRANCH_REGEX,
        help_text=_("Puppet configuration environment (or equivalent branch name) to deploy."))

    install_updates = forms.ChoiceField(
        label=_("Install Updates"),
        required=True,
        choices=[
            ("reboot", _("Yes (reboot if required)")),
            ("yes", _("Yes (don't reboot)")),
            ("no", _("No")),
        ],
        initial="reboot",
        help_text=_("Whether to install system updates. (Recommended)"))

    class Meta(object):
        name = _("Initial Boot")
        help_text_template = ("project/instances/../instances_nci/_bootstrap_help.html")

    def __init__(self, request, context, *args, **kwargs):
        super(BootstrapConfigAction, self).__init__(request, context, *args, **kwargs)

        # Check if the project's VL config exists.  We only assign a default
        # Puppet action if it does.  This will allow projects not using the
        # VL environment to still be able to launch VMs without having to
        # change the Puppet action first.
        is_vl = False
        try:
            container = nci_private_container_name(request)
            config_obj_name = nci_vl_project_config_name()
            is_vl = api.swift.swift_object_exists(request, container, config_obj_name)
        except Exception:
            exceptions.handle(request)

        if is_vl:
            obj = None
            try:
                obj = api.swift.swift_get_object(request, container, config_obj_name, resp_chunk_size=None)
            except Exception:
                exceptions.handle(request)
                msg = _("VL project configuration not found.")
                messages.warning(request, msg)

            if obj:
                project_cfg = None
                try:
                    project_cfg = json.loads(obj.data)
                except ValueError as e:
                    LOG.exception("Error parsing project configuration: {0}".format(e))
                    messages.error(request, str(e))
                    msg = _("VL project configuration is corrupt.")
                    messages.warning(request, msg)

                if project_cfg:
                    self.fields["puppet_env"].initial = project_cfg.get("puppet_env", "")
                    # Only enable the full set of Puppet actions when the
                    # project has all required keys/certificates configured.
                    if project_cfg.get("repo_key") and project_cfg.get("eyaml_key") and project_cfg.get("eyaml_cert"):
                        self.fields["puppet_action"].choices = PUPPET_ACTION_CHOICES
                        self.fields["puppet_action"].initial = "apply"
                        default_action = project_cfg.get("puppet_action", "auto")
                        if default_action != "auto":
                            avail_actions = [x[0] for x in self.fields["puppet_action"].choices]
                            if default_action in avail_actions:
                                self.fields["puppet_action"].initial = default_action

    def clean(self):
        data = super(BootstrapConfigAction, self).clean()
        if (data.get("puppet_action", "none") != "none") and not data.get("puppet_env"):
            msg = _("An environment name is required for the selected Puppet action.")
            raise forms.ValidationError(msg)
        return data


class BootstrapConfig(workflows.Step):
    action_class = BootstrapConfigAction
    contributes = ("puppet_action", "puppet_env", "install_updates")
    template_name = "project/instances/../instances_nci/_bootstrap_step.html"


def server_create_hook_func(request, context, floats):
    """Return a replacement for "api.nova.server_create()" that builds the
    "nics" argument from the workflow context and, after creation, waits
    for Neutron ports and associates the requested floating IPs."""
    def _impl(*args, **kwargs):
        float_nets = {}

        kwargs["nics"] = []
        nics = context["nics"] or []
        for n in nics:
            # https://github.com/openstack/python-novaclient/blob/2.20.0/novaclient/v1_1/servers.py#L528
            nic = {"net-id": n["network_id"]}
            ip = n.get("fixed_ip")
            if ip:
                if ip.version == 6:
                    nic["v6-fixed-ip"] = str(ip)
                else:
                    assert ip.version == 4
                    nic["v4-fixed-ip"] = str(ip)
            kwargs["nics"].append(nic)

            if "floating_ip" in n:
                assert n["network_id"] not in float_nets
                float_nets[n["network_id"]] = n["floating_ip"]

        srv = api.nova.server_create(*args, **kwargs)

        if float_nets:
            # Find the ports created for the new instance which we need to
            # associate each floating IP with.  We have to wait until the
            # ports are created by Neutron.  Note that the only unique
            # information we have to identify which port should be paired
            # with each floating IP is the network ID.  Hence we don't
            # support more than one interface connected to the same network
            # when floating IPs are specified.
            try:
                max_attempts = 15
                attempt = 0
                while attempt < max_attempts:
                    attempt += 1
                    LOG.debug("Fetching network ports for instance: {0}".format(srv.id))
                    ports = api.neutron.port_list(request, device_id=srv.id)
                    for p in ports:
                        LOG.debug("Found port: id={0}; owner={1}; network={2}".format(*[p.get(x) for x in ["id", "device_owner", "network_id"]]))
                        if p.get("device_owner", "").startswith("compute:") and (p.get("network_id") in float_nets):
                            for t in api.network.floating_ip_target_list_by_instance(request, srv.id):
                                LOG.debug("Got floating IP target: {0}".format(t))
                                if t.startswith(p.id):
                                    float_id = float_nets[p.network_id]
                                    api.network.floating_ip_associate(request, float_id, t)
                                    del float_nets[p.network_id]
                                    msg = _("Floating IP {0} associated with new instance.".format(floats[float_id].ip))
                                    messages.info(request, msg)
                                    break

                    if not float_nets:
                        # All floating IPs have now been assigned.
                        srv = api.nova.server_get(request, srv.id)
                        break

                    status = api.nova.server_get(request, srv.id).status.lower()
                    if status == "active":
                        # Once active, ports won't appear later; make only a
                        # couple more attempts before giving up.
                        if max_attempts != 2:
                            LOG.debug("VM state has become active")
                            max_attempts = 2
                            attempt = 0
                    elif status != "build":
                        LOG.debug("Aborting wait loop due to server status: {0}".format(status))
                        break

                    LOG.debug("Waiting for network port allocation")
                    time.sleep(2)
            except Exception:
                exceptions.handle(request)

            # Anything left in "float_nets" failed to be associated.
            for f in float_nets.itervalues():
                msg = _("Failed to associate floating IP {0} with new instance.".format(floats[f].ip))
                messages.warning(request, msg)

        return srv

    return _impl


def step_generator():
    """Yield the upstream workflow steps with our replacements swapped in."""
    for step in base_mod.LaunchInstance.default_steps:
        if step == base_mod.SetInstanceDetails:
            yield SetInstanceDetails
        elif step == base_mod.SetAccessControls:
            yield SetAccessControls
        elif step == base_mod.SetNetwork:
            yield SetNetwork
        elif step == base_mod.PostCreationStep:
            # Replace the "Post-Creation" tab with our bootstrap parameters.
            yield BootstrapConfig
        else:
            yield step


class NCILaunchInstance(base_mod.LaunchInstance):
    """Launch Instance workflow with NCI-specific steps and cloud-init
    userdata generation."""

    default_steps = [x for x in step_generator()]

    @sensitive_variables("context")
    def validate(self, context):
        if context["count"] > 1:
            # Specific IPs can only be bound to a single instance.
            keys = set(itertools.chain.from_iterable(context["nics"]))
            if filter(lambda k: k.endswith("_ip"), keys):
                msg = _("Multiple instances cannot be launched with the same IP address.")
                self.add_error_to_step(msg, SetNetworkAction.slug)
                # Missing from "add_error_to_step()"...
                self.get_step(SetNetworkAction.slug).has_errors = True
                return False
        return True

    @sensitive_variables("context")
    def handle(self, request, context):
        cloud_cfg = {}
        if context["puppet_action"] != "none":
            # Load the project's VL configuration.
            try:
                obj = api.swift.swift_get_object(request,
                    nci_private_container_name(request),
                    nci_vl_project_config_name(),
                    resp_chunk_size=None)
            except Exception:
                exceptions.handle(request)
                msg = _("VL project configuration not found.")
                messages.error(request, msg)
                return False

            try:
                project_cfg = json.loads(obj.data)
            except ValueError as e:
                LOG.exception("Error parsing project configuration: {0}".format(e))
                messages.error(request, str(e))
                msg = _("VL project configuration is corrupt.")
                messages.error(request, msg)
                return False

            # Add the cloud-config parameters for the "nci.puppet" module.
            puppet_cfg = cloud_cfg.setdefault("nci", {}).setdefault("puppet", {})
            puppet_cfg["action"] = context["puppet_action"]
            puppet_cfg["environment"] = context["puppet_env"]
            repo_cfg = puppet_cfg.setdefault("repo", {})
            repo_cfg["path"] = project_cfg.get("repo_path", "")
            eyaml_cfg = puppet_cfg.setdefault("eyaml", {})

            # "msg" is assigned before each step so that the except block
            # reports which one failed.
            try:
                msg = _("Failed to initialise crypto stash.")
                stash = ncicrypto.CryptoStash(request, project_cfg.get("stash") or {})

                msg = _("Failed to load deployment key.")
                key = stash.load_private_key(project_cfg.get("repo_key"))
                repo_cfg["key"] = key.cloud_config_dict()

                msg = _("Failed to load eyaml key.")
                key = stash.load_private_key(project_cfg.get("eyaml_key"))
                eyaml_cfg["key"] = key.cloud_config_dict()

                msg = _("Failed to load eyaml certificate.")
                cert = stash.load_x509_cert(project_cfg.get("eyaml_cert"))
                eyaml_cfg["cert"] = cert.cloud_config_dict()
            except Exception:
                exceptions.handle(request)
                messages.error(request, msg)
                return False

        cloud_cfg["package_upgrade"] = (context["install_updates"] != "no")
        cloud_cfg["package_reboot_if_required"] = (context["install_updates"] == "reboot")

        if "." in context["name"]:
            cloud_cfg["fqdn"] = context["name"]

        # Construct the "user data" to inject into the VM for "cloud-init".
        user_data = MIMEMultipart()
        try:
            # Note that JSON is also valid YAML:
            #   http://yaml.org/spec/1.2/spec.html#id2759572
            part = MIMEText(json.dumps(cloud_cfg), "cloud-config")
            user_data.attach(part)
        except (ValueError, TypeError) as e:
            LOG.exception("Error serialising userdata: {0}".format(e))
            messages.error(request, str(e))
            msg = _("Failed to construct userdata for VM instance.")
            messages.error(request, msg)
            return False

        context["script_data"] = user_data.as_string()

        # We could copy the contents of the base class function here and make
        # the changes that we need.  But that would create a maintenance
        # headache since for each OpenStack update we'd have to check whether
        # anything in the original implementation changed and replicate it
        # here.  Instead, we'll rebind the "api.nova.server_create()" function
        # in the namespace of the base class function to call our hook closure
        # instead.
        api_proxy = nciutils.AttributeProxy(base_mod.api)
        api_proxy.nova = nciutils.AttributeProxy(base_mod.api.nova)
        floats = self.get_step(SetNetworkAction.slug).action.pub_ips["float"]
        api_proxy.nova.server_create = server_create_hook_func(request, context, floats)

        # We have to strip off any function decorators, otherwise the rebind
        # won't be visible inside the function.  Whilst this does rely on some
        # Python internals, the chances of those changing is significantly
        # lower especially since RedHat doesn't change the Python version
        # in a major release series.
        base_func = nciutils.undecorate(super(NCILaunchInstance, self).handle.__func__, "handle")
        g_dict = base_func.__globals__
        g_dict.update({"api": api_proxy})
        return types.FunctionType(base_func.__code__, g_dict)(self, request, context)


# vim:ts=4 et sw=4 sts=4:
apache-2.0
beiko-lab/gengis
bin/Lib/_MozillaCookieJar.py
62
5958
"""Mozilla / Netscape cookie loading / saving.""" import re, time from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError, Cookie, MISSING_FILENAME_TEXT) class MozillaCookieJar(FileCookieJar): """ WARNING: you may want to backup your browser's cookies file if you use this class to save cookies. I *think* it works, but there have been bugs in the past! This class differs from CookieJar only in the format it uses to save and load cookies to and from a file. This class uses the Mozilla/Netscape `cookies.txt' format. lynx uses this file format, too. Don't expect cookies saved while the browser is running to be noticed by the browser (in fact, Mozilla on unix will overwrite your saved cookies if you change them on disk while it's running; on Windows, you probably can't save at all while the browser is running). Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to Netscape cookies on saving. In particular, the cookie version and port number information is lost, together with information about whether or not Path, Port and Discard were specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the domain as set in the HTTP header started with a dot (yes, I'm aware some domains in Netscape files start with a dot and some don't -- trust me, you really don't want to know any more about this). Note that though Mozilla and Netscape use the same format, they use slightly different headers. The class saves cookies using the Netscape header by default (Mozilla can cope with that). """ magic_re = "#( Netscape)? HTTP Cookie File" header = """\ # Netscape HTTP Cookie File # http://www.netscape.com/newsref/std/cookie_spec.html # This is a generated file! Do not edit. 
""" def _really_load(self, f, filename, ignore_discard, ignore_expires): now = time.time() magic = f.readline() if not re.search(self.magic_re, magic): f.close() raise LoadError( "%r does not look like a Netscape format cookies file" % filename) try: while 1: line = f.readline() if line == "": break # last field may be absent, so keep any trailing tab if line.endswith("\n"): line = line[:-1] # skip comments and blank lines XXX what is $ for? if (line.strip().startswith(("#", "$")) or line.strip() == ""): continue domain, domain_specified, path, secure, expires, name, value = \ line.split("\t") secure = (secure == "TRUE") domain_specified = (domain_specified == "TRUE") if name == "": # cookies.txt regards 'Set-Cookie: foo' as a cookie # with no name, whereas cookielib regards it as a # cookie with no value. name = value value = None initial_dot = domain.startswith(".") assert domain_specified == initial_dot discard = False if expires == "": expires = None discard = True # assume path_specified is false c = Cookie(0, name, value, None, False, domain, domain_specified, initial_dot, path, False, secure, expires, discard, None, None, {}) if not ignore_discard and c.discard: continue if not ignore_expires and c.is_expired(now): continue self.set_cookie(c) except IOError: raise except Exception: _warn_unhandled_exception() raise LoadError("invalid Netscape format cookies file %r: %r" % (filename, line)) def save(self, filename=None, ignore_discard=False, ignore_expires=False): if filename is None: if self.filename is not None: filename = self.filename else: raise ValueError(MISSING_FILENAME_TEXT) f = open(filename, "w") try: f.write(self.header) now = time.time() for cookie in self: if not ignore_discard and cookie.discard: continue if not ignore_expires and cookie.is_expired(now): continue if cookie.secure: secure = "TRUE" else: secure = "FALSE" if cookie.domain.startswith("."): initial_dot = "TRUE" else: initial_dot = "FALSE" if cookie.expires is not None: expires = 
str(cookie.expires) else: expires = "" if cookie.value is None: # cookies.txt regards 'Set-Cookie: foo' as a cookie # with no name, whereas cookielib regards it as a # cookie with no value. name = "" value = cookie.name else: name = cookie.name value = cookie.value f.write( "\t".join([cookie.domain, initial_dot, cookie.path, secure, expires, name, value])+ "\n") finally: f.close()
gpl-3.0
rohlandm/servo
tests/wpt/harness/wptrunner/wptmanifest/parser.py
133
23444
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. #default_value:foo #include: other.manifest # #[test_name.js] # expected: ERROR # # [subtest 1] # expected: # os == win: FAIL #This is a comment # PASS # # TODO: keep comments in the tree import types from cStringIO import StringIO from node import * class ParseError(Exception): def __init__(self, filename, line, detail): self.line = line self.filename = filename self.detail = detail self.message = "%s: %s line %s" % (self.detail, self.filename, self.line) Exception.__init__(self, self.message) eol = object group_start = object group_end = object digits = "0123456789" open_parens = "[(" close_parens = "])" parens = open_parens + close_parens operator_chars = "=!" unary_operators = ["not"] binary_operators = ["==", "!=", "and", "or"] operators = ["==", "!=", "not", "and", "or"] atoms = {"True": True, "False": False, "Reset": object()} def decode(byte_str): return byte_str.decode("utf8") def precedence(operator_node): return len(operators) - operators.index(operator_node.data) class TokenTypes(object): def __init__(self): for type in ["group_start", "group_end", "paren", "list_start", "list_end", "separator", "ident", "string", "number", "atom", "eof"]: setattr(self, type, type) token_types = TokenTypes() class Tokenizer(object): def __init__(self): self.reset() def reset(self): self.indent_levels = [0] self.state = self.line_start_state self.next_state = self.data_line_state self.line_number = 0 def tokenize(self, stream): self.reset() if type(stream) in types.StringTypes: stream = StringIO(stream) if not hasattr(stream, "name"): self.filename = "" else: self.filename = stream.name self.next_line_state = self.line_start_state for i, line in enumerate(stream): self.state = self.next_line_state assert self.state is not None states = [] self.next_line_state = None 
self.line_number = i + 1 self.index = 0 self.line = line.rstrip() while self.state != self.eol_state: states.append(self.state) tokens = self.state() if tokens: for token in tokens: yield token self.state() while True: yield (token_types.eof, None) def char(self): if self.index == len(self.line): return eol return self.line[self.index] def consume(self): if self.index < len(self.line): self.index += 1 def peek(self, length): return self.line[self.index:self.index + length] def skip_whitespace(self): while self.char() == " ": self.consume() def eol_state(self): if self.next_line_state is None: self.next_line_state = self.line_start_state def line_start_state(self): self.skip_whitespace() if self.char() == eol: self.state = self.eol_state return if self.index > self.indent_levels[-1]: self.indent_levels.append(self.index) yield (token_types.group_start, None) else: while self.index < self.indent_levels[-1]: self.indent_levels.pop() yield (token_types.group_end, None) # This is terrible; if we were parsing an expression # then the next_state will be expr_or_value but when we deindent # it must always be a heading or key next so we go back to data_line_state self.next_state = self.data_line_state if self.index != self.indent_levels[-1]: raise ParseError(self.filename, self.line_number, "Unexpected indent") self.state = self.next_state def data_line_state(self): if self.char() == "[": yield (token_types.paren, self.char()) self.consume() self.state = self.heading_state else: self.state = self.key_state def heading_state(self): rv = "" while True: c = self.char() if c == "\\": rv += self.consume_escape() elif c == "]": break elif c == eol: raise ParseError(self.filename, self.line_number, "EOL in heading") else: rv += c self.consume() yield (token_types.string, decode(rv)) yield (token_types.paren, "]") self.consume() self.state = self.line_end_state self.next_state = self.data_line_state def key_state(self): rv = "" while True: c = self.char() if c == " ": 
self.skip_whitespace() if self.char() != ":": raise ParseError(self.filename, self.line_number, "Space in key name") break elif c == ":": break elif c == eol: raise ParseError(self.filename, self.line_number, "EOL in key name (missing ':'?)") elif c == "\\": rv += self.consume_escape() else: rv += c self.consume() yield (token_types.string, decode(rv)) yield (token_types.separator, ":") self.consume() self.state = self.after_key_state def after_key_state(self): self.skip_whitespace() c = self.char() if c == "#": self.next_state = self.expr_or_value_state self.state = self.comment_state elif c == eol: self.next_state = self.expr_or_value_state self.state = self.eol_state elif c == "[": self.state = self.list_start_state else: self.state = self.value_state def list_start_state(self): yield (token_types.list_start, "[") self.consume() self.state = self.list_value_start_state def list_value_start_state(self): self.skip_whitespace() if self.char() == "]": self.state = self.list_end_state elif self.char() in ("'", '"'): quote_char = self.char() self.consume() yield (token_types.string, self.consume_string(quote_char)) self.skip_whitespace() if self.char() == "]": self.state = self.list_end_state elif self.char() != ",": raise ParseError(self.filename, self.line_number, "Junk after quoted string") self.consume() elif self.char() == "#": self.state = self.comment_state self.next_line_state = self.list_value_start_state elif self.char() == eol: self.next_line_state = self.list_value_start_state self.state = self.eol_state elif self.char() == ",": raise ParseError(self.filename, self.line_number, "List item started with separator") elif self.char() == "@": self.state = self.list_value_atom_state else: self.state = self.list_value_state def list_value_state(self): rv = "" spaces = 0 while True: c = self.char() if c == "\\": escape = self.consume_escape() rv += escape elif c == eol: raise ParseError(self.filename, self.line_number, "EOL in list value") elif c == "#": raise 
ParseError(self.filename, self.line_number, "EOL in list value (comment)") elif c == ",": self.state = self.list_value_start_state self.consume() break elif c == " ": spaces += 1 self.consume() elif c == "]": self.state = self.list_end_state self.consume() break else: rv += " " * spaces spaces = 0 rv += c self.consume() if rv: yield (token_types.string, decode(rv)) def list_value_atom_state(self): self.consume() for _, value in self.list_value_state(): yield token_types.atom, value def list_end_state(self): self.consume() yield (token_types.list_end, "]") self.state = self.line_end_state def value_state(self): self.skip_whitespace() if self.char() in ("'", '"'): quote_char = self.char() self.consume() yield (token_types.string, self.consume_string(quote_char)) if self.char() == "#": self.state = self.comment_state else: self.state = self.line_end_state elif self.char() == "@": self.consume() for _, value in self.value_inner_state(): yield token_types.atom, value else: self.state = self.value_inner_state def value_inner_state(self): rv = "" spaces = 0 while True: c = self.char() if c == "\\": rv += self.consume_escape() elif c == "#": self.state = self.comment_state break elif c == " ": # prevent whitespace before comments from being included in the value spaces += 1 self.consume() elif c == eol: self.state = self.line_end_state break else: rv += " " * spaces spaces = 0 rv += c self.consume() yield (token_types.string, decode(rv)) def comment_state(self): while self.char() is not eol: self.consume() self.state = self.eol_state def line_end_state(self): self.skip_whitespace() c = self.char() if c == "#": self.state = self.comment_state elif c == eol: self.state = self.eol_state else: raise ParseError(self.filename, self.line_number, "Junk before EOL %s" % c) def consume_string(self, quote_char): rv = "" while True: c = self.char() if c == "\\": rv += self.consume_escape() elif c == quote_char: self.consume() break elif c == eol: raise ParseError(self.filename, 
self.line_number, "EOL in quoted string") else: rv += c self.consume() return decode(rv) def expr_or_value_state(self): if self.peek(3) == "if ": self.state = self.expr_state else: self.state = self.value_state def expr_state(self): self.skip_whitespace() c = self.char() if c == eol: raise ParseError(self.filename, self.line_number, "EOL in expression") elif c in "'\"": self.consume() yield (token_types.string, self.consume_string(c)) elif c == "#": raise ParseError(self.filename, self.line_number, "Comment before end of expression") elif c == ":": yield (token_types.separator, c) self.consume() self.state = self.value_state elif c in parens: self.consume() yield (token_types.paren, c) elif c in ("!", "="): self.state = self.operator_state elif c in digits: self.state = self.digit_state else: self.state = self.ident_state def operator_state(self): # Only symbolic operators index_0 = self.index while True: c = self.char() if c == eol: break elif c in operator_chars: self.consume() else: self.state = self.expr_state break yield (token_types.ident, self.line[index_0:self.index]) def digit_state(self): index_0 = self.index seen_dot = False while True: c = self.char() if c == eol: break elif c in digits: self.consume() elif c == ".": if seen_dot: raise ParseError(self.filename, self.line_number, "Invalid number") self.consume() seen_dot = True elif c in parens: break elif c in operator_chars: break elif c == " ": break elif c == ":": break else: raise ParseError(self.filename, self.line_number, "Invalid character in number") self.state = self.expr_state yield (token_types.number, self.line[index_0:self.index]) def ident_state(self): index_0 = self.index while True: c = self.char() if c == eol: break elif c == ".": break elif c in parens: break elif c in operator_chars: break elif c == " ": break elif c == ":": break else: self.consume() self.state = self.expr_state yield (token_types.ident, self.line[index_0:self.index]) def consume_escape(self): assert self.char() == 
"\\" self.consume() c = self.char() self.consume() if c == "x": return self.decode_escape(2) elif c == "u": return self.decode_escape(4) elif c == "U": return self.decode_escape(6) elif c in ["a", "b", "f", "n", "r", "t", "v"]: return eval("'\%s'" % c) elif c is eol: raise ParseError(self.filename, self.line_number, "EOL in escape") else: return c def decode_escape(self, length): value = 0 for i in xrange(length): c = self.char() value *= 16 value += self.escape_value(c) self.consume() return unichr(value).encode("utf8") def escape_value(self, c): if '0' <= c <= '9': return ord(c) - ord('0') elif 'a' <= c <= 'f': return ord(c) - ord('a') + 10 elif 'A' <= c <= 'F': return ord(c) - ord('A') + 10 else: raise ParseError(self.filename, self.line_number, "Invalid character escape") class Parser(object): def __init__(self): self.reset() def reset(self): self.token = None self.unary_operators = "!" self.binary_operators = frozenset(["&&", "||", "=="]) self.tokenizer = Tokenizer() self.token_generator = None self.tree = Treebuilder(DataNode(None)) self.expr_builder = None self.expr_builders = [] def parse(self, input): self.reset() self.token_generator = self.tokenizer.tokenize(input) self.consume() self.manifest() return self.tree.node def consume(self): self.token = self.token_generator.next() def expect(self, type, value=None): if self.token[0] != type: raise ParseError if value is not None: if self.token[1] != value: raise ParseError self.consume() def manifest(self): self.data_block() self.expect(token_types.eof) def data_block(self): while self.token[0] == token_types.string: self.tree.append(KeyValueNode(self.token[1])) self.consume() self.expect(token_types.separator) self.value_block() self.tree.pop() while self.token == (token_types.paren, "["): self.consume() if self.token[0] != token_types.string: raise ParseError self.tree.append(DataNode(self.token[1])) self.consume() self.expect(token_types.paren, "]") if self.token[0] == token_types.group_start: 
self.consume() self.data_block() self.eof_or_end_group() self.tree.pop() def eof_or_end_group(self): if self.token[0] != token_types.eof: self.expect(token_types.group_end) def value_block(self): if self.token[0] == token_types.list_start: self.consume() self.list_value() elif self.token[0] == token_types.string: self.value() elif self.token[0] == token_types.group_start: self.consume() self.expression_values() if self.token[0] == token_types.string: self.value() self.eof_or_end_group() elif self.token[0] == token_types.atom: self.atom() else: raise ParseError def list_value(self): self.tree.append(ListNode()) while self.token[0] in (token_types.atom, token_types.string): if self.token[0] == token_types.atom: self.atom() else: self.value() self.expect(token_types.list_end) self.tree.pop() def expression_values(self): while self.token == (token_types.ident, "if"): self.consume() self.tree.append(ConditionalNode()) self.expr_start() self.expect(token_types.separator) if self.token[0] == token_types.string: self.value() else: raise ParseError self.tree.pop() def value(self): self.tree.append(ValueNode(self.token[1])) self.consume() self.tree.pop() def atom(self): if self.token[1] not in atoms: raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Unrecognised symbol @%s" % self.token[1]) self.tree.append(AtomNode(atoms[self.token[1]])) self.consume() self.tree.pop() def expr_start(self): self.expr_builder = ExpressionBuilder(self.tokenizer) self.expr_builders.append(self.expr_builder) self.expr() expression = self.expr_builder.finish() self.expr_builders.pop() self.expr_builder = self.expr_builders[-1] if self.expr_builders else None if self.expr_builder: self.expr_builder.operands[-1].children[-1].append(expression) else: self.tree.append(expression) self.tree.pop() def expr(self): self.expr_operand() while (self.token[0] == token_types.ident and self.token[1] in binary_operators): self.expr_bin_op() self.expr_operand() def expr_operand(self): if 
self.token == (token_types.paren, "("): self.consume() self.expr_builder.left_paren() self.expr() self.expect(token_types.paren, ")") self.expr_builder.right_paren() elif self.token[0] == token_types.ident and self.token[1] in unary_operators: self.expr_unary_op() self.expr_operand() elif self.token[0] in [token_types.string, token_types.ident]: self.expr_value() elif self.token[0] == token_types.number: self.expr_number() else: raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Unrecognised operand") def expr_unary_op(self): if self.token[1] in unary_operators: self.expr_builder.push_operator(UnaryOperatorNode(self.token[1])) self.consume() else: raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Expected unary operator") def expr_bin_op(self): if self.token[1] in binary_operators: self.expr_builder.push_operator(BinaryOperatorNode(self.token[1])) self.consume() else: raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Expected binary operator") def expr_value(self): node_type = {token_types.string: StringNode, token_types.ident: VariableNode}[self.token[0]] self.expr_builder.push_operand(node_type(self.token[1])) self.consume() if self.token == (token_types.paren, "["): self.consume() self.expr_builder.operands[-1].append(IndexNode()) self.expr_start() self.expect(token_types.paren, "]") def expr_number(self): self.expr_builder.push_operand(NumberNode(self.token[1])) self.consume() class Treebuilder(object): def __init__(self, root): self.root = root self.node = root def append(self, node): self.node.append(node) self.node = node return node def pop(self): node = self.node self.node = self.node.parent return node class ExpressionBuilder(object): def __init__(self, tokenizer): self.operands = [] self.operators = [None] self.tokenizer = tokenizer def finish(self): while self.operators[-1] is not None: self.pop_operator() rv = self.pop_operand() assert self.is_empty() return rv def left_paren(self): 
self.operators.append(None) def right_paren(self): while self.operators[-1] is not None: self.pop_operator() if not self.operators: raise ParseError(self.tokenizer.filename, self.tokenizer.line, "Unbalanced parens") assert self.operators.pop() is None def push_operator(self, operator): assert operator is not None while self.precedence(self.operators[-1]) > self.precedence(operator): self.pop_operator() self.operators.append(operator) def pop_operator(self): operator = self.operators.pop() if isinstance(operator, BinaryOperatorNode): operand_1 = self.operands.pop() operand_0 = self.operands.pop() self.operands.append(BinaryExpressionNode(operator, operand_0, operand_1)) else: operand_0 = self.operands.pop() self.operands.append(UnaryExpressionNode(operator, operand_0)) def push_operand(self, node): self.operands.append(node) def pop_operand(self): return self.operands.pop() def is_empty(self): return len(self.operands) == 0 and all(item is None for item in self.operators) def precedence(self, operator): if operator is None: return 0 return precedence(operator) def parse(stream): p = Parser() return p.parse(stream)
mpl-2.0
jjyycchh/phantomjs
src/qt/qtwebkit/Tools/gtk/common.py
116
4728
#!/usr/bin/env python # Copyright (C) 2011 Igalia S.L. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import errno import os import select import subprocess import sys script_dir = None build_dir = None def script_path(*args): global script_dir if not script_dir: script_dir = os.path.join(os.path.dirname(__file__), '..', 'Scripts') return os.path.join(*(script_dir,) + args) def top_level_path(*args): return os.path.join(*((script_path('..', '..'),) + args)) def get_build_path(build_types=('Release', 'Debug')): global build_dir if build_dir: return build_dir def is_valid_build_directory(path): return os.path.exists(os.path.join(path, 'GNUmakefile')) or \ os.path.exists(os.path.join(path, 'Programs', 'DumpRenderTree')) if len(sys.argv[1:]) > 1 and os.path.exists(sys.argv[-1]) and is_valid_build_directory(sys.argv[-1]): return sys.argv[-1] # Debian and Ubuntu build both flavours of the library (with gtk2 # and with gtk3); they use directories build-2.0 and build-3.0 for # that, which is not handled by the above cases; we check that the # directory where we are called from is a valid build directory, # which should handle pretty much all other non-standard cases. 
build_dir = os.getcwd() if is_valid_build_directory(build_dir): return build_dir for build_type in build_types: build_dir = top_level_path('WebKitBuild', build_type) if is_valid_build_directory(build_dir): return build_dir # distcheck builds in a directory named _build in the top-level path. build_dir = top_level_path("_build") if is_valid_build_directory(build_dir): return build_dir build_dir = top_level_path() if is_valid_build_directory(build_dir): return build_dir build_dir = top_level_path("WebKitBuild") if is_valid_build_directory(build_dir): return build_dir print('Could not determine build directory.') sys.exit(1) def build_path_for_build_types(build_types, *args): return os.path.join(*(get_build_path(build_types),) + args) def build_path(*args): return build_path_for_build_types(('Release', 'Debug'), *args) def pkg_config_file_variable(package, variable): process = subprocess.Popen(['pkg-config', '--variable=%s' % variable, package], stdout=subprocess.PIPE) stdout = process.communicate()[0].decode("utf-8") if process.returncode: return None return stdout.strip() def prefix_of_pkg_config_file(package): return pkg_config_file_variable(package, 'prefix') def gtk_version_of_pkg_config_file(pkg_config_path): process = subprocess.Popen(['pkg-config', pkg_config_path, '--print-requires'], stdout=subprocess.PIPE) stdout = process.communicate()[0].decode("utf-8") if 'gtk+-3.0' in stdout: return 3 return 2 def parse_output_lines(fd, parse_line_callback): output = '' read_set = [fd] while read_set: try: rlist, wlist, xlist = select.select(read_set, [], []) except select.error as e: parse_line_callback("WARNING: error while waiting for fd %d to become readable\n" % fd) parse_line_callback(" error code: %d, error message: %s\n" % (e[0], e[1])) continue if fd in rlist: try: chunk = os.read(fd, 1024) except OSError as e: if e.errno == errno.EIO: # Child process finished. 
chunk = '' else: raise e if not chunk: read_set.remove(fd) output += chunk while '\n' in output: pos = output.find('\n') parse_line_callback(output[:pos + 1]) output = output[pos + 1:] if not chunk and output: parse_line_callback(output) output = ''
bsd-3-clause
JianyuWang/nova
nova/api/openstack/compute/disk_config.py
13
5375
# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Disk Config extension.""" from oslo_utils import strutils from webob import exc from nova.api.openstack.compute.schemas import disk_config from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.i18n import _ ALIAS = 'os-disk-config' API_DISK_CONFIG = "OS-DCF:diskConfig" INTERNAL_DISK_CONFIG = "auto_disk_config" authorize = extensions.os_compute_soft_authorizer(ALIAS) def disk_config_to_api(value): return 'AUTO' if value else 'MANUAL' def disk_config_from_api(value): if value == 'AUTO': return True elif value == 'MANUAL': return False else: msg = _("%s must be either 'MANUAL' or 'AUTO'.") % API_DISK_CONFIG raise exc.HTTPBadRequest(explanation=msg) class ImageDiskConfigController(wsgi.Controller): def _add_disk_config(self, context, images): for image in images: metadata = image['metadata'] if INTERNAL_DISK_CONFIG in metadata: raw_value = metadata[INTERNAL_DISK_CONFIG] value = strutils.bool_from_string(raw_value) image[API_DISK_CONFIG] = disk_config_to_api(value) @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['nova.context'] if 'image' in resp_obj.obj and authorize(context): image = resp_obj.obj['image'] self._add_disk_config(context, [image]) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['nova.context'] if 'images' in resp_obj.obj and authorize(context): images = resp_obj.obj['images'] self._add_disk_config(context, 
images) class ServerDiskConfigController(wsgi.Controller): def _add_disk_config(self, req, servers): for server in servers: db_server = req.get_db_instance(server['id']) # server['id'] is guaranteed to be in the cache due to # the core API adding it in its 'show'/'detail' methods. value = db_server.get(INTERNAL_DISK_CONFIG) server[API_DISK_CONFIG] = disk_config_to_api(value) def _show(self, req, resp_obj): if 'server' in resp_obj.obj: server = resp_obj.obj['server'] self._add_disk_config(req, [server]) @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['nova.context'] if authorize(context): self._show(req, resp_obj) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['nova.context'] if 'servers' in resp_obj.obj and authorize(context): servers = resp_obj.obj['servers'] self._add_disk_config(req, servers) @wsgi.extends def create(self, req, resp_obj, body): context = req.environ['nova.context'] if authorize(context): self._show(req, resp_obj) @wsgi.extends def update(self, req, resp_obj, id, body): context = req.environ['nova.context'] if authorize(context): self._show(req, resp_obj) @wsgi.extends(action='rebuild') def _action_rebuild(self, req, resp_obj, id, body): context = req.environ['nova.context'] if authorize(context): self._show(req, resp_obj) class DiskConfig(extensions.V21APIExtensionBase): """Disk Management Extension.""" name = "DiskConfig" alias = ALIAS version = 1 def get_controller_extensions(self): servers_extension = extensions.ControllerExtension( self, 'servers', ServerDiskConfigController()) images_extension = extensions.ControllerExtension( self, 'images', ImageDiskConfigController()) return [servers_extension, images_extension] def get_resources(self): return [] # NOTE(gmann): This function is not supposed to use 'body_deprecated_param' # parameter as this is placed to handle scheduler_hint extension for V2.1. 
# making 'body_deprecated_param' as optional to avoid changes for # server_update & server_rebuild def server_create(self, server_dict, create_kwargs, body_deprecated_param=None): if API_DISK_CONFIG in server_dict: api_value = server_dict[API_DISK_CONFIG] internal_value = disk_config_from_api(api_value) create_kwargs[INTERNAL_DISK_CONFIG] = internal_value server_update = server_create server_rebuild = server_create server_resize = server_create def get_server_create_schema(self): return disk_config.server_create get_server_update_schema = get_server_create_schema get_server_rebuild_schema = get_server_create_schema get_server_resize_schema = get_server_create_schema
apache-2.0
basho-labs/riak-cxx-client
deps/boost-1.47.0/tools/build/v2/build_system.py
10
34262
# Status: mostly ported. Missing is --out-xml support, 'configure' integration # and some FIXME. # Base revision: 64351 # Copyright 2003, 2005 Dave Abrahams # Copyright 2006 Rene Rivera # Copyright 2003, 2004, 2005, 2006, 2007 Vladimir Prus # Distributed under the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt) from b2.build.engine import Engine from b2.manager import Manager from b2.util.path import glob from b2.build import feature, property_set import b2.build.virtual_target from b2.build.targets import ProjectTarget from b2.util.sequence import unique import b2.build.build_request from b2.build.errors import ExceptionWithUserContext import b2.tools.common import b2.build.project as project import b2.build.virtual_target as virtual_target import b2.build.build_request as build_request import b2.util.regex from b2.manager import get_manager from b2.util import cached from b2.util import option import bjam import os import sys import re ################################################################################ # # Module global data. # ################################################################################ # Flag indicating we should display additional debugging information related to # locating and loading Boost Build configuration files. debug_config = False # Legacy option doing too many things, some of which are not even documented. # Should be phased out. # * Disables loading site and user configuration files. # * Disables auto-configuration for toolsets specified explicitly on the # command-line. # * Causes --toolset command-line options to be ignored. # * Prevents the default toolset from being used even if no toolset has been # configured at all. legacy_ignore_config = False # The cleaning is tricky. 
Say, if user says 'bjam --clean foo' where 'foo' is a # directory, then we want to clean targets which are in 'foo' as well as those # in any children Jamfiles under foo but not in any unrelated Jamfiles. To # achieve this we collect a list of projects under which cleaning is allowed. project_targets = [] # Virtual targets obtained when building main targets references on the command # line. When running 'bjam --clean main_target' we want to clean only files # belonging to that main target so we need to record which targets are produced # for it. results_of_main_targets = [] # Was an XML dump requested? out_xml = False # Default toolset & version to be used in case no other toolset has been used # explicitly by either the loaded configuration files, the loaded project build # scripts or an explicit toolset request on the command line. If not specified, # an arbitrary default will be used based on the current host OS. This value, # while not strictly necessary, has been added to allow testing Boost-Build's # default toolset usage functionality. default_toolset = None default_toolset_version = None ################################################################################ # # Public rules. # ################################################################################ # Returns the property set with the free features from the currently processed # build request. # def command_line_free_features(): return command_line_free_features # Sets the default toolset & version to be used in case no other toolset has # been used explicitly by either the loaded configuration files, the loaded # project build scripts or an explicit toolset request on the command line. For # more detailed information see the comment related to used global variables. 
# def set_default_toolset(toolset, version=None): default_toolset = toolset default_toolset_version = version pre_build_hook = [] def add_pre_build_hook(callable): pre_build_hook.append(callable) post_build_hook = None def set_post_build_hook(callable): post_build_hook = callable ################################################################################ # # Local rules. # ################################################################################ # Returns actual Jam targets to be used for executing a clean request. # def actual_clean_targets(targets): # Construct a list of projects explicitly detected as targets on this build # system run. These are the projects under which cleaning is allowed. for t in targets: if isinstance(t, b2.build.targets.ProjectTarget): project_targets.append(t.project_module()) # Construct a list of targets explicitly detected on this build system run # as a result of building main targets. targets_to_clean = set() for t in results_of_main_targets: # Do not include roots or sources. targets_to_clean.update(virtual_target.traverse(t)) to_clean = [] for t in get_manager().virtual_targets().all_targets(): # Remove only derived targets. if t.action(): p = t.project() if t in targets_to_clean or should_clean_project(p.project_module()): to_clean.append(t) return [t.actualize() for t in to_clean] _target_id_split = re.compile("(.*)//(.*)") # Given a target id, try to find and return the corresponding target. This is # only invoked when there is no Jamfile in ".". This code somewhat duplicates # code in project-target.find but we can not reuse that code without a # project-targets instance. 
# def find_target(target_id): projects = get_manager().projects() m = _target_id_split.match(target_id) if m: pm = projects.find(m.group(1), ".") else: pm = projects.find(target_id, ".") if pm: result = projects.target(pm) if m: result = result.find(m.group(2)) return result def initialize_config_module(module_name, location=None): get_manager().projects().initialize(module_name, location) # Helper rule used to load configuration files. Loads the first configuration # file with the given 'filename' at 'path' into module with name 'module-name'. # Not finding the requested file may or may not be treated as an error depending # on the must-find parameter. Returns a normalized path to the loaded # configuration file or nothing if no file was loaded. # def load_config(module_name, filename, paths, must_find=False): if debug_config: print "notice: Searching '%s' for '%s' configuration file '%s." \ % (paths, module_name, filename) where = None for path in paths: t = os.path.join(path, filename) if os.path.exists(t): where = t break if where: where = os.path.realpath(where) if debug_config: print "notice: Loading '%s' configuration file '%s' from '%s'." \ % (module_name, filename, where) # Set source location so that path-constant in config files # with relative paths work. This is of most importance # for project-config.jam, but may be used in other # config files as well. attributes = get_manager().projects().attributes(module_name) ; attributes.set('source-location', os.path.dirname(where), True) get_manager().projects().load_standalone(module_name, where) else: msg = "Configuration file '%s' not found in '%s'." % (filename, path) if must_find: get_manager().errors()(msg) elif debug_config: print msg return where # Loads all the configuration files used by Boost Build in the following order: # # -- test-config -- # Loaded only if specified on the command-line using the --test-config # command-line parameter. It is ok for this file not to exist even if specified. 
# If this configuration file is loaded, regular site and user configuration # files will not be. If a relative path is specified, file is searched for in # the current folder. # # -- site-config -- # Always named site-config.jam. Will only be found if located on the system # root path (Windows), /etc (non-Windows), user's home folder or the Boost Build # path, in that order. Not loaded in case the test-config configuration file is # loaded or either the --ignore-site-config or the --ignore-config command-line # option is specified. # # -- user-config -- # Named user-config.jam by default or may be named explicitly using the # --user-config command-line option or the BOOST_BUILD_USER_CONFIG environment # variable. If named explicitly the file is looked for from the current working # directory and if the default one is used then it is searched for in the # user's home directory and the Boost Build path, in that order. Not loaded in # case either the test-config configuration file is loaded, --ignore-config # command-line option is specified or an empty file name is explicitly # specified. If the file name has been given explicitly then the file must # exist. # # Test configurations have been added primarily for use by Boost Build's # internal unit testing system but may be used freely in other places as well. # def load_configuration_files(): # Flag indicating that site configuration should not be loaded. ignore_site_config = "--ignore-site-config" in sys.argv if legacy_ignore_config and debug_config: print "notice: Regular site and user configuration files will be ignored" print "notice: due to the --ignore-config command-line option." 
initialize_config_module("test-config") test_config = None for a in sys.argv: m = re.match("--test-config=(.*)$", a) if m: test_config = b2.util.unquote(m.group(1)) break if test_config: where = load_config("test-config", os.path.basename(test_config), [os.path.dirname(test_config)]) if where: if debug_config and not legacy_ignore_config: print "notice: Regular site and user configuration files will" print "notice: be ignored due to the test configuration being loaded." user_path = [os.path.expanduser("~")] + bjam.variable("BOOST_BUILD_PATH") site_path = ["/etc"] + user_path if os.name in ["nt"]: site_path = [os.getenv("SystemRoot")] + user_path if ignore_site_config and not legacy_ignore_config: print "notice: Site configuration files will be ignored due to the" print "notice: --ignore-site-config command-line option." initialize_config_module("site-config") if not test_config and not ignore_site_config and not legacy_ignore_config: load_config('site-config', 'site-config.jam', site_path) initialize_config_module('user-config') if not test_config and not legacy_ignore_config: user_config = None for a in sys.argv: m = re.match("--user-config=(.*)$", a) if m: user_config = m.group(1) break if not user_config: user_config = os.getenv("BOOST_BUILD_USER_CONFIG") # Special handling for the case when the OS does not strip the quotes # around the file name, as is the case when using Cygwin bash. user_config = b2.util.unquote(user_config) explicitly_requested = user_config if not user_config: user_config = "user-config.jam" if explicitly_requested: user_config = os.path.abspath(user_config) if debug_config: print "notice: Loading explicitly specified user configuration file:" print " " + user_config load_config('user-config', os.path.basename(user_config), [os.path.dirname(user_config)], True) else: load_config('user-config', os.path.basename(user_config), user_path) elif debug_config: print "notice: User configuration file loading explicitly disabled." 
; # We look for project-config.jam from "." upward. # I am not sure this is 100% right decision, we might as well check for # it only alonside the Jamroot file. However: # # - We need to load project-root.jam before Jamroot # - We probably would need to load project-root.jam even if there's no # Jamroot - e.g. to implement automake-style out-of-tree builds. if os.path.exists("project-config.jam"): file = ["project-config.jam"] else: file = b2.util.path.glob_in_parents(".", ["project-config.jam"]) if file: initialize_config_module('project-config', os.path.dirname(file[0])) load_config('project-config', "project-config.jam", [os.path.dirname(file[0])], True) # Autoconfigure toolsets based on any instances of --toolset=xx,yy,...zz or # toolset=xx,yy,...zz in the command line. May return additional properties to # be processed as if they had been specified by the user. # def process_explicit_toolset_requests(): extra_properties = [] option_toolsets = [e for option in b2.util.regex.transform(sys.argv, "^--toolset=(.*)$") for e in option.split(',')] feature_toolsets = [e for option in b2.util.regex.transform(sys.argv, "^toolset=(.*)$") for e in option.split(',')] for t in option_toolsets + feature_toolsets: # Parse toolset-version/properties. (toolset_version, toolset, version) = re.match("(([^-/]+)-?([^/]+)?)/?.*", t).groups() if debug_config: print "notice: [cmdline-cfg] Detected command-line request for '%s': toolset= %s version=%s" \ % (toolset_version, toolset, version) # If the toolset is not known, configure it now. known = False if toolset in feature.values("toolset"): known = True if known and version and not feature.is_subvalue("toolset", toolset, "version", version): known = False # TODO: we should do 'using $(toolset)' in case no version has been # specified and there are no versions defined for the given toolset to # allow the toolset to configure its default version. 
For this we need # to know how to detect whether a given toolset has any versions # defined. An alternative would be to do this whenever version is not # specified but that would require that toolsets correctly handle the # case when their default version is configured multiple times which # should be checked for all existing toolsets first. if not known: if debug_config: print "notice: [cmdline-cfg] toolset '%s' not previously configured; attempting to auto-configure now" % toolset_version toolset.using(toolset, version) else: if debug_config: print "notice: [cmdline-cfg] toolset '%s' already configured" % toolset_version # Make sure we get an appropriate property into the build request in # case toolset has been specified using the "--toolset=..." command-line # option form. if not t in sys.argv and not t in feature_toolsets: if debug_config: print "notice: [cmdline-cfg] adding toolset=%s) to the build request." % t ; extra_properties += "toolset=%s" % t return extra_properties # Returns 'true' if the given 'project' is equal to or is a (possibly indirect) # child to any of the projects requested to be cleaned in this build system run. # Returns 'false' otherwise. Expects the .project-targets list to have already # been constructed. # @cached def should_clean_project(project): if project in project_targets: return True else: parent = get_manager().projects().attribute(project, "parent-module") if parent and parent != "user-config": return should_clean_project(parent) else: return False ################################################################################ # # main() # ------ # ################################################################################ def main(): sys.argv = bjam.variable("ARGV") # FIXME: document this option. 
if "--profiling" in sys.argv: import cProfile r = cProfile.runctx('main_real()', globals(), locals(), "stones.prof") import pstats stats = pstats.Stats("stones.prof") stats.strip_dirs() stats.sort_stats('time', 'calls') stats.print_callers(20) return r else: try: return main_real() except ExceptionWithUserContext, e: e.report() def main_real(): global debug_config, legacy_ignore_config, out_xml debug_config = "--debug-configuration" in sys.argv legacy_ignore_config = "--ignore_config" in sys.argv out_xml = any(re.match("^--out-xml=(.*)$", a) for a in sys.argv) engine = Engine() global_build_dir = option.get("build-dir") manager = Manager(engine, global_build_dir) import b2.build.configure as configure if "--version" in sys.argv: version.report() return # This module defines types and generator and what not, # and depends on manager's existence import b2.tools.builtin b2.tools.common.init(manager) load_configuration_files() extra_properties = [] # Note that this causes --toolset options to be ignored if --ignore-config # is specified. if not legacy_ignore_config: extra_properties = process_explicit_toolset_requests() # We always load project in "." so that 'use-project' directives have any # chance of being seen. Otherwise, we would not be able to refer to # subprojects using target ids. current_project = None projects = get_manager().projects() if projects.find(".", "."): current_project = projects.target(projects.load(".")) # In case there are no toolsets currently defined makes the build run using # the default toolset. if not legacy_ignore_config and not feature.values("toolset"): dt = default_toolset dtv = None if default_toolset: dtv = default_toolset_version else: dt = "gcc" if os.name == 'nt': dt = "msvc" # FIXME: #else if [ os.name ] = MACOSX #{ # default-toolset = darwin ; #} print "warning: No toolsets are configured." print "warning: Configuring default toolset '%s'." % dt print "warning: If the default is wrong, your build may not work correctly." 
print "warning: Use the \"toolset=xxxxx\" option to override our guess." print "warning: For more configuration options, please consult" print "warning: http://boost.org/boost-build2/doc/html/bbv2/advanced/configuration.html" toolset.using(dt, dtv) # Parse command line for targets and properties. Note that this requires # that all project files already be loaded. (target_ids, properties) = build_request.from_command_line(sys.argv[1:] + extra_properties) # Expand properties specified on the command line into multiple property # sets consisting of all legal property combinations. Each expanded property # set will be used for a single build run. E.g. if multiple toolsets are # specified then requested targets will be built with each of them. if properties: expanded = build_request.expand_no_defaults(properties) else: expanded = [property_set.empty()] # Check that we actually found something to build. if not current_project and not target_ids: get_manager().errors()("no Jamfile in current directory found, and no target references specified.") # FIXME: # EXIT # Flags indicating that this build system run has been started in order to # clean existing instead of create new targets. Note that these are not the # final flag values as they may get changed later on due to some special # targets being specified on the command line. clean = "--clean" in sys.argv cleanall = "--clean-all" in sys.argv # List of explicitly requested files to build. Any target references read # from the command line parameter not recognized as one of the targets # defined in the loaded Jamfiles will be interpreted as an explicitly # requested file to build. If any such files are explicitly requested then # only those files and the targets they depend on will be built and they # will be searched for among targets that would have been built had there # been no explicitly requested files. 
explicitly_requested_files = [] # List of Boost Build meta-targets, virtual-targets and actual Jam targets # constructed in this build system run. targets = [] virtual_targets = [] actual_targets = [] explicitly_requested_files = [] # Process each target specified on the command-line and convert it into # internal Boost Build target objects. Detect special clean target. If no # main Boost Build targets were explictly requested use the current project # as the target. for id in target_ids: if id == "clean": clean = 1 else: t = None if current_project: t = current_project.find(id, no_error=1) else: t = find_target(id) if not t: print "notice: could not find main target '%s'" % id print "notice: assuming it's a name of file to create " ; explicitly_requested_files.append(id) else: targets.append(t) if not targets: targets = [projects.target(projects.module_name("."))] # FIXME: put this BACK. ## if [ option.get dump-generators : : true ] ## { ## generators.dump ; ## } # We wish to put config.log in the build directory corresponding # to Jamroot, so that the location does not differ depending on # directory where we do build. The amount of indirection necessary # here is scary. first_project = targets[0].project() first_project_root_location = first_project.get('project-root') first_project_root_module = manager.projects().load(first_project_root_location) first_project_root = manager.projects().target(first_project_root_module) first_build_build_dir = first_project_root.build_dir() configure.set_log_file(os.path.join(first_build_build_dir, "config.log")) virtual_targets = [] global results_of_main_targets # Now that we have a set of targets to build and a set of property sets to # build the targets with, we can start the main build process by using each # property set to generate virtual targets from all of our listed targets # and any of their dependants. 
for p in expanded: manager.set_command_line_free_features(property_set.create(p.free())) for t in targets: try: g = t.generate(p) if not isinstance(t, ProjectTarget): results_of_main_targets.extend(g.targets()) virtual_targets.extend(g.targets()) except ExceptionWithUserContext, e: e.report() except Exception: raise # Convert collected virtual targets into actual raw Jam targets. for t in virtual_targets: actual_targets.append(t.actualize()) # FIXME: restore ## # If XML data output has been requested prepare additional rules and targets ## # so we can hook into Jam to collect build data while its building and have ## # it trigger the final XML report generation after all the planned targets ## # have been built. ## if $(.out-xml) ## { ## # Get a qualified virtual target name. ## rule full-target-name ( target ) ## { ## local name = [ $(target).name ] ; ## local project = [ $(target).project ] ; ## local project-path = [ $(project).get location ] ; ## return $(project-path)//$(name) ; ## } ## # Generate an XML file containing build statistics for each constituent. ## # ## rule out-xml ( xml-file : constituents * ) ## { ## # Prepare valid XML header and footer with some basic info. 
## local nl = " ## " ; ## local jam = [ version.jam ] ; ## local os = [ modules.peek : OS OSPLAT JAMUNAME ] "" ; ## local timestamp = [ modules.peek : JAMDATE ] ; ## local cwd = [ PWD ] ; ## local command = $(.sys.argv) ; ## local bb-version = [ version.boost-build ] ; ## .header on $(xml-file) = ## "<?xml version=\"1.0\" encoding=\"utf-8\"?>" ## "$(nl)<build format=\"1.0\" version=\"$(bb-version)\">" ## "$(nl) <jam version=\"$(jam:J=.)\" />" ## "$(nl) <os name=\"$(os[1])\" platform=\"$(os[2])\"><![CDATA[$(os[3-]:J= )]]></os>" ## "$(nl) <timestamp><![CDATA[$(timestamp)]]></timestamp>" ## "$(nl) <directory><![CDATA[$(cwd)]]></directory>" ## "$(nl) <command><![CDATA[\"$(command:J=\" \")\"]]></command>" ## ; ## .footer on $(xml-file) = ## "$(nl)</build>" ; ## # Generate the target dependency graph. ## .contents on $(xml-file) += ## "$(nl) <targets>" ; ## for local t in [ virtual-target.all-targets ] ## { ## local action = [ $(t).action ] ; ## if $(action) ## # If a target has no action, it has no dependencies. ## { ## local name = [ full-target-name $(t) ] ; ## local sources = [ $(action).sources ] ; ## local dependencies ; ## for local s in $(sources) ## { ## dependencies += [ full-target-name $(s) ] ; ## } ## local path = [ $(t).path ] ; ## local jam-target = [ $(t).actual-name ] ; ## .contents on $(xml-file) += ## "$(nl) <target>" ## "$(nl) <name><![CDATA[$(name)]]></name>" ## "$(nl) <dependencies>" ## "$(nl) <dependency><![CDATA[$(dependencies)]]></dependency>" ## "$(nl) </dependencies>" ## "$(nl) <path><![CDATA[$(path)]]></path>" ## "$(nl) <jam-target><![CDATA[$(jam-target)]]></jam-target>" ## "$(nl) </target>" ## ; ## } ## } ## .contents on $(xml-file) += ## "$(nl) </targets>" ; ## # Build $(xml-file) after $(constituents). Do so even if a ## # constituent action fails and regenerate the xml on every bjam run. 
## INCLUDES $(xml-file) : $(constituents) ; ## ALWAYS $(xml-file) ; ## __ACTION_RULE__ on $(xml-file) = build-system.out-xml.generate-action ; ## out-xml.generate $(xml-file) ; ## } ## # The actual build actions are here; if we did this work in the actions ## # clause we would have to form a valid command line containing the ## # result of @(...) below (the name of the XML file). ## # ## rule out-xml.generate-action ( args * : xml-file ## : command status start end user system : output ? ) ## { ## local contents = ## [ on $(xml-file) return $(.header) $(.contents) $(.footer) ] ; ## local f = @($(xml-file):E=$(contents)) ; ## } ## # Nothing to do here; the *real* actions happen in ## # out-xml.generate-action. ## actions quietly out-xml.generate { } ## # Define the out-xml file target, which depends on all the targets so ## # that it runs the collection after the targets have run. ## out-xml $(.out-xml) : $(actual-targets) ; ## # Set up a global __ACTION_RULE__ that records all the available ## # statistics about each actual target in a variable "on" the --out-xml ## # target. ## # ## rule out-xml.collect ( xml-file : target : command status start end user ## system : output ? ) ## { ## local nl = " ## " ; ## # Open the action with some basic info. ## .contents on $(xml-file) += ## "$(nl) <action status=\"$(status)\" start=\"$(start)\" end=\"$(end)\" user=\"$(user)\" system=\"$(system)\">" ; ## # If we have an action object we can print out more detailed info. ## local action = [ on $(target) return $(.action) ] ; ## if $(action) ## { ## local action-name = [ $(action).action-name ] ; ## local action-sources = [ $(action).sources ] ; ## local action-props = [ $(action).properties ] ; ## # The qualified name of the action which we created the target. ## .contents on $(xml-file) += ## "$(nl) <name><![CDATA[$(action-name)]]></name>" ; ## # The sources that made up the target. 
## .contents on $(xml-file) += ## "$(nl) <sources>" ; ## for local source in $(action-sources) ## { ## local source-actual = [ $(source).actual-name ] ; ## .contents on $(xml-file) += ## "$(nl) <source><![CDATA[$(source-actual)]]></source>" ; ## } ## .contents on $(xml-file) += ## "$(nl) </sources>" ; ## # The properties that define the conditions under which the ## # target was built. ## .contents on $(xml-file) += ## "$(nl) <properties>" ; ## for local prop in [ $(action-props).raw ] ## { ## local prop-name = [ MATCH ^<(.*)>$ : $(prop:G) ] ; ## .contents on $(xml-file) += ## "$(nl) <property name=\"$(prop-name)\"><![CDATA[$(prop:G=)]]></property>" ; ## } ## .contents on $(xml-file) += ## "$(nl) </properties>" ; ## } ## local locate = [ on $(target) return $(LOCATE) ] ; ## locate ?= "" ; ## .contents on $(xml-file) += ## "$(nl) <jam-target><![CDATA[$(target)]]></jam-target>" ## "$(nl) <path><![CDATA[$(target:G=:R=$(locate))]]></path>" ## "$(nl) <command><![CDATA[$(command)]]></command>" ## "$(nl) <output><![CDATA[$(output)]]></output>" ; ## .contents on $(xml-file) += ## "$(nl) </action>" ; ## } ## # When no __ACTION_RULE__ is set "on" a target, the search falls back to ## # the global module. 
## module ## { ## __ACTION_RULE__ = build-system.out-xml.collect ## [ modules.peek build-system : .out-xml ] ; ## } ## IMPORT ## build-system : ## out-xml.collect ## out-xml.generate-action ## : : ## build-system.out-xml.collect ## build-system.out-xml.generate-action ## ; ## } j = option.get("jobs") if j: bjam.call("set-variable", PARALLELISM, j) k = option.get("keep-going", "true", "true") if k in ["on", "yes", "true"]: bjam.call("set-variable", "KEEP_GOING", "1") elif k in ["off", "no", "false"]: bjam.call("set-variable", "KEEP_GOING", "0") else: print "error: Invalid value for the --keep-going option" sys.exit() # The 'all' pseudo target is not strictly needed expect in the case when we # use it below but people often assume they always have this target # available and do not declare it themselves before use which may cause # build failures with an error message about not being able to build the # 'all' target. bjam.call("NOTFILE", "all") # And now that all the actual raw Jam targets and all the dependencies # between them have been prepared all that is left is to tell Jam to update # those targets. if explicitly_requested_files: # Note that this case can not be joined with the regular one when only # exact Boost Build targets are requested as here we do not build those # requested targets but only use them to construct the dependency tree # needed to build the explicitly requested files. 
# FIXME: add $(.out-xml) bjam.call("UPDATE", ["<e>%s" % x for x in explicitly_requested_files]) elif cleanall: bjam.call("UPDATE", "clean-all") elif clean: manager.engine().set_update_action("common.Clean", "clean", actual_clean_targets(targets)) bjam.call("UPDATE", "clean") else: # FIXME: #configure.print-configure-checks-summary ; if pre_build_hook: for h in pre_build_hook: h() bjam.call("DEPENDS", "all", actual_targets) ok = bjam.call("UPDATE_NOW", "all") # FIXME: add out-xml if post_build_hook: post_build_hook(ok) # Prevent automatic update of the 'all' target, now that # we have explicitly updated what we wanted. bjam.call("UPDATE") if manager.errors().count() == 0: return ["ok"] else: return []
apache-2.0
CodingVault/LeetCodeInPython
sorted_array_to_binary_tree.py
1
1250
#!/usr/bin/env python # encoding: utf-8 """ sorted_array_to_binary_tree.py Created by Shengwei on 2014-07-03. """ # https://oj.leetcode.com/problems/convert-sorted-array-to-binary-search-tree/ # tags: easy, tree, array, sorted, convert, D&C """ Given an array where elements are sorted in ascending order, convert it to a height balanced BST. """ # Definition for a binary tree node # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: # @param num, a list of integers # @return a tree node def sortedArrayToBST(self, num): def convert_array(left, right): """Convert num[left:right] to a (sub)tree.""" # num[x:x] is an empty list (x can be any number) if left >= right: return None # mid point at the very middle of num[left:right] # or the right one of the middle two mid = (left + right) / 2 root = TreeNode(num[mid]) root.left = convert_array(left, mid) root.right = convert_array(mid + 1, right) return root return convert_array(0, len(num))
apache-2.0
sgerhart/ansible
test/units/module_utils/basic/test_imports.py
30
5537
# -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import sys

from units.mock.procenv import ModuleTestCase
from units.compat import unittest
from units.compat.mock import patch, MagicMock
from ansible.module_utils.six.moves import builtins

# Keep a reference to the real __import__ so each mocked version can delegate
# to it for every module except the one being made to fail.
realimport = builtins.__import__


class TestImports(ModuleTestCase):
    """Check how ansible.module_utils.basic reacts at import time when
    optional dependencies (syslog, selinux, json, ast.literal_eval,
    systemd.journal) fail to import."""

    def clear_modules(self, mods):
        # Drop the named modules from sys.modules so a subsequent
        # __import__('ansible.module_utils.basic') re-runs its
        # import-time fallback logic instead of returning the cached module.
        for mod in mods:
            if mod in sys.modules:
                del sys.modules[mod]

    @patch.object(builtins, '__import__')
    def test_module_utils_basic_import_syslog(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            if name == 'syslog':
                raise ImportError
            return realimport(name, *args, **kwargs)

        # With syslog importable, basic sets HAS_SYSLOG to True.
        self.clear_modules(['syslog', 'ansible.module_utils.basic'])
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertTrue(mod.module_utils.basic.HAS_SYSLOG)

        # With syslog raising ImportError, HAS_SYSLOG becomes False.
        self.clear_modules(['syslog', 'ansible.module_utils.basic'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertFalse(mod.module_utils.basic.HAS_SYSLOG)

    @patch.object(builtins, '__import__')
    def test_module_utils_basic_import_selinux(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            if name == 'selinux':
                raise ImportError
            return realimport(name, *args, **kwargs)

        try:
            self.clear_modules(['selinux', 'ansible.module_utils.basic'])
            mod = builtins.__import__('ansible.module_utils.basic')
            self.assertTrue(mod.module_utils.basic.HAVE_SELINUX)
        except ImportError:
            # no selinux on test system, so skip
            pass

        # With selinux raising ImportError, HAVE_SELINUX becomes False.
        self.clear_modules(['selinux', 'ansible.module_utils.basic'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertFalse(mod.module_utils.basic.HAVE_SELINUX)

    @patch.object(builtins, '__import__')
    def test_module_utils_basic_import_json(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            if name == 'json':
                raise ImportError
            return realimport(name, *args, **kwargs)

        # Importing basic succeeds when json is available.
        self.clear_modules(['json', 'ansible.module_utils.basic'])
        builtins.__import__('ansible.module_utils.basic')

        # When json cannot be imported, importing basic exits the process.
        self.clear_modules(['json', 'ansible.module_utils.basic'])
        mock_import.side_effect = _mock_import
        with self.assertRaises(SystemExit):
            builtins.__import__('ansible.module_utils.basic')

    # FIXME: doesn't work yet
    # @patch.object(builtins, 'bytes')
    # def test_module_utils_basic_bytes(self, mock_bytes):
    #     mock_bytes.side_effect = NameError()
    #     from ansible.module_utils import basic

    @patch.object(builtins, '__import__')
    @unittest.skipIf(sys.version_info[0] >= 3, "literal_eval is available in every version of Python3")
    def test_module_utils_basic_import_literal_eval(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            # __import__ may receive fromlist positionally (args[2]) or as a
            # keyword; fall back to an empty list when neither is present.
            try:
                fromlist = kwargs.get('fromlist', args[2])
            except IndexError:
                fromlist = []
            if name == 'ast' and 'literal_eval' in fromlist:
                raise ImportError
            return realimport(name, *args, **kwargs)

        mock_import.side_effect = _mock_import
        self.clear_modules(['ast', 'ansible.module_utils.basic'])
        mod = builtins.__import__('ansible.module_utils.basic')
        # basic's fallback literal_eval must handle the same simple literals
        # that ast.literal_eval does, and reject arbitrary expressions.
        self.assertEqual(mod.module_utils.basic.literal_eval("'1'"), "1")
        self.assertEqual(mod.module_utils.basic.literal_eval("1"), 1)
        self.assertEqual(mod.module_utils.basic.literal_eval("-1"), -1)
        self.assertEqual(mod.module_utils.basic.literal_eval("(1,2,3)"), (1, 2, 3))
        self.assertEqual(mod.module_utils.basic.literal_eval("[1]"), [1])
        self.assertEqual(mod.module_utils.basic.literal_eval("True"), True)
        self.assertEqual(mod.module_utils.basic.literal_eval("False"), False)
        self.assertEqual(mod.module_utils.basic.literal_eval("None"), None)
        # self.assertEqual(mod.module_utils.basic.literal_eval('{"a": 1}'), dict(a=1))
        self.assertRaises(ValueError, mod.module_utils.basic.literal_eval, "asdfasdfasdf")

    @patch.object(builtins, '__import__')
    def test_module_utils_basic_import_systemd_journal(self, mock_import):
        def _mock_import(name, *args, **kwargs):
            # See note in the literal_eval test about positional fromlist.
            try:
                fromlist = kwargs.get('fromlist', args[2])
            except IndexError:
                fromlist = []
            if name == 'systemd' and 'journal' in fromlist:
                raise ImportError
            return realimport(name, *args, **kwargs)

        # With systemd.journal importable, has_journal is True.
        self.clear_modules(['systemd', 'ansible.module_utils.basic'])
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertTrue(mod.module_utils.basic.has_journal)

        # With systemd.journal raising ImportError, has_journal is False.
        self.clear_modules(['systemd', 'ansible.module_utils.basic'])
        mock_import.side_effect = _mock_import
        mod = builtins.__import__('ansible.module_utils.basic')
        self.assertFalse(mod.module_utils.basic.has_journal)
mit
cwilkes/event_store_meta
tests/test_functional.py
1
3668
# -*- coding: utf-8 -*-
"""Functional tests using WebTest.

See: http://webtest.readthedocs.org/
"""
import pytest

from flask import url_for

from event_store_meta.user.models import User

from .factories import UserFactory

# NOTE(review): these tests assume the ``user`` fixture's password is
# 'myprecious' - confirm against the fixture/factory definitions.


class TestLoggingIn:
    """Exercise the login form rendered in the site navbar."""

    def test_can_log_in_returns_200(self, user, testapp):
        # Goes to homepage
        res = testapp.get("/")
        # Fills out login form in navbar
        form = res.forms['loginForm']
        form['username'] = user.username
        form['password'] = 'myprecious'
        # Submits and follows the redirect to the landing page
        res = form.submit().follow()
        assert res.status_code == 200

    def test_sees_alert_on_log_out(self, user, testapp):
        res = testapp.get("/")
        # Fills out login form in navbar
        form = res.forms['loginForm']
        form['username'] = user.username
        form['password'] = 'myprecious'
        # Submits
        res = form.submit().follow()
        res = testapp.get(url_for('public.logout')).follow()
        # sees alert
        assert 'You are logged out.' in res

    def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
        # Goes to homepage
        res = testapp.get("/")
        # Fills out login form, password incorrect
        form = res.forms['loginForm']
        form['username'] = user.username
        form['password'] = 'wrong'
        # Submits
        res = form.submit()
        # sees error
        assert "Invalid password" in res

    def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
        # Goes to homepage
        res = testapp.get("/")
        # Fills out login form, password incorrect
        form = res.forms['loginForm']
        form['username'] = 'unknown'
        form['password'] = 'myprecious'
        # Submits
        res = form.submit()
        # sees error
        assert "Unknown user" in res


class TestRegistering:
    """Exercise the account-registration form."""

    def test_can_register(self, user, testapp):
        # Count users before registering so we can assert one was added.
        old_count = len(User.query.all())
        # Goes to homepage
        res = testapp.get("/")
        # Clicks Create Account button
        res = res.click("Create account")
        # Fills out the form
        form = res.forms["registerForm"]
        form['username'] = 'foobar'
        form['email'] = 'foo@bar.com'
        form['password'] = 'secret'
        form['confirm'] = 'secret'
        # Submits
        res = form.submit().follow()
        assert res.status_code == 200
        # A new user was created
        assert len(User.query.all()) == old_count + 1

    def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
        # Goes to registration page
        res = testapp.get(url_for("public.register"))
        # Fills out form, but passwords don't match
        form = res.forms["registerForm"]
        form['username'] = 'foobar'
        form['email'] = 'foo@bar.com'
        form['password'] = 'secret'
        form['confirm'] = 'secrets'
        # Submits
        res = form.submit()
        # sees error message
        assert "Passwords must match" in res

    def test_sees_error_message_if_user_already_registered(self, user, testapp):
        user = UserFactory(active=True)  # A registered user
        user.save()
        # Goes to registration page
        res = testapp.get(url_for("public.register"))
        # Fills out form, but username is already registered
        form = res.forms["registerForm"]
        form['username'] = user.username
        form['email'] = 'foo@bar.com'
        form['password'] = 'secret'
        form['confirm'] = 'secret'
        # Submits
        res = form.submit()
        # sees error
        assert "Username already registered" in res
bsd-3-clause
Parkjihooni6186/TizenRT
external/iotivity/iotivity_1.2-rel/tools/scons/URLDownload.py
29
7661
# -*- coding: utf-8 -*- # -- Dual Licence ---------------------------------------------------------- ############################################################################ # GPL License # # # # This file is a SCons (http://www.scons.org/) builder # # Copyright (c) 2012-14, Philipp Kraus, <philipp.kraus@flashpixx.de> # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as # # published by the Free Software Foundation, either version 3 of the # # License, or (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################ # -------------------------------------------------------------------------- ############################################################################ # BSD 3-Clause License # # # # This file is a SCons (http://www.scons.org/) builder # # Copyright (c) 2012-14, Philipp Kraus, <philipp.kraus@flashpixx.de> # # All rights reserved. # # # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions are # # met: # # # # 1. Redistributions of source code must retain the above copyright # # notice, this list of conditions and the following disclaimer. # # # # 2. Redistributions in binary form must reproduce the above copyright # # notice, this list of conditions and the following disclaimer in the # # documentation and/or other materials provided with the distribution. # # # # 3. 
Neither the name of the copyright holder nor the names of its # # contributors may be used to endorse or promote products derived from # # this software without specific prior written permission. # # # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED # # TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ############################################################################ # the URLDownload-Builder can be download any data from an URL into a target file # and can replace the target file name with the URL filename (the setting variable # within the environment object is a boolean type with the name "URLDOWNLOAD_USEURLFILENAM", # default setting replaces the target name with the URL filename) import urllib2, urlparse import SCons.Builder, SCons.Node, SCons.Errors # define an own node, for checking the data behind the URL, # we must download only than, if the data is changed, the # node derivates from the Python.Value node class URLNode(SCons.Node.Python.Value) : # overload the get_csig (copy the source from the # Python.Value node and append the data of the URL header def get_csig(self, calc=None): try: return self.ninfo.csig except AttributeError: pass try : response = urllib2.urlopen( str(self.value) ).info() except Exception, e : raise SCons.Errors.StopError( "%s [%s]" % (e, 
self.value) ) contents = "" if "Last-Modified" in response : contents = contents + response["Last-Modified"] if "Content-Length" in response : contents = contents + response["Content-Length"] if not contents : contents = self.get_contents() self.get_ninfo().csig = contents return contents # creates the downloading output message # @param s original message # @param target target name # @param source source name # @param env environment object def __message( s, target, source, env ) : print "downloading [%s] to [%s] ..." % (source[0], target[0]) # the download function, which reads the data from the URL # and writes it down to the file # @param target target file on the local drive # @param source URL for download # @@param env environment object def __action( target, source, env ) : try : stream = urllib2.urlopen( str(source[0]) ) file = open( str(target[0]), "wb" ) file.write(stream.read()) file.close() stream.close() except Exception, e : raise SCons.Errors.StopError( "%s [%s]" % (e, source[0]) ) # defines the emitter of the builder # @param target target file on the local drive # @param source URL for download # @param env environment object def __emitter( target, source, env ) : # we need a temporary file, because the dependency graph # of Scons need a physical existing file - so we prepare it target[0].prepare() if not env.get("URLDOWNLOAD_USEURLFILENAME", False) : return target, source try : url = urlparse.urlparse( urllib2.urlopen( str(source[0]) ).geturl() ) except Exception, e : raise SCons.Errors.StopError( "%s [%s]" % (e, source[0]) ) return url.path.split("/")[-1], source # generate function, that adds the builder to the environment, # the value "DOWNLOAD_USEFILENAME" replaces the target name with # the filename of the URL # @param env environment object def generate( env ) : env["BUILDERS"]["URLDownload"] = SCons.Builder.Builder( action = __action, emitter = __emitter, target_factory = SCons.Node.FS.File, source_factory = URLNode, single_source = True, 
PRINT_CMD_LINE_FUNC = __message ) env.Replace(URLDOWNLOAD_USEURLFILENAME = True ) # existing function of the builder # @param env environment object # @return true def exists(env) : return 1
apache-2.0