repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
devs1991/test_edx_docmode
common/lib/xmodule/xmodule/mixin.py
70
2601
""" Reusable mixins for XBlocks and/or XModules """ from xblock.fields import Scope, String, XBlockMixin # Make '_' a no-op so we can scrape strings. Using lambda instead of # `django.utils.translation.ugettext_noop` because Django cannot be imported in this file _ = lambda text: text class LicenseMixin(XBlockMixin): """ Mixin that allows an author to indicate a license on the contents of an XBlock. For example, a video could be marked as Creative Commons SA-BY licensed. You can even indicate the license on an entire course. If this mixin is not applied to an XBlock, or if the license field is blank, then the content is subject to whatever legal licensing terms that apply to content by default. For example, in the United States, that content is exclusively owned by the creator of the content by default. Other countries may have similar laws. """ license = String( display_name=_("License"), help=_("A license defines how the contents of this block can be shared and reused."), default=None, scope=Scope.content, ) @classmethod def parse_license_from_xml(cls, definition, node): """ When importing an XBlock from XML, this method will parse the license information out of the XML and attach it to the block. It is defined here so that classes that use this mixin can simply refer to this method, rather than reimplementing it in their XML import functions. """ license = node.get('license', default=None) # pylint: disable=redefined-builtin if license: definition['license'] = license return definition def add_license_to_xml(self, node, default=None): """ When generating XML from an XBlock, this method will add the XBlock's license to the XML representation before it is serialized. It is defined here so that classes that use this mixin can simply refer to this method, rather than reimplementing it in their XML export functions. 
""" if getattr(self, "license", default): node.set('license', self.license) def wrap_with_license(block, view, frag, context): # pylint: disable=unused-argument """ In the LMS, display the custom license underneath the XBlock. """ license = getattr(block, "license", None) # pylint: disable=redefined-builtin if license: context = {"license": license} frag.content += block.runtime.render_template('license_wrapper.html', context) return frag
agpl-3.0
henrymzhao/csss-minion
classes.py
4
4219
#pylint: disable=C import discord from discord.ext import commands FROZEN_ROLES = [228765603756900352, 289285166436843521, 314296819272122368, 296466915235332106, 228985701792743424, 229014335299649536] class Classes(): def __init__(self, bot): self.bot = bot @commands.command(pass_context = True) async def newclass(self, ctx, course): """Create a new discord class/role Usage: newclass <someclass> Creating a class/role places you in that class/role """ course = course.lower() dupe = False for j in range(0, len(ctx.message.server.roles)): if ctx.message.server.roles[j].name == course: dupe = True if dupe == True: await self.bot.say("Class already exists") else: # temp value flag = True # temp value # flag = False # for i in range(0, len(ctx.message.author.roles)): # if ctx.message.author.roles[i].name == "Regular": # flag = True if flag == True: newRole = await self.bot.create_role(ctx.message.server, name = course, mentionable = True)# , hoist = True) await self.bot.add_roles(ctx.message.author, newRole) await self.bot.say(course+" class has been created. You have been placed in it.") else: await self.bot.say("You need to be level 10 and above to create classes! 
My master said this is to reduce spam.") @commands.command(pass_context = True) async def whois(self, ctx, course): """List people in a discord class/role Usage: whois <someclass> """ get = 0 for i in ctx.message.server.roles: if i.name == course: get = i if get == 0: # not role specified not found await self.bot.say("That course doesn't exist!") else: # role specified is found # await self.bot.say("Found {}".format(get.name)) people = [] for i in ctx.message.server.members: if get in i.roles: if i.nick == None: people.append(i.name) else: people.append(i.nick) if len(people) == 0: # no users found in that group await self.bot.say("No one in this group!") else: result = "```I found these people: \n \n" for i in people: result += str(i) + "\n" result += "```" await self.bot.say(result) # Remove user from role @commands.command(pass_context = True) async def iamn(self, ctx, course : str): """Remove yourself from a discord class/role Usage: iamn <someclass> """ course = course.lower() found = 0 for i in range(0, len(ctx.message.author.roles)): if course == ctx.message.author.roles[i].name: found = i if found == 0: await self.bot.say("You are not currently in this class.") else: await self.bot.remove_roles(ctx.message.author, ctx.message.author.roles[found]) await self.bot.say("You've been removed from " + course) @commands.command(pass_context = True) async def iam(self, ctx, course : str): """Place yourself into a discord class/role Usage: iam <someclass> """ course = course.lower() for role in ctx.message.server.roles: if course == role.name: # found a match if int(role.id) in FROZEN_ROLES: await self.bot.say("This role is locked.") return else: print(role.id) await self.bot.add_roles(ctx.message.author, role) await self.bot.say("You've been placed in "+ course) return await self.bot.say("This class doesn't exist. Try creating it with .newclass name") def setup(bot): bot.add_cog(Classes(bot))
gpl-2.0
he7d3r/revscoring
tests/languages/test_japanese.py
2
3077
import pickle from revscoring.languages import japanese from .util import compare_extraction BAD = [ "ๆญปใญ", "ใ—ใญ", "ใ‚ทใƒ", "ใ‚ใป", "ใ‚ขใƒ›", "ใฐใ‹", "ใƒใ‚ซ", "ใ‚„ใ‚Šใพใ‚“", "ใƒคใƒชใƒžใƒณ", "ใพใ‚“ใ“", "ใƒžใƒณใ‚ณ", "ใ†ใ‚“ใ“", "ใ‚ฆใƒณใ‚ณ", "ใใ‚‚ใ„", "ใ‚ญใƒขใ‚ค", "็—ดๅฅณ", "ๆทซไนฑ", "ๅœจๆ—ฅ", "ใƒใƒงใƒณ", "ๆ”ฏ้‚ฃ", "ใ†ใ–ใ„", "ใ†ใœใƒผ", "๏ฝ—๏ฝ—๏ฝ—๏ฝ—", "wwww", "๏ฝ—๏ฝ—๏ฝ—๏ฝ—๏ฝ—๏ฝ—๏ฝ—๏ฝ—", "wwwwwwwwwwwwwww" ] INFORMAL = [ # Words "๏ผˆ็ฌ‘๏ผ‰", "(็ฌ‘)", "ใƒปใƒปใƒป", "ใŠ้ก˜ใ„ใ—ใพใ™", "ใ“ใ‚“ใซใกใฏ", "ใฏใ˜ใ‚ใพใ—ใฆ", "ใ‚ใ‚ŠใŒใจใ†ใ”ใ–ใ„ใพใ™", "ใ‚ใ‚ŠใŒใจใ†ใ”ใ–ใ„ใพใ—ใŸ", "ใ™ใฟใพใ›ใ‚“", "ๆ€ใ„ใพใ™", "ใฏใ„", "ใ„ใ„ใˆ", "ใงใ™ใŒ", "ใ‚ใชใŸ", "ใŠใฃใ—ใ‚ƒใ‚‹", # sub-word patterns "ใญใ€‚", "ใชใ€‚", "ใ‚ˆใ€‚", "ใ‚ใ€‚", "ใŒใ€‚", "ใฏใ€‚", "ใซใ€‚", "ใ‹๏ผŸ", "ใ‚“ใ‹ใ€‚", "ใ™ใ‹ใ€‚", "ใพใ™ใ€‚", "ใ›ใ‚“ใ€‚", "ใงใ™ใ€‚", "ใพใ—ใŸใ€‚", "ใงใ—ใŸใ€‚", "ใ—ใ‚‡ใ†ใ€‚", "ใ—ใ‚‡ใ†ใ‹ใ€‚", "ใใ ใ•ใ„ใ€‚", "ไธ‹ใ•ใ„ใ€‚", "ใพใ™ใŒ", "ใงใ™ใŒ", "ใพใ—ใŸใŒ", "ใงใ—ใŸใŒ", "ใ•ใ‚“ใ€", "ๆง˜ใ€", "ใกใ‚ƒใ„", "ใกใ‚ƒใ†", "ใกใ‚ƒใˆ", "ใกใ‚ƒใฃ", "ใฃใกใ‚ƒ", "ใ˜ใ‚ƒใชใ„", "ใ˜ใ‚ƒใชใ" ] OTHER = [ """ ๆœฌ้ …ใง่งฃ่ชฌใ™ใ‚‹ๅœฐๆ–น็—…ใจใฏใ€ๅฑฑๆขจ็œŒใซใŠใ‘ใ‚‹ๆ—ฅๆœฌไฝ่ก€ๅธ่™ซ็—‡ใฎๅ‘ผ็งฐใงใ‚ใ‚Šใ€ ้•ทใ„้–“ใใฎๅŽŸๅ› ใŒๆ˜Žใ‚‰ใ‹ใซใชใ‚‰ใšไฝๆฐ‘ใ‚’่‹ฆใ—ใ‚ใŸๆ„ŸๆŸ“็—‡ใงใ‚ใ‚‹ใ€‚ใ“ใ“ใงใฏใ€ ใใฎๅ…‹ๆœใƒปๆ’ฒๆป…ใซ่‡ณใ‚‹ๆญดๅฒใซใคใ„ใฆ่ชฌๆ˜Žใ™ใ‚‹ใ€‚ ใ“ใฎ็–พๆ‚ฃใฏไฝ่ก€ๅธ่™ซ้กžใซๅˆ†้กžใ•ใ‚Œใ‚‹ๅฏ„็”Ÿ่™ซใงใ‚ใ‚‹ๆ—ฅๆœฌไฝ่ก€ๅธ่™ซใฎๅฏ„็”Ÿใซใ‚ˆใฃใฆ็™บ็—‡ใ™ใ‚‹ๅฏ„็”Ÿ่™ซ็—…ใงใ‚ใ‚Šใ€ ใƒ’ใƒˆใ‚’ๅซใ‚€ๅ“บไนณ้กžๅ…จ่ˆฌใฎ่ก€็ฎกๅ†…้ƒจใซๅฏ„็”Ÿๆ„ŸๆŸ“ใ™ใ‚‹ไบบ็ฃๅ…ฑ้€šๆ„ŸๆŸ“็—‡ใงใ‚‚ใ‚ใ‚‹ใ€‚ ็—…ๅใŠใ‚ˆใณๅŽŸ่™ซใซๆ—ฅๆœฌใฎๅ›ฝๅใŒๅ† ใ•ใ‚Œใฆใ„ใ‚‹ใฎใฏใ€ ็–พๆ‚ฃใฎๅŽŸๅ› ใจใชใ‚‹็—…ๅŽŸไฝ“๏ผˆๆ—ฅๆœฌไฝ่ก€ๅธ่™ซ๏ผ‰ใฎ็”Ÿไฝ“ใŒใ€ ไธ–็•Œใงๆœ€ๅˆใซๆ—ฅๆœฌๅ›ฝๅ†…๏ผˆ็พ๏ผšๅฑฑๆขจ็œŒ็”ฒๅบœๅธ‚๏ผ‰ใง็™บ่ฆ‹ใ•ใ‚ŒใŸใ“ใจใซใ‚ˆใ‚‹ใ‚‚ใฎใงใ‚ใฃใฆใ€ 
ๆ—ฅๆœฌๅ›บๆœ‰ใฎ็–พๆ‚ฃใจใ„ใ†ใ‚ใ‘ใงใฏใชใ„ใ€‚ๆ—ฅๆœฌไฝ่ก€ๅธ่™ซ็—‡ใฏใ€ไธญๅ›ฝใ€ใƒ•ใ‚ฃใƒชใƒ”ใƒณใ€ ใ‚คใƒณใƒ‰ใƒใ‚ทใ‚ขใฎ3ใ‚ซๅ›ฝใ‚’ไธญๅฟƒใซใ€ ๅนด้–“ๆ•ฐๅƒไบบใ‹ใ‚‰ๆ•ฐไธ‡ไบบ่ฆๆจกใฎๆ–ฐ่ฆๆ„ŸๆŸ“ๆ‚ฃ่€…ใŒ็™บ็”Ÿใ—ใฆใŠใ‚Šใ€ ไธ–็•ŒไฟๅฅๆฉŸ้–ขใ€€(WHO)ใชใฉใซใ‚ˆใฃใฆใ€ใ•ใพใ–ใพใชๅฏพ็ญ–ใŒ่กŒใ‚ใ‚Œใฆใ„ใ‚‹ใ€‚ """ ] def test_badwords(): compare_extraction(japanese.badwords.revision.datasources.matches, BAD, OTHER) assert japanese.badwords == pickle.loads(pickle.dumps(japanese.badwords)) def test_informals(): compare_extraction(japanese.informals.revision.datasources.matches, INFORMAL, OTHER) assert japanese.informals == pickle.loads(pickle.dumps(japanese.informals))
mit
robmcmullen/peppy
peppy/i18n/zh_CN.py
1
24750
# -*- coding: utf-8 -*- #This is generated code - do not edit encoding = 'utf-8' dict = { '&About...': '\xe5\x85\xb3\xe4\xba\x8e(&A)...', '&Close Document': '\xe5\x85\xb3\xe9\x97\xad\xe6\x96\x87\xe6\xa1\xa3(&C)', '&Comment Region': '\xe6\xb3\xa8\xe9\x87\x8a\xe5\x8c\xba\xe5\x9f\x9f(&C)', '&Delete Window': '\xe5\x88\xa0\xe9\x99\xa4\xe7\xaa\x97\xe5\x8f\xa3(&D)', '&Describe Action': '\xe6\x8f\x8f\xe8\xbf\xb0\xe8\xa1\x8c\xe4\xb8\xba(&D)', '&Execute Action': '\xe6\x89\xa7\xe8\xa1\x8c\xe8\xa1\x8c\xe5\x8a\xa8(&E)', '&Execute Macro': '\xe6\x89\xa7\xe8\xa1\x8c\xe5\xae\x8f(&E)', '&Folding': '\xe6\x8a\x98\xe5\x8f\xa0(&F)', '&Help': '\xe5\xb8\xae\xe5\x8a\xa9(&H)', '&Line Numbers': 'Line Numbers(&L)', '&Line Wrapping': '\xe6\x96\x87\xe6\x9c\xac\xe6\x8d\xa2\xe8\xa1\x8c(&L)', '&New Window': '\xe6\x96\xb0\xe5\xbb\xba\xe7\xaa\x97\xe5\x8f\xa3(&N)', '&Open Hex Editor...': '\xe6\x89\x93\xe5\xbc\x80\xe5\x8d\x81\xe5\x85\xad\xe8\xbf\x9b\xe5\x88\xb6\xe7\xbc\x96\xe8\xbe\x91\xe5\x99\xa8(&O)...', '&Open Image Viewer...': '\xe6\x89\x93\xe5\xbc\x80\xe5\x9b\xbe\xe5\x83\x8f\xe6\xb5\x8f\xe8\xa7\x88\xe5\x99\xa8(&O)...', '&Open Sample Graphviz dot file': '\xe6\x89\x93\xe5\xbc\x80Graphviz dot \xe7\xa4\xba\xe4\xbe\x8b\xe6\x96\x87\xe4\xbb\xb6(&O)', '&Open Sample Python': '\xe6\x89\x93\xe5\xbc\x80 Python \xe7\xa4\xba\xe4\xbe\x8b(&O)', '&Preferences...': '\xe9\xa6\x96\xe9\x80\x89\xe9\xa1\xb9(&P)...', '&Revert': '\xe8\xbf\x98\xe5\x8e\x9f(&R)', '&Save...': '\xe4\xbf\x9d\xe5\xad\x98(&S)...', '&Show Key Bindings': '\xe6\x98\xbe\xe7\xa4\xba\xe9\x94\xae\xe7\xbb\x91\xe5\xae\x9a(&S)', '&Show Toolbars': '\xe6\x98\xbe\xe7\xa4\xba\xe5\xb7\xa5\xe5\x85\xb7\xe6\xa0\x8f(&S)', '&Uncomment Region': '\xe9\x9d\x9e\xe6\xb3\xa8\xe9\x87\x8a\xe5\x8c\xba\xe5\x9f\x9f(&U)', '&Word Count': '\xe5\x8d\x95\xe8\xaf\x8d\xe7\xbb\x9f\xe8\xae\xa1(&W)', '&Wrap Words': '\xe5\x8d\x95\xe8\xaf\x8d\xe6\x8d\xa2\xe8\xa1\x8c(&W)', '. Do you wish to continue?': '. 
\xe8\xa6\x81\xe7\xbb\xa7\xe7\xbb\xad\xe5\x90\x97\xef\xbc\x9f', 'Abort': '\xe4\xb8\xad\xe6\xad\xa2', 'About this program': '\xe5\x85\xb3\xe4\xba\x8e\xe6\x9c\xac\xe7\xa8\x8b\xe5\xba\x8f', 'Act on the marked buffers according to their flags': '\xe6\xa0\xb9\xe6\x8d\xae\xe6\xa0\x87\xe5\xbf\x97\xe6\x89\xa7\xe8\xa1\x8c\xe5\xaf\xb9\xe9\x80\x89\xe4\xb8\xad\xe7\xbc\x93\xe5\x86\xb2\xe5\x8c\xba\xe7\x9a\x84\xe6\x93\x8d\xe4\xbd\x9c', 'Actions': '\xe5\x8a\xa8\xe4\xbd\x9c', 'Add ChangeLog Entry': '\xe6\xb7\xbb\xe5\x8a\xa0\xe6\x9b\xb4\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95\xe6\x9d\xa1\xe7\x9b\xae', 'Add a Multi-Key Binding': '\xe6\xb7\xbb\xe5\x8a\xa0\xe5\xa4\x9a\xe9\x94\xae\xe7\xbb\x91\xe5\xae\x9a', 'Add a Single Key Binding': '\xe6\xb7\xbb\xe5\x8a\xa0\xe5\x8d\x95\xe9\x94\xae\xe7\xbb\x91\xe5\xae\x9a', 'Add a Three Key Binding': '\xe6\xb7\xbb\xe5\x8a\xa0\xe4\xb8\x89\xe9\x94\xae\xe7\xbb\x91\xe5\xae\x9a', 'Add a Two Key Binding': '\xe6\xb7\xbb\xe5\x8a\xa0\xe5\x8f\x8c\xe9\x94\xae\xe7\xbb\x91\xe5\xae\x9a', 'Add new ChangeLog entry to the top of the ChangeLog': '\xe5\x9c\xa8\xe6\x9b\xb4\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95\xe7\x9a\x84\xe6\x9c\x80\xe9\xa1\xb6\xe9\x83\xa8\xe6\xb7\xbb\xe5\x8a\xa0\xe6\x96\xb0\xe7\x9a\x84\xe6\x9b\xb4\xe6\x94\xb9\xe8\xae\xb0\xe5\xbd\x95', 'Add to repository': '\xe6\xb7\xbb\xe5\x8a\xa0\xe5\x88\xb0\xe5\xba\x93', 'All Macros': '\xe5\x85\xa8\xe9\x83\xa8\xe5\xae\x8f', 'All Minor Modes': '\xe5\x85\xa8\xe9\x83\xa8\xe6\x9c\x80\xe5\xb0\x8f\xe6\xa8\xa1\xe5\xbc\x8f', 'All Sidebars': '\xe5\x85\xa8\xe9\x83\xa8\xe4\xbe\xa7\xe8\xbe\xb9\xe6\xa0\x8f', 'All Sidebars and Minor Modes': '\xe5\x85\xa8\xe9\x83\xa8\xe4\xbe\xa7\xe8\xbe\xb9\xe6\xa0\x8f\xe5\x92\x8c\xe6\x9c\x80\xe5\xb0\x8f\xe6\xa8\xa1\xe5\xbc\x8f', 'An error occurred when attempting to remove ': '\xe7\xa7\xbb\xe9\x99\xa4\xe6\x97\xb6\xe4\xba\xa7\xe7\x94\x9f\xe9\x94\x99\xe8\xaf\xaf ', 'Apply Settings': '\xe5\xba\x94\xe7\x94\xa8\xe8\xae\xbe\xe7\xbd\xae', 'As Defaults for %s Mode': '\xe4\xbd\x9c\xe4\xb8\xba %s 
\xe6\xa8\xa1\xe5\xbc\x8f\xe7\x9a\x84\xe9\xbb\x98\xe8\xae\xa4\xe5\x80\xbc', 'As Defaults for All Modes': '\xe4\xbd\x9c\xe4\xb8\xba\xe6\x89\x80\xe6\x9c\x89\xe6\xa8\xa1\xe5\xbc\x8f\xe7\x9a\x84\xe9\xbb\x98\xe8\xae\xa4\xe5\x80\xbc', 'Attributes': '\xe5\xb1\x9e\xe6\x80\xa7', 'Background': '\xe8\x83\x8c\xe6\x99\xaf', 'Bad input': '\xe5\x9d\x8f\xe7\x9a\x84\xe8\xbe\x93\xe5\x85\xa5', 'Base new theme on existing one': '\xe6\x96\xb0\xe4\xb8\xbb\xe9\xa2\x98\xe5\x9f\xba\xe4\xba\x8e\xe5\xb7\xb2\xe6\x9c\x89\xe4\xb8\xbb\xe9\xa2\x98', 'Bookmarks': '\xe4\xb9\xa6\xe7\xad\xbe', 'Bring All to Front': '\xe5\x89\x8d\xe7\xbd\xae\xe5\x85\xa8\xe9\x83\xa8\xe7\xaa\x97\xe5\x8f\xa3', 'Build...': '\xe7\xbc\x96\xe8\xaf\x91...', 'Cancel': '\xe5\x8f\x96\xe6\xb6\x88', 'Cancel Minibuffer': '\xe5\x8f\x96\xe6\xb6\x88\xe5\xbe\xae\xe5\x9e\x8b\xe7\xbc\x93\xe5\x86\xb2\xe5\x8c\xba', 'Capitalize': '\xe9\xa6\x96\xe5\xad\x97\xe6\xaf\x8d\xe5\xa4\xa7\xe5\x86\x99', 'Caret Width': '\xe6\x8f\x92\xe5\x85\xa5\xe7\xac\xa6\xe5\xae\xbd\xe5\xba\xa6', 'Case': '\xe5\xa4\xa7\xe5\xb0\x8f\xe5\x86\x99', 'Case Sensitive Search': '\xe6\x90\x9c\xe7\xb4\xa2\xe5\x8c\xba\xe5\x88\x86\xe5\xa4\xa7\xe5\xb0\x8f\xe5\x86\x99', 'Changed color scheme to %s': '\xe9\xa2\x9c\xe8\x89\xb2\xe6\x96\xb9\xe6\xa1\x88\xe6\x94\xb9\xe4\xb8\xba "%s"', 'Clear Flags': '\xe6\xb8\x85\xe9\x99\xa4\xe6\xa0\x87\xe8\xae\xb0', 'Clear Playlist': '\xe6\xb8\x85\xe9\x99\xa4\xe6\x92\xad\xe6\x94\xbe\xe5\x88\x97\xe8\xa1\xa8', 'Clear all flags from the selected item(s)': '\xe6\xb8\x85\xe9\x99\xa4\xe9\x80\x89\xe6\x8b\xa9\xe9\xa1\xb9\xe7\x9a\x84\xe6\x89\x80\xe6\x9c\x89\xe6\xa0\x87\xe8\xae\xb0', 'Close Tab': '\xe5\x85\xb3\xe9\x97\xad\xe6\xa0\x87\xe7\xad\xbe', 'Close the current tab': '\xe5\x85\xb3\xe9\x97\xad\xe5\xbd\x93\xe5\x89\x8d\xe6\xa0\x87\xe7\xad\xbe', 'Color': '\xe9\xa2\x9c\xe8\x89\xb2', 'Color Map': '\xe8\x89\xb2\xe5\xbd\xa9\xe6\x98\xa0\xe5\xb0\x84\xe5\x9b\xbe', 'Commit Dialog': '\xe6\x8f\x90\xe4\xba\xa4\xe5\xaf\xb9\xe8\xaf\x9d\xe6\xa1\x86', 'Commit changes': 
'\xe6\x8f\x90\xe4\xba\xa4\xe6\x9b\xb4\xe6\x94\xb9', 'Compare to previous version': '\xe6\xaf\x94\xe8\xbe\x83\xe4\xba\x8e\xe4\xbb\xa5\xe5\x89\x8d\xe7\x89\x88\xe6\x9c\xac', 'Complete word': '\xe8\x87\xaa\xe5\x8a\xa8\xe5\xae\x8c\xe6\x88\x90', 'Contrast': '\xe5\xaf\xb9\xe6\xaf\x94\xe5\xba\xa6', 'Contributions by:': '\xe8\xb4\xa1\xe7\x8c\xae\xe8\x80\x85:', 'Copy': '\xe5\xa4\x8d\xe5\x88\xb6', 'Cut': '\xe5\x89\xaa\xe5\x88\x87', 'Debug': '\xe8\xb0\x83\xe8\xaf\x95', 'Decrease Size': '\xe5\x87\x8f\xe5\xb0\x8f\xe5\xa4\xa7\xe5\xb0\x8f', 'Decrease Volume': '\xe5\x87\x8f\xe5\xb0\x8f\xe9\x9f\xb3\xe9\x87\x8f', 'Decrease the volume': '\xe9\x99\x8d\xe4\xbd\x8e\xe9\x9f\xb3\xe9\x87\x8f', 'Delete Bookmark': '\xe5\x88\xa0\xe9\x99\xa4\xe4\xb9\xa6\xe7\xad\xbe', 'Delete Macro': '\xe5\x88\xa0\xe9\x99\xa4\xe5\xae\x8f', 'Delete Playlist Entry': '\xe5\x88\xa0\xe9\x99\xa4\xe6\x92\xad\xe6\x94\xbe\xe5\x88\x97\xe8\xa1\xa8\xe6\x9d\xa1\xe7\x9b\xae', 'Delete current window': '\xe5\x88\xa0\xe9\x99\xa4\xe5\xbd\x93\xe5\x89\x8d\xe7\xaa\x97\xe5\x8f\xa3', 'Delete selected songs from playlist': '\xe4\xbb\x8e\xe6\x92\xad\xe6\x94\xbe\xe5\x88\x97\xe8\xa1\xa8\xe5\x88\xa0\xe9\x99\xa4\xe9\x80\x89\xe6\x8b\xa9\xe7\x9a\x84\xe6\xad\x8c\xe6\x9b\xb2', 'Describe an action by name': '\xe7\x94\xa8\xe5\x90\x8d\xe5\xad\x97\xe6\x8f\x8f\xe8\xbf\xb0\xe4\xb8\x80\xe4\xb8\xaa\xe5\x8a\xa8\xe4\xbd\x9c', 'Display a list of all buffers': '\xe6\x98\xbe\xe7\xa4\xba\xe4\xb8\x80\xe4\xb8\xaa\xe5\x8c\x85\xe5\x90\xab\xe6\x89\x80\xe6\x9c\x89\xe7\xbc\x93\xe5\x86\xb2\xe5\x8c\xba\xe7\x9a\x84\xe5\x88\x97\xe8\xa1\xa8', 'Display and edit key bindings': '\xe6\x98\xbe\xe7\xa4\xba\xe5\x92\x8c\xe7\xbc\x96\xe8\xbe\x91\xe9\x94\xae\xe7\xbb\x91\xe5\xae\x9a\xe3\x80\x82', 'Documents': '\xe6\x96\x87\xe6\xa1\xa3', 'E&xit': '\xe9\x80\x80\xe5\x87\xba(&X)', 'EOL Characters': '\xe8\xa1\x8c\xe7\xbb\x93\xe6\x9d\x9f(EOL) \xe5\xad\x97\xe7\xac\xa6', 'Edit': '\xe7\xbc\x96\xe8\xbe\x91', 'Edit Macro': '\xe7\xbc\x96\xe8\xbe\x91\xe5\xae\x8f', 'Enter a hex color value': 
'\xe8\xbe\x93\xe5\x85\xa5\xe4\xb8\x80\xe4\xb8\xaa\xe5\x8d\x81\xe5\x85\xad\xe8\xbf\x9b\xe5\x88\xb6\xe7\x9a\x84\xe9\xa2\x9c\xe8\x89\xb2\xe5\x80\xbc', 'Enter command to execute on all files': '\xe8\xbe\x93\xe5\x85\xa5\xe5\x91\xbd\xe4\xbb\xa4\xe8\xbf\x90\xe8\xa1\x8c\xe4\xba\x8e\xe6\x89\x80\xe6\x9c\x89\xe6\x96\x87\xe4\xbb\xb6', 'Enter your commit message:': '\xe8\xbe\x93\xe5\x85\xa5\xe6\x8f\x90\xe4\xba\xa4\xe4\xbf\xa1\xe6\x81\xaf\xef\xbc\x9a', 'Error Traceback:': '\xe9\x94\x99\xe8\xaf\xaf\xe8\xb7\x9f\xe8\xb8\xaa\xef\xbc\x9a', 'Error occurred when copying/moving files': '\xe5\x9c\xa8\xe5\xa4\x8d\xe5\x88\xb6/\xe7\xa7\xbb\xe5\x8a\xa8\xe6\x96\x87\xe4\xbb\xb6\xe6\x97\xb6\xe5\x8f\x91\xe7\x94\x9f\xe9\x94\x99\xe8\xaf\xaf', 'Error occurred when removing files': '\xe7\xa7\xbb\xe5\x8a\xa8\xe6\x96\x87\xe4\xbb\xb6\xe6\x97\xb6\xe4\xba\xa7\xe7\x94\x9f\xe9\x94\x99\xe8\xaf\xaf', 'Error/Crash Reporter': '\xe9\x94\x99\xe8\xaf\xaf/\xe5\xb4\xa9\xe6\xba\x83\xe6\x8a\xa5\xe5\x91\x8a', 'Execute Macro By Keystroke': '\xe9\x80\x9a\xe8\xbf\x87\xe5\x87\xbb\xe9\x94\xae\xe6\x89\xa7\xe8\xa1\x8c\xe5\xae\x8f', 'Execute an action by name': '\xe4\xbb\xa5\xe5\x90\x8d\xe5\xad\x97\xe6\x89\xa7\xe8\xa1\x8c\xe4\xb8\x80\xe4\xb8\xaa\xe5\x8a\xa8\xe4\xbd\x9c', 'Execute command...': '\xe8\xbf\x90\xe8\xa1\x8c\xe5\x91\xbd\xe4\xbb\xa4...', 'Exit the application': '\xe9\x80\x80\xe5\x87\xba\xe5\xba\x94\xe7\x94\xa8\xe7\xa8\x8b\xe5\xba\x8f', 'Export': '\xe5\xaf\xbc\xe5\x87\xba', 'Fast test of the progress bar': '\xe8\xbf\x9b\xe5\xba\xa6\xe6\xa0\x8f\xe7\x9a\x84\xe5\xbf\xab\xe9\x80\x9f\xe6\xb5\x8b\xe8\xaf\x95', 'File': '\xe6\x96\x87\xe4\xbb\xb6', 'File/Export': '\xe6\x96\x87\xe4\xbb\xb6/\xe5\xaf\xbc\xe5\x87\xba', 'Fill Paragraph': '\xe5\xa1\xab\xe5\x85\x85\xe6\xae\xb5\xe8\x90\xbd', 'Filter': '\xe8\xbf\x87\xe6\xbb\xa4\xe5\x99\xa8', 'Find Wildcard...': '\xe6\x9f\xa5\xe6\x89\xbe\xe9\x80\x9a\xe9\x85\x8d\xe7\xac\xa6...', 'Find...': '\xe6\x9f\xa5\xe6\x89\xbe...', 'Floating Point': '\xe6\xb5\xae\xe7\x82\xb9\xe6\x95\xb0', 'Folder': 
'\xe6\x96\x87\xe4\xbb\xb6\xe5\xa4\xb9', 'Font': '\xe5\xad\x97\xe4\xbd\x93', 'Font Settings': '\xe5\xad\x97\xe4\xbd\x93\xe8\xae\xbe\xe7\xbd\xae', 'Font Size': '\xe5\xad\x97\xe5\x8f\xb7', 'Foreground': '\xe5\x89\x8d\xe6\x99\xaf', 'Frames': '\xe5\xb8\xa7\xe6\x95\xb0', 'Games': '\xe6\xb8\xb8\xe6\x88\x8f', 'Garbage Objects': '\xe5\xba\x9f\xe5\xbc\x83\xe5\xaf\xb9\xe8\xb1\xa1', 'General': '\xe9\x80\x9a\xe7\x94\xa8', 'Goto Band': '\xe8\xbd\xac\xe5\x88\xb0\xe4\xb9\x90\xe9\x98\x9f', 'Goto Line...': '\xe8\xbd\xac\xe5\x88\xb0\xe8\xa1\x8c...', 'Goto Offset...': '\xe8\xbd\xac\xe5\x88\xb0\xe5\x81\x8f\xe7\xa7\xbb\xe9\x87\x8f...', 'Goto a line in the text': '\xe8\xb7\xb3\xe8\xbd\xac\xe5\x88\xb0\xe6\x96\x87\xe6\x9c\xac\xe4\xb8\xad\xe7\x9a\x84\xe4\xb8\x80\xe8\xa1\x8c', 'Goto an offset': '\xe8\xbd\xac\xe5\x88\xb0\xe4\xb8\x80\xe4\xb8\xaa\xe5\x81\x8f\xe7\xa7\xbb', 'Hide': '\xe9\x9a\x90\xe8\x97\x8f', 'Highlight Caret Line': '\xe9\xab\x98\xe4\xba\xae\xe5\xbd\x93\xe5\x89\x8d\xe8\xa1\x8c', 'Image View': '\xe5\x9b\xbe\xe5\x83\x8f\xe8\xa7\x86\xe5\x9b\xbe', 'Incomplete regex': '\xe4\xb8\x8d\xe5\xae\x8c\xe6\x95\xb4\xe7\x9a\x84\xe6\xad\xa3\xe5\x88\x99\xe8\xa1\xa8\xe8\xbe\xbe\xe5\xbc\x8f', 'Increase Size': '\xe5\xa2\x9e\xe5\xa4\xa7', 'Increase Volume': '\xe5\xa2\x9e\xe5\xa4\xa7\xe9\x9f\xb3\xe9\x87\x8f', 'Increase the volume': '\xe6\x8f\x90\xe9\xab\x98\xe9\x9f\xb3\xe9\x87\x8f', 'Indent Character': '\xe7\xbc\xa9\xe8\xbf\x9b\xe5\xad\x97\xe7\xac\xa6', 'Indent the next line following a return': "\xe6\x89\x80\xe8\xbf\x9b'return'\xe7\x9a\x84\xe4\xb8\x8b\xe4\xb8\x80\xe8\xa1\x8c", 'Indentation Size': '\xe7\xbc\xa9\xe8\xbf\x9b\xe5\xa4\xa7\xe5\xb0\x8f', 'Input:': '\xe8\xbe\x93\xe5\x85\xa5:', "Insert 'Hello, world' at the current cursor position": "\xe5\x9c\xa8\xe5\xbd\x93\xe5\x89\x8d\xe5\x85\x89\xe6\xa0\x87\xe4\xbd\x8d\xe7\xbd\xae\xe6\x8f\x92\xe5\x85\xa5' Hello, world'", 'Insert Unicode': '\xe6\x8f\x92\xe5\x85\xa5 Unicode', 'Integer': '\xe6\x95\xb4\xe6\x95\xb0', 'Invalid range': 
'\xe9\x9d\x9e\xe6\xb3\x95\xe8\x8c\x83\xe5\x9b\xb4\xe3\x80\x82', 'Line Endings': '\xe8\xa1\x8c\xe7\xbb\x88\xe6\xad\xa2', 'List All Documents': '\xe5\x88\x97\xe5\x87\xba\xe6\x89\x80\xe6\x9c\x89\xe6\x96\x87\xe6\xa1\xa3', 'Local settings (each view can have different values for these settings)': '\xe6\x9c\xac\xe5\x9c\xb0\xe8\xae\xbe\xe7\xbd\xae (\xe6\xaf\x8f\xe4\xb8\xaa\xe8\xa7\x86\xe5\x9b\xbe\xe7\x9a\x84\xe8\xae\xbe\xe7\xbd\xae\xe5\x9d\x87\xe5\x8f\xaf\xe4\xb8\x8d\xe5\x90\x8c)', 'Login': '\xe7\x99\xbb\xe5\xbd\x95', 'MPD Server...': 'MPD\xe6\x9c\x8d\xe5\x8a\xa1...', 'Major Mode': '\xe6\xa0\xb8\xe5\xbf\x83\xe6\xa8\xa1\xe5\xbc\x8f', 'Mark for Deletion': '\xe6\xa0\x87\xe8\xae\xb0\xe5\x88\xa0\xe9\x99\xa4', 'Mark for Deletion and Move Backwards': '\xe6\xa0\x87\xe8\xae\xb0\xe5\x88\xa0\xe9\x99\xa4\xe4\xb8\x94\xe5\x90\x91\xe5\x90\x8e\xe7\xa7\xbb', 'Mark for Display': '\xe6\xa0\x87\xe8\xae\xb0\xe6\x98\xbe\xe7\xa4\xba', 'Mark for Display and Move Backwards': '\xe6\xa0\x87\xe8\xae\xb0\xe6\x98\xbe\xe7\xa4\xba\xe4\xb8\x94\xe5\x90\x91\xe5\x90\x8e\xe7\xa7\xbb', 'Mark for Save': '\xe6\xa0\x87\xe8\xae\xb0\xe4\xbf\x9d\xe5\xad\x98', 'Mark for Save and Move Backwards': '\xe6\xa0\x87\xe8\xae\xb0\xe4\xbf\x9d\xe5\xad\x98\xe4\xb8\x94\xe5\x90\x91\xe5\x90\x8e\xe7\xa7\xbb', 'Mark the selected buffer for deletion': '\xe6\xa0\x87\xe8\xae\xb0\xe9\x80\x89\xe6\x8b\xa9\xe7\x9a\x84\xe7\xbc\x93\xe5\x86\xb2\xe5\x8c\xba\xe4\xbb\xa5\xe5\x87\x86\xe5\xa4\x87\xe5\x88\xa0\xe9\x99\xa4', 'Mark the selected buffer for deletion and move to the previous item': '\xe6\xa0\x87\xe8\xae\xb0\xe9\x80\x89\xe6\x8b\xa9\xe7\x9a\x84\xe7\xbc\x93\xe5\x86\xb2\xe5\x8c\xba\xe4\xbb\xa5\xe5\x87\x86\xe5\xa4\x87\xe5\x88\xa0\xe9\x99\xa4,\xe7\x84\xb6\xe5\x90\x8e\xe7\xa7\xbb\xe5\x88\xb0\xe5\x89\x8d\xe4\xb8\x80\xe9\xa1\xb9', 'Mark the selected buffer to be displayed': '\xe6\xa0\x87\xe8\xae\xb0\xe9\x80\x89\xe6\x8b\xa9\xe7\x9a\x84\xe7\xbc\x93\xe5\x86\xb2\xe5\x8c\xba\xe7\x94\xa8\xe4\xba\x8e\xe6\x98\xbe\xe7\xa4\xba', 'Mark the selected buffer 
to be displayed and move to the previous item': '\xe6\xa0\x87\xe8\xae\xb0\xe9\x80\x89\xe6\x8b\xa9\xe7\x9a\x84\xe7\xbc\x93\xe5\x86\xb2\xe5\x8c\xba\xe7\x94\xa8\xe4\xba\x8e\xe6\x98\xbe\xe7\xa4\xba,\xe7\x84\xb6\xe5\x90\x8e\xe7\xa7\xbb\xe5\x88\xb0\xe5\x89\x8d\xe4\xb8\x80\xe9\xa1\xb9', 'Mark the selected buffer to be saved': '\xe6\xa0\x87\xe8\xae\xb0\xe9\x80\x89\xe6\x8b\xa9\xe7\x9a\x84\xe7\xbc\x93\xe5\x86\xb2\xe5\x8c\xba\xe7\x94\xa8\xe4\xba\x8e\xe4\xbf\x9d\xe5\xad\x98', 'Minor Modes': '\xe6\xac\xa1\xe8\xa6\x81\xe6\xa8\xa1\xe5\xbc\x8f', 'Modes': '\xe6\x96\xb9\xe5\xbc\x8f', 'Move the selection to the next item in the list': '\xe7\xa7\xbb\xe5\x8a\xa8\xe9\x80\x89\xe6\x8b\xa9\xe5\x88\xb0\xe5\x88\x97\xe8\xa1\xa8\xe4\xb8\xad\xe7\x9a\x84\xe4\xb8\x8b\xe4\xb8\x80\xe9\xa1\xb9', 'Move the selection to the previous item in the list': '\xe7\xa7\xbb\xe5\x8a\xa8\xe9\x80\x89\xe6\x8b\xa9\xe5\x88\xb0\xe5\x88\x97\xe8\xa1\xa8\xe4\xb8\xad\xe7\x9a\x84\xe5\x89\x8d\xe4\xb8\x80\xe9\xa1\xb9', 'Move to Next Item': '\xe7\xa7\xbb\xe5\x8a\xa8\xe5\x88\xb0\xe4\xb8\x8b\xe4\xb8\x80\xe9\xa1\xb9', 'Move to Previous Item': '\xe7\xa7\xbb\xe5\x8a\xa8\xe5\x88\xb0\xe5\x89\x8d\xe4\xb8\x80\xe9\xa1\xb9', 'Mute': '\xe9\x9d\x99\xe9\x9f\xb3', 'Mute the volume': '\xe9\x9d\x99\xe9\x9f\xb3', 'New': '\xe6\x96\xb0\xe5\xbb\xba', 'New Tab': '\xe6\x96\xb0\xe5\xbb\xba\xe6\xa0\x87\xe7\xad\xbe', 'New plain text file': '\xe6\x96\xb0\xe5\xbb\xba\xe7\xba\xaf\xe6\x96\x87\xe6\x9c\xac\xe6\x96\x87\xe4\xbb\xb6', 'Next Band': '\xe4\xb8\x80\xe4\xb8\x8b\xe4\xb8\xaa\xe4\xb9\x90\xe9\x98\x9f', 'Next Song': '\xe4\xb8\x8b\xe4\xb8\x80\xe9\xa6\x96\xe6\xad\x8c', 'Not a numeric expression': '\xe9\x9d\x9e\xe6\x95\xb0\xe5\xad\x97\xe8\xa1\xa8\xe8\xbe\xbe\xe5\xbc\x8f', 'Not an integer expression': '\xe9\x9d\x9e\xe6\x95\xb4\xe6\x95\xb0\xe8\xa1\xa8\xe8\xbe\xbe\xe5\xbc\x8f', 'Ok': '\xe6\xad\xa3\xe5\xb8\xb8', 'Open': '\xe6\x89\x93\xe5\xbc\x80', 'Open File Using Minibuffer...': 
'\xe4\xbd\xbf\xe7\x94\xa8Minibuffer\xe6\x89\x93\xe5\xbc\x80\xe6\x96\x87\xe4\xbb\xb6...', 'Open File...': '\xe6\x89\x93\xe5\xbc\x80\xe6\x96\x87\xe4\xbb\xb6...', 'Open Recent': '\xe6\x89\x93\xe5\xbc\x80\xe6\x9c\x80\xe8\xbf\x91\xe8\xae\xbf\xe9\x97\xae\xe7\x9a\x84', 'Open URL Using Minibuffer...': '\xe7\x94\xa8Minibuffer\xe6\x89\x93\xe5\xbc\x80URL...', 'Open a Hex Editor': '\xe6\x89\x93\xe5\xbc\x80\xe5\x8d\x81\xe5\x85\xad\xe8\xbf\x9b\xe5\x88\xb6\xe7\xbc\x96\xe8\xbe\x91\xe5\x99\xa8', 'Open a file': '\xe6\x89\x93\xe5\xbc\x80\xe6\x96\x87\xe4\xbb\xb6', 'Open a file using URL name completion': '\xe4\xbb\xa5\xe5\xae\x8c\xe6\x95\xb4\xe7\x9a\x84URL\xe5\x90\x8d\xe6\x89\x93\xe5\xbc\x80\xe6\x96\x87\xe4\xbb\xb6', 'Open a file using filename completion': '\xe4\xbb\xa5\xe5\xae\x8c\xe6\x95\xb4\xe7\x9a\x84\xe6\x96\x87\xe4\xbb\xb6\xe5\x90\x8d\xe6\x89\x93\xe5\xbc\x80\xe6\x96\x87\xe4\xbb\xb6', 'Open a new tab': '\xe6\x89\x93\xe5\xbc\x80\xe4\xb8\x80\xe4\xb8\xaa\xe6\x96\xb0\xe6\xa0\x87\xe7\xad\xbe', 'Open a new window': '\xe6\x89\x93\xe5\xbc\x80\xe4\xb8\x80\xe4\xb8\xaa\xe6\x96\xb0\xe7\x9a\x84\xe7\xaa\x97\xe5\x8f\xa3', 'Open a sample Graphviz file': '\xe6\x89\x93\xe5\xbc\x80\xe4\xb8\x80\xe4\xb8\xaaGraphviz\xe7\xa4\xba\xe4\xbe\x8b\xe6\x96\x87\xe4\xbb\xb6', 'Open a sample Python file': '\xe6\x89\x93\xe5\xbc\x80\xe4\xb8\x80\xe4\xb8\xaaPython\xe7\xa4\xba\xe4\xbe\x8b\xe6\x96\x87\xe4\xbb\xb6', 'Open an Image Viewer': '\xe6\x89\x93\xe5\xbc\x80\xe4\xb8\x80\xe4\xb8\xaa\xe5\x9b\xbe\xe5\x83\x8f\xe6\xb5\x8f\xe8\xa7\x88\xe5\x99\xa8', 'Open an MPD server through a URL': '\xe9\x80\x9a\xe8\xbf\x87URL\xe6\x89\x93\xe5\xbc\x80MPD\xe6\x9c\x8d\xe5\x8a\xa1', "Open the STC Style Editor to edit the current mode's text display": '\xe6\x89\x93\xe5\xbc\x80STC\xe9\xa3\x8e\xe6\xa0\xbc\xe7\xbc\x96\xe8\xbe\x91\xe5\x99\xa8\xe7\xbc\x96\xe8\xbe\x91\xe5\xbd\x93\xe5\x89\x8d\xe6\xa8\xa1\xe5\xbc\x8f\xe7\x9a\x84\xe6\x96\x87\xe6\x9c\xac\xe6\x98\xbe\xe7\xa4\xba', 'Open the wxPython widget inspector': '\xe6\x89\x93\xe5\xbc\x80 
wxPython widget \xe6\xa3\x80\xe6\x9f\xa5\xe5\x99\xa8', 'Paste': '\xe7\xb2\x98\xe8\xb4\xb4', 'Paste at Column': '\xe7\xb2\x98\xe8\xb4\xb4\xe6\xa0\x8f', 'Play/Pause Song': '\xe5\xbc\x80\xe5\xa7\x8b/\xe6\x9a\x82\xe5\x81\x9c \xe6\xad\x8c\xe6\x9b\xb2', 'Plugins': '\xe6\x8f\x92\xe4\xbb\xb6', 'Preferences, settings, and configurations...': '\xe5\x8f\x82\xe6\x95\xb0\xe9\x80\x89\xe6\x8b\xa9, \xe8\xae\xbe\xe7\xbd\xae, \xe5\x92\x8c\xe4\xb8\x80\xe4\xba\x9b\xe9\x85\x8d\xe7\xbd\xae', 'Prev Band': '\xe5\x89\x8d\xe4\xb8\x80\xe4\xb8\xaa\xe4\xb9\x90\xe9\x98\x9f', 'Prev Song': '\xe4\xb8\x8a\xe4\xb8\x80\xe9\xa6\x96\xe6\xad\x8c', 'Preview': '\xe9\xa2\x84\xe8\xa7\x88', 'Previous Song': '\xe5\x89\x8d\xe4\xb8\x80\xe9\xa6\x96\xe6\xad\x8c', 'Project Homepage': '\xe9\xa1\xb9\xe7\x9b\xae\xe9\xa6\x96\xe9\xa1\xb5', 'Quit the program': '\xe9\x80\x80\xe5\x87\xba\xe7\xa8\x8b\xe5\xba\x8f', 'Record Format...': '\xe8\xae\xb0\xe5\xbd\x95\xe6\xa0\xbc\xe5\xbc\x8f...', 'Redo': '\xe9\x87\x8d\xe5\x81\x9a', 'Refresh': '\xe5\x88\xb7\xe6\x96\xb0', 'Refresh the current view to show any changes': '\xe5\x88\xb7\xe6\x96\xb0\xe5\xbd\x93\xe5\x89\x8d\xe8\xa7\x86\xe5\x9b\xbe\xe6\x9d\xa5\xe6\x98\xbe\xe7\xa4\xba\xe4\xbb\xbb\xe4\xbd\x95\xe5\x8f\x98\xe5\x8c\x96', 'Reindent': '\xe9\x87\x8d\xe6\x96\xb0\xe7\xbc\xa9\xe8\xbf\x9b', 'Remove all songs from the current playlist': '\xe7\xa7\xbb\xe9\x99\xa4\xe5\xbd\x93\xe5\x89\x8d\xe6\x92\xad\xe6\x94\xbe\xe5\x88\x97\xe8\xa1\xa8\xe7\x9a\x84\xe6\x89\x80\xe6\x9c\x89\xe6\xad\x8c\xe6\x9b\xb2', 'Replace Buffer': '\xe6\x9b\xbf\xe6\x8d\xa2\xe7\xbc\x93\xe5\x86\xb2\xe5\x99\xa8', 'Replace...': '\xe6\x9b\xbf\xe6\x8d\xa2...', 'Report a bug': '\xe6\x8a\xa5\xe5\x91\x8a\xe7\xbc\xba\xe9\x99\xb7', 'Rescan the filesystem and update the MPD database': '\xe9\x87\x8d\xe6\x96\xb0\xe6\x89\xab\xe6\x8f\x8f\xe6\x96\x87\xe4\xbb\xb6\xe7\xb3\xbb\xe7\xbb\x9f\xe5\xb9\xb6\xe4\xb8\x94\xe6\x9b\xb4\xe8\xa1\x8cMPD\xe6\x95\xb0\xe6\x8d\xae\xe5\xba\x93', 'Restart Game': 
'\xe9\x87\x8d\xe6\x96\xb0\xe5\xbc\x80\xe5\xa7\x8b\xe6\xb8\xb8\xe6\x88\x8f', 'Revert to last saved version': '\xe8\xbf\x98\xe5\x8e\x9f\xe5\x88\xb0\xe6\x9c\x80\xe5\x90\x8e\xe4\xb8\x80\xe6\xac\xa1\xe4\xbf\x9d\xe5\xad\x98\xe7\x9a\x84\xe7\x89\x88\xe6\x9c\xac', 'Run': '\xe8\xbf\x90\xe8\xa1\x8c', 'Run this script through the interpreter': '\xe9\x80\x9a\xe8\xbf\x87\xe8\xa7\xa3\xe9\x87\x8a\xe7\xa8\x8b\xe5\xba\x8f\xe8\xbf\x90\xe8\xa1\x8c\xe6\xad\xa4\xe8\x84\x9a\xe6\x9c\xac', 'Running Jobs': '\xe8\xbf\x90\xe8\xa1\x8c\xe7\x9a\x84\xe5\xb7\xa5\xe4\xbd\x9c', 'Same Major Mode': '\xe5\x90\x8c\xe6\xa0\xb8\xe5\xbf\x83\xe6\xa8\xa1\xe5\xbc\x8f', 'Samples': '\xe4\xbe\x8b\xe5\xad\x90', 'Save &As...': '\xe5\x8f\xa6\xe5\xad\x98\xe4\xb8\xba(&A)...', 'Save Styles': '\xe4\xbf\x9d\xe5\xad\x98\xe9\xa3\x8e\xe6\xa0\xbc', 'Save or Delete Marked Buffers': '\xe4\xbf\x9d\xe5\xad\x98\xe6\x88\x96\xe8\x80\x85\xe5\x88\xa0\xe9\x99\xa4\xe5\xb7\xb2\xe8\xa2\xab\xe6\xa0\x87\xe8\xae\xb0\xe7\x9a\x84\xe7\xbc\x93\xe5\x86\xb2\xe5\x8c\xba', 'Save the current file': '\xe4\xbf\x9d\xe5\xad\x98\xe5\xbd\x93\xe5\x89\x8d\xe6\x96\x87\xe4\xbb\xb6', 'Search for a string in the text': '\xe5\x9c\xa8\xe6\x96\x87\xe6\x9c\xac\xe4\xb8\xad\xe6\x90\x9c\xe7\xb4\xa2\xe5\xad\x97\xe7\xac\xa6\xe4\xb8\xb2', 'Select All': '\xe5\x85\xa8\xe9\x80\x89', 'Select Rect': '\xe9\x80\x89\xe6\x8b\xa9\xe7\x9f\xa9\xe5\xbd\xa2', 'Select rectangular region': '\xe9\x80\x89\xe6\x8b\xa9\xe7\x9f\xa9\xe5\xbd\xa2\xe5\x8c\xba\xe5\x9f\x9f', 'Set the preview file type': '\xe8\xae\xbe\xe7\xbd\xae\xe9\xa2\x84\xe8\xa7\x88\xe6\x96\x87\xe4\xbb\xb6\xe6\xa0\xbc\xe5\xbc\x8f', 'Show Buffer': '\xe6\x98\xbe\xe7\xa4\xba\xe7\xbc\x93\xe5\x86\xb2\xe5\x8c\xba', 'Show Hex Digits': '\xe6\x98\xbe\xe7\xa4\xba\xe5\x8d\x81\xe5\x85\xad\xe8\xbf\x9b\xe5\x88\xb6\xe6\x95\xb0\xe5\xad\x97', 'Show Line Style': '\xe6\x98\xbe\xe7\xa4\xba\xe8\xa1\x8c\xe6\xa0\xb7\xe5\xbc\x8f', 'Show Pixel Values': '\xe6\x98\xbe\xe7\xa4\xba\xe5\x83\x8f\xe7\xb4\xa0\xe5\x80\xbc', 'Show Record Numbers': 
'\xe6\x98\xbe\xe7\xa4\xba\xe8\xae\xb0\xe5\xbd\x95\xe6\x95\xb0\xe7\x9b\xae', 'Show the buffer in a new tab': '\xe5\x9c\xa8\xe6\x96\xb0\xe6\xa0\x87\xe7\xad\xbe\xe9\x87\x8c\xe6\x98\xbe\xe7\xa4\xba\xe7\xbc\x93\xe5\x86\xb2\xe5\x8c\xba', 'Show the buffer in place of this tab': '\xe5\x9c\xa8\xe6\xad\xa4\xe6\xa0\x87\xe7\xad\xbe\xe5\x86\x85\xe6\x98\xbe\xe7\xa4\xba\xe7\xbc\x93\xe5\x86\xb2\xe5\x8c\xba', 'Show the styling information of the current line': '\xe6\x98\xbe\xe7\xa4\xba\xe5\xbd\x93\xe5\x89\x8d\xe8\xa1\x8c\xe7\x9a\x84\xe9\xa3\x8e\xe6\xa0\xbc\xe4\xbf\xa1\xe6\x81\xaf', 'Show uncollectable objects': '\xe6\x98\xbe\xe7\xa4\xba\xe4\xb8\x8d\xe5\x8f\xaf\xe5\x9b\x9e\xe6\x94\xb6\xe5\xaf\xb9\xe8\xb1\xa1', 'Sidebars': '\xe4\xbe\xa7\xe6\xa0\x8f', 'Size': '\xe5\xa4\xa7\xe5\xb0\x8f', 'Slow test of the progress bar': '\xe6\x85\xa2\xe9\x80\x9f\xe6\xb5\x8b\xe8\xaf\x95\xe8\xbf\x9b\xe5\xba\xa6\xe6\xa0\x8f', 'Some styles have been changed would you like to save before exiting?': '\xe6\x9c\x89\xe4\xba\x9b\xe6\xa0\xb7\xe5\xbc\x8f\xe5\xb7\xb2\xe7\xbb\x8f\xe8\xa2\xab\xe6\x94\xb9\xe5\x8f\x98,\xe4\xbd\xa0\xe8\xa6\x81\xe5\x9c\xa8\xe9\x80\x80\xe5\x87\xba\xe5\x89\x8d\xe4\xbf\x9d\xe5\xad\x98\xe5\x90\x97?', 'Sort Order': '\xe6\x8e\x92\xe5\xba\x8f', 'Start a blank new style': '\xe6\x96\xb0\xe5\xbb\xba\xe7\xa9\xba\xe7\x99\xbd\xe6\xa0\xb7\xe5\xbc\x8f', 'Stop': '\xe5\x81\x9c\xe6\xad\xa2', 'Stop the currently running script': '\xe5\x81\x9c\xe6\xad\xa2\xe5\xbd\x93\xe5\x89\x8d\xe8\xbf\x90\xe8\xa1\x8c\xe7\x9a\x84\xe8\x84\x9a\xe6\x9c\xac', 'Style Editor': '\xe6\xa0\xb7\xe5\xbc\x8f\xe7\xbc\x96\xe8\xbe\x91\xe5\x99\xa8', 'Style Tags': '\xe6\xa0\xb7\xe5\xbc\x8f\xe6\xa0\x87\xe7\xad\xbe', 'Style Theme': '\xe6\xa0\xb7\xe5\xbc\x8f\xe4\xb8\xbb\xe9\xa2\x98', 'Syntax Files': '\xe8\xaf\xad\xe6\xb3\x95\xe6\x96\x87\xe4\xbb\xb6', 'Tests': '\xe6\xb5\x8b\xe8\xaf\x95', 'Text': '\xe6\x96\x87\xe5\xad\x97', 'Text Styles...': '\xe6\x96\x87\xe6\x9c\xac\xe6\xa0\xb7\xe5\xbc\x8f...', 'Text file': 
'\xe6\x96\x87\xe6\x9c\xac\xe6\x96\x87\xe4\xbb\xb6', 'Tools': '\xe5\xb7\xa5\xe5\x85\xb7', 'Transform': '\xe5\x8f\x98\xe6\x8d\xa2', 'Undo': '\xe6\x92\xa4\xe9\x94\x80', 'Update Database': '\xe6\x9b\xb4\xe6\x96\xb0\xe6\x95\xb0\xe6\x8d\xae\xe5\xba\x93', 'View': '\xe8\xa7\x86\xe5\x9b\xbe', 'View Direction': '\xe6\x9f\xa5\xe7\x9c\x8b\xe6\x96\xb9\xe5\x90\x91', 'Widget Inspector...': '\xe5\xb0\x8f\xe5\xb7\xa5\xe5\x85\xb7\xe6\xa3\x80\xe6\x9f\xa5\xe5\x99\xa8...', 'Window': '\xe7\xaa\x97\xe5\x8f\xa3', 'Zoom In': '\xe5\xa2\x9e\xe5\xa4\xa7\xe5\xad\x97\xe4\xbd\x93', 'Zoom Out': '\xe7\xbc\xa9\xe5\xb0\x8f\xe5\xad\x97\xe4\xbd\x93', 'Zoom in (magnify) image': '\xe6\x94\xbe\xe5\xa4\xa7\xe5\x9b\xbe\xe5\x83\x8f', 'Zoom out (demagnify) image': '\xe7\xbc\xa9\xe5\xb0\x8f\xe5\x9b\xbe\xe5\x83\x8f', 'bold': '\xe7\xb2\x97\xe4\xbd\x93', 'eol': '\xe7\xbb\x93\xe6\x9d\x9f\xe7\xac\xa6', 'italic': '\xe6\x96\x9c\xe4\xbd\x93', 'restart-game': '\xe9\x87\x8d\xe6\x96\xb0\xe5\x90\xaf\xe5\x8a\xa8\xe6\xb8\xb8\xe6\x88\x8f', 'underline': '\xe4\xb8\x8b\xe5\x88\x92\xe7\xba\xbf', 'unknown': '\xe6\x9c\xaa\xe7\x9f\xa5', }
gpl-2.0
Stinkdigital/three.js
utils/exporters/maya/plug-ins/threeJsFileTranslator.py
51
23623
"""Maya file translator plug-in that exports scenes to the three.js JSON
model format (formatVersion 3.1).

Registers a 'Three.js' MPxFileTranslator; the heavy lifting is done by
ThreeJsWriter, which walks the scene via pymel and writes a JSON file.
NOTE(review): this is Python 2 / Maya-plugin code (`file()` builtin,
py2 `map`/`filter` returning lists); it only runs inside Maya's interpreter.
"""

__author__ = 'Sean Griffin'
__version__ = '1.0.0'
__email__ = 'sean@thoughtbot.com'

import sys
import os.path
import json
import shutil

from pymel.core import *
from maya.OpenMaya import *
from maya.OpenMayaMPx import *

# Translator registration constants (see initializePlugin below).
kPluginTranslatorTypeName = 'Three.js'
kOptionScript = 'ThreeJsExportScript'
kDefaultOptionsString = '0'

# Decimal places kept for every exported float (positions, normals, quats).
FLOAT_PRECISION = 8


class ThreeJsWriter(object):
    """Collects geometry/material/skeleton data from the scene and writes
    a single three.js JSON file."""

    def __init__(self):
        # Every recognised export toggle; _parseOptions turns each into a
        # boolean entry of self.options based on the option string.
        self.componentKeys = ['vertices', 'normals', 'colors', 'uvs', 'faces',
            'materials', 'colorMaps', 'specularMaps', 'bumpMaps', 'copyTextures',
            'bones', 'skeletalAnim', 'bakeAnimations', 'prettyOutput']

    def write(self, path, optionString, accessMode):
        """Export the scene (or selection) to *path*.

        accessMode distinguishes 'export all' from 'export selected';
        bones/animation data is only exported in 'export all' mode.
        """
        self.path = path
        self.accessMode = accessMode
        self._parseOptions(optionString)
        # Running offsets so indices from successive meshes land in the
        # right place in the shared vertex/uv/normal arrays.
        self.verticeOffset = 0
        self.uvOffset = 0
        self.normalOffset = 0
        self.vertices = []
        self.materials = []
        self.faces = []
        self.normals = []
        self.uvs = []
        self.morphTargets = []
        self.bones = []
        self.animations = []
        self.skinIndices = []
        self.skinWeights = []

        print("exporting meshes")
        self._exportMeshes()
        if self.options["materials"]:
            print("exporting materials")
            self._exportMaterials()
        # Skeleton / animation data is only meaningful when exporting the
        # whole scene, not a partial selection.
        if not self.accessMode == MPxFileTranslator.kExportActiveAccessMode :
            if self.options["bakeAnimations"]:
                print("exporting animations")
                self._exportAnimations()
                self._goToFrame(self.options["startFrame"])
            if self.options["bones"]:
                print("exporting bones")
                # Go to bind pose so joint transforms match the skin.
                select(map(lambda m: m.getParent(), ls(type='mesh')))
                runtime.GoToBindPose()
                self._exportBones()
                print("exporting skins")
                self._exportSkins()
            if self.options["skeletalAnim"]:
                print("exporting keyframe animations")
                self._exportKeyframeAnimations()

        print("writing file")
        output = {
            'metadata': {
                'formatVersion': 3.1,
                'generatedBy': 'Maya Exporter'
            },
            'vertices': self.vertices,
            # three.js expects a list of UV layers; we export a single layer.
            'uvs': [self.uvs],
            'faces': self.faces,
            'normals': self.normals,
            'materials': self.materials,
        }
        if not self.accessMode == MPxFileTranslator.kExportActiveAccessMode :
            if self.options['bakeAnimations']:
                output['morphTargets'] = self.morphTargets
            if self.options['bones']:
                output['bones'] = self.bones
                output['skinIndices'] = self.skinIndices
                output['skinWeights'] = self.skinWeights
                output['influencesPerVertex'] = self.options["influencesPerVertex"]
            if self.options['skeletalAnim']:
                output['animations'] = self.animations
        # Py2 `file` builtin; the _make_iterencode override installed at the
        # bottom of this module keeps array values off individual lines.
        with file(path, 'w') as f:
            if self.options['prettyOutput']:
                f.write(json.dumps(output, indent=4, separators=(", ", ": ")))
            else:
                f.write(json.dumps(output, separators=(",",":")))

    def _allMeshes(self):
        """Return the meshes to export: all connected meshes in 'export all'
        mode, otherwise the current selection. Recomputed on every call."""
        if not self.accessMode == MPxFileTranslator.kExportActiveAccessMode :
            print("*** Exporting ALL (NEW) ***")
            self.__allMeshes = filter(lambda m: len(m.listConnections()) > 0, ls(type='mesh'))
        else :
            print("### Exporting SELECTED ###")
            self.__allMeshes = ls(selection=True)
        return self.__allMeshes

    def _parseOptions(self, optionsString):
        """Build self.options from the translator option string.

        A key is enabled iff its name occurs as a substring of the option
        string; 'bones N' and 'bakeAnimations S E T' carry extra
        space-separated integer arguments.
        """
        self.options = dict([(x, False) for x in self.componentKeys])
        for key in self.componentKeys:
            self.options[key] = key in optionsString
        if self.options["bones"]:
            boneOptionsString = optionsString[optionsString.find("bones"):]
            boneOptions = boneOptionsString.split(' ')
            self.options["influencesPerVertex"] = int(boneOptions[1])
        if self.options["bakeAnimations"]:
            bakeAnimOptionsString = optionsString[optionsString.find("bakeAnimations"):]
            bakeAnimOptions = bakeAnimOptionsString.split(' ')
            self.options["startFrame"] = int(bakeAnimOptions[1])
            self.options["endFrame"] = int(bakeAnimOptions[2])
            self.options["stepFrame"] = int(bakeAnimOptions[3])

    def _exportMeshes(self):
        # Vertices are gathered once for all meshes; per-mesh data follows.
        if self.options['vertices']:
            self._exportVertices()
        for mesh in self._allMeshes():
            self._exportMesh(mesh)

    def _exportMesh(self, mesh):
        """Export faces/normals/uvs for one mesh and advance the offsets."""
        print("Exporting " + mesh.name())
        if self.options['faces']:
            print("Exporting faces")
            self._exportFaces(mesh)
            # Offsets advance only when faces are exported, since only face
            # records reference the shared index arrays.
            self.verticeOffset += len(mesh.getPoints())
            self.uvOffset += mesh.numUVs()
            self.normalOffset += mesh.numNormals()
        if self.options['normals']:
            print("Exporting normals")
            self._exportNormals(mesh)
        if self.options['uvs']:
            print("Exporting UVs")
            self._exportUVs(mesh)

    def _getMaterialIndex(self, face, mesh):
        """Index into self.materials for the lambert shading *face* (or the
        whole *mesh*), or -1 when none applies.

        NOTE(review): the DbgName->index cache is built on first call, so
        _exportMaterials must have populated self.materials before faces that
        need material indices are exported — verify call order if reordering.
        """
        if not hasattr(self, '_materialIndices'):
            self._materialIndices = dict([(mat['DbgName'], i) for i, mat in enumerate(self.materials)])
        if self.options['materials']:
            for engine in mesh.listConnections(type='shadingEngine'):
                if sets(engine, isMember=face) or sets(engine, isMember=mesh):
                    for material in engine.listConnections(type='lambert'):
                        if self._materialIndices.has_key(material.name()):
                            return self._materialIndices[material.name()]
        return -1

    def _exportVertices(self):
        self.vertices += self._getVertices()

    def _exportAnimations(self):
        # Baked (morph-target) animation: one vertex snapshot per frame.
        for frame in self._framesToExport():
            self._exportAnimationForFrame(frame)

    def _framesToExport(self):
        return range(self.options["startFrame"], self.options["endFrame"], self.options["stepFrame"])

    def _exportAnimationForFrame(self, frame):
        print("exporting frame " + str(frame))
        self._goToFrame(frame)
        self.morphTargets.append({
            'name': "frame_" + str(frame),
            'vertices': self._getVertices()
        })

    def _getVertices(self):
        # World-space x,y,z triples for every point of every exported mesh,
        # flattened into one list and rounded to FLOAT_PRECISION.
        return [coord for mesh in self._allMeshes() for point in mesh.getPoints(space='world')
                for coord in [round(point.x, FLOAT_PRECISION), round(point.y, FLOAT_PRECISION), round(point.z, FLOAT_PRECISION)]]

    def _goToFrame(self, frame):
        currentTime(frame)

    def _exportFaces(self, mesh):
        """Append three.js face records: bitmask, vertex indices, then the
        optional material / uv / normal indices the bitmask announces."""
        typeBitmask = self._getTypeBitmask()
        for face in mesh.faces:
            materialIndex = self._getMaterialIndex(face, mesh)
            hasMaterial = materialIndex != -1
            self._exportFaceBitmask(face, typeBitmask, hasMaterial=hasMaterial)
            self.faces += map(lambda x: x + self.verticeOffset, face.getVertices())
            if self.options['materials']:
                if hasMaterial:
                    self.faces.append(materialIndex)
            if self.options['uvs'] and face.hasUVs():
                self.faces += map(lambda v: face.getUVIndex(v) + self.uvOffset, range(face.polygonVertexCount()))
            if self.options['normals']:
                self._exportFaceVertexNormals(face)

    def _exportFaceBitmask(self, face, typeBitmask, hasMaterial=True):
        # three.js face flags: bit 0 = quad, bit 1 = has material,
        # bit 3 = has face-vertex uvs (bit 5, from typeBitmask, = normals).
        if face.polygonVertexCount() == 4:
            faceBitmask = 1
        else:
            faceBitmask = 0
        if hasMaterial:
            faceBitmask |= (1 << 1)
        if self.options['uvs'] and face.hasUVs():
            faceBitmask |= (1 << 3)
        self.faces.append(typeBitmask | faceBitmask)

    def _exportFaceVertexNormals(self, face):
        for i in range(face.polygonVertexCount()):
            self.faces.append(face.normalIndex(i) + self.normalOffset)

    def _exportNormals(self, mesh):
        for normal in mesh.getNormals():
            self.normals += [round(normal.x, FLOAT_PRECISION), round(normal.y, FLOAT_PRECISION), round(normal.z, FLOAT_PRECISION)]

    def _exportUVs(self, mesh):
        # Interleave u,v pairs into the flat uv list.
        us, vs = mesh.getUVs()
        for i, u in enumerate(us):
            self.uvs.append(u)
            self.uvs.append(vs[i])

    def _getTypeBitmask(self):
        # Bit 5 (32) marks faces that carry per-vertex normal indices.
        bitmask = 0
        if self.options['normals']:
            bitmask |= 32
        return bitmask

    def _exportMaterials(self):
        hist = listHistory( self._allMeshes(), f=1 )
        mats = listConnections( hist, type='lambert' )
        for mat in mats:
            print("material: " + mat)
            self.materials.append(self._exportMaterial(mat))

    def _exportMaterial(self, mat):
        """Serialize one Maya lambert/phong node as a three.js material dict."""
        result = {
            "DbgName": mat.name(),
            "blending": "NormalBlending",
            "colorDiffuse": map(lambda i: i * mat.getDiffuseCoeff(), mat.getColor().rgb),
            "depthTest": True,
            "depthWrite": True,
            "shading": mat.__class__.__name__,
            "opacity": mat.getTransparency().r,
            "transparent": mat.getTransparency().r != 1.0,
            "vertexColors": False
        }
        if isinstance(mat, nodetypes.Phong):
            result["colorSpecular"] = mat.getSpecularColor().rgb
            result["reflectivity"] = mat.getReflectivity()
            result["specularCoef"] = mat.getCosPower()
            if self.options["specularMaps"]:
                self._exportSpecularMap(result, mat)
        if self.options["bumpMaps"]:
            self._exportBumpMap(result, mat)
        if self.options["colorMaps"]:
            self._exportColorMap(result, mat)
        return result

    def _exportBumpMap(self, result, mat):
        for bump in mat.listConnections(type='bump2d'):
            for f in bump.listConnections(type='file'):
                result["mapNormalFactor"] = 1
                self._exportFile(result, f, "Normal")

    def _exportColorMap(self, result, mat):
        for f in mat.attr('color').inputs():
            result["colorDiffuse"] = f.attr('defaultColor').get()
            self._exportFile(result, f, "Diffuse")

    def _exportSpecularMap(self, result, mat):
        for f in mat.attr('specularColor').inputs():
            result["colorSpecular"] = f.attr('defaultColor').get()
            self._exportFile(result, f, "Specular")

    def _exportFile(self, result, mapFile, mapType):
        """Record texture metadata under map<Type>* keys and optionally copy
        the texture file next to the exported JSON."""
        src = mapFile.ftn.get()
        targetDir = os.path.dirname(self.path)
        fName = os.path.basename(src)
        if self.options['copyTextures']:
            shutil.copy2(src, os.path.join(targetDir, fName))
        result["map" + mapType] = fName
        result["map" + mapType + "Repeat"] = [1, 1]
        result["map" + mapType + "Wrap"] = ["repeat", "repeat"]
        result["map" + mapType + "Anisotropy"] = 4

    def _exportBones(self):
        """Export the joints driving the exported meshes (bind pose assumed —
        write() calls GoToBindPose first)."""
        hist = listHistory( self._allMeshes(), f=1 )
        joints = listConnections( hist, type="joint")
        for joint in joints:
            if joint.getParent():
                parentIndex = self._indexOfJoint(joint.getParent().name())
            else:
                parentIndex = -1
            rotq = joint.getRotation(quaternion=True) * joint.getOrientation()
            pos = joint.getTranslation()
            self.bones.append({
                "parent": parentIndex,
                "name": joint.name(),
                "pos": self._roundPos(pos),
                "rotq": self._roundQuat(rotq)
            })

    def _indexOfJoint(self, name):
        # Cache name -> index over ALL scene joints on first use.
        # NOTE(review): this indexes ls(type='joint') while _exportBones only
        # exports joints connected to exported meshes — indices agree only when
        # the two sets match; confirm for scenes with extra joints.
        if not hasattr(self, '_jointNames'):
            self._jointNames = dict([(joint.name(), i) for i, joint in enumerate(ls(type='joint'))])
        if name in self._jointNames:
            return self._jointNames[name]
        else:
            return -1

    def _exportKeyframeAnimations(self):
        """Export per-joint keyframes as one three.js skeletal animation.

        NOTE(review): the first joint gets "parent": -1 and each subsequent
        one the previous loop index — a linear chain, not the true hierarchy.
        """
        hierarchy = []
        i = -1
        frameRate = FramesPerSecond(currentUnit(query=True, time=True)).value()
        hist = listHistory( self._allMeshes(), f=1 )
        joints = listConnections( hist, type="joint")
        for joint in joints:
            hierarchy.append({
                "parent": i,
                "keys": self._getKeyframes(joint, frameRate)
            })
            i += 1

        self.animations.append({
            "name": "skeletalAction.001",
            "length": (playbackOptions(maxTime=True, query=True) - playbackOptions(minTime=True, query=True)) / frameRate,
            "fps": 1,
            "hierarchy": hierarchy
        })

    def _getKeyframes(self, joint, frameRate):
        # Keyed frames plus the playback-range endpoints, deduped and sorted.
        firstFrame = playbackOptions(minTime=True, query=True)
        lastFrame = playbackOptions(maxTime=True, query=True)
        frames = sorted(list(set(keyframe(joint, query=True) + [firstFrame, lastFrame])))
        keys = []
        print("joint " + joint.name() + " has " + str(len(frames)) + " keyframes")
        for frame in frames:
            self._goToFrame(frame)
            keys.append(self._getCurrentKeyframe(joint, frame, frameRate))
        return keys

    def _getCurrentKeyframe(self, joint, frame, frameRate):
        pos = joint.getTranslation()
        rot = joint.getRotation(quaternion=True) * joint.getOrientation()
        return {
            'time': (frame - playbackOptions(minTime=True, query=True)) / frameRate,
            'pos': self._roundPos(pos),
            'rot': self._roundQuat(rot),
            'scl': [1,1,1]
        }

    def _roundPos(self, pos):
        return map(lambda x: round(x, FLOAT_PRECISION), [pos.x, pos.y, pos.z])

    def _roundQuat(self, rot):
        return map(lambda x: round(x, FLOAT_PRECISION), [rot.x, rot.y, rot.z, rot.w])

    def _exportSkins(self):
        """Export exactly options['influencesPerVertex'] (weight, jointIndex)
        pairs per vertex, zero-padded; raises if a vertex has more non-zero
        influences than allowed."""
        for mesh in self._allMeshes():
            print("exporting skins for mesh: " + mesh.name())
            hist = listHistory( mesh, f=1 )
            skins = listConnections( hist, type='skinCluster')
            if len(skins) > 0:
                print("mesh has " + str(len(skins)) + " skins")
                # Only the first skinCluster is exported.
                skin = skins[0]
                joints = skin.influenceObjects()
                for weights in skin.getWeights(mesh.vtx):
                    numWeights = 0
                    for i in range(0, len(weights)):
                        if weights[i] > 0:
                            self.skinWeights.append(weights[i])
                            self.skinIndices.append(self._indexOfJoint(joints[i].name()))
                            numWeights += 1
                    if numWeights > self.options["influencesPerVertex"]:
                        raise Exception("More than " + str(self.options["influencesPerVertex"]) + " influences on a vertex in " + mesh.name() + ".")
                    # Pad to the fixed influence count.
                    for i in range(0, self.options["influencesPerVertex"] - numWeights):
                        self.skinWeights.append(0)
                        self.skinIndices.append(0)
            else:
                print("mesh has no skins, appending 0")
                for i in range(0, len(mesh.getPoints()) * self.options["influencesPerVertex"]):
                    self.skinWeights.append(0)
                    self.skinIndices.append(0)


class NullAnimCurve(object):
    """Stand-in animation curve that always evaluates to 0.0."""

    def getValue(self, index):
        return 0.0


class ThreeJsTranslator(MPxFileTranslator):
    """Maya MPxFileTranslator adapter delegating to ThreeJsWriter."""

    def __init__(self):
        MPxFileTranslator.__init__(self)

    def haveWriteMethod(self):
        # Export-only translator.
        return True

    def filter(self):
        return '*.json'

    def defaultExtension(self):
        return 'json'

    def writer(self, fileObject, optionString, accessMode):
        path = fileObject.fullName()
        writer = ThreeJsWriter()
        writer.write(path, optionString, accessMode)


def translatorCreator():
    """Factory handed to Maya; must return an MPx pointer wrapper."""
    return asMPxPtr(ThreeJsTranslator())


def initializePlugin(mobject):
    """Maya plug-in entry point: register the Three.js translator."""
    mplugin = MFnPlugin(mobject)
    try:
        mplugin.registerFileTranslator(kPluginTranslatorTypeName, None, translatorCreator, kOptionScript, kDefaultOptionsString)
    except:
        sys.stderr.write('Failed to register translator: %s' % kPluginTranslatorTypeName)
        raise


def uninitializePlugin(mobject):
    """Maya plug-in exit point: deregister the translator."""
    mplugin = MFnPlugin(mobject)
    try:
        mplugin.deregisterFileTranslator(kPluginTranslatorTypeName)
    except:
        sys.stderr.write('Failed to deregister translator: %s' % kPluginTranslatorTypeName)
        raise


class FramesPerSecond(object):
    """Maps Maya's time-unit name ('film', 'ntsc', '24fps', ...) to an
    integer frames-per-second value."""

    MAYA_VALUES = {
        'game': 15,
        'film': 24,
        'pal': 25,
        'ntsc': 30,
        'show': 48,
        'palf': 50,
        'ntscf': 60
    }

    def __init__(self, fpsString):
        self.fpsString = fpsString

    def value(self):
        if self.fpsString in FramesPerSecond.MAYA_VALUES:
            return FramesPerSecond.MAYA_VALUES[self.fpsString]
        else:
            # Unknown unit names like '48fps': keep only the digits (py2
            # filter returns a str here).
            return int(filter(lambda c: c.isdigit(), self.fpsString))


###################################################################
## The code below was taken from the Blender 3JS Exporter.
## Its purpose is to fix the JSON output so that it does not
## put each array value on its own line, which is ridiculous
## for this type of output.
###################################################################

# Number of decimal places applied to every float emitted by the patched
# encoder (coarser than FLOAT_PRECISION used when collecting the data).
ROUND = 6

## THREE override function
def _json_floatstr(o):
    """Format a float for JSON output: round to ROUND places, '%g' notation."""
    if ROUND is not None:
        o = round(o, ROUND)
    return '%g' % o

def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
        _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        ValueError=ValueError,
        basestring=basestring,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    """Replacement for the CPython 2 json.encoder._make_iterencode.

    Identical to the stdlib version except that list/tuple encoding ignores
    `_indent` (the indenting branch is commented out below), so arrays are
    emitted on a single line even in pretty-printed output.
    NOTE(review): Python 2 only (`basestring`, `long`, `dict.iteritems`).
    """
    def _iterencode_list(lst, _current_indent_level):
        if not lst:
            yield '[]'
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        # Deliberately disabled stdlib indentation for arrays — this is the
        # whole point of the override:
        #if _indent is not None:
        #    _current_indent_level += 1
        #    newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
        #    separator = _item_separator + newline_indent
        #    buf += newline_indent
        #else:
        newline_indent = None
        separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # After the opening '[', the pending prefix becomes the
                # item separator (as in the stdlib original).
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield ']'
        if markers is not None:
            del markers[markerid]

    def _iterencode_dict(dct, _current_indent_level):
        # Unchanged from the stdlib: objects are still pretty-printed.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = sorted(dct.items(), key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, (int, long)):
                key = str(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield '}'
        if markers is not None:
            del markers[markerid]

    def _iterencode(o, _current_indent_level):
        # Top-level dispatcher, unchanged from the stdlib.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode

# override the encoder so json.dumps picks up the patched array formatting
json.encoder._make_iterencode = _make_iterencode
mit
p0psicles/SickRage
lib/unidecode/x0a2.py
253
4503
# Unidecode transliteration table for Unicode code points U+A200..U+A2FF
# (Yi Syllables block). The tuple index equals the low byte of the code
# point; each entry is the ASCII romanization of that syllable.
data = (
    'kax', 'ka', 'kap', 'kuox', 'kuo', 'kuop', 'kot', 'kox',            # 0x00-0x07
    'ko', 'kop', 'ket', 'kex', 'ke', 'kep', 'kut', 'kux',               # 0x08-0x0f
    'ku', 'kup', 'kurx', 'kur', 'ggit', 'ggix', 'ggi', 'ggiex',         # 0x10-0x17
    'ggie', 'ggiep', 'ggat', 'ggax', 'gga', 'ggap', 'gguot', 'gguox',   # 0x18-0x1f
    'gguo', 'gguop', 'ggot', 'ggox', 'ggo', 'ggop', 'gget', 'ggex',     # 0x20-0x27
    'gge', 'ggep', 'ggut', 'ggux', 'ggu', 'ggup', 'ggurx', 'ggur',      # 0x28-0x2f
    'mgiex', 'mgie', 'mgat', 'mgax', 'mga', 'mgap', 'mguox', 'mguo',    # 0x30-0x37
    'mguop', 'mgot', 'mgox', 'mgo', 'mgop', 'mgex', 'mge', 'mgep',      # 0x38-0x3f
    'mgut', 'mgux', 'mgu', 'mgup', 'mgurx', 'mgur', 'hxit', 'hxix',     # 0x40-0x47
    'hxi', 'hxip', 'hxiet', 'hxiex', 'hxie', 'hxiep', 'hxat', 'hxax',   # 0x48-0x4f
    'hxa', 'hxap', 'hxuot', 'hxuox', 'hxuo', 'hxuop', 'hxot', 'hxox',   # 0x50-0x57
    'hxo', 'hxop', 'hxex', 'hxe', 'hxep', 'ngiex', 'ngie', 'ngiep',     # 0x58-0x5f
    'ngat', 'ngax', 'nga', 'ngap', 'nguot', 'nguox', 'nguo', 'ngot',    # 0x60-0x67
    'ngox', 'ngo', 'ngop', 'ngex', 'nge', 'ngep', 'hit', 'hiex',        # 0x68-0x6f
    'hie', 'hat', 'hax', 'ha', 'hap', 'huot', 'huox', 'huo',            # 0x70-0x77
    'huop', 'hot', 'hox', 'ho', 'hop', 'hex', 'he', 'hep',              # 0x78-0x7f
    'wat', 'wax', 'wa', 'wap', 'wuox', 'wuo', 'wuop', 'wox',            # 0x80-0x87
    'wo', 'wop', 'wex', 'we', 'wep', 'zit', 'zix', 'zi',                # 0x88-0x8f
    'zip', 'ziex', 'zie', 'ziep', 'zat', 'zax', 'za', 'zap',            # 0x90-0x97
    'zuox', 'zuo', 'zuop', 'zot', 'zox', 'zo', 'zop', 'zex',            # 0x98-0x9f
    'ze', 'zep', 'zut', 'zux', 'zu', 'zup', 'zurx', 'zur',              # 0xa0-0xa7
    'zyt', 'zyx', 'zy', 'zyp', 'zyrx', 'zyr', 'cit', 'cix',             # 0xa8-0xaf
    'ci', 'cip', 'ciet', 'ciex', 'cie', 'ciep', 'cat', 'cax',           # 0xb0-0xb7
    'ca', 'cap', 'cuox', 'cuo', 'cuop', 'cot', 'cox', 'co',             # 0xb8-0xbf
    'cop', 'cex', 'ce', 'cep', 'cut', 'cux', 'cu', 'cup',               # 0xc0-0xc7
    'curx', 'cur', 'cyt', 'cyx', 'cy', 'cyp', 'cyrx', 'cyr',            # 0xc8-0xcf
    'zzit', 'zzix', 'zzi', 'zzip', 'zziet', 'zziex', 'zzie', 'zziep',   # 0xd0-0xd7
    'zzat', 'zzax', 'zza', 'zzap', 'zzox', 'zzo', 'zzop', 'zzex',       # 0xd8-0xdf
    'zze', 'zzep', 'zzux', 'zzu', 'zzup', 'zzurx', 'zzur', 'zzyt',      # 0xe0-0xe7
    'zzyx', 'zzy', 'zzyp', 'zzyrx', 'zzyr', 'nzit', 'nzix', 'nzi',      # 0xe8-0xef
    'nzip', 'nziex', 'nzie', 'nziep', 'nzat', 'nzax', 'nza', 'nzap',    # 0xf0-0xf7
    'nzuox', 'nzuo', 'nzox', 'nzop', 'nzex', 'nze', 'nzux', 'nzu',      # 0xf8-0xff
)
gpl-3.0
amitay/samba
lib/dnspython/dns/rrset.py
99
5895
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""DNS RRsets (an RRset is a named rdataset)"""

import dns.name
import dns.rdataset
import dns.rdataclass
import dns.renderer
# Explicit imports for modules this file actually uses. Previously the code
# referenced dns.rdatatype (in the class body, evaluated at import time) and
# dns.rdata without importing them, relying on sibling imports having already
# populated the `dns` package namespace.
import dns.rdatatype
import dns.rdata


class RRset(dns.rdataset.Rdataset):
    """A DNS RRset (named rdataset).

    RRset inherits from Rdataset, and RRsets can be treated as
    Rdatasets in most cases. There are, however, a few notable
    exceptions. RRsets have different to_wire() and to_text() method
    arguments, reflecting the fact that RRsets always have an owner
    name.
    """

    __slots__ = ['name', 'deleting']

    def __init__(self, name, rdclass, rdtype, covers=dns.rdatatype.NONE,
                 deleting=None):
        """Create a new RRset.

        @param name: the owner name of the RRset
        @param rdclass: the rdata class (e.g. dns.rdataclass.IN)
        @param rdtype: the rdata type
        @param covers: the covered type (for SIG/RRSIG rdatasets)
        @param deleting: the class used for delete semantics in dynamic
        updates, or None
        """
        super(RRset, self).__init__(rdclass, rdtype, covers)
        self.name = name
        self.deleting = deleting

    def _clone(self):
        # Extend Rdataset cloning with the RRset-specific attributes.
        obj = super(RRset, self)._clone()
        obj.name = self.name
        obj.deleting = self.deleting
        return obj

    def __repr__(self):
        if self.covers == 0:
            ctext = ''
        else:
            ctext = '(' + dns.rdatatype.to_text(self.covers) + ')'
        if not self.deleting is None:
            dtext = ' delete=' + dns.rdataclass.to_text(self.deleting)
        else:
            dtext = ''
        return '<DNS ' + str(self.name) + ' ' + \
               dns.rdataclass.to_text(self.rdclass) + ' ' + \
               dns.rdatatype.to_text(self.rdtype) + ctext + dtext + ' RRset>'

    def __str__(self):
        return self.to_text()

    def __eq__(self, other):
        """Two RRsets are equal if they have the same name and the same
        rdataset

        @rtype: bool"""
        if not isinstance(other, RRset):
            return False
        if self.name != other.name:
            return False
        return super(RRset, self).__eq__(other)

    def match(self, name, rdclass, rdtype, covers, deleting=None):
        """Returns True if this rrset matches the specified class, type,
        covers, and deletion state."""
        if not super(RRset, self).match(rdclass, rdtype, covers):
            return False
        if self.name != name or self.deleting != deleting:
            return False
        return True

    def to_text(self, origin=None, relativize=True, **kw):
        """Convert the RRset into DNS master file format.

        @see: L{dns.name.Name.choose_relativity} for more information
        on how I{origin} and I{relativize} determine the way names
        are emitted.

        Any additional keyword arguments are passed on to the rdata
        to_text() method.

        @param origin: The origin for relative names, or None.
        @type origin: dns.name.Name object
        @param relativize: True if names should names be relativized
        @type relativize: bool"""
        return super(RRset, self).to_text(self.name, origin, relativize,
                                          self.deleting, **kw)

    def to_wire(self, file, compress=None, origin=None, **kw):
        """Convert the RRset to wire format."""
        return super(RRset, self).to_wire(self.name, file, compress, origin,
                                          self.deleting, **kw)

    def to_rdataset(self):
        """Convert an RRset into an Rdataset.

        @rtype: dns.rdataset.Rdataset object
        """
        return dns.rdataset.from_rdata_list(self.ttl, list(self))


def from_text_list(name, ttl, rdclass, rdtype, text_rdatas):
    """Create an RRset with the specified name, TTL, class, and type, and with
    the specified list of rdatas in text format.

    @rtype: dns.rrset.RRset object
    """
    # Accept names/classes/types as text as a convenience (py2 `unicode`
    # kept for backward compatibility with this codebase).
    if isinstance(name, (str, unicode)):
        name = dns.name.from_text(name, None)
    if isinstance(rdclass, (str, unicode)):
        rdclass = dns.rdataclass.from_text(rdclass)
    if isinstance(rdtype, (str, unicode)):
        rdtype = dns.rdatatype.from_text(rdtype)
    r = RRset(name, rdclass, rdtype)
    r.update_ttl(ttl)
    for t in text_rdatas:
        rd = dns.rdata.from_text(r.rdclass, r.rdtype, t)
        r.add(rd)
    return r


def from_text(name, ttl, rdclass, rdtype, *text_rdatas):
    """Create an RRset with the specified name, TTL, class, and type and with
    the specified rdatas in text format.

    @rtype: dns.rrset.RRset object
    """
    return from_text_list(name, ttl, rdclass, rdtype, text_rdatas)


def from_rdata_list(name, ttl, rdatas):
    """Create an RRset with the specified name and TTL, and with
    the specified list of rdata objects.

    @raises ValueError: the rdata list is empty
    @rtype: dns.rrset.RRset object
    """
    if isinstance(name, (str, unicode)):
        name = dns.name.from_text(name, None)
    if len(rdatas) == 0:
        raise ValueError("rdata list must not be empty")
    r = None
    for rd in rdatas:
        if r is None:
            # Class/type are taken from the first rdata; removed a dead
            # `first_time = False` assignment that was never read.
            r = RRset(name, rd.rdclass, rd.rdtype)
            r.update_ttl(ttl)
        r.add(rd)
    return r


def from_rdata(name, ttl, *rdatas):
    """Create an RRset with the specified name and TTL, and with
    the specified rdata objects.

    @rtype: dns.rrset.RRset object
    """
    return from_rdata_list(name, ttl, rdatas)
gpl-3.0
GitHublong/hue
desktop/core/ext-py/Django-1.6.10/tests/view_tests/tests/test_static.py
34
5274
"""Tests for serving static files: the static view, the ``static()``
URL-pattern helper, and the ``was_modified_since()`` utility."""
from __future__ import absolute_import

import mimetypes
from os import path
import unittest

from django.conf.urls.static import static
from django.http import HttpResponseNotModified
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.http import http_date
from django.views.static import was_modified_since, STREAM_CHUNK_SIZE

from .. import urls
from ..urls import media_dir


@override_settings(DEBUG=True)
class StaticTests(TestCase):
    """Tests django views in django/views/static.py"""

    # URL prefix under which the test urlconf serves ``media_dir``.
    # Subclasses may override it to exercise other URL patterns.
    prefix = 'site_media'

    def test_serve(self):
        "The static view can serve static media"
        media_files = ['file.txt', 'file.txt.gz']
        for filename in media_files:
            response = self.client.get('/views/%s/%s' % (self.prefix, filename))
            # Iterating the response yields the streamed body chunks.
            response_content = b''.join(response)
            file_path = path.join(media_dir, filename)
            with open(file_path, 'rb') as fp:
                self.assertEqual(fp.read(), response_content)
            self.assertEqual(len(response_content), int(response['Content-Length']))
            # guess_type()[1] is the encoding ('gzip' for the .gz file,
            # None for plain text), which must round-trip into the header.
            self.assertEqual(mimetypes.guess_type(file_path)[1], response.get('Content-Encoding', None))

    def test_chunked(self):
        "The static view should stream files in chunks to avoid large memory usage"
        response = self.client.get('/views/%s/%s' % (self.prefix, 'long-line.txt'))
        # First chunk must be exactly the configured chunk size; the second
        # chunk is the remainder of the fixture file.
        first_chunk = next(response.streaming_content)
        self.assertEqual(len(first_chunk), STREAM_CHUNK_SIZE)
        second_chunk = next(response.streaming_content)
        self.assertEqual(len(second_chunk), 1451)

    def test_unknown_mime_type(self):
        # Unknown extensions fall back to the generic binary content type.
        response = self.client.get('/views/%s/file.unknown' % self.prefix)
        self.assertEqual('application/octet-stream', response['Content-Type'])

    def test_copes_with_empty_path_component(self):
        # A double slash in the URL must not break file resolution.
        file_name = 'file.txt'
        response = self.client.get('/views/%s//%s' % (self.prefix, file_name))
        response_content = b''.join(response)
        with open(path.join(media_dir, file_name), 'rb') as fp:
            self.assertEqual(fp.read(), response_content)

    def test_is_modified_since(self):
        # A stale If-Modified-Since timestamp yields the full file body.
        file_name = 'file.txt'
        response = self.client.get('/views/%s/%s' % (self.prefix, file_name),
                                   HTTP_IF_MODIFIED_SINCE='Thu, 1 Jan 1970 00:00:00 GMT')
        response_content = b''.join(response)
        with open(path.join(media_dir, file_name), 'rb') as fp:
            self.assertEqual(fp.read(), response_content)

    def test_not_modified_since(self):
        file_name = 'file.txt'
        response = self.client.get(
            '/views/%s/%s' % (self.prefix, file_name),
            HTTP_IF_MODIFIED_SINCE='Mon, 18 Jan 2038 05:14:07 GMT'
            # This is 24h before max Unix time. Remember to fix Django and
            # update this test well before 2038 :)
        )
        self.assertIsInstance(response, HttpResponseNotModified)

    def test_invalid_if_modified_since(self):
        """Handle bogus If-Modified-Since values gracefully

        Assume that a file is modified since an invalid timestamp as per RFC
        2616, section 14.25.
        """
        file_name = 'file.txt'
        invalid_date = 'Mon, 28 May 999999999999 28:25:26 GMT'
        response = self.client.get('/views/%s/%s' % (self.prefix, file_name),
                                   HTTP_IF_MODIFIED_SINCE=invalid_date)
        response_content = b''.join(response)
        with open(path.join(media_dir, file_name), 'rb') as fp:
            self.assertEqual(fp.read(), response_content)
        self.assertEqual(len(response_content), int(response['Content-Length']))

    def test_invalid_if_modified_since2(self):
        """Handle even more bogus If-Modified-Since values gracefully

        Assume that a file is modified since an invalid timestamp as per RFC
        2616, section 14.25.
        """
        file_name = 'file.txt'
        invalid_date = ': 1291108438, Wed, 20 Oct 2010 14:05:00 GMT'
        response = self.client.get('/views/%s/%s' % (self.prefix, file_name),
                                   HTTP_IF_MODIFIED_SINCE=invalid_date)
        response_content = b''.join(response)
        with open(path.join(media_dir, file_name), 'rb') as fp:
            self.assertEqual(fp.read(), response_content)
        self.assertEqual(len(response_content), int(response['Content-Length']))


class StaticHelperTest(StaticTests):
    """
    Test case to make sure the static URL pattern helper works as expected
    """
    # Reruns all StaticTests cases with patterns added via static(); the
    # original urlpatterns are saved in setUp and restored in tearDown so
    # other test classes see an unmodified urlconf.
    def setUp(self):
        super(StaticHelperTest, self).setUp()
        self._old_views_urlpatterns = urls.urlpatterns[:]
        urls.urlpatterns += static('/media/', document_root=media_dir)

    def tearDown(self):
        super(StaticHelperTest, self).tearDown()
        urls.urlpatterns = self._old_views_urlpatterns


class StaticUtilsTests(unittest.TestCase):
    def test_was_modified_since_fp(self):
        """
        Test that a floating point mtime does not disturb was_modified_since.
        (#18675)
        """
        mtime = 1343416141.107817
        header = http_date(mtime)
        self.assertFalse(was_modified_since(header, mtime))
apache-2.0
ICTU/quality-time
components/collector/src/source_collectors/azure_devops/source_up_to_dateness.py
1
3165
"""Azure Devops Server up-to-dateness collector.""" from abc import ABC from datetime import datetime import aiohttp from dateutil.parser import parse from base_collectors import SourceCollector, SourceUpToDatenessCollector from collector_utilities.type import URL, Response from source_model import SourceMeasurement, SourceResponses from .base import AzureDevopsJobs, AzureDevopsRepositoryBase class AzureDevopsFileUpToDateness(SourceUpToDatenessCollector, AzureDevopsRepositoryBase): """Collector class to measure the up-to-dateness of a repo or folder/file in a repo.""" async def _api_url(self) -> URL: """Extend to add the commit API path and associated parameters.""" api_url = str(await super()._api_url()) path = self._parameter("file_path", quote=True) branch = self._parameter("branch", quote=True) search_criteria = ( f"searchCriteria.itemPath={path}&searchCriteria.itemVersion.version={branch}&searchCriteria.$top=1" ) return URL(f"{api_url}/commits?{search_criteria}&api-version=4.1") async def _landing_url(self, responses: SourceResponses) -> URL: """Extend to add a path to the file.""" landing_url = str(await super()._landing_url(responses)) path = self._parameter("file_path", quote=True) branch = self._parameter("branch", quote=True) return URL(f"{landing_url}?path={path}&version=GB{branch}") async def _parse_source_response_date_time(self, response: Response) -> datetime: """Override to get the date and time of the commit or the pipeline.""" json_value = (await response.json())["value"] return parse(json_value[0]["committer"]["date"]) class AzureDevopsJobUpToDateness(SourceUpToDatenessCollector, AzureDevopsJobs): # lgtm [py/conflicting-attributes] """Collector class to measure the up-to-dateness of a job/pipeline.""" async def _parse_source_response_date_time(self, response: Response) -> datetime: """Override to get the date and time of the commit or the pipeline.""" json_value = (await response.json())["value"] build_date_times = 
[self._latest_build_date_time(job) for job in json_value if self._include_job(job)] return max(build_date_times, default=datetime.min) class AzureDevopsSourceUpToDateness(SourceCollector, ABC): """Factory class to create a collector to get the up-to-dateness of either jobs or files.""" def __new__(cls, session: aiohttp.ClientSession, source, data_model): """Create an instance of either the file up-to-dateness collector or the jobs up-to-dateness collector.""" file_path = source.get("parameters", {}).get("file_path") collector_class = AzureDevopsFileUpToDateness if file_path else AzureDevopsJobUpToDateness instance = collector_class(session, source, data_model) instance.source_type = cls.source_type return instance async def _parse_source_responses(self, responses: SourceResponses) -> SourceMeasurement: """Override to document that this class does not parse responses itself.""" raise NotImplementedError
apache-2.0
zimmermegan/MARDA
nltk-3.0.3/nltk/corpus/reader/knbc.py
10
5791
#! /usr/bin/env python
# KNB Corpus reader
# Copyright (C) 2001-2015 NLTK Project
# Author: Masato Hagiwara <hagisan@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT

# For more information, see http://lilyx.net/pages/nltkjapanesecorpus.html

from __future__ import print_function

import re

from nltk.compat import string_types
from nltk.parse import DependencyGraph

from nltk.corpus.reader.util import (
    FileSystemPathPointer,
    find_corpus_fileids,
    read_blankline_block,
)
from nltk.corpus.reader.api import SyntaxCorpusReader, CorpusReader

# default function to convert morphlist to str for tree representation
_morphs2str_default = lambda morphs: '/'.join(m[0] for m in morphs if m[0] != 'EOS')


class KNBCorpusReader(SyntaxCorpusReader):
    """
    This class implements:
      - ``__init__``, which specifies the location of the corpus
        and a method for detecting the sentence blocks in corpus files.
      - ``_read_block``, which reads a block from the input stream.
      - ``_word``, which takes a block and returns a list of list of words.
      - ``_tag``, which takes a block and returns a list of list of tagged
        words.
      - ``_parse``, which takes a block and returns a list of parsed
        sentences.

    The structure of tagged words:
      tagged_word = (word(str), tags(tuple))
      tags = (surface, reading, lemma, pos1, posid1, pos2, posid2, pos3, posid3, others ...)

    Usage example
    -------------

    >>> from nltk.corpus.util import LazyCorpusLoader
    >>> knbc = LazyCorpusLoader(
    ...     'knbc/corpus1',
    ...     KNBCorpusReader,
    ...     r'.*/KN.*',
    ...     encoding='euc-jp',
    ... )

    >>> len(knbc.sents()[0])
    9
    """

    def __init__(self, root, fileids, encoding='utf8', morphs2str=_morphs2str_default):
        """
        Initialize KNBCorpusReader
        morphs2str is a function to convert morphlist to str for tree representation
        for _parse()
        """
        CorpusReader.__init__(self, root, fileids, encoding)
        self.morphs2str = morphs2str

    def _read_block(self, stream):
        # blocks are split by blankline (or EOF) - default
        return read_blankline_block(stream)

    def _word(self, t):
        # Collect the surface form (first space-separated cell) of every
        # morpheme line, skipping header/terminator lines.
        res = []
        for line in t.splitlines():
            # ignore the Bunsets headers
            if not re.match(r"EOS|\*|\#|\+", line):
                cells = line.strip().split(" ")
                res.append(cells[0])
        return res

    # ignores tagset argument
    def _tag(self, t, tagset=None):
        res = []
        for line in t.splitlines():
            # ignore the Bunsets headers
            if not re.match(r"EOS|\*|\#|\+", line):
                cells = line.strip().split(" ")
                # convert cells to morph tuples: (surface, "all other cells")
                res.append((cells[0], ' '.join(cells[1:])))
        return res

    def _parse(self, t):
        # Build a DependencyGraph: '*'/'+' lines open a new bunsetsu/tag
        # node (carrying its dependency target and relation), other
        # non-comment lines are morphemes appended to the current node.
        dg = DependencyGraph()
        i = 0
        for line in t.splitlines():
            if line[0] in '*+':
                # start of bunsetsu or tag
                cells = line.strip().split(" ", 3)
                # cells[1] is e.g. "-1D" or "3P": parent index + relation.
                m = re.match(r"([\-0-9]*)([ADIP])", cells[1])
                assert m is not None

                node = dg.nodes[i]
                node.update(
                    {
                        'address': i,
                        'rel': m.group(2),
                        'word': [],
                    }
                )

                dep_parent = int(m.group(1))

                # -1 marks the root of the dependency tree.
                if dep_parent == -1:
                    dg.root = node
                else:
                    dg.nodes[dep_parent]['deps'].append(i)

                i += 1
            elif line[0] != '#':
                # normal morph
                cells = line.strip().split(" ")
                # convert cells to morph tuples
                morph = cells[0], ' '.join(cells[1:])
                # i was already advanced past the owning node, hence i - 1.
                dg.nodes[i - 1]['word'].append(morph)

        if self.morphs2str:
            # Flatten each node's morph list to a display string.
            for node in dg.nodes.values():
                node['word'] = self.morphs2str(node['word'])

        return dg.tree()


######################################################################
# Demo
######################################################################

def demo():

    import nltk
    from nltk.corpus.util import LazyCorpusLoader

    root = nltk.data.find('corpora/knbc/corpus1')
    # Keep only fileids that look like "d-d-ddd-ddd" corpus documents.
    fileids = [f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
               if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)]

    def _knbc_fileids_sort(x):
        # Sort numerically on the dash-separated id components.
        cells = x.split('-')
        return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))

    knbc = LazyCorpusLoader('knbc/corpus1', KNBCorpusReader,
                            sorted(fileids, key=_knbc_fileids_sort), encoding='euc-jp')

    print(knbc.fileids()[:10])
    print(''.join(knbc.words()[:100]))

    print('\n\n'.join(str(tree) for tree in knbc.parsed_sents()[:2]))

    # Swap in a morphs2str that also shows the lemma (third tag cell).
    knbc.morphs2str = lambda morphs: '/'.join(
        "%s(%s)" % (m[0], m[1].split(' ')[2]) for m in morphs if m[0] != 'EOS'
    ).encode('utf-8')

    print('\n\n'.join('%s' % tree for tree in knbc.parsed_sents()[:2]))

    print(
        '\n'.join(
            ' '.join("%s/%s" % (w[0], w[1].split(' ')[2]) for w in sent)
            for sent in knbc.tagged_sents()[0:2]
        )
    )


def test():
    # Smoke test: reader API returns the expected container/element types.
    from nltk.corpus.util import LazyCorpusLoader

    knbc = LazyCorpusLoader(
        'knbc/corpus1', KNBCorpusReader, r'.*/KN.*', encoding='euc-jp')
    assert isinstance(knbc.words()[0], string_types)
    assert isinstance(knbc.sents()[0][0], string_types)
    assert isinstance(knbc.tagged_words()[0], tuple)
    assert isinstance(knbc.tagged_sents()[0][0], tuple)


if __name__ == '__main__':
    demo()
mit
whd/data-pipeline
reports/stability-summary/summarize.py
4
3014
# Summarize daily stability rollup CSVs from S3 into compact JSON for
# overview graphing.  Python 2 code (izip, iteritems).
import csv
import json

from utils import S3CompressedReader, S3CompressedWriter, HeaderCSVReader
from collections import defaultdict, Counter
from itertools import izip, count

# Bucket holding both the rollup inputs and the summary outputs.
default_bucket = 'telemetry-public-analysis-2'

# Column names copied from the rollup CSV; Counts._counts is index-aligned
# with this tuple.
prop_list = (
    'abortedsessioncount',
    'subsessionlengths',
    'abortsplugin',
    'abortscontent',
    'abortsgmplugin',
    'crashesdetectedplugin',
    'pluginhangs',
    'crashesdetectedcontent',
    'crashesdetectedgmplugin',
    'crashsubmitattemptmain',
    'crashsubmitattemptcontent',
    'crashsubmitattemptplugin',
    'crashsubmitsuccessmain',
    'crashsubmitsuccesscontent',
    'crashsubmitsuccessplugin')

class Counts(object):
    # Accumulator for one (channel, buildid, os) key: per-property totals
    # plus the main-process crash count (which comes from a separate file).
    def __init__(self):
        self._counts = [0] * len(prop_list)
        self.crashes = 0

    def increment(self, i, v):
        # i indexes prop_list; v is the already-parsed integer cell value.
        self._counts[i] += v

    def final(self, **kwargs):
        # Render as a flat dict; kwargs carry the grouping key fields.
        d = dict(izip(prop_list, self._counts))
        d.update(kwargs)
        d['crashesdetectedmain'] = self.crashes
        return d

def nullint(v):
    # Empty CSV cells mean zero.
    if v == '':
        return 0
    return int(v)

def summarize(date):
    """
    read the large CSV file produced by rollup.put_counts and rollup.put_crashes
    into a smaller summary JSON format for quick overview graphing.
    """

    counts = defaultdict(Counts)

    # Pass 1: the "-main" file provides all prop_list columns.
    counts_path = 'stability-rollups/{year}/{date}-main.csv.gz'.format(
        year=date.year, date=date.strftime('%Y%m%d'))
    csvheaders, reader = HeaderCSVReader(
        S3CompressedReader(default_bucket, counts_path))

    key_indexes = [csvheaders.index(prop)
                   for prop in ('channel', 'buildid', 'os')]
    # Precompute (csv column, prop_list position) pairs once, outside the
    # row loop.
    csv_indexes = [(csvheaders.index(prop), propidx)
                   for propidx, prop in izip(count(), prop_list)]

    for row in reader:
        key = tuple(row[idx] for idx in key_indexes)
        counter = counts[key]
        for csvidx, propidx in csv_indexes:
            counter.increment(propidx, nullint(row[csvidx]))

    # Pass 2: the "-crashes" file supplies the main-process crash count
    # (last column) for the same keys.
    crashes_path = 'stability-rollups/{year}/{date}-crashes.csv.gz'.format(
        year=date.year, date=date.strftime('%Y%m%d'))
    csvheaders, reader = HeaderCSVReader(
        S3CompressedReader(default_bucket, crashes_path))

    key_indexes = [csvheaders.index(prop)
                   for prop in ('channel', 'buildid', 'os')]

    for row in reader:
        key = tuple(row[idx] for idx in key_indexes)
        counts[key].crashes += nullint(row[-1])

    # Write one JSON array of per-key summary dicts back to S3.
    summary_path = 'stability-rollups/{year}/{date}-summary.json.gz'.format(
        year=date.year, date=date.strftime('%Y%m%d'))
    with S3CompressedWriter(default_bucket, summary_path) as fd:
        json.dump([c.final(channel=channel, buildid=buildid, os=os)
                   for (channel, buildid, os), c in counts.iteritems()], fd)

if __name__ == '__main__':
    import sys
    from datetime import date, timedelta
    # Backfill summaries for a fixed date range, one day at a time.
    start = date(2015, 11, 5)
    end = date(2015, 11, 30)
    for i in count():
        d = start + timedelta(days=i)
        if d > end:
            break
        summarize(d)
mpl-2.0
RackSec/ansible
lib/ansible/modules/cloud/amazon/ec2_tag.py
71
5973
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['stableinterface'],
                    'supported_by': 'curated'}

DOCUMENTATION = '''
---
module: ec2_tag
short_description: create and remove tag(s) to ec2 resources.
description:
    - Creates, removes and lists tags from any EC2 resource.  The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples.  This module has a dependency on python-boto.
version_added: "1.3"
options:
  resource:
    description:
      - The EC2 resource id.
    required: true
    default: null
    aliases: []
  state:
    description:
      - Whether the tags should be present or absent on the resource. Use list to interrogate the tags of an instance.
    required: false
    default: present
    choices: ['present', 'absent', 'list']
    aliases: []
  tags:
    description:
      - a hash/dictionary of tags to add to the resource; '{"key":"value"}' and '{"key":"value","key":"value"}'
    required: true
    default: null
    aliases: []

author: "Lester Wade (@lwade)"
extends_documentation_fragment:
    - aws
    - ec2
'''

EXAMPLES = '''
- name: Ensure tags are present on a resource
  ec2_tag:
    region: eu-west-1
    resource: vol-XXXXXX
    state: present
    tags:
      Name: ubervol
      env: prod

- name: Ensure one dbserver is running
  ec2:
    count_tags:
      Name: dbserver
      Env: production
    exact_count: 1
    group: '{{ security_group }}'
    keypair: '{{ keypair }}'
    image: '{{ image_id }}'
    instance_tags:
      Name: dbserver
      Env: production
    instance_type: '{{ instance_type }}'
    region: eu-west-1
    volumes:
      - device_name: /dev/xvdb
        device_type: standard
        volume_size: 10
        delete_on_termination: True
    wait: True
  register: ec2

- name: Retrieve all volumes for a queried instance
  ec2_vol:
    instance: '{{ item.id }}'
    region: eu-west-1
    state: list
  with_items: '{{ ec2.tagged_instances }}'
  register: ec2_vol

- name: Ensure all volumes are tagged
  ec2_tag:
    region: eu-west-1
    resource: '{{ item.id }}'
    state: present
    tags:
      Name: dbserver
      Env: production
  with_items:
    - ec2_vol.volumes

- name: Get EC2 facts
  action: ec2_facts

- name: Retrieve all tags on an instance
  ec2_tag:
    region: '{{ ansible_ec2_placement_region }}'
    resource: '{{ ansible_ec2_instance_id }}'
    state: list
  register: ec2_tags

- name: List tags, such as Name and env
  debug:
    msg: '{{ ec2_tags.tags.Name }} {{ ec2_tags.tags.env }}'
'''

# boto is optional at import time; absence is reported via fail_json in main().
try:
    import boto.ec2
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False


def main():
    # Entry point: create, remove, or list EC2 tags on a single resource id.
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            resource = dict(required=True),
            tags = dict(type='dict'),
            state = dict(default='present', choices=['present', 'absent', 'list']),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    resource = module.params.get('resource')
    tags = module.params.get('tags')
    state = module.params.get('state')

    ec2 = ec2_connect(module)

    # We need a comparison here so that we can accurately report back changed status.
    # Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate.
    filters = {'resource-id' : resource}
    gettags = ec2.get_all_tags(filters=filters)

    dictadd = {}
    dictremove = {}
    baddict = {}
    tagdict = {}
    # Flatten the boto Tag objects into a plain {name: value} dict.
    for tag in gettags:
        tagdict[tag.name] = tag.value

    if state == 'present':
        if not tags:
            module.fail_json(msg="tags argument is required when state is present")
        # No-op when every requested (key, value) pair already exists.
        if set(tags.items()).issubset(set(tagdict.items())):
            module.exit_json(msg="Tags already exists in %s." %resource, changed=False)
        else:
            # Only the missing/different pairs are created.
            for (key, value) in set(tags.items()):
                if (key, value) not in set(tagdict.items()):
                    dictadd[key] = value
        if not module.check_mode:
            ec2.create_tags(resource, dictadd)
        module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True)

    if state == 'absent':
        if not tags:
            module.fail_json(msg="tags argument is required when state is absent")
        # baddict collects requested pairs that are NOT on the resource; if
        # that covers every requested tag there is nothing to remove.
        for (key, value) in set(tags.items()):
            if (key, value) not in set(tagdict.items()):
                    baddict[key] = value
                    if set(baddict) == set(tags):
                        module.exit_json(msg="Nothing to remove here. Move along.", changed=False)
        for (key, value) in set(tags.items()):
            if (key, value) in set(tagdict.items()):
                    dictremove[key] = value
        if not module.check_mode:
            ec2.delete_tags(resource, dictremove)
        module.exit_json(msg="Tags %s removed for resource %s." % (dictremove,resource), changed=True)

    if state == 'list':
        module.exit_json(changed=False, tags=tagdict)

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
gpl-3.0
ladybug-analysis-tools/honeybee
honeybee/radiance/pattern/brighttext.py
3
2016
"""Radiance Brighttext Pattern. http://radsite.lbl.gov/radiance/refer/ray.html#Brighttext """ from .patternbase import RadiancePattern # TODO(): Implement the class. It's currently creates this material as generic Radiance # material class Brighttext(RadiancePattern): """Radiance Brighttext Pattern. Brighttext is like colortext, but the writing is monochromatic. mod brighttext id 2 fontfile textfile 0 11+ Ox Oy Oz Rx Ry Rz Dx Dy Dz foreground background [spacing] or: mod brighttext id 2+N fontfile . This is a line with N words ... 0 11+ Ox Oy Oz Rx Ry Rz Dx Dy Dz foreground background [spacing] By default, a uniform spacing algorithm is used that guarantees every character will appear in a precisely determined position. Unfortunately, such a scheme results in rather unattractive and difficult to read text with most fonts. The optional spacing value defines the distance between characters for proportional spacing. A positive value selects a spacing algorithm that preserves right margins and indentation, but does not provide the ultimate in proportionally spaced text. A negative value insures that characters are properly spaced, but the placement of words then varies unpredictably. The choice depends on the relative importance of spacing versus formatting. When presenting a section of formatted text, a positive spacing value is usually preferred. A single line of text will often be accompanied by a negative spacing value. A section of text meant to depict a picture, perhaps using a special purpose font such as hexbit4x1.fnt, calls for uniform spacing. Reasonable magnitudes for proportional spacing are between 0.1 (for tightly spaced characters) and 0.3 (for wide spacing). """ pass
gpl-3.0
sandrinr/XCSoar
build/python/build/zlib.py
13
1169
import subprocess from build.makeproject import MakeProject class ZlibProject(MakeProject): def __init__(self, url, alternative_url, md5, installed, **kwargs): MakeProject.__init__(self, url, alternative_url, md5, installed, **kwargs) def get_make_args(self, toolchain): return MakeProject.get_make_args(self, toolchain) + [ 'CC=' + toolchain.cc + ' ' + toolchain.cppflags + ' ' + toolchain.cflags, 'CPP=' + toolchain.cc + ' -E ' + toolchain.cppflags, 'AR=' + toolchain.ar, 'ARFLAGS=' + toolchain.arflags, 'RANLIB=' + toolchain.ranlib, 'LDSHARED=' + toolchain.cc + ' -shared', 'libz.a' ] def get_make_install_args(self, toolchain): return [ 'RANLIB=' + toolchain.ranlib, self.install_target ] def build(self, toolchain): src = self.unpack(toolchain, out_of_tree=False) subprocess.check_call(['./configure', '--prefix=' + toolchain.install_prefix, '--static'], cwd=src, env=toolchain.env) MakeProject.build(self, toolchain, src)
gpl-2.0
anntzer/scipy
scipy/linalg/decomp_qr.py
12
13645
"""QR decomposition functions.""" import numpy # Local imports from .lapack import get_lapack_funcs from .misc import _datacopied __all__ = ['qr', 'qr_multiply', 'rq'] def safecall(f, name, *args, **kwargs): """Call a LAPACK routine, determining lwork automatically and handling error return values""" lwork = kwargs.get("lwork", None) if lwork in (None, -1): kwargs['lwork'] = -1 ret = f(*args, **kwargs) kwargs['lwork'] = ret[-2][0].real.astype(numpy.int_) ret = f(*args, **kwargs) if ret[-1] < 0: raise ValueError("illegal value in %dth argument of internal %s" % (-ret[-1], name)) return ret[:-2] def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False, check_finite=True): """ Compute QR decomposition of a matrix. Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal and R upper triangular. Parameters ---------- a : (M, N) array_like Matrix to be decomposed overwrite_a : bool, optional Whether data in `a` is overwritten (may improve performance if `overwrite_a` is set to True by reusing the existing input data structure rather than creating a new one.) lwork : int, optional Work array size, lwork >= a.shape[1]. If None or -1, an optimal size is computed. mode : {'full', 'r', 'economic', 'raw'}, optional Determines what information is to be returned: either both Q and R ('full', default), only R ('r') or both Q and R but computed in economy-size ('economic', see Notes). The final option 'raw' (added in SciPy 0.11) makes the function return two matrices (Q, TAU) in the internal format used by LAPACK. pivoting : bool, optional Whether or not factorization should include pivoting for rank-revealing qr decomposition. If pivoting, compute the decomposition ``A P = Q R`` as above, but where P is chosen such that the diagonal of R is non-increasing. check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. 
Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- Q : float or complex ndarray Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned if ``mode='r'``. R : float or complex ndarray Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``. P : int ndarray Of shape (N,) for ``pivoting=True``. Not returned if ``pivoting=False``. Raises ------ LinAlgError Raised if decomposition fails Notes ----- This is an interface to the LAPACK routines dgeqrf, zgeqrf, dorgqr, zungqr, dgeqp3, and zgeqp3. If ``mode=economic``, the shapes of Q and R are (M, K) and (K, N) instead of (M,M) and (M,N), with ``K=min(M,N)``. Examples -------- >>> from scipy import linalg >>> rng = np.random.default_rng() >>> a = rng.standard_normal((9, 6)) >>> q, r = linalg.qr(a) >>> np.allclose(a, np.dot(q, r)) True >>> q.shape, r.shape ((9, 9), (9, 6)) >>> r2 = linalg.qr(a, mode='r') >>> np.allclose(r, r2) True >>> q3, r3 = linalg.qr(a, mode='economic') >>> q3.shape, r3.shape ((9, 6), (6, 6)) >>> q4, r4, p4 = linalg.qr(a, pivoting=True) >>> d = np.abs(np.diag(r4)) >>> np.all(d[1:] <= d[:-1]) True >>> np.allclose(a[:, p4], np.dot(q4, r4)) True >>> q4.shape, r4.shape, p4.shape ((9, 9), (9, 6), (6,)) >>> q5, r5, p5 = linalg.qr(a, mode='economic', pivoting=True) >>> q5.shape, r5.shape, p5.shape ((9, 6), (6, 6), (6,)) """ # 'qr' was the old default, equivalent to 'full'. Neither 'full' nor # 'qr' are used below. 
# 'raw' is used internally by qr_multiply if mode not in ['full', 'qr', 'r', 'economic', 'raw']: raise ValueError("Mode argument should be one of ['full', 'r'," "'economic', 'raw']") if check_finite: a1 = numpy.asarray_chkfinite(a) else: a1 = numpy.asarray(a) if len(a1.shape) != 2: raise ValueError("expected a 2-D array") M, N = a1.shape overwrite_a = overwrite_a or (_datacopied(a1, a)) if pivoting: geqp3, = get_lapack_funcs(('geqp3',), (a1,)) qr, jpvt, tau = safecall(geqp3, "geqp3", a1, overwrite_a=overwrite_a) jpvt -= 1 # geqp3 returns a 1-based index array, so subtract 1 else: geqrf, = get_lapack_funcs(('geqrf',), (a1,)) qr, tau = safecall(geqrf, "geqrf", a1, lwork=lwork, overwrite_a=overwrite_a) if mode not in ['economic', 'raw'] or M < N: R = numpy.triu(qr) else: R = numpy.triu(qr[:N, :]) if pivoting: Rj = R, jpvt else: Rj = R, if mode == 'r': return Rj elif mode == 'raw': return ((qr, tau),) + Rj gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,)) if M < N: Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr[:, :M], tau, lwork=lwork, overwrite_a=1) elif mode == 'economic': Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr, tau, lwork=lwork, overwrite_a=1) else: t = qr.dtype.char qqr = numpy.empty((M, M), dtype=t) qqr[:, :N] = qr Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qqr, tau, lwork=lwork, overwrite_a=1) return (Q,) + Rj def qr_multiply(a, c, mode='right', pivoting=False, conjugate=False, overwrite_a=False, overwrite_c=False): """ Calculate the QR decomposition and multiply Q with a matrix. Calculate the decomposition ``A = Q R`` where Q is unitary/orthogonal and R upper triangular. Multiply Q with a vector or a matrix c. Parameters ---------- a : (M, N), array_like Input array c : array_like Input array to be multiplied by ``q``. mode : {'left', 'right'}, optional ``Q @ c`` is returned if mode is 'left', ``c @ Q`` is returned if mode is 'right'. 
The shape of c must be appropriate for the matrix multiplications, if mode is 'left', ``min(a.shape) == c.shape[0]``, if mode is 'right', ``a.shape[0] == c.shape[1]``. pivoting : bool, optional Whether or not factorization should include pivoting for rank-revealing qr decomposition, see the documentation of qr. conjugate : bool, optional Whether Q should be complex-conjugated. This might be faster than explicit conjugation. overwrite_a : bool, optional Whether data in a is overwritten (may improve performance) overwrite_c : bool, optional Whether data in c is overwritten (may improve performance). If this is used, c must be big enough to keep the result, i.e. ``c.shape[0]`` = ``a.shape[0]`` if mode is 'left'. Returns ------- CQ : ndarray The product of ``Q`` and ``c``. R : (K, N), ndarray R array of the resulting QR factorization where ``K = min(M, N)``. P : (N,) ndarray Integer pivot array. Only returned when ``pivoting=True``. Raises ------ LinAlgError Raised if QR decomposition fails. Notes ----- This is an interface to the LAPACK routines ``?GEQRF``, ``?ORMQR``, ``?UNMQR``, and ``?GEQP3``. .. versionadded:: 0.11.0 Examples -------- >>> from scipy.linalg import qr_multiply, qr >>> A = np.array([[1, 3, 3], [2, 3, 2], [2, 3, 3], [1, 3, 2]]) >>> qc, r1, piv1 = qr_multiply(A, 2*np.eye(4), pivoting=1) >>> qc array([[-1., 1., -1.], [-1., -1., 1.], [-1., -1., -1.], [-1., 1., 1.]]) >>> r1 array([[-6., -3., -5. ], [ 0., -1., -1.11022302e-16], [ 0., 0., -1. 
]]) >>> piv1 array([1, 0, 2], dtype=int32) >>> q2, r2, piv2 = qr(A, mode='economic', pivoting=1) >>> np.allclose(2*q2 - qc, np.zeros((4, 3))) True """ if mode not in ['left', 'right']: raise ValueError("Mode argument can only be 'left' or 'right' but " "not '{}'".format(mode)) c = numpy.asarray_chkfinite(c) if c.ndim < 2: onedim = True c = numpy.atleast_2d(c) if mode == "left": c = c.T else: onedim = False a = numpy.atleast_2d(numpy.asarray(a)) # chkfinite done in qr M, N = a.shape if mode == 'left': if c.shape[0] != min(M, N + overwrite_c*(M-N)): raise ValueError('Array shapes are not compatible for Q @ c' ' operation: {} vs {}'.format(a.shape, c.shape)) else: if M != c.shape[1]: raise ValueError('Array shapes are not compatible for c @ Q' ' operation: {} vs {}'.format(c.shape, a.shape)) raw = qr(a, overwrite_a, None, "raw", pivoting) Q, tau = raw[0] gor_un_mqr, = get_lapack_funcs(('ormqr',), (Q,)) if gor_un_mqr.typecode in ('s', 'd'): trans = "T" else: trans = "C" Q = Q[:, :min(M, N)] if M > N and mode == "left" and not overwrite_c: if conjugate: cc = numpy.zeros((c.shape[1], M), dtype=c.dtype, order="F") cc[:, :N] = c.T else: cc = numpy.zeros((M, c.shape[1]), dtype=c.dtype, order="F") cc[:N, :] = c trans = "N" if conjugate: lr = "R" else: lr = "L" overwrite_c = True elif c.flags["C_CONTIGUOUS"] and trans == "T" or conjugate: cc = c.T if mode == "left": lr = "R" else: lr = "L" else: trans = "N" cc = c if mode == "left": lr = "L" else: lr = "R" cQ, = safecall(gor_un_mqr, "gormqr/gunmqr", lr, trans, Q, tau, cc, overwrite_c=overwrite_c) if trans != "N": cQ = cQ.T if mode == "right": cQ = cQ[:, :min(M, N)] if onedim: cQ = cQ.ravel() return (cQ,) + raw[1:] def rq(a, overwrite_a=False, lwork=None, mode='full', check_finite=True): """ Compute RQ decomposition of a matrix. Calculate the decomposition ``A = R Q`` where Q is unitary/orthogonal and R upper triangular. 
Parameters ---------- a : (M, N) array_like Matrix to be decomposed overwrite_a : bool, optional Whether data in a is overwritten (may improve performance) lwork : int, optional Work array size, lwork >= a.shape[1]. If None or -1, an optimal size is computed. mode : {'full', 'r', 'economic'}, optional Determines what information is to be returned: either both Q and R ('full', default), only R ('r') or both Q and R but computed in economy-size ('economic', see Notes). check_finite : bool, optional Whether to check that the input matrix contains only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- R : float or complex ndarray Of shape (M, N) or (M, K) for ``mode='economic'``. ``K = min(M, N)``. Q : float or complex ndarray Of shape (N, N) or (K, N) for ``mode='economic'``. Not returned if ``mode='r'``. Raises ------ LinAlgError If decomposition fails. Notes ----- This is an interface to the LAPACK routines sgerqf, dgerqf, cgerqf, zgerqf, sorgrq, dorgrq, cungrq and zungrq. If ``mode=economic``, the shapes of Q and R are (K, N) and (M, K) instead of (N,N) and (M,N), with ``K=min(M,N)``. 
Examples -------- >>> from scipy import linalg >>> rng = np.random.default_rng() >>> a = rng.standard_normal((6, 9)) >>> r, q = linalg.rq(a) >>> np.allclose(a, r @ q) True >>> r.shape, q.shape ((6, 9), (9, 9)) >>> r2 = linalg.rq(a, mode='r') >>> np.allclose(r, r2) True >>> r3, q3 = linalg.rq(a, mode='economic') >>> r3.shape, q3.shape ((6, 6), (6, 9)) """ if mode not in ['full', 'r', 'economic']: raise ValueError( "Mode argument should be one of ['full', 'r', 'economic']") if check_finite: a1 = numpy.asarray_chkfinite(a) else: a1 = numpy.asarray(a) if len(a1.shape) != 2: raise ValueError('expected matrix') M, N = a1.shape overwrite_a = overwrite_a or (_datacopied(a1, a)) gerqf, = get_lapack_funcs(('gerqf',), (a1,)) rq, tau = safecall(gerqf, 'gerqf', a1, lwork=lwork, overwrite_a=overwrite_a) if not mode == 'economic' or N < M: R = numpy.triu(rq, N-M) else: R = numpy.triu(rq[-M:, -M:]) if mode == 'r': return R gor_un_grq, = get_lapack_funcs(('orgrq',), (rq,)) if N < M: Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq[-N:], tau, lwork=lwork, overwrite_a=1) elif mode == 'economic': Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq, tau, lwork=lwork, overwrite_a=1) else: rq1 = numpy.empty((N, N), dtype=rq.dtype) rq1[-M:] = rq Q, = safecall(gor_un_grq, "gorgrq/gungrq", rq1, tau, lwork=lwork, overwrite_a=1) return R, Q
bsd-3-clause
astocko/statsmodels
statsmodels/stats/tests/test_contrast.py
34
1385
import numpy as np import numpy.random as R from numpy.testing import assert_almost_equal, assert_equal from statsmodels.stats.contrast import Contrast class TestContrast(object): @classmethod def setupClass(cls): R.seed(54321) cls.X = R.standard_normal((40,10)) def test_contrast1(self): term = np.column_stack((self.X[:,0], self.X[:,2])) c = Contrast(term, self.X) test_contrast = [[1] + [0]*9, [0]*2 + [1] + [0]*7] assert_almost_equal(test_contrast, c.contrast_matrix) def test_contrast2(self): zero = np.zeros((40,)) term = np.column_stack((zero, self.X[:,2])) c = Contrast(term, self.X) test_contrast = [0]*2 + [1] + [0]*7 assert_almost_equal(test_contrast, c.contrast_matrix) def test_contrast3(self): P = np.dot(self.X, np.linalg.pinv(self.X)) resid = np.identity(40) - P noise = np.dot(resid,R.standard_normal((40,5))) term = np.column_stack((noise, self.X[:,2])) c = Contrast(term, self.X) assert_equal(c.contrast_matrix.shape, (10,)) #TODO: this should actually test the value of the contrast, not only its dimension def test_estimable(self): X2 = np.column_stack((self.X, self.X[:,5])) c = Contrast(self.X[:,5],X2) #TODO: I don't think this should be estimable? isestimable correct?
bsd-3-clause
erikrose/dxr
tests/test_build.py
4
12885
"""Tests for the machinery that takes offsets and markup bits from plugins and decorates source code with them to create HTML""" from unittest import TestCase import warnings from warnings import catch_warnings from nose.tools import eq_ from dxr.build import (line_boundaries, remove_overlapping_refs, Region, LINE, Ref, balanced_tags, build_lines, tag_boundaries, html_lines, nesting_order, balanced_tags_with_empties, lines_and_annotations) def test_line_boundaries(): """Make sure we find the correct line boundaries with all sorts of line endings, even in files that don't end with a newline.""" eq_(list((point, is_start) for point, is_start, _ in line_boundaries('abc\ndef\r\nghi\rjkl')), [(4, False), (9, False), (13, False), (16, False)]) class RemoveOverlappingTests(TestCase): def test_misbalanced(self): """Make sure we cleanly excise a tag pair from a pair of interleaved tags.""" # A _________ (2, 6) # B ____________ (5, 9) a = Ref('a') b = Ref('b') tags = [(2, True, a), (5, True, b), (6, False, a), (9, False, b)] with catch_warnings(): warnings.simplefilter('ignore') remove_overlapping_refs(tags) eq_(tags, [(2, True, a), (6, False, a)]) def test_overlapping_regions(self): """Regions (as opposed to refs) are allowed to overlap and shouldn't be disturbed:: A _________ (2, 6) B (region) ____________ (5, 9) """ a = Ref('a') b = Region('b') tags = [(2, True, a), (5, True, b), (6, False, a), (9, False, b)] original_tags = tags[:] remove_overlapping_refs(tags) eq_(tags, original_tags) def spaced_tags(tags): """Render (point, is_start, payload) triples as human-readable representations.""" segments = [] for point, is_start, payload in tags: segments.append(' ' * point + ('<%s%s>' % ('' if is_start else '/', 'L' if payload is LINE else payload.payload))) return '\n'.join(segments) def tags_from_text(text): """Return unsorted tags based on an ASCII art representation.""" for line in text.splitlines(): start = line.find('_') label, prespace, underscores = line[0], 
line[2:start], line[start:] ref = Region(label) yield len(prespace), True, ref yield len(prespace) + len(underscores) - 1, False, ref def test_tags_from_text(): # str() so the Region objs compare equal eq_(str(list(tags_from_text('a ______________\n' 'b ______\n' 'c _____'))), '[(0, True, Region("a")), (13, False, Region("a")), ' '(0, True, Region("b")), (5, False, Region("b")), ' '(4, True, Region("c")), (8, False, Region("c"))]') class BalancedTagTests(TestCase): def test_horrors(self): """Try a fairly horrific scenario:: A _______________ (0, 7) B _________ (2, 6) C ____________ (5, 9) D _______ (8, 11) E __ (10, 11) 0 2 5 6 7 8 9 A contains B. B closes while C's still going on. D and E end at the same time. There's even a Region in there. """ a, b, c, d, e = Ref('a'), Region('b'), Ref('c'), Ref('d'), Ref('e') tags = [(0, True, a), (2, True, b), (5, True, c), (6, False, b), (7, False, a), (8, True, d), (9, False, c), (10, True, e), (11, False, e), (11, False, d)] eq_(spaced_tags(balanced_tags(tags)), '<L>\n' '<a>\n' ' <b>\n' ' <c>\n' ' </c>\n' ' </b>\n' ' <c>\n' ' </c>\n' ' </a>\n' ' <c>\n' ' <d>\n' ' </d>\n' ' </c>\n' ' <d>\n' ' <e>\n' ' </e>\n' ' </d>\n' ' </L>') def test_coincident(self): """We shouldn't emit pointless empty tags when tempted to.""" tags = sorted(tags_from_text('a _____\n' 'b _____\n' 'c _____\n'), key=nesting_order) eq_(spaced_tags(balanced_tags(tags)), '<L>\n' '<a>\n' '<b>\n' '<c>\n' ' </c>\n' ' </b>\n' ' </a>\n' ' </L>') def test_coincident_ends(self): """We shouldn't emit empty tags even when coincidently-ending tags don't start together.""" # These Regions aren't in startpoint order. That makes tags_from_test() # instantiate them in a funny order, which makes them sort in the wrong # order, which is realistic. 
tags = sorted(tags_from_text('d _______\n' 'c _________\n' 'b ___________\n' 'a ____________\n' 'e ___________\n'), key=nesting_order) eq_(spaced_tags(balanced_tags(tags)), '<L>\n' '<a>\n' ' <b>\n' ' <c>\n' ' <e>\n' ' <d>\n' ' </d>\n' ' </e>\n' ' </c>\n' ' </b>\n' ' </a>\n' ' <e>\n' ' </e>\n' ' </L>') def test_multiline_comment(self): """Multi-line spans should close at the end of one line and reopen at the beginning of the next.""" c = Region('c') c2 = Region('c') l = LINE tags = [(0, True, c), (79, False, c), (80, False, l), (80, True, c2), (151, False, l), (222, False, l), (284, False, c2), (285, False, l), (286, False, l)] text = u"""/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ """ eq_(list(html_lines(balanced_tags(tags), text.__getslice__)), ['<span class="c">/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */</span>', '<span class="c">/* This Source Code Form is subject to the terms of the Mozilla Public</span>', '<span class="c"> * License, v. 2.0. If a copy of the MPL was not distributed with this</span>', '<span class="c"> * file, You can obtain one at http://mozilla.org/MPL/2.0/. */</span>', '']) def test_empty(self): """Some files are empty. 
Make sure they work.""" eq_(list(balanced_tags([])), []) class Htmlifier(object): def __init__(self, regions=None, refs=None, annotations=None): self._regions = regions or [] self._refs = refs or [] self._annotations = annotations or [] def regions(self): return self._regions def refs(self): return self._refs def annotations(self): return self._annotations def test_tag_boundaries(): """Sanity-check ``tag_boundaries()``.""" eq_(str(list(tag_boundaries([Htmlifier(regions=[(0, 3, 'a'), (3, 5, 'b')])]))), '[(0, True, Region("a")), (3, False, Region("a")), ' '(3, True, Region("b")), (5, False, Region("b"))]') def test_simple_html_lines(): """See if the offsets are right in simple HTML stitching.""" a = Region('a') b = Region('b') line = LINE eq_(''.join(html_lines([(0, True, line), (0, True, a), (3, False, a), (3, True, b), (5, False, b), (5, False, line)], 'hello'.__getslice__)), '<span class="a">hel</span><span class="b">lo</span>') class AnnotationsTests(TestCase): def _expand_group(self, group): """Turn ``group_by``'s annoying iterators into something we can test equality against.""" return [(k, list(v)) for k, v in group] def test_sanity(self): """Make sure annotations are pulled from htmlifiers and paired with HTML lines sanely, handling sparsely distributed annotations and multiple htmlifiers annotating a single line.""" h1 = Htmlifier(annotations=[(1, {'a': 'b'}), (3, {'e': 'f'}), (6, {'g': 'h'})]) h2 = Htmlifier(annotations=[(1, {'c': 'd'})]) results = self._expand_group(lines_and_annotations( ['one', 'two', 'three', 'four', 'five', 'six'], [h1, h2])) eq_(results, [('one', [{'a': 'b'}, {'c': 'd'}]), ('two', []), ('three', [{'e': 'f'}]), ('four', []), ('five', []), ('six', [{'g': 'h'}])]) def test_jump_ahead(self): """Make sure annotations show up on the correct line even when there is no annotation for the first line.""" h1 = Htmlifier(annotations=[(3, {'e': 'f'})]) results = self._expand_group(lines_and_annotations( ['one', 'two', 'three', 'four'], [h1])) 
eq_(results, [('one', []), ('two', []), ('three', [{'e': 'f'}]), ('four', [])]) def test_none(self): """If there are no annotations, or if the annotations run short of the lines, don't stop emitting lines.""" eq_(self._expand_group(lines_and_annotations(['one', 'two'], [Htmlifier(annotations=[])])), [('one', []), ('two', [])]) class IntegrationTests(TestCase): """Tests for several layers at once, though not necessarily all of them""" def test_simple(self): """Sanity-check build_lines, which ties the whole shootin' match together.""" eq_(''.join(build_lines('hello', [Htmlifier(regions=[(0, 3, 'a'), (3, 5, 'b')])])), u'<span class="a">hel</span><span class="b">lo</span>') def test_split_anchor_avoidance(self): """Don't split anchor tags when we can avoid it.""" eq_(''.join(build_lines('this that', [Htmlifier(regions=[(0, 4, 'k')], refs=[(0, 9, ({}, '', None))])])), u'<a data-menu="{}"><span class="k">this</span> that</a>') def test_split_anchor_across_lines(self): """Support unavoidable splits of an anchor across lines.""" eq_(list(build_lines('this\nthat', [Htmlifier(refs=[(0, 9, ({}, '', None))])])), [u'<a data-menu="{}">this</a>', u'<a data-menu="{}">that</a>']) def test_horrors(self): """Untangle a circus of interleaved tags, tags that start where others end, and other untold wretchedness.""" # This is a little brittle. All we really want to test is that each # span of text is within the right spans. We don't care what order the # span tags are in. 
eq_(list(build_lines('this&that', [Htmlifier(regions=[(0, 9, 'a'), (1, 8, 'b'), (4, 7, 'c'), (3, 4, 'd'), (3, 5, 'e'), (0, 4, 'm'), (5, 9, 'n')])])), [u'<span class="a"><span class="m">t<span class="b">hi<span class="d"><span class="e">s</span></span></span></span><span class="b"><span class="e"><span class="c">&amp;</span></span><span class="c"><span class="n">th</span></span><span class="n">a</span></span><span class="n">t</span></span>']) def test_empty_tag_boundaries(self): """Zero-length tags should be filtered out by ``tag_boundaries()``. If they are not, the start of a tag can sort after the end, crashing the tag balancer. """ list(build_lines('hello!', [Htmlifier(regions=[(3, 3, 'a'), (3, 5, 'b')])]))
mit
ASCrookes/django
django/views/i18n.py
82
11102
import gettext as gettext_module import importlib import json import os from django import http from django.apps import apps from django.conf import settings from django.core.urlresolvers import translate_url from django.template import Context, Engine from django.utils import six from django.utils._os import upath from django.utils.encoding import smart_text from django.utils.formats import get_format, get_format_modules from django.utils.http import is_safe_url from django.utils.translation import ( LANGUAGE_SESSION_KEY, check_for_language, get_language, to_locale, ) def set_language(request): """ Redirect to a given url while setting the chosen language in the session or cookie. The url and the language code need to be specified in the request parameters. Since this view changes how the user will see the rest of the site, it must only be accessed as a POST request. If called as a GET request, it will redirect to the page in the request (the 'next' parameter) without changing any state. 
""" next = request.POST.get('next', request.GET.get('next')) if not is_safe_url(url=next, host=request.get_host()): next = request.META.get('HTTP_REFERER') if not is_safe_url(url=next, host=request.get_host()): next = '/' response = http.HttpResponseRedirect(next) if request.method == 'POST': lang_code = request.POST.get('language') if lang_code and check_for_language(lang_code): next_trans = translate_url(next, lang_code) if next_trans != next: response = http.HttpResponseRedirect(next_trans) if hasattr(request, 'session'): request.session[LANGUAGE_SESSION_KEY] = lang_code else: response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code, max_age=settings.LANGUAGE_COOKIE_AGE, path=settings.LANGUAGE_COOKIE_PATH, domain=settings.LANGUAGE_COOKIE_DOMAIN) return response def get_formats(): """ Returns all formats strings required for i18n to work """ FORMAT_SETTINGS = ( 'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT', 'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT', 'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR', 'THOUSAND_SEPARATOR', 'NUMBER_GROUPING', 'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS' ) result = {} for module in [settings] + get_format_modules(reverse=True): for attr in FORMAT_SETTINGS: result[attr] = get_format(attr) formats = {} for k, v in result.items(): if isinstance(v, (six.string_types, int)): formats[k] = smart_text(v) elif isinstance(v, (tuple, list)): formats[k] = [smart_text(value) for value in v] return formats js_catalog_template = r""" {% autoescape off %} (function(globals) { var django = globals.django || (globals.django = {}); {% if plural %} django.pluralidx = function(n) { var v={{ plural }}; if (typeof(v) == 'boolean') { return v ? 1 : 0; } else { return v; } }; {% else %} django.pluralidx = function(count) { return (count == 1) ? 
0 : 1; }; {% endif %} /* gettext library */ django.catalog = django.catalog || {}; {% if catalog_str %} var newcatalog = {{ catalog_str }}; for (var key in newcatalog) { django.catalog[key] = newcatalog[key]; } {% endif %} if (!django.jsi18n_initialized) { django.gettext = function(msgid) { var value = django.catalog[msgid]; if (typeof(value) == 'undefined') { return msgid; } else { return (typeof(value) == 'string') ? value : value[0]; } }; django.ngettext = function(singular, plural, count) { var value = django.catalog[singular]; if (typeof(value) == 'undefined') { return (count == 1) ? singular : plural; } else { return value[django.pluralidx(count)]; } }; django.gettext_noop = function(msgid) { return msgid; }; django.pgettext = function(context, msgid) { var value = django.gettext(context + '\x04' + msgid); if (value.indexOf('\x04') != -1) { value = msgid; } return value; }; django.npgettext = function(context, singular, plural, count) { var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count); if (value.indexOf('\x04') != -1) { value = django.ngettext(singular, plural, count); } return value; }; django.interpolate = function(fmt, obj, named) { if (named) { return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])}); } else { return fmt.replace(/%s/g, function(match){return String(obj.shift())}); } }; /* formatting library */ django.formats = {{ formats_str }}; django.get_format = function(format_type) { var value = django.formats[format_type]; if (typeof(value) == 'undefined') { return format_type; } else { return value; } }; /* add to global namespace */ globals.pluralidx = django.pluralidx; globals.gettext = django.gettext; globals.ngettext = django.ngettext; globals.gettext_noop = django.gettext_noop; globals.pgettext = django.pgettext; globals.npgettext = django.npgettext; globals.interpolate = django.interpolate; globals.get_format = django.get_format; django.jsi18n_initialized = true; } 
}(this)); {% endautoescape %} """ def render_javascript_catalog(catalog=None, plural=None): template = Engine().from_string(js_catalog_template) indent = lambda s: s.replace('\n', '\n ') context = Context({ 'catalog_str': indent(json.dumps( catalog, sort_keys=True, indent=2)) if catalog else None, 'formats_str': indent(json.dumps( get_formats(), sort_keys=True, indent=2)), 'plural': plural, }) return http.HttpResponse(template.render(context), 'text/javascript') def get_javascript_catalog(locale, domain, packages): default_locale = to_locale(settings.LANGUAGE_CODE) app_configs = apps.get_app_configs() allowable_packages = set(app_config.name for app_config in app_configs) allowable_packages.add('django.conf') packages = [p for p in packages if p in allowable_packages] t = {} paths = [] en_selected = locale.startswith('en') en_catalog_missing = True # paths of requested packages for package in packages: p = importlib.import_module(package) path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale') paths.append(path) # add the filesystem paths listed in the LOCALE_PATHS setting paths.extend(reversed(settings.LOCALE_PATHS)) # first load all english languages files for defaults for path in paths: try: catalog = gettext_module.translation(domain, path, ['en']) t.update(catalog._catalog) except IOError: pass else: # 'en' is the selected language and at least one of the packages # listed in `packages` has an 'en' catalog if en_selected: en_catalog_missing = False # next load the settings.LANGUAGE_CODE translations if it isn't english if default_locale != 'en': for path in paths: try: catalog = gettext_module.translation(domain, path, [default_locale]) except IOError: catalog = None if catalog is not None: t.update(catalog._catalog) # last load the currently selected language, if it isn't identical to the default. 
if locale != default_locale: # If the currently selected language is English but it doesn't have a # translation catalog (presumably due to being the language translated # from) then a wrong language catalog might have been loaded in the # previous step. It needs to be discarded. if en_selected and en_catalog_missing: t = {} else: locale_t = {} for path in paths: try: catalog = gettext_module.translation(domain, path, [locale]) except IOError: catalog = None if catalog is not None: locale_t.update(catalog._catalog) if locale_t: t = locale_t plural = None if '' in t: for l in t[''].split('\n'): if l.startswith('Plural-Forms:'): plural = l.split(':', 1)[1].strip() if plural is not None: # this should actually be a compiled function of a typical plural-form: # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : # n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2; plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1] pdict = {} maxcnts = {} catalog = {} for k, v in t.items(): if k == '': continue if isinstance(k, six.string_types): catalog[k] = v elif isinstance(k, tuple): msgid = k[0] cnt = k[1] maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0)) pdict.setdefault(msgid, {})[cnt] = v else: raise TypeError(k) for k, v in pdict.items(): catalog[k] = [v.get(i, '') for i in range(maxcnts[msgid] + 1)] return catalog, plural def null_javascript_catalog(request, domain=None, packages=None): """ Returns "identity" versions of the JavaScript i18n functions -- i.e., versions that don't actually do anything. """ return render_javascript_catalog() def javascript_catalog(request, domain='djangojs', packages=None): """ Returns the selected language catalog as a javascript library. Receives the list of packages to check for translations in the packages parameter either from an infodict or as a +-delimited string from the request. Default is 'django.conf'. 
Additionally you can override the gettext domain for this view, but usually you don't want to do that, as JavaScript messages go to the djangojs domain. But this might be needed if you deliver your JavaScript source from Django templates. """ locale = to_locale(get_language()) if request.GET and 'language' in request.GET: if check_for_language(request.GET['language']): locale = to_locale(request.GET['language']) if packages is None: packages = ['django.conf'] if isinstance(packages, six.string_types): packages = packages.split('+') catalog, plural = get_javascript_catalog(locale, domain, packages) return render_javascript_catalog(catalog, plural)
bsd-3-clause
tumbl3w33d/ansible
lib/ansible/modules/network/illumos/dladm_vlan.py
52
5406
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Adam ล tevko <adam.stevko@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: dladm_vlan short_description: Manage VLAN interfaces on Solaris/illumos systems. description: - Create or delete VLAN interfaces on Solaris/illumos systems. version_added: "2.3" author: Adam ล tevko (@xen0l) options: name: description: - VLAN interface name. required: true link: description: - VLAN underlying link name. required: true temporary: description: - Specifies that the VLAN interface is temporary. Temporary VLANs do not persist across reboots. required: false default: false type: bool vlan_id: description: - VLAN ID value for VLAN interface. required: false default: false aliases: [ "vid" ] state: description: - Create or delete Solaris/illumos VNIC. 
required: false default: "present" choices: [ "present", "absent" ] ''' EXAMPLES = ''' - name: Create 'vlan42' VLAN over 'bnx0' link dladm_vlan: name=vlan42 link=bnx0 vlan_id=42 state=present - name: Remove 'vlan1337' VLAN interface dladm_vlan: name=vlan1337 state=absent ''' RETURN = ''' name: description: VLAN name returned: always type: str sample: vlan42 state: description: state of the target returned: always type: str sample: present temporary: description: specifies if operation will persist across reboots returned: always type: bool sample: True link: description: VLAN's underlying link name returned: always type: str sample: e100g0 vlan_id: description: VLAN ID returned: always type: str sample: 42 ''' from ansible.module_utils.basic import AnsibleModule class VLAN(object): def __init__(self, module): self.module = module self.name = module.params['name'] self.link = module.params['link'] self.vlan_id = module.params['vlan_id'] self.temporary = module.params['temporary'] self.state = module.params['state'] def vlan_exists(self): cmd = [self.module.get_bin_path('dladm', True)] cmd.append('show-vlan') cmd.append(self.name) (rc, _, _) = self.module.run_command(cmd) if rc == 0: return True else: return False def create_vlan(self): cmd = [self.module.get_bin_path('dladm', True)] cmd.append('create-vlan') if self.temporary: cmd.append('-t') cmd.append('-l') cmd.append(self.link) cmd.append('-v') cmd.append(self.vlan_id) cmd.append(self.name) return self.module.run_command(cmd) def delete_vlan(self): cmd = [self.module.get_bin_path('dladm', True)] cmd.append('delete-vlan') if self.temporary: cmd.append('-t') cmd.append(self.name) return self.module.run_command(cmd) def is_valid_vlan_id(self): return 0 <= int(self.vlan_id) <= 4095 def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True, type='str'), link=dict(default=None, type='str'), vlan_id=dict(default=0, aliases=['vid']), temporary=dict(default=False, type='bool'), 
state=dict(default='present', choices=['absent', 'present']), ), required_if=[ ['state', 'present', ['vlan_id', 'link', 'name']], ], supports_check_mode=True ) vlan = VLAN(module) rc = None out = '' err = '' result = {} result['name'] = vlan.name result['link'] = vlan.link result['state'] = vlan.state result['temporary'] = vlan.temporary if int(vlan.vlan_id) != 0: if not vlan.is_valid_vlan_id(): module.fail_json(msg='Invalid VLAN id value', name=vlan.name, state=vlan.state, link=vlan.link, vlan_id=vlan.vlan_id) result['vlan_id'] = vlan.vlan_id if vlan.state == 'absent': if vlan.vlan_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = vlan.delete_vlan() if rc != 0: module.fail_json(name=vlan.name, msg=err, rc=rc) elif vlan.state == 'present': if not vlan.vlan_exists(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = vlan.create_vlan() if rc is not None and rc != 0: module.fail_json(name=vlan.name, msg=err, rc=rc) if rc is None: result['changed'] = False else: result['changed'] = True if out: result['stdout'] = out if err: result['stderr'] = err module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
cbeck88/fifengine
engine/python/fife/extensions/pychan/widgets/ext/__init__.py
5
1458
# -*- coding: utf-8 -*- # #################################################################### # Copyright (C) 2005-2013 by the FIFE team # http://www.fifengine.net # This file is part of FIFE. # # FIFE is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # #################################################################### """ Pychan extension widgets. Extension widgets are partly experimental, partly rarely used widgets which are added here. They are by default not included in the widgets registry and thus cannot be loaded from XML files. Use L{pychan.widgets.registerWidget} to enable that. Not the same care to keep the API stable will be taken for them and before and if they are added (or replace) the standard widgets they will have to be reviewed in detail. """
lgpl-2.1
caleb531/automata
tests/test_nfa.py
1
6512
#!/usr/bin/env python3 """Classes and functions for testing the behavior of NFAs.""" import types from unittest.mock import patch import nose.tools as nose import automata.base.exceptions as exceptions import tests.test_fa as test_fa from automata.fa.nfa import NFA class TestNFA(test_fa.TestFA): """A test class for testing nondeterministic finite automata.""" def test_init_nfa(self): """Should copy NFA if passed into NFA constructor.""" new_nfa = NFA.copy(self.nfa) self.assert_is_copy(new_nfa, self.nfa) def test_init_nfa_missing_formal_params(self): """Should raise an error if formal NFA parameters are missing.""" with nose.assert_raises(TypeError): NFA( states={'q0', 'q1'}, input_symbols={'0', '1'}, initial_state='q0', final_states={'q1'} ) def test_copy_nfa(self): """Should create exact copy of NFA if copy() method is called.""" new_nfa = self.nfa.copy() self.assert_is_copy(new_nfa, self.nfa) def test_init_dfa(self): """Should convert DFA to NFA if passed into NFA constructor.""" nfa = NFA.from_dfa(self.dfa) nose.assert_equal(nfa.states, {'q0', 'q1', 'q2'}) nose.assert_equal(nfa.input_symbols, {'0', '1'}) nose.assert_equal(nfa.transitions, { 'q0': {'0': {'q0'}, '1': {'q1'}}, 'q1': {'0': {'q0'}, '1': {'q2'}}, 'q2': {'0': {'q2'}, '1': {'q1'}} }) nose.assert_equal(nfa.initial_state, 'q0') @patch('automata.fa.nfa.NFA.validate') def test_init_validation(self, validate): """Should validate NFA when initialized.""" NFA.copy(self.nfa) validate.assert_called_once_with() def test_nfa_equal(self): """Should correctly determine if two NFAs are equal.""" new_nfa = self.nfa.copy() nose.assert_true(self.nfa == new_nfa, 'NFAs are not equal') def test_nfa_not_equal(self): """Should correctly determine if two NFAs are not equal.""" new_nfa = self.nfa.copy() new_nfa.final_states.add('q2') nose.assert_true(self.nfa != new_nfa, 'NFAs are equal') def test_validate_invalid_symbol(self): """Should raise error if a transition references an invalid symbol.""" with 
nose.assert_raises(exceptions.InvalidSymbolError): self.nfa.transitions['q1']['c'] = {'q2'} self.nfa.validate() def test_validate_invalid_state(self): """Should raise error if a transition references an invalid state.""" with nose.assert_raises(exceptions.InvalidStateError): self.nfa.transitions['q1']['a'] = {'q3'} self.nfa.validate() def test_validate_invalid_initial_state(self): """Should raise error if the initial state is invalid.""" with nose.assert_raises(exceptions.InvalidStateError): self.nfa.initial_state = 'q3' self.nfa.validate() def test_validate_initial_state_transitions(self): """Should raise error if the initial state has no transitions.""" with nose.assert_raises(exceptions.MissingStateError): del self.nfa.transitions[self.nfa.initial_state] self.nfa.validate() def test_validate_invalid_final_state(self): """Should raise error if the final state is invalid.""" with nose.assert_raises(exceptions.InvalidStateError): self.nfa.final_states = {'q3'} self.nfa.validate() def test_validate_invalid_final_state_non_str(self): """Should raise InvalidStateError even for non-string final states.""" with nose.assert_raises(exceptions.InvalidStateError): self.nfa.final_states = {3} self.nfa.validate() def test_read_input_accepted(self): """Should return correct states if acceptable NFA input is given.""" nose.assert_equal(self.nfa.read_input('aba'), {'q1', 'q2'}) def test_validate_missing_state(self): """Should silently ignore states without transitions defined.""" self.nfa.states.add('q3') self.nfa.transitions['q0']['a'].add('q3') nose.assert_equal(self.nfa.validate(), True) def test_read_input_rejection(self): """Should raise error if the stop state is not a final state.""" with nose.assert_raises(exceptions.RejectionException): self.nfa.read_input('abba') def test_read_input_rejection_invalid_symbol(self): """Should raise error if an invalid symbol is read.""" with nose.assert_raises(exceptions.RejectionException): self.nfa.read_input('abc') def 
test_read_input_step(self): """Should return validation generator if step flag is supplied.""" validation_generator = self.nfa.read_input_stepwise('aba') nose.assert_is_instance(validation_generator, types.GeneratorType) nose.assert_equal(list(validation_generator), [ {'q0'}, {'q1', 'q2'}, {'q0'}, {'q1', 'q2'} ]) def test_accepts_input_true(self): """Should return True if NFA input is accepted.""" nose.assert_equal(self.nfa.accepts_input('aba'), True) def test_accepts_input_false(self): """Should return False if NFA input is rejected.""" nose.assert_equal(self.nfa.accepts_input('abba'), False) def test_cyclic_lambda_transitions(self): """Should traverse NFA containing cyclic lambda transitions.""" # NFA which matches zero or more occurrences of 'a' nfa = NFA( states={'q0', 'q1', 'q2', 'q3'}, input_symbols={'a'}, transitions={ 'q0': {'': {'q1', 'q3'}}, 'q1': {'a': {'q2'}}, 'q2': {'': {'q3'}}, 'q3': {'': {'q0'}} }, initial_state='q0', final_states={'q3'} ) nose.assert_equal(nfa.read_input(''), {'q0', 'q1', 'q3'}) nose.assert_equal(nfa.read_input('a'), {'q0', 'q1', 'q2', 'q3'}) def test_non_str_states(self): """should handle non-string state names""" nfa = NFA( states={0}, input_symbols={0}, transitions={0: {}}, initial_state=0, final_states=set()) # We don't care what the output is, just as long as no exception is # raised nose.assert_not_equal(nfa.accepts_input(''), None)
mit
dnjohnstone/hyperspy
hyperspy/_signals/signal2d.py
1
35008
# -*- coding: utf-8 -*- # Copyright 2007-2020 The HyperSpy developers # # This file is part of HyperSpy. # # HyperSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. import matplotlib.pyplot as plt import numpy as np import scipy as sp import numpy.ma as ma import dask.array as da import logging import warnings from scipy import ndimage try: # For scikit-image >= 0.17.0 from skimage.registration._phase_cross_correlation import _upsampled_dft except ModuleNotFoundError: from skimage.feature.register_translation import _upsampled_dft from hyperspy.defaults_parser import preferences from hyperspy.external.progressbar import progressbar from hyperspy.misc.math_tools import symmetrize, antisymmetrize, optimal_fft_size from hyperspy.signal import BaseSignal from hyperspy._signals.lazy import LazySignal from hyperspy._signals.common_signal2d import CommonSignal2D from hyperspy.signal_tools import PeaksFinder2D from hyperspy.docstrings.plot import ( BASE_PLOT_DOCSTRING, PLOT2D_DOCSTRING, KWARGS_DOCSTRING) from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG from hyperspy.ui_registry import DISPLAY_DT, TOOLKIT_DT from hyperspy.utils.peakfinders2D import ( find_local_max, find_peaks_max, find_peaks_minmax, find_peaks_zaefferer, find_peaks_stat, find_peaks_log, find_peaks_dog, find_peaks_xc) _logger = logging.getLogger(__name__) def shift_image(im, shift=0, interpolation_order=1, 
fill_value=np.nan): if np.any(shift): fractional, integral = np.modf(shift) if fractional.any(): order = interpolation_order else: # Disable interpolation order = 0 return ndimage.shift(im, shift, cval=fill_value, order=order) else: return im def triu_indices_minus_diag(n): """Returns the indices for the upper-triangle of an (n, n) array excluding its diagonal Parameters ---------- n : int The length of the square array """ ti = np.triu_indices(n) isnotdiag = ti[0] != ti[1] return ti[0][isnotdiag], ti[1][isnotdiag] def hanning2d(M, N): """ A 2D hanning window created by outer product. """ return np.outer(np.hanning(M), np.hanning(N)) def sobel_filter(im): sx = ndimage.sobel(im, axis=0, mode='constant') sy = ndimage.sobel(im, axis=1, mode='constant') sob = np.hypot(sx, sy) return sob def fft_correlation(in1, in2, normalize=False, real_only=False): """Correlation of two N-dimensional arrays using FFT. Adapted from scipy's fftconvolve. Parameters ---------- in1, in2 : array Input arrays to convolve. normalize: bool, default False If True performs phase correlation. real_only : bool, default False If True, and in1 and in2 are real-valued inputs, uses rfft instead of fft for approx. 2x speed-up. 
""" s1 = np.array(in1.shape) s2 = np.array(in2.shape) size = s1 + s2 - 1 # Calculate optimal FFT size complex_result = (in1.dtype.kind == 'c' or in2.dtype.kind == 'c') fsize = [optimal_fft_size(a, not complex_result) for a in size] # For real-valued inputs, rfftn is ~2x faster than fftn if not complex_result and real_only: fft_f, ifft_f = np.fft.rfftn, np.fft.irfftn else: fft_f, ifft_f = np.fft.fftn, np.fft.ifftn fprod = fft_f(in1, fsize) fprod *= fft_f(in2, fsize).conjugate() if normalize is True: fprod = np.nan_to_num(fprod / np.absolute(fprod)) ret = ifft_f(fprod).real.copy() return ret, fprod def estimate_image_shift(ref, image, roi=None, sobel=True, medfilter=True, hanning=True, plot=False, dtype='float', normalize_corr=False, sub_pixel_factor=1, return_maxval=True): """Estimate the shift in a image using phase correlation This method can only estimate the shift by comparing bidimensional features that should not change the position in the given axis. To decrease the memory usage, the time of computation and the accuracy of the results it is convenient to select a region of interest by setting the roi keyword. Parameters ---------- ref : 2D numpy.ndarray Reference image image : 2D numpy.ndarray Image to register roi : tuple of ints (top, bottom, left, right) Define the region of interest sobel : bool apply a sobel filter for edge enhancement medfilter : bool apply a median filter for noise reduction hanning : bool Apply a 2d hanning filter plot : bool or matplotlib.Figure If True, plots the images after applying the filters and the phase correlation. If a figure instance, the images will be plotted to the given figure. reference : 'current' or 'cascade' If 'current' (default) the image at the current coordinates is taken as reference. If 'cascade' each image is aligned with the previous one. dtype : str or dtype Typecode or data-type in which the calculations must be performed. 
normalize_corr : bool If True use phase correlation instead of standard correlation sub_pixel_factor : float Estimate shifts with a sub-pixel accuracy of 1/sub_pixel_factor parts of a pixel. Default is 1, i.e. no sub-pixel accuracy. Returns ------- shifts: np.array containing the estimate shifts max_value : float The maximum value of the correlation Notes ----- The statistical analysis approach to the translation estimation when using reference='stat' roughly follows [*]_ . If you use it please cite their article. References ---------- .. [*] Bernhard Schaffer, Werner Grogger and Gerald Kothleitner. โ€œAutomated Spatial Drift Correction for EFTEM Image Series.โ€ Ultramicroscopy 102, no. 1 (December 2004): 27โ€“36. """ ref, image = da.compute(ref, image) # Make a copy of the images to avoid modifying them ref = ref.copy().astype(dtype) image = image.copy().astype(dtype) if roi is not None: top, bottom, left, right = roi else: top, bottom, left, right = [None, ] * 4 # Select region of interest ref = ref[top:bottom, left:right] image = image[top:bottom, left:right] # Apply filters for im in (ref, image): if hanning is True: im *= hanning2d(*im.shape) if medfilter is True: # This is faster than sp.signal.med_filt, # which was the previous implementation. # The size is fixed at 3 to be consistent # with the previous implementation. 
im[:] = sp.ndimage.median_filter(im, size=3) if sobel is True: im[:] = sobel_filter(im) # If sub-pixel alignment not being done, use faster real-valued fft real_only = (sub_pixel_factor == 1) phase_correlation, image_product = fft_correlation( ref, image, normalize=normalize_corr, real_only=real_only) # Estimate the shift by getting the coordinates of the maximum argmax = np.unravel_index(np.argmax(phase_correlation), phase_correlation.shape) threshold = (phase_correlation.shape[0] / 2 - 1, phase_correlation.shape[1] / 2 - 1) shift0 = argmax[0] if argmax[0] < threshold[0] else \ argmax[0] - phase_correlation.shape[0] shift1 = argmax[1] if argmax[1] < threshold[1] else \ argmax[1] - phase_correlation.shape[1] max_val = phase_correlation.real.max() shifts = np.array((shift0, shift1)) # The following code is more or less copied from # skimage.feature.register_feature, to gain access to the maximum value: if sub_pixel_factor != 1: # Initial shift estimate in upsampled grid shifts = np.round(shifts * sub_pixel_factor) / sub_pixel_factor upsampled_region_size = np.ceil(sub_pixel_factor * 1.5) # Center of output array at dftshift + 1 dftshift = np.fix(upsampled_region_size / 2.0) sub_pixel_factor = np.array(sub_pixel_factor, dtype=np.float64) normalization = (image_product.size * sub_pixel_factor ** 2) # Matrix multiply DFT around the current shift estimate sample_region_offset = dftshift - shifts * sub_pixel_factor correlation = _upsampled_dft(image_product.conj(), upsampled_region_size, sub_pixel_factor, sample_region_offset).conj() correlation /= normalization # Locate maximum and map back to original pixel grid maxima = np.array(np.unravel_index( np.argmax(np.abs(correlation)), correlation.shape), dtype=np.float64) maxima -= dftshift shifts = shifts + maxima / sub_pixel_factor max_val = correlation.real.max() # Plot on demand if plot is True or isinstance(plot, plt.Figure): if isinstance(plot, plt.Figure): fig = plot axarr = plot.axes if len(axarr) < 3: for i in 
range(3): fig.add_subplot(1, 3, i + 1) axarr = fig.axes else: fig, axarr = plt.subplots(1, 3) full_plot = len(axarr[0].images) == 0 if full_plot: axarr[0].set_title('Reference') axarr[1].set_title('Image') axarr[2].set_title('Phase correlation') axarr[0].imshow(ref) axarr[1].imshow(image) d = (np.array(phase_correlation.shape) - 1) // 2 extent = [-d[1], d[1], -d[0], d[0]] axarr[2].imshow(np.fft.fftshift(phase_correlation), extent=extent) plt.show() else: axarr[0].images[0].set_data(ref) axarr[1].images[0].set_data(image) axarr[2].images[0].set_data(np.fft.fftshift(phase_correlation)) # TODO: Renormalize images fig.canvas.draw_idle() # Liberate the memory. It is specially necessary if it is a # memory map del ref del image if return_maxval: return -shifts, max_val else: return -shifts class Signal2D(BaseSignal, CommonSignal2D): """ """ _signal_dimension = 2 _lazy = False def __init__(self, *args, **kw): super().__init__(*args, **kw) if self.axes_manager.signal_dimension != 2: self.axes_manager.set_signal_dimension(2) def plot(self, colorbar=True, scalebar=True, scalebar_color="white", axes_ticks=None, axes_off=False, saturated_pixels=None, vmin=None, vmax=None, gamma=1.0, no_nans=False, centre_colormap="auto", min_aspect=0.1, **kwargs ): """%s %s %s """ super(Signal2D, self).plot( colorbar=colorbar, scalebar=scalebar, scalebar_color=scalebar_color, axes_ticks=axes_ticks, axes_off=axes_off, saturated_pixels=saturated_pixels, vmin=vmin, vmax=vmax, gamma=gamma, no_nans=no_nans, centre_colormap=centre_colormap, min_aspect=min_aspect, **kwargs ) plot.__doc__ %= (BASE_PLOT_DOCSTRING, PLOT2D_DOCSTRING, KWARGS_DOCSTRING) def create_model(self, dictionary=None): """Create a model for the current signal Parameters ---------- dictionary : {None, dict}, optional A dictionary to be used to recreate a model. 
Usually generated using :meth:`hyperspy.model.as_dictionary` Returns ------- A Model class """ from hyperspy.models.model2d import Model2D return Model2D(self, dictionary=dictionary) def estimate_shift2D(self, reference='current', correlation_threshold=None, chunk_size=30, roi=None, normalize_corr=False, sobel=True, medfilter=True, hanning=True, plot=False, dtype='float', show_progressbar=None, sub_pixel_factor=1): """Estimate the shifts in an image using phase correlation. This method can only estimate the shift by comparing bi-dimensional features that should not change position between frames. To decrease the memory usage, the time of computation and the accuracy of the results it is convenient to select a region of interest by setting the ``roi`` argument. Parameters ---------- reference : {'current', 'cascade' ,'stat'} If 'current' (default) the image at the current coordinates is taken as reference. If 'cascade' each image is aligned with the previous one. If 'stat' the translation of every image with all the rest is estimated and by performing statistical analysis on the result the translation is estimated. correlation_threshold : {None, 'auto', float} This parameter is only relevant when reference='stat'. If float, the shift estimations with a maximum correlation value lower than the given value are not used to compute the estimated shifts. If 'auto' the threshold is calculated automatically as the minimum maximum correlation value of the automatically selected reference image. chunk_size : {None, int} If int and reference='stat' the number of images used as reference are limited to the given value. roi : tuple of ints or floats (left, right, top, bottom) Define the region of interest. If int(float) the position is given axis index(value). Note that ROIs can be used in place of a tuple. normalize_corr : bool, default False If True, use phase correlation to align the images, otherwise use cross correlation. 
sobel : bool, default True Apply a Sobel filter for edge enhancement medfilter : bool, default True Apply a median filter for noise reduction hanning : bool, default True Apply a 2D hanning filter plot : bool or 'reuse' If True plots the images after applying the filters and the phase correlation. If 'reuse', it will also plot the images, but it will only use one figure, and continuously update the images in that figure as it progresses through the stack. dtype : str or dtype Typecode or data-type in which the calculations must be performed. %s sub_pixel_factor : float Estimate shifts with a sub-pixel accuracy of 1/sub_pixel_factor parts of a pixel. Default is 1, i.e. no sub-pixel accuracy. Returns ------- shifts : list of array List of estimated shifts Notes ----- The statistical analysis approach to the translation estimation when using ``reference='stat'`` roughly follows [Schaffer2004]_. If you use it please cite their article. References ---------- .. [Schaffer2004] Schaffer, Bernhard, Werner Grogger, and Gerald Kothleitner. โ€œAutomated Spatial Drift Correction for EFTEM Image Series.โ€ Ultramicroscopy 102, no. 1 (December 2004): 27โ€“36. 
See Also -------- * :py:meth:`~._signals.signal2d.Signal2D.align2D` """ if show_progressbar is None: show_progressbar = preferences.General.show_progressbar self._check_signal_dimension_equals_two() if roi is not None: # Get the indices of the roi yaxis = self.axes_manager.signal_axes[1] xaxis = self.axes_manager.signal_axes[0] roi = tuple([xaxis._get_index(i) for i in roi[2:]] + [yaxis._get_index(i) for i in roi[:2]]) ref = None if reference == 'cascade' else \ self.__call__().copy() shifts = [] nrows = None images_number = self.axes_manager._max_index + 1 if plot == 'reuse': # Reuse figure for plots plot = plt.figure() if reference == 'stat': nrows = images_number if chunk_size is None else \ min(images_number, chunk_size) pcarray = ma.zeros((nrows, self.axes_manager._max_index + 1, ), dtype=np.dtype([('max_value', np.float), ('shift', np.int32, (2,))])) nshift, max_value = estimate_image_shift( self(), self(), roi=roi, sobel=sobel, medfilter=medfilter, hanning=hanning, normalize_corr=normalize_corr, plot=plot, dtype=dtype, sub_pixel_factor=sub_pixel_factor) np.fill_diagonal(pcarray['max_value'], max_value) pbar_max = nrows * images_number else: pbar_max = images_number # Main iteration loop. 
Fills the rows of pcarray when reference # is stat with progressbar(total=pbar_max, disable=not show_progressbar, leave=True) as pbar: for i1, im in enumerate(self._iterate_signal()): if reference in ['current', 'cascade']: if ref is None: ref = im.copy() shift = np.array([0, 0]) nshift, max_val = estimate_image_shift( ref, im, roi=roi, sobel=sobel, medfilter=medfilter, hanning=hanning, plot=plot, normalize_corr=normalize_corr, dtype=dtype, sub_pixel_factor=sub_pixel_factor) if reference == 'cascade': shift += nshift ref = im.copy() else: shift = nshift shifts.append(shift.copy()) pbar.update(1) elif reference == 'stat': if i1 == nrows: break # Iterate to fill the columns of pcarray for i2, im2 in enumerate( self._iterate_signal()): if i2 > i1: nshift, max_value = estimate_image_shift( im, im2, roi=roi, sobel=sobel, medfilter=medfilter, hanning=hanning, normalize_corr=normalize_corr, plot=plot, dtype=dtype, sub_pixel_factor=sub_pixel_factor) pcarray[i1, i2] = max_value, nshift del im2 pbar.update(1) del im if reference == 'stat': # Select the reference image as the one that has the # higher max_value in the row sqpcarr = pcarray[:, :nrows] sqpcarr['max_value'][:] = symmetrize(sqpcarr['max_value']) sqpcarr['shift'][:] = antisymmetrize(sqpcarr['shift']) ref_index = np.argmax(pcarray['max_value'].min(1)) self.ref_index = ref_index shifts = (pcarray['shift'] + pcarray['shift'][ref_index, :nrows][:, np.newaxis]) if correlation_threshold is not None: if correlation_threshold == 'auto': correlation_threshold = \ (pcarray['max_value'].min(0)).max() _logger.info("Correlation threshold = %1.2f", correlation_threshold) shifts[pcarray['max_value'] < correlation_threshold] = ma.masked shifts.mask[ref_index, :] = False shifts = shifts.mean(0) else: shifts = np.array(shifts) del ref return shifts estimate_shift2D.__doc__ %= SHOW_PROGRESSBAR_ARG def align2D( self, crop=True, fill_value=np.nan, shifts=None, expand=False, interpolation_order=1, show_progressbar=None, parallel=None, 
max_workers=None, **kwargs, ): """Align the images in-place using :py:func:`scipy.ndimage.shift`. The images can be aligned using either user-provided shifts or by first estimating the shifts. See :py:meth:`~._signals.signal2d.Signal2D.estimate_shift2D` for more details on estimating image shifts. Parameters ---------- crop : bool If True, the data will be cropped not to include regions with missing data fill_value : int, float, nan The areas with missing data are filled with the given value. Default is nan. shifts : None or list of tuples If None the shifts are estimated using :py:meth:`~._signals.signal2D.estimate_shift2D`. expand : bool If True, the data will be expanded to fit all data after alignment. Overrides `crop`. interpolation_order: int, default 1. The order of the spline interpolation. Default is 1, linear interpolation. %s %s %s **kwargs : Keyword arguments passed to :py:meth:`~._signals.signal2d.Signal2D.estimate_shift2D` Returns ------- shifts : np.array The estimated shifts are returned only if ``shifts`` is None See Also -------- * :py:meth:`~._signals.signal2d.Signal2D.estimate_shift2D` """ self._check_signal_dimension_equals_two() return_shifts = False if shifts is None: shifts = self.estimate_shift2D(**kwargs) return_shifts = True if not np.any(shifts): warnings.warn( "The estimated shifts are all zero, suggesting " "the images are already aligned", UserWarning, ) return shifts elif not np.any(shifts): warnings.warn( "The provided shifts are all zero, no alignment done", UserWarning, ) return None if expand: # Expand to fit all valid data left, right = ( int(np.floor(shifts[:, 1].min())) if shifts[:, 1].min() < 0 else 0, int(np.ceil(shifts[:, 1].max())) if shifts[:, 1].max() > 0 else 0, ) top, bottom = ( int(np.floor(shifts[:, 0].min())) if shifts[:, 0].min() < 0 else 0, int(np.ceil(shifts[:, 0].max())) if shifts[:, 0].max() > 0 else 0, ) xaxis = self.axes_manager.signal_axes[0] yaxis = self.axes_manager.signal_axes[1] padding = [] for i in 
range(self.data.ndim): if i == xaxis.index_in_array: padding.append((right, -left)) elif i == yaxis.index_in_array: padding.append((bottom, -top)) else: padding.append((0, 0)) self.data = np.pad( self.data, padding, mode="constant", constant_values=(fill_value,) ) if left < 0: xaxis.offset += left * xaxis.scale if np.any((left < 0, right > 0)): xaxis.size += right - left if top < 0: yaxis.offset += top * yaxis.scale if np.any((top < 0, bottom > 0)): yaxis.size += bottom - top # Translate, with sub-pixel precision if necesary, # note that we operate in-place here self._map_iterate( shift_image, iterating_kwargs=(("shift", -shifts),), show_progressbar=show_progressbar, parallel=parallel, max_workers=max_workers, ragged=False, inplace=True, fill_value=fill_value, interpolation_order=interpolation_order, ) if crop and not expand: max_shift = np.max(shifts, axis=0) - np.min(shifts, axis=0) if np.any(max_shift >= np.array(self.axes_manager.signal_shape)): raise ValueError("Shift outside range of signal axes. Cannot crop signal.") # Crop the image to the valid size shifts = -shifts bottom, top = ( int(np.floor(shifts[:, 0].min())) if shifts[:, 0].min() < 0 else None, int(np.ceil(shifts[:, 0].max())) if shifts[:, 0].max() > 0 else 0, ) right, left = ( int(np.floor(shifts[:, 1].min())) if shifts[:, 1].min() < 0 else None, int(np.ceil(shifts[:, 1].max())) if shifts[:, 1].max() > 0 else 0, ) self.crop_image(top, bottom, left, right) shifts = -shifts self.events.data_changed.trigger(obj=self) if return_shifts: return shifts align2D.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG) def crop_image(self, top=None, bottom=None, left=None, right=None, convert_units=False): """Crops an image in place. Parameters ---------- top, bottom, left, right : {int | float} If int the values are taken as indices. If float the values are converted to indices. 
convert_units : bool Default is False If True, convert the signal units using the 'convert_to_units' method of the `axes_manager`. If False, does nothing. See also -------- crop """ self._check_signal_dimension_equals_two() self.crop(self.axes_manager.signal_axes[1].index_in_axes_manager, top, bottom) self.crop(self.axes_manager.signal_axes[0].index_in_axes_manager, left, right) if convert_units: self.axes_manager.convert_units('signal') def add_ramp(self, ramp_x, ramp_y, offset=0): """Add a linear ramp to the signal. Parameters ---------- ramp_x: float Slope of the ramp in x-direction. ramp_y: float Slope of the ramp in y-direction. offset: float, optional Offset of the ramp at the signal fulcrum. Notes ----- The fulcrum of the linear ramp is at the origin and the slopes are given in units of the axis with the according scale taken into account. Both are available via the `axes_manager` of the signal. """ yy, xx = np.indices(self.axes_manager._signal_shape_in_array) if self._lazy: import dask.array as da ramp = offset * da.ones(self.data.shape, dtype=self.data.dtype, chunks=self.data.chunks) else: ramp = offset * np.ones(self.data.shape, dtype=self.data.dtype) ramp += ramp_x * xx ramp += ramp_y * yy self.data += ramp def find_peaks(self, method='local_max', interactive=True, current_index=False, show_progressbar=None, parallel=None, max_workers=None, display=True, toolkit=None, **kwargs): """Find peaks in a 2D signal. Function to locate the positive peaks in an image using various, user specified, methods. Returns a structured array containing the peak positions. Parameters ---------- method : str Select peak finding algorithm to implement. Available methods are: * 'local_max' - simple local maximum search using the :py:func:`skimage.feature.peak_local_max` function * 'max' - simple local maximum search using the :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_max`. 
* 'minmax' - finds peaks by comparing maximum filter results with minimum filter, calculates centers of mass. See the :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_minmax` function. * 'zaefferer' - based on gradient thresholding and refinement by local region of interest optimisation. See the :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_zaefferer` function. * 'stat' - based on statistical refinement and difference with respect to mean intensity. See the :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_stat` function. * 'laplacian_of_gaussian' - a blob finder using the laplacian of Gaussian matrices approach. See the :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_log` function. * 'difference_of_gaussian' - a blob finder using the difference of Gaussian matrices approach. See the :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_log` function. * 'template_matching' - A cross correlation peakfinder. This method requires providing a template with the ``template`` parameter, which is used as reference pattern to perform the template matching to the signal. It uses the :py:func:`skimage.feature.match_template` function and the peaks position are obtained by using `minmax` method on the template matching result. interactive : bool If True, the method parameter can be adjusted interactively. If False, the results will be returned. current_index : bool if True, the computation will be performed for the current index. %s %s %s %s %s **kwargs : dict Keywords parameters associated with above methods, see the documentation of each method for more details. Notes ----- As a convenience, the 'local_max' method accepts the 'distance' and 'threshold' argument, which will be map to the 'min_distance' and 'threshold_abs' of the :py:func:`skimage.feature.peak_local_max` function. 
Returns ------- peaks : :py:class:`~hyperspy.signal.BaseSignal` or numpy.ndarray if current_index=True Array of shape `_navigation_shape_in_array` in which each cell contains an array with dimensions (npeaks, 2) that contains the `x, y` pixel coordinates of peaks found in each image sorted first along `y` and then along `x`. """ method_dict = { 'local_max': find_local_max, 'max': find_peaks_max, 'minmax': find_peaks_minmax, 'zaefferer': find_peaks_zaefferer, 'stat': find_peaks_stat, 'laplacian_of_gaussian': find_peaks_log, 'difference_of_gaussian': find_peaks_dog, 'template_matching' : find_peaks_xc, } # As a convenience, we map 'distance' to 'min_distance' and # 'threshold' to 'threshold_abs' when using the 'local_max' method to # match with the arguments of skimage.feature.peak_local_max. if method == 'local_max': if 'distance' in kwargs.keys(): kwargs['min_distance'] = kwargs.pop('distance') if 'threshold' in kwargs.keys(): kwargs['threshold_abs'] = kwargs.pop('threshold') if method in method_dict.keys(): method_func = method_dict[method] else: raise NotImplementedError(f"The method `{method}` is not " "implemented. 
See documentation for " "available implementations.") if interactive: # Create a peaks signal with the same navigation shape as a # placeholder for the output axes_dict = self.axes_manager._get_axes_dicts( self.axes_manager.navigation_axes) peaks = BaseSignal(np.empty(self.axes_manager.navigation_shape), axes=axes_dict) pf2D = PeaksFinder2D(self, method=method, peaks=peaks, **kwargs) pf2D.gui(display=display, toolkit=toolkit) elif current_index: peaks = method_func(self.__call__(), **kwargs) else: peaks = self.map(method_func, show_progressbar=show_progressbar, parallel=parallel, inplace=False, ragged=True, max_workers=max_workers, **kwargs) return peaks find_peaks.__doc__ %= (SHOW_PROGRESSBAR_ARG, PARALLEL_ARG, MAX_WORKERS_ARG, DISPLAY_DT, TOOLKIT_DT) class LazySignal2D(LazySignal, Signal2D): _lazy = True def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)
gpl-3.0
webdesignll/coin
contrib/pyminer/pyminer.py
1
6434
#!/usr/bin/python # # Copyright (c) 2011 The Bitcoin developers # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # import time import json import pprint import hashlib import struct import re import base64 import httplib import sys from multiprocessing import Process ERR_SLEEP = 15 MAX_NONCE = 1000000L settings = {} pp = pprint.PrettyPrinter(indent=4) class BitcoinRPC: OBJID = 1 def __init__(self, host, port, username, password): authpair = "%s:%s" % (username, password) self.authhdr = "Basic %s" % (base64.b64encode(authpair)) self.conn = httplib.HTTPConnection(host, port, False, 30) def rpc(self, method, params=None): self.OBJID += 1 obj = { 'version' : '1.1', 'method' : method, 'id' : self.OBJID } if params is None: obj['params'] = [] else: obj['params'] = params self.conn.request('POST', '/', json.dumps(obj), { 'Authorization' : self.authhdr, 'Content-type' : 'application/json' }) resp = self.conn.getresponse() if resp is None: print "JSON-RPC: no response" return None body = resp.read() resp_obj = json.loads(body) if resp_obj is None: print "JSON-RPC: cannot JSON-decode body" return None if 'error' in resp_obj and resp_obj['error'] != None: return resp_obj['error'] if 'result' not in resp_obj: print "JSON-RPC: no result in object" return None return resp_obj['result'] def getblockcount(self): return self.rpc('getblockcount') def getwork(self, data=None): return self.rpc('getwork', data) def uint32(x): return x & 0xffffffffL def bytereverse(x): return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) | (((x) >> 8) & 0x0000ff00) | ((x) >> 24) )) def bufreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): word = struct.unpack('@I', in_buf[i:i+4])[0] out_words.append(struct.pack('@I', bytereverse(word))) return ''.join(out_words) def wordreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): out_words.append(in_buf[i:i+4]) out_words.reverse() return 
''.join(out_words) class Miner: def __init__(self, id): self.id = id self.max_nonce = MAX_NONCE def work(self, datastr, targetstr): # decode work data hex string to binary static_data = datastr.decode('hex') static_data = bufreverse(static_data) # the first 76b of 80b do not change blk_hdr = static_data[:76] # decode 256-bit target value targetbin = targetstr.decode('hex') targetbin = targetbin[::-1] # byte-swap and dword-swap targetbin_str = targetbin.encode('hex') target = long(targetbin_str, 16) # pre-hash first 76b of block header static_hash = hashlib.sha256() static_hash.update(blk_hdr) for nonce in xrange(self.max_nonce): # encode 32-bit nonce value nonce_bin = struct.pack("<I", nonce) # hash final 4b, the nonce value hash1_o = static_hash.copy() hash1_o.update(nonce_bin) hash1 = hash1_o.digest() # sha256 hash of sha256 hash hash_o = hashlib.sha256() hash_o.update(hash1) hash = hash_o.digest() # quick test for winning solution: high 32 bits zero? if hash[-4:] != '\0\0\0\0': continue # convert binary hash to 256-bit Python long hash = bufreverse(hash) hash = wordreverse(hash) hash_str = hash.encode('hex') l = long(hash_str, 16) # proof-of-work test: hash < target if l < target: print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,) return (nonce + 1, nonce_bin) else: print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,) # return (nonce + 1, nonce_bin) return (nonce + 1, None) def submit_work(self, rpc, original_data, nonce_bin): nonce_bin = bufreverse(nonce_bin) nonce = nonce_bin.encode('hex') solution = original_data[:152] + nonce + original_data[160:256] param_arr = [ solution ] result = rpc.getwork(param_arr) print time.asctime(), "--> Upstream RPC result:", result def iterate(self, rpc): work = rpc.getwork() if work is None: time.sleep(ERR_SLEEP) return if 'data' not in work or 'target' not in work: time.sleep(ERR_SLEEP) return time_start = time.time() (hashes_done, nonce_bin) = self.work(work['data'], work['target']) time_end = time.time() 
time_diff = time_end - time_start self.max_nonce = long( (hashes_done * settings['scantime']) / time_diff) if self.max_nonce > 0xfffffffaL: self.max_nonce = 0xfffffffaL if settings['hashmeter']: print "HashMeter(%d): %d hashes, %.2f Khash/sec" % ( self.id, hashes_done, (hashes_done / 1000.0) / time_diff) if nonce_bin is not None: self.submit_work(rpc, work['data'], nonce_bin) def loop(self): rpc = BitcoinRPC(settings['host'], settings['port'], settings['rpcuser'], settings['rpcpass']) if rpc is None: return while True: self.iterate(rpc) def miner_thread(id): miner = Miner(id) miner.loop() if __name__ == '__main__': if len(sys.argv) != 2: print "Usage: pyminer.py CONFIG-FILE" sys.exit(1) f = open(sys.argv[1]) for line in f: # skip comment lines m = re.search('^\s*#', line) if m: continue # parse key=value lines m = re.search('^(\w+)\s*=\s*(\S.*)$', line) if m is None: continue settings[m.group(1)] = m.group(2) f.close() if 'host' not in settings: settings['host'] = '127.0.0.1' if 'port' not in settings: settings['port'] = 9394 if 'threads' not in settings: settings['threads'] = 1 if 'hashmeter' not in settings: settings['hashmeter'] = 0 if 'scantime' not in settings: settings['scantime'] = 30L if 'rpcuser' not in settings or 'rpcpass' not in settings: print "Missing username and/or password in cfg file" sys.exit(1) settings['port'] = int(settings['port']) settings['threads'] = int(settings['threads']) settings['hashmeter'] = int(settings['hashmeter']) settings['scantime'] = long(settings['scantime']) thr_list = [] for thr_id in range(settings['threads']): p = Process(target=miner_thread, args=(thr_id,)) p.start() thr_list.append(p) time.sleep(1) # stagger threads print settings['threads'], "mining threads started" print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port']) try: for thr_proc in thr_list: thr_proc.join() except KeyboardInterrupt: pass print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
mit
MrSurly/micropython-esp32
tests/extmod/vfs_fat_ramdisk.py
20
2181
try: import uerrno try: import uos_vfs as uos except ImportError: import uos except ImportError: print("SKIP") raise SystemExit try: uos.VfsFat except AttributeError: print("SKIP") raise SystemExit class RAMFS: SEC_SIZE = 512 def __init__(self, blocks): self.data = bytearray(blocks * self.SEC_SIZE) def readblocks(self, n, buf): #print("readblocks(%s, %x(%d))" % (n, id(buf), len(buf))) for i in range(len(buf)): buf[i] = self.data[n * self.SEC_SIZE + i] def writeblocks(self, n, buf): #print("writeblocks(%s, %x)" % (n, id(buf))) for i in range(len(buf)): self.data[n * self.SEC_SIZE + i] = buf[i] def ioctl(self, op, arg): #print("ioctl(%d, %r)" % (op, arg)) if op == 4: # BP_IOCTL_SEC_COUNT return len(self.data) // self.SEC_SIZE if op == 5: # BP_IOCTL_SEC_SIZE return self.SEC_SIZE try: bdev = RAMFS(50) except MemoryError: print("SKIP") raise SystemExit uos.VfsFat.mkfs(bdev) print(b"FOO_FILETXT" not in bdev.data) print(b"hello!" not in bdev.data) vfs = uos.VfsFat(bdev) uos.mount(vfs, "/ramdisk") print("statvfs:", vfs.statvfs("/ramdisk")) print("getcwd:", vfs.getcwd()) try: vfs.stat("no_file.txt") except OSError as e: print(e.args[0] == uerrno.ENOENT) with vfs.open("foo_file.txt", "w") as f: f.write("hello!") print(list(vfs.ilistdir())) print("stat root:", vfs.stat("/")) print("stat file:", vfs.stat("foo_file.txt")[:-3]) # timestamps differ across runs print(b"FOO_FILETXT" in bdev.data) print(b"hello!" in bdev.data) vfs.mkdir("foo_dir") vfs.chdir("foo_dir") print("getcwd:", vfs.getcwd()) print(list(vfs.ilistdir())) with vfs.open("sub_file.txt", "w") as f: f.write("subdir file") try: vfs.chdir("sub_file.txt") except OSError as e: print(e.args[0] == uerrno.ENOENT) vfs.chdir("..") print("getcwd:", vfs.getcwd()) uos.umount(vfs) vfs = uos.VfsFat(bdev) print(list(vfs.ilistdir(b""))) # list a non-existent directory try: vfs.ilistdir(b"no_exist") except OSError as e: print('ENOENT:', e.args[0] == uerrno.ENOENT)
mit
peterbe/peekaboo
vendor-local/lib/python/sorl/thumbnail/engines/convert_engine.py
5
4948
from __future__ import with_statement import re import os from django.utils.datastructures import SortedDict from django.utils.encoding import smart_str from sorl.thumbnail.base import EXTENSIONS from sorl.thumbnail.conf import settings from sorl.thumbnail.engines.base import EngineBase from subprocess import Popen, PIPE from tempfile import mkstemp size_re = re.compile(r'^(?:.+) (?:[A-Z]+) (?P<x>\d+)x(?P<y>\d+)') class Engine(EngineBase): """ Image object is a dict with source path, options and size """ def write(self, image, options, thumbnail): """ Writes the thumbnail image """ handle, out = mkstemp(suffix='.%s' % EXTENSIONS[options['format']]) if ( options['format'] == 'JPEG' and options.get('progressive', settings.THUMBNAIL_PROGRESSIVE) ): image['options']['interlace'] = 'line' image['options']['quality'] = options['quality'] args = settings.THUMBNAIL_CONVERT.split(' ') args.append(image['source']) for k, v in image['options'].iteritems(): args.append('-%s' % k) if v is not None: args.append('%s' % v) args.append(out) args = map(smart_str, args) p = Popen(args) p.wait() with open(out, 'r') as fp: thumbnail.write(fp.read()) os.close(handle) os.remove(out) os.remove(image['source']) # we should not need this now def get_image(self, source): """ Returns the backend image objects from a ImageFile instance """ handle, tmp = mkstemp() with open(tmp, 'w') as fp: fp.write(source.read()) os.close(handle) return {'source': tmp, 'options': SortedDict(), 'size': None} def get_image_size(self, image): """ Returns the image width and height as a tuple """ if image['size'] is None: args = settings.THUMBNAIL_IDENTIFY.split(' ') args.append(image['source']) p = Popen(args, stdout=PIPE) p.wait() m = size_re.match(p.stdout.read()) image['size'] = int(m.group('x')), int(m.group('y')) return image['size'] def is_valid_image(self, raw_data): """ This is not very good for imagemagick because it will say anything is valid that it can use as input. 
""" handle, tmp = mkstemp() with open(tmp, 'w') as fp: fp.write(raw_data) fp.flush() args = settings.THUMBNAIL_IDENTIFY.split(' ') args.append(tmp) p = Popen(args) retcode = p.wait() os.close(handle) os.remove(tmp) return retcode == 0 def _orientation(self, image): if settings.THUMBNAIL_CONVERT.endswith('gm convert'): args = settings.THUMBNAIL_IDENTIFY.split() args.extend([ '-format', '%[exif:orientation]', image['source'] ]) p = Popen(args, stdout=PIPE) p.wait() result = p.stdout.read().strip() if result: result = int(result) options = image['options'] if result == 2: options['flop'] = None elif result == 3: options['rotate'] = '180' elif result == 4: options['flip'] = None elif result == 5: options['rotate'] = '90' options['flop'] = None elif result == 6: options['rotate'] = '90' elif result == 7: options['rotate'] = '-90' options['flop'] = None elif result == 8: options['rotate'] = '-90' else: # ImageMagick also corrects the orientation exif data for # destination image['options']['auto-orient'] = None return image def _colorspace(self, image, colorspace): """ `Valid colorspaces <http://www.graphicsmagick.org/GraphicsMagick.html#details-colorspace>`_. Backends need to implement the following:: RGB, GRAY """ image['options']['colorspace'] = colorspace return image def _crop(self, image, width, height, x_offset, y_offset): """ Crops the image """ image['options']['crop'] = '%sx%s+%s+%s' % ( width, height, x_offset, y_offset ) image['size'] = (width, height) # update image size return image def _scale(self, image, width, height): """ Does the resizing of the image """ image['options']['scale'] = '%sx%s!' % (width, height) image['size'] = (width, height) # update image size return image
mpl-2.0
ewels/MultiQC_OSXApp
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euckrprober.py
2931
1675
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import EUCKRDistributionAnalysis from .mbcssm import EUCKRSMModel class EUCKRProber(MultiByteCharSetProber): def __init__(self): MultiByteCharSetProber.__init__(self) self._mCodingSM = CodingStateMachine(EUCKRSMModel) self._mDistributionAnalyzer = EUCKRDistributionAnalysis() self.reset() def get_charset_name(self): return "EUC-KR"
mit
prattcmp/OpenJournal
JournalController.py
1
1314
from Journal import Journal, db
from datetime import date, timedelta
from PyQt5.QtCore import QTimer
import time


class JournalController():
    """Mediates between the UI and the Journal model.

    Guarantees that an entry exists for today and tracks which entry is
    currently being viewed (``viewID``) for back/forward navigation.
    """

    def __init__(self):
        # BUGFIX: connect and ensure the schema/today's row exist *before*
        # querying. The original read ``Journal.get(date=date.today()).id``
        # first, which hit an unconnected database and raised
        # Journal.DoesNotExist on a fresh install.
        db.connect()
        if not Journal.table_exists():
            Journal.create_table(True)
        self.create()
        # Start viewing today's entry; create() guarantees it exists.
        self.viewID = self.journal.id

    def reset(self):
        """Re-point the controller at today's entry."""
        self.create()

    def create(self):
        """Fetch today's entry, creating an empty one if it doesn't exist."""
        try:
            self.journal = Journal.get(date = date.today())
        except Journal.DoesNotExist:
            self.journal = Journal.create(text = "", date = date.today())

    def update(self, text):
        """Persist new text for today's entry."""
        self.journal.text = text
        self.journal.save()

    def forward(self):
        """Advance the view to the next (newer) entry.

        Returns the entry, or False when already at the newest one.
        """
        try:
            journals = Journal.select().where(Journal.id > self.viewID).order_by(Journal.date.asc())
            self.viewID = journals[0].id
            return journals[0]
        except IndexError:
            return False

    def back(self):
        """Move the view to the previous (older) entry.

        Returns the entry, or False when already at the oldest one.
        """
        try:
            journals = Journal.select().where(Journal.id < self.viewID).order_by(Journal.date.desc())
            self.viewID = journals[0].id
            return journals[0]
        except IndexError:
            return False

    def get(self, date = None):
        """Return the entry for the given date (default: today).

        BUGFIX: the original default ``date=date.today()`` was evaluated
        once at class-definition time, so a long-running app kept serving
        the launch day's entry. The default is now resolved at call time.
        """
        if date is None:
            # The parameter shadows the imported ``date`` class, so bind it
            # locally under another name to get *today's* date.
            from datetime import date as _date
            date = _date.today()
        return Journal.get(date = date)
mit
Evfro/polara
polara/lib/sparse.py
1
9203
"""Sparse-matrix and sparse-tensor helpers for polara recommenders.

Contains memory-aware sparse/dense product helpers, numba-compiled
matrix-vector kernels, and sparse-tensor (CP/Tucker style) utilities.
"""
import sys
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
import numpy as np
from numpy import power
from scipy.sparse import csr_matrix
from scipy.sparse import diags
from scipy.sparse.linalg import norm as spnorm
from numba import jit, njit, guvectorize, prange
from numba import float64 as f8
from numba import intp as ip
from polara.recommender import defaults

# Per-element memory cost constants used by get_nnz_max().
tuplsize = sys.getsizeof(())
itemsize = np.dtype(np.intp).itemsize
pntrsize = sys.getsizeof(1.0)

# size of list of tuples of indices - to estimate when to convert sparse matrix to dense
# based on http://stackoverflow.com/questions/15641344/python-memory-consumption-dict-vs-list-of-tuples
# and https://code.tutsplus.com/tutorials/understand-how-much-memory-your-python-objects-use--cms-25609
def get_nnz_max():
    # Maximum nnz count that still fits into the configured memory budget
    # (defaults.memory_hard_limit is in GiB).
    return int(defaults.memory_hard_limit * (1024**3) / (tuplsize + 2*(pntrsize + itemsize)))


def check_sparsity(matrix, nnz_coef=0.5, tocsr=False):
    # Densify when the matrix is more than `nnz_coef` full — at that point
    # the dense representation is cheaper to work with.
    if matrix.nnz > nnz_coef * matrix.shape[0] * matrix.shape[1]:
        return matrix.toarray(order='C')
    if tocsr:
        return matrix.tocsr()
    return matrix


def sparse_dot(left_mat, right_mat, dense_output=False, tocsr=False):
    # scipy always returns sparse result, even if dot product is dense
    # this function offers solution to this problem
    # it also takes care on sparse result w.r.t. to further processing
    if dense_output:  # calculate dense result directly
        # TODO matmat multiplication instead of iteration with matvec
        res_type = np.result_type(right_mat.dtype, left_mat.dtype)
        result = np.empty((left_mat.shape[0], right_mat.shape[1]), dtype=res_type)
        for i in range(left_mat.shape[0]):
            v = left_mat.getrow(i)
            # NOTE(review): right_mat is passed as the CSC operand here —
            # presumably callers supply it in CSC format; confirm at call sites.
            result[i, :] = csc_matvec(right_mat, v, dense_output=True, dtype=res_type)
    else:
        result = left_mat.dot(right_mat.T)
        # NOTE even though not neccessary for symmetric i2i matrix,
        # transpose helps to avoid expensive conversion to CSR (performed by scipy)
        if result.nnz > get_nnz_max():
            # too many nnz lead to undesired memory overhead in downvote_seen_items
            result = result.toarray()  # not using order='C' as it may consume memory
        else:
            result = check_sparsity(result, tocsr=tocsr)
    return result


def inner_product_at(target='parallel', **kwargs):
    # Factory returning a compiled gufunc that computes the dot product of
    # row ui of u with row vi of v, broadcast over index arrays.
    @guvectorize(['f4[:,:], f4[:,:], i4[:], i4[:], f4[:]',
                  'f4[:,:], f4[:,:], i8[:], i8[:], f4[:]',
                  'f8[:,:], f8[:,:], i4[:], i4[:], f8[:]',
                  'f8[:,:], f8[:,:], i8[:], i8[:], f8[:]'],
                 '(i,k),(j,k),(),()->()',
                 target=target, nopython=True, **kwargs)
    def inner_product_at_wrapped(u, v, ui, vi, res):
        rank = v.shape[1]
        tmp = 0
        for f in range(rank):
            tmp += u[ui[0], f] * v[vi[0], f]
        res[0] = tmp
    return inner_product_at_wrapped

# roughly equivalent to
# @njit(parallel=True)
# def inner_product_at(u, v, uidx, vidx):
#     size = len(uidx)
#     res = np.empty(size)
#     rank = v.shape[1]
#     for k in prange(size):
#         i = uidx[k]
#         j = vidx[k]
#         tmp = 0
#         for f in range(rank):
#             tmp += u[i, f] * v[j, f]
#         res[k] = tmp
#     return res


# matvec implementation is based on
# http://stackoverflow.com/questions/18595981/improving-performance-of-multiplication-of-scipy-sparse-matrices
@njit(nogil=True)
def matvec2dense(m_ptr, m_ind, m_val, v_nnz, v_val, out):
    # Accumulate CSC-matrix * sparse-vector product into a dense `out`.
    l = len(v_nnz)
    for j in range(l):
        col_start = v_nnz[j]
        col_end = col_start + 1
        ind_start = m_ptr[col_start]
        ind_end = m_ptr[col_end]
        if ind_start != ind_end:
            out[m_ind[ind_start:ind_end]] += m_val[ind_start:ind_end] * v_val[j]


@njit(nogil=True)
def matvec2sparse(m_ptr, m_ind, m_val, v_nnz, v_val, sizes, indices, data):
    # Same product, but fills preallocated CSR `indices`/`data` buffers;
    # `sizes` holds cumulative per-column output offsets.
    l = len(sizes) - 1
    for j in range(l):
        col_start = v_nnz[j]
        col_end = col_start + 1
        ind_start = m_ptr[col_start]
        ind_end = m_ptr[col_end]
        data_start = sizes[j]
        data_end = sizes[j+1]
        if ind_start != ind_end:
            indices[data_start:data_end] = m_ind[ind_start:ind_end]
            data[data_start:data_end] = m_val[ind_start:ind_end] * v_val[j]


def csc_matvec(mat_csc, vec, dense_output=True, dtype=None):
    # Multiply a CSC matrix by a sparse (1-row) vector, dispatching to the
    # compiled dense or sparse kernel above.
    v_nnz = vec.indices
    v_val = vec.data

    m_val = mat_csc.data
    m_ind = mat_csc.indices
    m_ptr = mat_csc.indptr

    res_dtype = dtype or np.result_type(mat_csc.dtype, vec.dtype)
    if dense_output:
        res = np.zeros((mat_csc.shape[0],), dtype=res_dtype)
        matvec2dense(m_ptr, m_ind, m_val, v_nnz, v_val, res)
    else:
        sizes = m_ptr.take(v_nnz+1) - m_ptr.take(v_nnz)
        sizes = np.concatenate(([0], np.cumsum(sizes)))
        n = sizes[-1]
        data = np.empty((n,), dtype=res_dtype)
        indices = np.empty((n,), dtype=np.intp)
        indptr = np.array([0, n], dtype=np.intp)
        matvec2sparse(m_ptr, m_ind, m_val, v_nnz, v_val, sizes, indices, data)
        res = csr_matrix((data, indices, indptr), shape=(1, mat_csc.shape[0]), dtype=res_dtype)
        res.sum_duplicates()  # expensive operation
    return res


@njit
def _blockify(ind, ptr, major_dim):
    # convenient function to compute only diagonal
    # elements of the product of 2 matrices;
    # indices must be intp in order to avoid overflow
    # major_dim is shape[0] for csc format and shape[1] for csr format
    n = len(ptr) - 1
    for i in range(1, n):  # first row/col is unchanged
        lind = ptr[i]
        rind = ptr[i+1]
        for j in range(lind, rind):
            shift_ind = i * major_dim
            ind[j] += shift_ind


def row_unblockify(mat, block_size):
    # Undo row_blockify: fold shifted column indices back into one block.
    # only for CSR matrices
    factor = (mat.indices // block_size) * block_size
    mat.indices -= factor
    mat._shape = (mat.shape[0], block_size)


def row_blockify(mat, block_size):
    # Shift each row's column indices into its own block of `block_size`.
    # only for CSR matrices
    _blockify(mat.indices, mat.indptr, block_size)
    mat._shape = (mat.shape[0], block_size*mat.shape[0])


def inverse_permutation(p):
    # Return s such that s[p[i]] == i for all i.
    s = np.empty(p.size, p.dtype)
    s[p] = np.arange(p.size)
    return s


def unfold_tensor_coordinates(index, shape, mode):
    # Map 3-way tensor coordinates to coordinates of the mode-`mode`
    # unfolding (matricization).
    # TODO implement direct calculation w/o intermediate flattening
    modes = [m for m in [0, 1, 2] if m != mode] + [mode,]
    mode_shape = tuple(shape[m] for m in modes)
    mode_index = tuple(index[m] for m in modes)
    flat_index = np.ravel_multi_index(mode_index, mode_shape)

    unfold_shape = (mode_shape[0]*mode_shape[1], mode_shape[2])
    unfold_index = np.unravel_index(flat_index, unfold_shape)
    return unfold_index, unfold_shape


def tensor_outer_at(vtarget, **kwargs):
    # Factory returning a compiled gufunc computing val * outer(v[i], w[j])
    # broadcast over index arrays — the core of sparse tensor contraction.
    @guvectorize(['void(float64[:], float64[:, :], float64[:, :], intp[:], intp[:], float64[:, :])'],
                 '(),(i,m),(j,n),(),()->(m,n)',
                 target=vtarget, nopython=True, **kwargs)
    def tensor_outer_wrapped(val, v, w, i, j, res):
        r1 = v.shape[1]
        r2 = w.shape[1]
        for m in range(r1):
            for n in range(r2):
                res[m, n] = val[0] * v[i[0], m] * w[j[0], n]
    return tensor_outer_wrapped


@njit(nogil=True)
def dttm_seq(idx, val, u, v, mode0, mode1, mode2, res):
    # Sequential sparse-tensor-times-two-matrices: for each nonzero,
    # accumulate val * outer(u[i1], v[i2]) into res[i0].
    new_shape1 = u.shape[1]
    new_shape2 = v.shape[1]
    for i in range(len(val)):
        i0 = idx[i, mode0]
        i1 = idx[i, mode1]
        i2 = idx[i, mode2]
        vv = val[i]
        for j in range(new_shape1):
            uij = u[i1, j]
            for k in range(new_shape2):
                vik = v[i2, k]
                res[i0, j, k] += vv * uij * vik


@njit(parallel=True)
def dttm_par(idx, val, mat1, mat2, mode1, mode2, unqs, inds, res):
    # Parallel variant of dttm_seq: nonzeros are pre-grouped by output index
    # (unqs/inds from arrange_index) so each prange iteration owns one
    # res[i0] slice and there are no write conflicts.
    r1 = mat1.shape[1]
    r2 = mat2.shape[1]
    n = len(unqs)
    for s in prange(n):
        i0 = unqs[s]
        ul = inds[s]
        for pos in ul:
            i1 = idx[pos, mode1]
            i2 = idx[pos, mode2]
            vp = val[pos]
            for j1 in range(r1):
                for j2 in range(r2):
                    res[i0, j1, j2] += vp * mat1[i1, j1] * mat2[i2, j2]


# @jit(parallel=True) # numba up to v0.41.dev only supports the 1st argument
# https://numba.pydata.org/numba-doc/dev/reference/numpysupported.html
def arrange_index(array):
    # Group positions of `array` by value: returns the unique values and,
    # for each, the positions where it occurs.
    unqs, unq_inv, unq_cnt = np.unique(array, return_inverse=True, return_counts=True)
    inds = np.split(np.argsort(unq_inv), np.cumsum(unq_cnt[:-1]))
    return unqs, inds


def arrange_indices(idx, mode_mask=None):
    # Run arrange_index over the selected columns (modes) of `idx`,
    # in parallel threads when more than one mode is requested.
    n = idx.shape[1]
    res = [[]]*n

    if mode_mask is None:
        mode_mask = [True] * n

    if sum(mode_mask) == 0:
        return res

    if sum(mode_mask) == 1:
        mode, = [i for i, x in enumerate(mode_mask) if x]
        res[mode] = arrange_index(idx[:, mode])
        return res

    with ThreadPoolExecutor(max_workers=sum(mode_mask)) as executor:
        arranged_futures = {executor.submit(arrange_index, idx[:, mode]): mode
                            for mode in range(n) if mode_mask[mode]}
        for future in as_completed(arranged_futures):
            mode = arranged_futures[future]
            res[mode] = future.result()
    return res
mit
redbear/micropython
tests/wipy/pin.py
65
4862
""" This test need a set of pins which can be set as inputs and have no external pull up or pull down connected. GP12 and GP17 must be connected together """ from machine import Pin import os mch = os.uname().machine if 'LaunchPad' in mch: pin_map = ['GP24', 'GP12', 'GP14', 'GP15', 'GP16', 'GP17', 'GP28', 'GP8', 'GP6', 'GP30', 'GP31', 'GP3', 'GP0', 'GP4', 'GP5'] max_af_idx = 15 elif 'WiPy' in mch: pin_map = ['GP23', 'GP24', 'GP12', 'GP13', 'GP14', 'GP9', 'GP17', 'GP28', 'GP22', 'GP8', 'GP30', 'GP31', 'GP0', 'GP4', 'GP5'] max_af_idx = 15 else: raise Exception('Board not supported!') # test initial value p = Pin('GP12', Pin.IN) Pin('GP17', Pin.OUT, value=1) print(p() == 1) Pin('GP17', Pin.OUT, value=0) print(p() == 0) def test_noinit(): for p in pin_map: pin = Pin(p) pin.value() def test_pin_read(pull): # enable the pull resistor on all pins, then read the value for p in pin_map: pin = Pin(p, mode=Pin.IN, pull=pull) for p in pin_map: print(pin()) def test_pin_af(): for p in pin_map: for af in Pin(p).alt_list(): if af[1] <= max_af_idx: Pin(p, mode=Pin.ALT, alt=af[1]) Pin(p, mode=Pin.ALT_OPEN_DRAIN, alt=af[1]) # test un-initialized pins test_noinit() # test with pull-up and pull-down test_pin_read(Pin.PULL_UP) test_pin_read(Pin.PULL_DOWN) # test all constructor combinations pin = Pin(pin_map[0]) pin = Pin(pin_map[0], mode=Pin.IN) pin = Pin(pin_map[0], mode=Pin.OUT) pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.PULL_DOWN) pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.PULL_UP) pin = Pin(pin_map[0], mode=Pin.OPEN_DRAIN, pull=Pin.PULL_UP) pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_DOWN) pin = Pin(pin_map[0], mode=Pin.OUT, pull=None) pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP) pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.LOW_POWER) pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.MED_POWER) pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.HIGH_POWER) pin = Pin(pin_map[0], mode=Pin.OUT, drive=pin.LOW_POWER) pin = 
Pin(pin_map[0], Pin.OUT, Pin.PULL_DOWN) pin = Pin(pin_map[0], Pin.ALT, Pin.PULL_UP) pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP) test_pin_af() # try the entire af range on all pins # test pin init and printing pin = Pin(pin_map[0]) pin.init(mode=Pin.IN) print(pin) pin.init(Pin.IN, Pin.PULL_DOWN) print(pin) pin.init(mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.LOW_POWER) print(pin) pin.init(mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.HIGH_POWER) print(pin) # test value in OUT mode pin = Pin(pin_map[0], mode=Pin.OUT) pin.value(0) pin.toggle() # test toggle print(pin()) pin.toggle() # test toggle again print(pin()) # test different value settings pin(1) print(pin.value()) pin(0) print(pin.value()) pin.value(1) print(pin()) pin.value(0) print(pin()) # test all getters and setters pin = Pin(pin_map[0], mode=Pin.OUT) # mode print(pin.mode() == Pin.OUT) pin.mode(Pin.IN) print(pin.mode() == Pin.IN) # pull pin.pull(None) print(pin.pull() == None) pin.pull(Pin.PULL_DOWN) print(pin.pull() == Pin.PULL_DOWN) # drive pin.drive(Pin.MED_POWER) print(pin.drive() == Pin.MED_POWER) pin.drive(Pin.HIGH_POWER) print(pin.drive() == Pin.HIGH_POWER) # id print(pin.id() == pin_map[0]) # all the next ones MUST raise try: pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.IN) # incorrect drive value except Exception: print('Exception') try: pin = Pin(pin_map[0], mode=Pin.LOW_POWER, pull=Pin.PULL_UP) # incorrect mode value except Exception: print('Exception') try: pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.HIGH_POWER) # incorrect pull value except Exception: print('Exception') try: pin = Pin('A0', Pin.OUT, Pin.PULL_DOWN) # incorrect pin id except Exception: print('Exception') try: pin = Pin(pin_map[0], Pin.IN, Pin.PULL_UP, alt=0) # af specified in GPIO mode except Exception: print('Exception') try: pin = Pin(pin_map[0], Pin.OUT, Pin.PULL_UP, alt=7) # af specified in GPIO mode except Exception: print('Exception') try: pin = Pin(pin_map[0], Pin.ALT, Pin.PULL_UP, alt=0) # 
incorrect af except Exception: print('Exception') try: pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP, alt=-1) # incorrect af except Exception: print('Exception') try: pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP, alt=16) # incorrect af except Exception: print('Exception') try: pin.mode(Pin.PULL_UP) # incorrect pin mode except Exception: print('Exception') try: pin.pull(Pin.OUT) # incorrect pull except Exception: print('Exception') try: pin.drive(Pin.IN) # incorrect drive strength except Exception: print('Exception') try: pin.id('ABC') # id cannot be set except Exception: print('Exception')
mit
microy/MeshToolkit
MeshToolkit/Tool/Statistics.py
2
1149
# -*- coding:utf-8 -*-
#
# Provide functions to compute different statistics on an array of values
#

# External dependencies
import numpy as np


# Print statistics of the given values
def Statistics( values ) :
    """Compute and print summary statistics of the given array of values.

    Prints minimum, maximum, mean, median, standard deviation and variance,
    one per line, right-aligned.
    """
    # Compute the statistics of the given values
    stats = list()
    stats.append( ('Minimum', np.amin( values ) ) )
    stats.append( ('Maximum', np.amax( values ) ) )
    stats.append( ('Mean', np.mean( values ) ) )
    stats.append( ('Median', np.median( values ) ) )
    stats.append( ('Deviation', np.std( values ) ) )
    stats.append( ('Variance', np.var( values ) ) )
    # Print the stats
    print( 'Statistics...' )
    for s in stats :
        print( '{:>14} : {:>15.5f}'.format( *s ) )


# Print a histogram of the given values
def Histogram( values, bins = 20 ) :
    """Print an ASCII histogram of the given values in the console.

    values : array-like of numbers to bin.
    bins   : number of histogram bins (default 20).
    """
    # Compute histogram
    hist, bin_edges = np.histogram( values, bins )
    # Get the contribution percentage of each bin
    # BUGFIX: hist.astype( np.float ) crashes on NumPy >= 1.24 where the
    # deprecated np.float alias was removed; the builtin float is the
    # documented, equivalent replacement.
    total = hist.astype( float ) / hist.sum()
    # Print the histogram in the console (one bar per bin, scaled to 60 cols,
    # plus a final line for the rightmost bin edge)
    print( 'Histogram...' )
    for i in range( bins ) :
        print( '{:>14.2f} | {:60} |'.format( bin_edges[i], '_' * int(total[i] * 60) ) )
    print( '{:>14.2f} | {:60} |'.format( bin_edges[bins], '' ) )
mit
lgarren/spack
var/spack/repos/builtin/packages/libdrm/package.py
3
2264
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * import sys class Libdrm(Package): """A userspace library for accessing the DRM, direct rendering manager, on Linux, BSD and other systems supporting the ioctl interface.""" homepage = "http://dri.freedesktop.org/libdrm/" url = "http://dri.freedesktop.org/libdrm/libdrm-2.4.59.tar.gz" version('2.4.81', 'dc575dd661a082390e9f1366ca5734b0') version('2.4.75', '743c16109d91a2539dfc9cc56130d695') version('2.4.70', 'a8c275bce5f3d71a5ca25e8fb60df084') version('2.4.59', '105ac7af1afcd742d402ca7b4eb168b6') version('2.4.33', '86e4e3debe7087d5404461e0032231c8') depends_on('pkg-config@0.9.0:', type='build') depends_on('libpciaccess@0.10:', when=(sys.platform != 'darwin')) depends_on('libpthread-stubs') def install(self, spec, prefix): 
configure('--prefix={0}'.format(prefix), '--enable-static', 'LIBS=-lrt') # This fixes a bug with `make check` make() make('check') make('install')
lgpl-2.1
SnakeJenny/TensorFlow
tensorflow/compiler/tests/conv2d_test.py
43
17758
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Conv2D via the XLA JIT.

The canned results in these tests are created by running each test using the
Tensorflow CPU device and saving the output.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest


class Conv2DTest(XLATestCase):
  # Forward conv2d: each test feeds np.arange data and compares against
  # canned CPU-device results.

  def _VerifyValues(self, input_sizes, filter_sizes, stride, padding,
                    expected):
    """Tests that tf.nn.conv2d produces the expected value.

    Args:
      input_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      stride: Stride.
      padding: Padding type.
      expected: Expected output.
    """
    total_size_1 = np.prod(input_sizes)
    total_size_2 = np.prod(filter_sizes)
    # Deterministic inputs: 1..N reshaped to the requested dimensions.
    x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)
    x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(filter_sizes)
    strides = [1, stride, stride, 1]

    with self.test_session() as sess:
      with self.test_scope():
        t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)
        t2 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
        out = nn_ops.conv2d(
            t1, t2, strides=strides, padding=padding, data_format="NHWC")
      value = sess.run(out, {t1: x1, t2: x2})
    self.assertArrayNear(expected, np.ravel(value), 1e-3)

  def testConv2D1x1Filter(self):
    expected_output = [
        30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
        204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
    ]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[1, 1, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)

  def testConv2D2x2Filter(self):
    expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[2, 2, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)

  def testConv2D1x2Filter(self):
    expected_output = [
        231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
        936.0, 1029.0
    ]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[1, 2, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)

  def testConv2D2x2FilterStride2(self):
    expected_output = [2271.0, 2367.0, 2463.0]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[2, 2, 3, 3],
        stride=2,
        padding="VALID",
        expected=expected_output)

  def testConv2D2x2FilterStride2Same(self):
    expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[2, 2, 3, 3],
        stride=2,
        padding="SAME",
        expected=expected_output)


class Conv2DBackpropInputTest(XLATestCase):
  # Gradient w.r.t. the conv2d input, against canned CPU results.

  def _VerifyValues(self, input_sizes, filter_sizes, out_backprop_sizes,
                    stride, padding, expected):
    """Tests that gen_nn_ops.conv2d_backprop_input produces the expected output.

    Args:
      input_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      out_backprop_sizes: Output gradients tensor dimensions.
      stride: Stride.
      padding: Padding type.
      expected: Expected output.
    """
    total_size_1 = np.prod(filter_sizes)
    total_size_2 = np.prod(out_backprop_sizes)
    x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(filter_sizes)
    x2 = np.arange(
        1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)
    strides = [1, stride, stride, 1]

    with self.test_session() as sess:
      with self.test_scope():
        t1 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
        t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)
        out = gen_nn_ops.conv2d_backprop_input(
            input_sizes=input_sizes,
            filter=t1,
            out_backprop=t2,
            strides=strides,
            padding=padding,
            data_format="NHWC")
      value = sess.run(out, {t1: x1, t2: x2})
    self.assertArrayNear(expected, np.ravel(value), 1e-3)

  def testConv2D1x1Filter(self):
    expected_output = [
        5, 11, 17, 11, 25, 39, 17, 39, 61, 23, 53, 83, 29, 67, 105, 35, 81,
        127, 41, 95, 149, 47, 109, 171, 53, 123, 193, 59, 137, 215, 65, 151,
        237, 71, 165, 259, 77, 179, 281, 83, 193, 303, 89, 207, 325, 95, 221,
        347.
    ]
    self._VerifyValues(
        input_sizes=[1, 4, 4, 3],
        filter_sizes=[1, 1, 3, 2],
        out_backprop_sizes=[1, 4, 4, 2],
        stride=1,
        padding="VALID",
        expected=expected_output)

  def testConv2D1x2FilterStride3Width5(self):
    expected_output = [1, 2, 0, 2, 4]
    self._VerifyValues(
        input_sizes=[1, 1, 5, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        stride=3,
        padding="VALID",
        expected=expected_output)

  def testConv2D1x2FilterStride3Width6(self):
    expected_output = [1, 2, 0, 2, 4, 0]
    self._VerifyValues(
        input_sizes=[1, 1, 6, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        stride=3,
        padding="VALID",
        expected=expected_output)

  def testConv2D1x2FilterStride3Width7(self):
    expected_output = [1, 2, 0, 2, 4, 0, 0]
    self._VerifyValues(
        input_sizes=[1, 1, 7, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        stride=3,
        padding="VALID",
        expected=expected_output)

  def testConv2D2x2FilterC1Same(self):
    expected_output = [1, 4, 7, 7, 23, 33]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 2, 3, 1],
        stride=1,
        padding="SAME",
        expected=expected_output)

  def testConv2D2x2Filter(self):
    expected_output = [
        14, 32, 50, 100, 163, 226, 167, 212, 257, 122, 140, 158, 478, 541,
        604, 437, 482, 527
    ]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[2, 2, 3, 3],
        out_backprop_sizes=[1, 1, 2, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)

  def testConv2D2x2FilterSame(self):
    expected_output = [
        14, 32, 50, 100, 163, 226, 217, 334, 451, 190, 307, 424, 929, 1217,
        1505, 1487, 1883, 2279
    ]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[2, 2, 3, 3],
        out_backprop_sizes=[1, 2, 3, 3],
        stride=1,
        padding="SAME",
        expected=expected_output)

  def testConv2D1x2Filter(self):
    expected_output = [1, 4, 4, 3, 10, 8, 5, 16, 12]
    self._VerifyValues(
        input_sizes=[1, 3, 3, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 3, 2, 1],
        stride=1,
        padding="VALID",
        expected=expected_output)

  def testConv2D1x2FilterSame(self):
    expected_output = [1, 4, 7, 4, 13, 16, 7, 22, 25]
    self._VerifyValues(
        input_sizes=[1, 3, 3, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 3, 3, 1],
        stride=1,
        padding="SAME",
        expected=expected_output)

  def testConv2D2x2FilterStride2(self):
    expected_output = [1, 2, 5, 4, 6, 0, 0, 0, 0, 0, 3, 6, 13, 8, 12]
    self._VerifyValues(
        input_sizes=[1, 3, 5, 1],
        filter_sizes=[1, 3, 1, 1],
        out_backprop_sizes=[1, 2, 2, 1],
        stride=2,
        padding="VALID",
        expected=expected_output)

  def testConv2D2x2FilterStride2Same(self):
    expected_output = [1, 2, 2, 3, 4, 6]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        stride=2,
        padding="SAME",
        expected=expected_output)


class Conv2DBackpropFilterTest(XLATestCase):
  # Gradient w.r.t. the conv2d filter, against canned CPU results.

  def _VerifyValues(self, input_sizes, filter_sizes, out_backprop_sizes,
                    stride, padding, expected):
    """Tests that gen_nn_ops.conv2d_backprop_filter produces the right output.

    Args:
      input_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      out_backprop_sizes: Output gradients tensor dimensions.
      stride: Stride.
      padding: Padding type.
      expected: Expected output.
    """
    total_size_1 = np.prod(input_sizes)
    total_size_2 = np.prod(out_backprop_sizes)
    x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)
    x2 = np.arange(
        1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)
    strides = [1, stride, stride, 1]

    with self.test_session() as sess:
      with self.test_scope():
        t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)
        t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)
        tensor = gen_nn_ops.conv2d_backprop_filter(
            input=t1,
            filter_sizes=filter_sizes,
            out_backprop=t2,
            strides=strides,
            padding=padding,
            data_format="NHWC")
      value = sess.run(tensor, {t1: x1, t2: x2})
    self.assertArrayNear(expected, np.ravel(value), 1e-3)

  def testConv2D1x1Filter(self):
    expected_output = [8056, 8432, 8312, 8704, 8568, 8976]
    self._VerifyValues(
        input_sizes=[1, 4, 4, 3],
        filter_sizes=[1, 1, 3, 2],
        out_backprop_sizes=[1, 4, 4, 2],
        stride=1,
        padding="VALID",
        expected=expected_output)

  def testConv2D1x2Filter(self):
    expected_output = [120, 141]
    self._VerifyValues(
        input_sizes=[1, 3, 3, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 3, 2, 1],
        stride=1,
        padding="VALID",
        expected=expected_output)

  def testConv2D2x2FilterDepth1(self):
    expected_output = [5, 8, 14, 17]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        stride=1,
        padding="VALID",
        expected=expected_output)

  def testConv2D2x2Filter(self):
    expected_output = [
        17, 22, 27, 22, 29, 36, 27, 36, 45, 32, 43, 54, 37, 50, 63, 42, 57,
        72, 62, 85, 108, 67, 92, 117, 72, 99, 126, 77, 106, 135, 82, 113,
        144, 87, 120, 153
    ]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[2, 2, 3, 3],
        out_backprop_sizes=[1, 1, 2, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)

  def testConv2D1x2FilterStride3Width5(self):
    expected_output = [9, 12]
    self._VerifyValues(
        input_sizes=[1, 1, 5, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        stride=3,
        padding="VALID",
        expected=expected_output)

  def testConv2D1x2FilterStride3Width6(self):
    expected_output = [9, 12]
    self._VerifyValues(
        input_sizes=[1, 1, 6, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        stride=3,
        padding="VALID",
        expected=expected_output)

  def testConv2D1x2FilterStride3Width7(self):
    expected_output = [9, 12]
    self._VerifyValues(
        input_sizes=[1, 1, 7, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        stride=3,
        padding="VALID",
        expected=expected_output)

  def testConv2D1x3Filter(self):
    expected_output = [5, 8, 11]
    self._VerifyValues(
        input_sizes=[1, 1, 4, 1],
        filter_sizes=[1, 3, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        stride=1,
        padding="VALID",
        expected=expected_output)

  def testConv2D1x3FilterSame(self):
    expected_output = [20, 30, 20]
    self._VerifyValues(
        input_sizes=[1, 1, 4, 1],
        filter_sizes=[1, 3, 1, 1],
        out_backprop_sizes=[1, 1, 4, 1],
        stride=1,
        padding="SAME",
        expected=expected_output)

  def testConv2D1x3FilterSameOutbackprop2(self):
    expected_output = [7, 10, 3]
    self._VerifyValues(
        input_sizes=[1, 1, 4, 1],
        filter_sizes=[1, 3, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        stride=2,
        padding="SAME",
        expected=expected_output)

  def testConv2D2x2FilterC1Same(self):
    expected_output = [91, 58, 32, 17]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 2, 3, 1],
        stride=1,
        padding="SAME",
        expected=expected_output)

  def testConv2D2x2FilterStride2(self):
    expected_output = [92, 102, 112]
    self._VerifyValues(
        input_sizes=[1, 3, 5, 1],
        filter_sizes=[1, 3, 1, 1],
        out_backprop_sizes=[1, 2, 2, 1],
        stride=2,
        padding="VALID",
        expected=expected_output)

  def testConv2D2x2FilterStride2Same(self):
    expected_output = [7, 2, 16, 5]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        stride=2,
        padding="SAME",
        expected=expected_output)


class DepthwiseConv2DTest(XLATestCase):
  # Depthwise conv2d: compares the XLA device result against the CPU
  # device result on random inputs rather than canned values.

  CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"

  def ConfigsToTest(self):
    # Yields (input_size, filter_size, stride, padding) tuples.
    # NOTE(review): `paddings` has 5 entries while the other lists have 4;
    # zip() silently drops the extra one — confirm whether a 5th config was
    # intended upstream.
    input_sizes = [[4, 35, 35, 2], [4, 147, 147, 2], [3, 299, 299, 3],
                   [5, 183, 183, 1]]
    filter_sizes = [[5, 5, 2, 1], [3, 3, 2, 8], [2, 2, 3, 8], [5, 5, 1, 2]]
    strides = [1, 3, 2, 2]
    # pylint: disable=invalid-name
    VALID = "VALID"
    SAME = "SAME"
    # pylint: enable=invalid-name
    paddings = [SAME, VALID, SAME, SAME, SAME]
    for i, f, s, p in zip(input_sizes, filter_sizes, strides, paddings):
      yield i, f, s, p

  def _VerifyValues(self, input_size, filter_size, stride, padding):
    # Run the same depthwise conv on the XLA test device and on the CPU
    # device, then check the results agree.
    imag = np.random.rand(*input_size).astype(np.float32)
    filt = np.random.rand(*filter_size).astype(np.float32)
    strides = [1, stride, stride, 1]

    with self.test_session():
      with self.test_scope():
        imag_ph = array_ops.placeholder(dtypes.float32, shape=input_size)
        filt_ph = array_ops.placeholder(dtypes.float32, shape=filter_size)
        feed_dict = {imag_ph: imag, filt_ph: filt}
        xla_out = nn_impl.depthwise_conv2d(imag_ph, filt_ph, strides,
                                           padding).eval(feed_dict=feed_dict)

    with self.test_session():
      with ops.device(self.CPU_DEVICE):
        imag_ph = array_ops.placeholder(dtypes.float32, shape=input_size)
        filt_ph = array_ops.placeholder(dtypes.float32, shape=filter_size)
        feed_dict = {imag_ph: imag, filt_ph: filt}
        cpu_out = nn_impl.depthwise_conv2d(imag_ph, filt_ph, strides,
                                           padding).eval(feed_dict=feed_dict)

    self.assertAllClose(xla_out, cpu_out)

  # This is disabled because we need a mechanism to set command-line flags,
  # i.e. an implementation of SetCommandLineOption() below.
# # def _VerifyDummy(self, input_size, filter_size, stride, padding): # imag = np.random.rand(*input_size).astype(np.float32) # filt = np.random.rand(*filter_size).astype(np.float32) # strides = [1, stride, stride, 1] # # with self.test_session(): # with self.test_scope(): # imag_ph = tf.placeholder(tf.float32, shape=input_size) # filt_ph = tf.placeholder(tf.float32, shape=filter_size) # feed_dict = {imag_ph: imag, filt_ph: filt} # SetCommandLineOption( # "tf_tla_depthwise_conv2d_custom_func", # "DummyDepthwiseConv2dKernel") # xla_out = tf.nn.depthwise_conv2d( # imag_ph, filt_ph, strides, padding).eval(feed_dict=feed_dict) # SetCommandLineOption( # "tf_tla_depthwise_conv2d_custom_func", "") # # expected = np.array(range(np.ravel(xla_out).shape[0]), dtype=np.float32) # self.assertAllClose(np.ravel(xla_out), expected) def testBasic(self): for i, f, s, p in self.ConfigsToTest(): self._VerifyValues(i, f, s, p) # Test disabled until _VerifyDummy(), above can be implemented. # def testCustomFunc(self): # if self.has_custom_call: # for i, f, s, p in self.ConfigsToTest(): # self._VerifyDummy(i, f, s, p) if __name__ == "__main__": googletest.main()
apache-2.0
borosnborea/SwordGO_app
example/kivymap/.buildozer/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/__init__.py
838
1384
'''
Debian and other distributions "unbundle" requests' vendored dependencies, and
rewrite all imports to use the global versions of ``urllib3`` and ``chardet``.
The problem with this is that not only requests itself imports those
dependencies, but third-party code outside of the distros' control too.

In reaction to these problems, the distro maintainers replaced
``requests.packages`` with a magical "stub module" that imports the correct
modules. The implementations were varying in quality and all had severe
problems. For example, a symlink (or hardlink) that links the correct modules
into place introduces problems regarding object identity, since you now have
two modules in `sys.modules` with the same API, but different identities::

    requests.packages.urllib3 is not urllib3

With version ``2.5.2``, requests started to maintain its own stub, so that
distro-specific breakage would be reduced to a minimum, even though the whole
issue is not requests' fault in the first place. See
https://github.com/kennethreitz/requests/pull/2375 for the corresponding pull
request.
'''

from __future__ import absolute_import
import sys

# Prefer the vendored copy shipped inside requests; when a distro has
# stripped it out, fall back to the system-wide package.
try:
    from . import urllib3
except ImportError:
    import urllib3
    # Alias the system module under this namespace so that
    # `import requests.packages.urllib3` resolves to the SAME module object
    # as `import urllib3` (avoids the object-identity problem described above).
    sys.modules['%s.urllib3' % __name__] = urllib3

# Same dance for chardet.
try:
    from . import chardet
except ImportError:
    import chardet
    sys.modules['%s.chardet' % __name__] = chardet
gpl-3.0
ulrikdb/linux
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
12980
5411
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.

# NOTE: this file targets Python 2 (py2-only `raise Class, msg` syntax below).
try:
	import wx
except ImportError:
	raise ImportError, "You need to install the wxpython lib for this script"


class RootFrame(wx.Frame):
	# Main window: a horizontally scrollable timeline where each scheduler
	# entity (CPU/task row) is drawn as a row of rectangles.
	Y_OFFSET = 100              # top margin above the first row, in pixels
	RECT_HEIGHT = 100           # height of one timeline row
	RECT_SPACE = 50             # vertical gap between rows
	EVENT_MARKING_WIDTH = 5     # height of the event-marker strip atop a rect

	def __init__(self, sched_tracer, title, parent = None, id = -1):
		wx.Frame.__init__(self, parent, id, title)

		# Shrink slightly so the frame fits on-screen.
		(self.screen_width, self.screen_height) = wx.GetDisplaySize()
		self.screen_width -= 10
		self.screen_height -= 10
		self.zoom = 0.5
		self.scroll_scale = 20      # scroll units per scrollbar step
		self.sched_tracer = sched_tracer
		self.sched_tracer.set_root_win(self)
		# Trace time interval covered by the whole virtual canvas.
		(self.ts_start, self.ts_end) = sched_tracer.interval()
		self.update_width_virtual()
		self.nr_rects = sched_tracer.nr_rectangles() + 1
		self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

		# whole window panel
		self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

		# scrollable container
		self.scroll = wx.ScrolledWindow(self.panel)
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
		self.scroll.EnableScrolling(True, True)
		self.scroll.SetFocus()

		# scrollable drawing area
		self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
		self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
		# Bind the same handlers on the container too, so events are caught
		# regardless of which window has focus / receives them.
		self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
		self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
		self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

		self.scroll.Fit()
		self.Fit()

		self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

		self.txt = None     # summary wx.StaticText, created lazily

		self.Show(True)

	# Convert a trace-time delta to pixels at the current zoom.
	# NOTE(review): the /10**3 suggests input is in ns and 1 px ~ 1 us at
	# zoom 1.0 — confirm against the tracer's timestamp unit.
	def us_to_px(self, val):
		return val / (10 ** 3) * self.zoom

	# Inverse of us_to_px().
	def px_to_us(self, val):
		return (val / self.zoom) * (10 ** 3)

	# Current scroll origin in pixels (GetViewStart is in scroll units).
	def scroll_start(self):
		(x, y) = self.scroll.GetViewStart()
		return (x * self.scroll_scale, y * self.scroll_scale)

	# Current scroll origin converted to trace time.
	def scroll_start_us(self):
		(x, y) = self.scroll_start()
		return self.px_to_us(x)

	# Draw one rectangle on row `nr` spanning [start, end] in trace time.
	# An optional `top_color` strip marks events along the rect's top edge.
	def paint_rectangle_zone(self, nr, color, top_color, start, end):
		offset_px = self.us_to_px(start - self.ts_start)
		width_px = self.us_to_px(end - self.ts_start)

		offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
		width_py = RootFrame.RECT_HEIGHT

		dc = self.dc

		if top_color is not None:
			(r, g, b) = top_color
			top_color = wx.Colour(r, g, b)
			brush = wx.Brush(top_color, wx.SOLID)
			dc.SetBrush(brush)
			dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
			# Shrink the main rect so it sits below the marker strip.
			width_py -= RootFrame.EVENT_MARKING_WIDTH
			offset_py += RootFrame.EVENT_MARKING_WIDTH

		(r ,g, b) = color
		color = wx.Colour(r, g, b)
		brush = wx.Brush(color, wx.SOLID)
		dc.SetBrush(brush)
		dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

	# Ask the tracer to (re)draw the rows intersecting [start, end];
	# the tracer calls back into paint_rectangle_zone().
	def update_rectangles(self, dc, start, end):
		start += self.ts_start
		end += self.ts_start
		self.sched_tracer.fill_zone(start, end)

	# Paint handler: redraw only the currently visible time window.
	def on_paint(self, event):
		dc = wx.PaintDC(self.scroll_panel)
		self.dc = dc

		width = min(self.width_virtual, self.screen_width)
		(x, y) = self.scroll_start()
		start = self.px_to_us(x)
		end = self.px_to_us(x + width)
		self.update_rectangles(dc, start, end)

	# Map a y pixel coordinate to a row index, or -1 when the click
	# falls outside every row (margin or inter-row gap).
	def rect_from_ypixel(self, y):
		y -= RootFrame.Y_OFFSET
		rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
		height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

		if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
			return -1

		return rect

	# Replace the summary text below the timeline.
	def update_summary(self, txt):
		if self.txt:
			self.txt.Destroy()
		self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

	# Click handler: resolve the clicked row and timestamp, forward to tracer.
	def on_mouse_down(self, event):
		(x, y) = event.GetPositionTuple()
		rect = self.rect_from_ypixel(y)
		if rect == -1:
			return

		t = self.px_to_us(x) + self.ts_start

		self.sched_tracer.mouse_down(rect, t)

	def update_width_virtual(self):
		self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

	# Recompute scrollbars after a zoom change, keeping trace time `x`
	# anchored at the left edge of the view.
	def __zoom(self, x):
		self.update_width_virtual()
		(xpos, ypos) = self.scroll.GetViewStart()
		xpos = self.us_to_px(x) / self.scroll_scale
		self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
		self.Refresh()

	def zoom_in(self):
		x = self.scroll_start_us()
		self.zoom *= 2
		self.__zoom(x)

	def zoom_out(self):
		x = self.scroll_start_us()
		self.zoom /= 2
		self.__zoom(x)

	# Keyboard: +/- zoom, arrow keys scroll one step.
	def on_key_press(self, event):
		key = event.GetRawKeyCode()
		if key == ord("+"):
			self.zoom_in()
			return
		if key == ord("-"):
			self.zoom_out()
			return

		key = event.GetKeyCode()
		(x, y) = self.scroll.GetViewStart()
		if key == wx.WXK_RIGHT:
			self.scroll.Scroll(x + 1, y)
		elif key == wx.WXK_LEFT:
			self.scroll.Scroll(x - 1, y)
		elif key == wx.WXK_DOWN:
			self.scroll.Scroll(x, y + 1)
		elif key == wx.WXK_UP:
			self.scroll.Scroll(x, y - 1)
gpl-2.0
Dellware78/mtasa-blue
vendor/google-breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/reflection.py
260
5864
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc.  All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# This code is meant to work on Python 2.4 and above only.

"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.

Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)

In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.

The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""

__author__ = 'robinson@google.com (Will Robinson)'

from google.protobuf.internal import api_implementation
from google.protobuf import descriptor as descriptor_mod

_FieldDescriptor = descriptor_mod.FieldDescriptor

# Select the message implementation backend once at import time: the
# C++-accelerated one when available/configured, otherwise pure Python.
# Both expose the same NewMessage/InitMessage pair used by the metaclass below.
if api_implementation.Type() == 'cpp':
  from google.protobuf.internal import cpp_message
  _NewMessage = cpp_message.NewMessage
  _InitMessage = cpp_message.InitMessage
else:
  from google.protobuf.internal import python_message
  _NewMessage = python_message.NewMessage
  _InitMessage = python_message.InitMessage


class GeneratedProtocolMessageType(type):

  """Metaclass for protocol message classes created at runtime from Descriptors.

  We add implementations for all methods described in the Message class.  We
  also create properties to allow getting/setting all fields in the protocol
  message.  Finally, we create slots to prevent users from accidentally
  "setting" nonexistent fields in the protocol message, which then wouldn't get
  serialized / deserialized properly.

  The protocol compiler currently uses this metaclass to create protocol
  message classes at runtime.  Clients can also manually create their own
  classes at runtime, as in this example:

  mydescriptor = Descriptor(.....)
  class MyProtoClass(Message):
    __metaclass__ = GeneratedProtocolMessageType
    DESCRIPTOR = mydescriptor
  myproto_instance = MyProtoClass()
  myproto.foo_field = 23
  ...
  """

  # Must be consistent with the protocol-compiler code in
  # proto2/compiler/internal/generator.*.
  _DESCRIPTOR_KEY = 'DESCRIPTOR'

  def __new__(cls, name, bases, dictionary):
    """Custom allocation for runtime-generated class types.

    We override __new__ because this is apparently the only place
    where we can meaningfully set __slots__ on the class we're creating(?).
    (The interplay between metaclasses and slots is not very well-documented).

    Args:
      name: Name of the class (ignored, but required by the
        metaclass protocol).
      bases: Base classes of the class we're constructing.
        (Should be message.Message).  We ignore this field, but
        it's required by the metaclass protocol
      dictionary: The class dictionary of the class we're
        constructing.  dictionary[_DESCRIPTOR_KEY] must contain
        a Descriptor object describing this protocol message
        type.

    Returns:
      Newly-allocated class.
    """
    descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
    # Let the backend populate `dictionary` (fields, __slots__, methods)
    # BEFORE the class object is allocated.
    _NewMessage(descriptor, dictionary)
    superclass = super(GeneratedProtocolMessageType, cls)

    new_class = superclass.__new__(cls, name, bases, dictionary)
    # Back-pointer so descriptor users can recover the generated class.
    setattr(descriptor, '_concrete_class', new_class)
    return new_class

  def __init__(cls, name, bases, dictionary):
    """Here we perform the majority of our work on the class.
    We add enum getters, an __init__ method, implementations
    of all Message methods, and properties for all fields
    in the protocol type.

    Args:
      name: Name of the class (ignored, but required by the
        metaclass protocol).
      bases: Base classes of the class we're constructing.
        (Should be message.Message).  We ignore this field, but
        it's required by the metaclass protocol
      dictionary: The class dictionary of the class we're
        constructing.  dictionary[_DESCRIPTOR_KEY] must contain
        a Descriptor object describing this protocol message
        type.
    """
    descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
    _InitMessage(descriptor, cls)
    superclass = super(GeneratedProtocolMessageType, cls)
    superclass.__init__(name, bases, dictionary)
gpl-3.0
markredballoon/clivemizen
proto/bootstrap/node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py
899
2768
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gyp "generator" that does not generate build files at all; instead it dumps
# the inter-target dependency graph to dump.json.

import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys

generator_supports_multiple_toolsets = True

generator_wants_static_library_dependencies_adjusted = False

generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!).
  generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''


def CalculateVariables(default_variables, params):
  """Seed default_variables from generator flags and platform info."""
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  # Fixed: GetFlavor(params) was computed twice; compute once and reuse.
  flavor = gyp.common.GetFlavor(params)
  default_variables.setdefault('OS', flavor)
  if flavor == 'win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    # NOTE(review): these two assignments only create function-locals; other
    # gyp generators publish them as module globals — looks like a latent
    # upstream bug, preserved here to avoid changing module state.
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)


def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  generator_flags = params.get('generator_flags', {})
  if generator_flags.get('adjust_static_libraries', False):
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True


def GenerateOutput(target_list, target_dicts, data, params):
  """Walk the dependency graph and dump it as {target: [deps]} to dump.json.

  Args:
    target_list: Fully-qualified names of the root targets to visit.
    target_dicts: Map of target name -> target dict (with 'dependencies').
    data: Unused (part of the generator interface).
    params: Unused (part of the generator interface).
  """
  # Map of target -> list of targets it depends on.
  edges = {}

  # Queue of targets to visit (iterative DFS; `edges` doubles as visited set).
  targets_to_visit = target_list[:]

  while len(targets_to_visit) > 0:
    target = targets_to_visit.pop()
    if target in edges:
      continue
    edges[target] = []

    for dep in target_dicts[target].get('dependencies', []):
      edges[target].append(dep)
      targets_to_visit.append(dep)

  filename = 'dump.json'
  # Fixed: use a context manager so the file is closed even if json.dump
  # raises (previously open()/close() with no try/finally).
  with open(filename, 'w') as f:
    json.dump(edges, f)
  # Fixed: single-argument print form is valid on both Python 2 and 3
  # (was a py2-only `print` statement).
  print('Wrote json to %s.' % filename)
gpl-2.0
chitr/neutron
neutron/tests/tempest/common/generator/valid_generator.py
34
2931
# Copyright 2014 Deutsche Telekom AG
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo_log import log as logging
import six

import neutron.tests.tempest.common.generator.base_generator as base


LOG = logging.getLogger(__name__)


class ValidTestGenerator(base.BasicGeneratorSet):
    """Generates values that are VALID with respect to a JSON schema.

    Each @base.generator_type method registers itself in self.types_dict
    (presumably via the decorators defined in base_generator — confirm there)
    and produces one value satisfying the given schema fragment.
    """

    @base.generator_type("string")
    @base.simple_generator
    def generate_valid_string(self, schema):
        """Return a minimal-length string accepted by the schema."""
        size = schema.get("minLength", 1)
        # TODO(dkr mko): handle format and pattern
        return "x" * size

    @base.generator_type("integer")
    @base.simple_generator
    def generate_valid_integer(self, schema):
        """Return an integer on the schema's boundary (min preferred)."""
        # TODO(dkr mko): handle multipleOf
        if "minimum" in schema:
            minimum = schema["minimum"]
            # exclusiveMinimum means the bound itself is NOT allowed.
            if "exclusiveMinimum" not in schema:
                return minimum
            else:
                return minimum + 1
        if "maximum" in schema:
            maximum = schema["maximum"]
            if "exclusiveMaximum" not in schema:
                return maximum
            else:
                return maximum - 1
        return 0

    @base.generator_type("object")
    @base.simple_generator
    def generate_valid_object(self, schema):
        """Return a dict with one valid value per declared property."""
        obj = {}
        for k, v in six.iteritems(schema["properties"]):
            obj[k] = self.generate_valid(v)
        return obj

    def generate(self, schema):
        """Run every registered generator for the schema's type.

        Returns a list of the (non-None) generator results; list results
        are flattened, tuple results appended as-is.

        Raises:
            Exception: for unsupported list types or malformed results.
            TypeError: when no generator is registered for the type.
        """
        schema_type = schema["type"]
        if isinstance(schema_type, list):
            # Only integer-containing type unions are supported.
            if "integer" in schema_type:
                schema_type = "integer"
            else:
                raise Exception("non-integer list types not supported")
        result = []
        if schema_type not in self.types_dict:
            raise TypeError("generator (%s) doesn't support type: %s"
                            % (self.__class__.__name__, schema_type))
        for generator in self.types_dict[schema_type]:
            ret = generator(schema)
            if ret is not None:
                if isinstance(ret, list):
                    result.extend(ret)
                elif isinstance(ret, tuple):
                    result.append(ret)
                else:
                    raise Exception("generator (%s) returns invalid result: %s"
                                    % (generator, ret))
        return result

    def generate_valid(self, schema):
        """Return the first generated valid value for the schema.

        Result tuples appear to be (description, value, ...) — hence [0][1];
        verify against base_generator.simple_generator.
        """
        return self.generate(schema)[0][1]
apache-2.0
marcoarruda/MissionPlanner
Lib/site-packages/scipy/stats/tests/test_continuous_basic.py
51
14973
import warnings

import numpy.testing as npt
import numpy as np
import nose

from scipy import stats

"""
Test all continuous distributions.

Parameters were chosen for those distributions that pass the
Kolmogorov-Smirnov test.  This provides safe parameters for each
distributions so that we can perform further testing of class methods.

These tests currently check only/mostly for serious errors and exceptions,
not for numerically exact results.


TODO:
* make functioning test for skew and kurtosis
  still known failures - skip for now
"""

#currently not used
DECIMAL = 5 # specify the precision of the tests  # increased from 0 to 5
DECIMAL_kurt = 0

# [distribution name, shape-parameter tuple] pairs; parameters chosen so the
# K-S test passes for that distribution.
distcont = [
    ['alpha', (3.5704770516650459,)],
    ['anglit', ()],
    ['arcsine', ()],
    ['beta', (2.3098496451481823, 0.62687954300963677)],
    ['betaprime', (5, 6)],   # avoid unbound error in entropy with (100, 86)],
    ['bradford', (0.29891359763170633,)],
    ['burr', (10.5, 4.3)],   #incorrect mean and var for(0.94839838075366045, 4.3820284068855795)],
    ['cauchy', ()],
    ['chi', (78,)],
    ['chi2', (55,)],
    ['cosine', ()],
    ['dgamma', (1.1023326088288166,)],
    ['dweibull', (2.0685080649914673,)],
    ['erlang', (20,)],    #correction numargs = 1
    ['expon', ()],
    ['exponpow', (2.697119160358469,)],
    ['exponweib', (2.8923945291034436, 1.9505288745913174)],
    ['f', (29, 18)],
    ['fatiguelife', (29,)],   #correction numargs = 1
    ['fisk', (3.0857548622253179,)],
    ['foldcauchy', (4.7164673455831894,)],
    ['foldnorm', (1.9521253373555869,)],
    ['frechet_l', (3.6279911255583239,)],
    ['frechet_r', (1.8928171603534227,)],
    ['gamma', (1.9932305483800778,)],
    ['gausshyper', (13.763771604130699, 3.1189636648681431,
                    2.5145980350183019, 5.1811649903971615)],  #veryslow
    ['genexpon', (9.1325976465418908, 16.231956600590632, 3.2819552690843983)],
    ['genextreme', (-0.1,)],  # sample mean test fails for (3.3184017469423535,)],
    ['gengamma', (4.4162385429431925, 3.1193091679242761)],
    ['genhalflogistic', (0.77274727809929322,)],
    ['genlogistic', (0.41192440799679475,)],
    ['genpareto', (0.1,)],   # use case with finite moments
    ['gilbrat', ()],
    ['gompertz', (0.94743713075105251,)],
    ['gumbel_l', ()],
    ['gumbel_r', ()],
    ['halfcauchy', ()],
    ['halflogistic', ()],
    ['halfnorm', ()],
    ['hypsecant', ()],
    ['invgamma', (2.0668996136993067,)],
    ['invnorm', (0.14546264555347513,)],
    ['invgauss', (0.14546264555347513,)],
    ['invweibull', (10.58,)],  # sample mean test fails at(0.58847112119264788,)]
    ['johnsonsb', (4.3172675099141058, 3.1837781130785063)],
    ['johnsonsu', (2.554395574161155, 2.2482281679651965)],
    ['ksone', (1000,)],  #replace 22 by 100 to avoid failing range, ticket 956
    ['kstwobign', ()],
    ['laplace', ()],
    ['levy', ()],
    ['levy_l', ()],
#    ['levy_stable', (0.35667405469844993,
#                     -0.67450531578494011)], #NotImplementedError
    #           rvs not tested
    ['loggamma', (0.41411931826052117,)],
    ['logistic', ()],
    ['loglaplace', (3.2505926592051435,)],
    ['lognorm', (0.95368226960575331,)],
    ['lomax', (1.8771398388773268,)],
    ['maxwell', ()],
    ['mielke', (10.4, 3.6)], # sample mean test fails for (4.6420495492121487, 0.59707419545516938)],
                 # mielke: good results if 2nd parameter >2, weird mean or var below
    ['nakagami', (4.9673794866666237,)],
    ['ncf', (27, 27, 0.41578441799226107)],
    ['nct', (14, 0.24045031331198066)],
    ['ncx2', (21, 1.0560465975116415)],
    ['norm', ()],
    ['pareto', (2.621716532144454,)],
    ['powerlaw', (1.6591133289905851,)],
    ['powerlognorm', (2.1413923530064087, 0.44639540782048337)],
    ['powernorm', (4.4453652254590779,)],
    ['rayleigh', ()],
    ['rdist', (0.9,)],   # feels also slow
#    ['rdist', (3.8266985793976525,)],  #veryslow, especially rvs
    #['rdist', (541.0,)], # from ticket #758 #veryslow
    ['recipinvgauss', (0.63004267809369119,)],
    ['reciprocal', (0.0062309367010521255, 1.0062309367010522)],
    ['rice', (0.7749725210111873,)],
    ['semicircular', ()],
    ['t', (2.7433514990818093,)],
    ['triang', (0.15785029824528218,)],
    ['truncexpon', (4.6907725456810478,)],
    ['truncnorm', (-1.0978730080013919, 2.7306754109031979)],
    ['tukeylambda', (3.1321477856738267,)],
    ['uniform', ()],
    ['vonmises', (3.9939042581071398,)],
    ['wald', ()],
    ['weibull_max', (2.8687961709100187,)],
    ['weibull_min', (1.7866166930421596,)],
    ['wrapcauchy', (0.031071279018614728,)]]

# for testing only specific functions
##distcont = [
##    ['erlang', (20,)],    #correction numargs = 1
##    ['fatiguelife', (29,)],   #correction numargs = 1
##    ['loggamma', (0.41411931826052117,)]]

# for testing ticket:767
##distcont = [
##    ['genextreme', (3.3184017469423535,)],
##    ['genextreme', (0.01,)],
##    ['genextreme', (0.00001,)],
##    ['genextreme', (0.0,)],
##    ['genextreme', (-0.01,)]
##    ]

##distcont = [['gumbel_l', ()],
##            ['gumbel_r', ()],
##            ['norm', ()]
##            ]

##distcont = [['norm', ()]]

# Distributions for which the full rvs K-S check is also run.
distmissing = ['wald', 'gausshyper', 'genexpon', 'rv_continuous',
    'loglaplace', 'rdist', 'semicircular', 'invweibull', 'ksone', 'cosine',
    'kstwobign', 'truncnorm', 'mielke', 'recipinvgauss', 'levy', 'johnsonsu',
    'levy_l', 'powernorm', 'wrapcauchy', 'johnsonsb', 'truncexpon',
    'rice', 'invnorm', 'invgauss', 'invgamma', 'powerlognorm']

distmiss = [[dist,args] for dist,args in distcont if dist in distmissing]
distslow = ['rdist', 'gausshyper', 'recipinvgauss', 'ksone', 'genexpon',
            'vonmises', 'rice', 'mielke', 'semicircular', 'cosine', 'invweibull',
            'powerlognorm', 'johnsonsu', 'kstwobign']
#distslow are sorted by speed (very slow to slow)


def _silence_fp_errors(func):
    """Decorator: suppress numpy floating-point warnings while func runs."""
    def wrap(*a, **kw):
        olderr = np.seterr(all='ignore')
        try:
            return func(*a, **kw)
        finally:
            np.seterr(**olderr)
    wrap.__name__ = func.__name__
    return wrap


def test_cont_basic():
    """Nose test generator: sanity-check every (non-slow) distribution."""
    # this test skips slow distributions
    for distname, arg in distcont[:]:
        if distname in distslow:
            continue
        distfn = getattr(stats, distname)
        np.random.seed(765456)
        sn = 1000
        rvs = distfn.rvs(size=sn,*arg)
        sm = rvs.mean()
        sv = rvs.var()
        skurt = stats.kurtosis(rvs)
        sskew = stats.skew(rvs)
        m,v = distfn.stats(*arg)

        yield check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn, distname + \
              'sample mean test'
        # the sample skew kurtosis test has known failures, not very good distance measure
        #yield check_sample_skew_kurt, distfn, arg, sskew, skurt, distname
        yield check_moment, distfn, arg, m, v, distname
        yield check_cdf_ppf, distfn, arg, distname
        yield check_sf_isf, distfn, arg, distname
        yield check_pdf, distfn, arg, distname
        if distname in ['wald']:
            continue
        yield check_pdf_logpdf, distfn, arg, distname
        yield check_cdf_logcdf, distfn, arg, distname
        yield check_sf_logsf, distfn, arg, distname
        if distname in distmissing:
            alpha = 0.01
            yield check_distribution_rvs, distname, arg, alpha, rvs


@npt.dec.slow
def test_cont_basic_slow():
    # same as above for slow distributions
    for distname, arg in distcont[:]:
        if distname not in distslow:
            continue
        distfn = getattr(stats, distname)
        np.random.seed(765456)
        sn = 1000
        rvs = distfn.rvs(size=sn,*arg)
        sm = rvs.mean()
        sv = rvs.var()
        skurt = stats.kurtosis(rvs)
        sskew = stats.skew(rvs)
        m,v = distfn.stats(*arg)
        yield check_sample_meanvar_, distfn, arg, m, v, sm, sv, sn, distname + \
              'sample mean test'
        # the sample skew kurtosis test has known failures, not very good distance measure
        #yield check_sample_skew_kurt, distfn, arg, sskew, skurt, distname
        yield check_moment, distfn, arg, m, v, distname
        yield check_cdf_ppf, distfn, arg, distname
        yield check_sf_isf, distfn, arg, distname
        yield check_pdf, distfn, arg, distname
        yield check_pdf_logpdf, distfn, arg, distname
        yield check_cdf_logcdf, distfn, arg, distname
        yield check_sf_logsf, distfn, arg, distname
        #yield check_oth, distfn, arg # is still missing
        if distname in distmissing:
            alpha = 0.01
            yield check_distribution_rvs, distname, arg, alpha, rvs


@_silence_fp_errors
def check_moment(distfn, arg, m, v, msg):
    """Check 1st/2nd moments against the distribution's stats()."""
    m1 = distfn.moment(1,*arg)
    m2 = distfn.moment(2,*arg)
    if not np.isinf(m):
        npt.assert_almost_equal(m1, m, decimal=10, err_msg= msg + \
                            ' - 1st moment')
    else:                     # or np.isnan(m1),
        npt.assert_(np.isinf(m1),
               msg + ' - 1st moment -infinite, m1=%s' % str(m1))
        #np.isnan(m1) temporary special treatment for loggamma
    if not np.isinf(v):
        npt.assert_almost_equal(m2-m1*m1, v, decimal=10, err_msg= msg + \
                            ' - 2ndt moment')
    else:                     #or np.isnan(m2),
        npt.assert_(np.isinf(m2),
               msg + ' - 2nd moment -infinite, m2=%s' % str(m2))
        #np.isnan(m2) temporary special treatment for loggamma


@_silence_fp_errors
def check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, msg):
    #this did not work, skipped silently by nose
    #check_sample_meanvar, sm, m, msg + 'sample mean test'
    #check_sample_meanvar, sv, v, msg + 'sample var test'
    if not np.isinf(m):
        check_sample_mean(sm, sv, sn, m)
    if not np.isinf(v):
        check_sample_var(sv, sn, v)
##    check_sample_meanvar( sm, m, msg + 'sample mean test')
##    check_sample_meanvar( sv, v, msg + 'sample var test')


def check_sample_mean(sm,v,n, popmean):
    """
    from stats.stats.ttest_1samp(a, popmean):
    Calculates the t-obtained for the independent samples T-test on ONE group
    of scores a, given a population mean.

    Returns: t-value, two-tailed prob
    """
##    a = asarray(a)
##    x = np.mean(a)
##    v = np.var(a, ddof=1)
##    n = len(a)
    df = n-1
    svar = ((n-1)*v) / float(df)    #looks redundant
    t = (sm-popmean)/np.sqrt(svar*(1.0/n))
    prob = stats.betai(0.5*df,0.5,df/(df+t*t))

    #return t,prob
    npt.assert_(prob > 0.01, 'mean fail, t,prob = %f, %f, m,sm=%f,%f' % (t,prob,popmean,sm))


def check_sample_var(sv,n, popvar):
    '''
    two-sided chisquare test for sample variance equal to hypothesized variance
    '''
    df = n-1
    # Fixed: the statistic was (n-1)*popvar/popvar == df, so the sample
    # variance sv was never actually tested.  The one-sample chi-square
    # variance statistic is (n-1)*s^2/sigma0^2.
    chi2 = (n-1)*sv/float(popvar)
    pval = stats.chisqprob(chi2,df)*2
    npt.assert_(pval > 0.01, 'var fail, t,pval = %f, %f, v,sv=%f,%f' % (chi2,pval,popvar,sv))


def check_sample_skew_kurt(distfn, arg, ss, sk, msg):
    skew,kurt = distfn.stats(moments='sk',*arg)
##    skew = distfn.stats(moment='s',*arg)[()]
##    kurt = distfn.stats(moment='k',*arg)[()]
    check_sample_meanvar( sk, kurt, msg + 'sample kurtosis test')
    check_sample_meanvar( ss, skew, msg + 'sample skew test')


def check_sample_meanvar(sm,m,msg):
    if not np.isinf(m) and not np.isnan(m):
        npt.assert_almost_equal(sm, m, decimal=DECIMAL, err_msg= msg + \
                                ' - finite moment')
##    else:
##        assert abs(sm) > 10000, 'infinite moment, sm = ' + str(sm)


@_silence_fp_errors
def check_cdf_ppf(distfn,arg,msg):
    npt.assert_almost_equal(distfn.cdf(distfn.ppf([0.001,0.5,0.999], *arg), *arg),
                            [0.001,0.5,0.999], decimal=DECIMAL, err_msg= msg + \
                            ' - cdf-ppf roundtrip')


@_silence_fp_errors
def check_sf_isf(distfn,arg,msg):
    npt.assert_almost_equal(distfn.sf(distfn.isf([0.1,0.5,0.9], *arg), *arg),
                            [0.1,0.5,0.9], decimal=DECIMAL, err_msg= msg + \
                            ' - sf-isf roundtrip')
    npt.assert_almost_equal(distfn.cdf([0.1,0.9], *arg),
                            1.0-distfn.sf([0.1,0.9], *arg),
                            decimal=DECIMAL, err_msg= msg + \
                            ' - cdf-sf relationship')


@_silence_fp_errors
def check_pdf(distfn, arg, msg):
    # compares pdf at median with numerical derivative of cdf
    median = distfn.ppf(0.5, *arg)
    eps = 1e-6
    pdfv = distfn.pdf(median, *arg)
    if (pdfv < 1e-4) or (pdfv > 1e4):
        # avoid checking a case where pdf is close to zero or huge (singularity)
        median = median + 0.1
        pdfv = distfn.pdf(median, *arg)
    cdfdiff = (distfn.cdf(median + eps, *arg) -
               distfn.cdf(median - eps, *arg))/eps/2.0
    #replace with better diff and better test (more points),
    #actually, this works pretty well
    npt.assert_almost_equal(pdfv, cdfdiff,
                decimal=DECIMAL, err_msg= msg + ' - cdf-pdf relationship')


@_silence_fp_errors
def check_pdf_logpdf(distfn, args, msg):
    # compares pdf at several points with the log of the pdf
    points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    vals = distfn.ppf(points, *args)
    pdf = distfn.pdf(vals, *args)
    logpdf = distfn.logpdf(vals, *args)
    pdf = pdf[pdf != 0]
    logpdf = logpdf[np.isfinite(logpdf)]
    npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg + " - logpdf-log(pdf) relationship")


@_silence_fp_errors
def check_sf_logsf(distfn, args, msg):
    # compares sf at several points with the log of the sf
    points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    vals = distfn.ppf(points, *args)
    sf = distfn.sf(vals, *args)
    logsf = distfn.logsf(vals, *args)
    sf = sf[sf != 0]
    logsf = logsf[np.isfinite(logsf)]
    npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg + " - logsf-log(sf) relationship")


@_silence_fp_errors
def check_cdf_logcdf(distfn, args, msg):
    # compares cdf at several points with the log of the cdf
    points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    vals = distfn.ppf(points, *args)
    cdf = distfn.cdf(vals, *args)
    logcdf = distfn.logcdf(vals, *args)
    cdf = cdf[cdf != 0]
    logcdf = logcdf[np.isfinite(logcdf)]
    npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg + " - logcdf-log(cdf) relationship")


@_silence_fp_errors
def check_distribution_rvs(dist, args, alpha, rvs):
    #test from scipy.stats.tests
    #this version reuses existing random variables
    D,pval = stats.kstest(rvs, dist, args=args, N=1000)
    if (pval < alpha):
        D,pval = stats.kstest(dist,'',args=args, N=1000)
        npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) +
               "; alpha = " + str(alpha) + "\nargs = " + str(args))


warnings.filterwarnings('ignore', message="The `invnorm` distribution")

if __name__ == "__main__":
    #nose.run(argv=['', __file__])
    nose.runmodule(argv=[__file__,'-s'], exit=False)
gpl-3.0
whitepages/nova
nova/tests/unit/virt/libvirt/volume/test_net.py
11
9636
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from nova.tests.unit.virt.libvirt.volume import test_volume from nova.virt.libvirt import host from nova.virt.libvirt.volume import net CONF = cfg.CONF CONF.import_opt('rbd_user', 'nova.virt.libvirt.volume.net', group='libvirt') CONF.import_opt('rbd_secret_uuid', 'nova.virt.libvirt.volume.net', group='libvirt') class LibvirtNetVolumeDriverTestCase( test_volume.LibvirtISCSIVolumeBaseTestCase): """Tests the libvirt network volume driver.""" def _assertNetworkAndProtocolEquals(self, tree): self.assertEqual(tree.get('type'), 'network') self.assertEqual(tree.find('./source').get('protocol'), 'rbd') rbd_name = '%s/%s' % ('rbd', self.name) self.assertEqual(tree.find('./source').get('name'), rbd_name) def _assertISCSINetworkAndProtocolEquals(self, tree): self.assertEqual(tree.get('type'), 'network') self.assertEqual(tree.find('./source').get('protocol'), 'iscsi') iscsi_name = '%s/%s' % (self.iqn, self.vol['id']) self.assertEqual(tree.find('./source').get('name'), iscsi_name) def sheepdog_connection(self, volume): return { 'driver_volume_type': 'sheepdog', 'data': { 'name': volume['name'] } } def test_libvirt_sheepdog_driver(self): libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn) connection_info = self.sheepdog_connection(self.vol) conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self.assertEqual(tree.get('type'), 'network') 
self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog') self.assertEqual(tree.find('./source').get('name'), self.name) libvirt_driver.disconnect_volume(connection_info, "vde") def rbd_connection(self, volume): return { 'driver_volume_type': 'rbd', 'data': { 'name': '%s/%s' % ('rbd', volume['name']), 'auth_enabled': CONF.libvirt.rbd_secret_uuid is not None, 'auth_username': CONF.libvirt.rbd_user, 'secret_type': 'ceph', 'secret_uuid': CONF.libvirt.rbd_secret_uuid, 'qos_specs': { 'total_bytes_sec': '1048576', 'read_iops_sec': '500', } } } def test_libvirt_rbd_driver(self): libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn) connection_info = self.rbd_connection(self.vol) conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertNetworkAndProtocolEquals(tree) self.assertIsNone(tree.find('./source/auth')) self.assertEqual('1048576', tree.find('./iotune/total_bytes_sec').text) self.assertEqual('500', tree.find('./iotune/read_iops_sec').text) libvirt_driver.disconnect_volume(connection_info, "vde") def test_libvirt_rbd_driver_hosts(self): libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn) connection_info = self.rbd_connection(self.vol) hosts = ['example.com', '1.2.3.4', '::1'] ports = [None, '6790', '6791'] connection_info['data']['hosts'] = hosts connection_info['data']['ports'] = ports conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertNetworkAndProtocolEquals(tree) self.assertIsNone(tree.find('./source/auth')) found_hosts = tree.findall('./source/host') self.assertEqual([host.get('name') for host in found_hosts], hosts) self.assertEqual([host.get('port') for host in found_hosts], ports) libvirt_driver.disconnect_volume(connection_info, "vde") def test_libvirt_rbd_driver_auth_enabled(self): libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn) connection_info = self.rbd_connection(self.vol) secret_type = 'ceph' 
connection_info['data']['auth_enabled'] = True connection_info['data']['auth_username'] = self.user connection_info['data']['secret_type'] = secret_type connection_info['data']['secret_uuid'] = self.uuid conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertNetworkAndProtocolEquals(tree) self.assertEqual(tree.find('./auth').get('username'), self.user) self.assertEqual(tree.find('./auth/secret').get('type'), secret_type) self.assertEqual(tree.find('./auth/secret').get('uuid'), self.uuid) libvirt_driver.disconnect_volume(connection_info, "vde") def test_libvirt_rbd_driver_auth_enabled_flags_override(self): libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn) connection_info = self.rbd_connection(self.vol) secret_type = 'ceph' connection_info['data']['auth_enabled'] = True connection_info['data']['auth_username'] = self.user connection_info['data']['secret_type'] = secret_type connection_info['data']['secret_uuid'] = self.uuid flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b' flags_user = 'bar' self.flags(rbd_user=flags_user, rbd_secret_uuid=flags_uuid, group='libvirt') conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertNetworkAndProtocolEquals(tree) self.assertEqual(tree.find('./auth').get('username'), flags_user) self.assertEqual(tree.find('./auth/secret').get('type'), secret_type) self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid) libvirt_driver.disconnect_volume(connection_info, "vde") def test_libvirt_rbd_driver_auth_disabled(self): libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn) connection_info = self.rbd_connection(self.vol) secret_type = 'ceph' connection_info['data']['auth_enabled'] = False connection_info['data']['auth_username'] = self.user connection_info['data']['secret_type'] = secret_type connection_info['data']['secret_uuid'] = self.uuid conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = 
conf.format_dom() self._assertNetworkAndProtocolEquals(tree) self.assertIsNone(tree.find('./auth')) libvirt_driver.disconnect_volume(connection_info, "vde") def test_libvirt_rbd_driver_auth_disabled_flags_override(self): libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn) connection_info = self.rbd_connection(self.vol) secret_type = 'ceph' connection_info['data']['auth_enabled'] = False connection_info['data']['auth_username'] = self.user connection_info['data']['secret_type'] = secret_type connection_info['data']['secret_uuid'] = self.uuid # NOTE: Supplying the rbd_secret_uuid will enable authentication # locally in nova-compute even if not enabled in nova-volume/cinder flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b' flags_user = 'bar' self.flags(rbd_user=flags_user, rbd_secret_uuid=flags_uuid, group='libvirt') conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertNetworkAndProtocolEquals(tree) self.assertEqual(tree.find('./auth').get('username'), flags_user) self.assertEqual(tree.find('./auth/secret').get('type'), secret_type) self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid) libvirt_driver.disconnect_volume(connection_info, "vde") @mock.patch.object(host.Host, 'find_secret') @mock.patch.object(host.Host, 'create_secret') @mock.patch.object(host.Host, 'delete_secret') def test_libvirt_iscsi_net_driver(self, mock_delete, mock_create, mock_find): mock_find.return_value = test_volume.FakeSecret() mock_create.return_value = test_volume.FakeSecret() libvirt_driver = net.LibvirtNetVolumeDriver(self.fake_conn) connection_info = self.iscsi_connection(self.vol, self.location, self.iqn, auth=True) secret_type = 'iscsi' flags_user = connection_info['data']['auth_username'] conf = libvirt_driver.get_config(connection_info, self.disk_info) tree = conf.format_dom() self._assertISCSINetworkAndProtocolEquals(tree) self.assertEqual(tree.find('./auth').get('username'), flags_user) 
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type) self.assertEqual(tree.find('./auth/secret').get('uuid'), test_volume.SECRET_UUID) libvirt_driver.disconnect_volume(connection_info, 'vde')
apache-2.0
valmynd/MediaFetcher
src/plugins/youtube_dl/youtube_dl/extractor/ctsnews.py
1
2972
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..utils import unified_timestamp class CtsNewsIE(InfoExtractor): IE_DESC = '่ฏ่ฆ–ๆ–ฐ่ž' _VALID_URL = r'https?://news\.cts\.com\.tw/[a-z]+/[a-z]+/\d+/(?P<id>\d+)\.html' _TESTS = [{ 'url': 'http://news.cts.com.tw/cts/international/201501/201501291578109.html', 'md5': 'a9875cb790252b08431186d741beaabe', 'info_dict': { 'id': '201501291578109', 'ext': 'mp4', 'title': 'ไปฅ่‰ฒๅˆ—.็œŸไธป้ปจไบค็ซ 3ไบบๆญปไบก', 'description': 'ไปฅ่‰ฒๅˆ—ๅ’Œ้ปŽๅทดๅซฉ็œŸไธป้ปจ๏ผŒ็ˆ†็™ผไบ”ๅนดๆœ€ๅšด้‡่ก็ช๏ผŒ้›™ๆ–น็ ฒ่ฝŸไบค็ซ๏ผŒๅ…ฉๅไปฅ่ปๆญปไบก๏ผŒ้‚„ๆœ‰ไธ€ๅ่ฅฟ็ญ็‰™็ฑ็š„่ฏๅˆๅœ‹็ถญๅ’Œไบบ...', 'timestamp': 1422528540, 'upload_date': '20150129', } }, { # News count not appear on page but still available in database 'url': 'http://news.cts.com.tw/cts/international/201309/201309031304098.html', 'md5': '3aee7e0df7cdff94e43581f54c22619e', 'info_dict': { 'id': '201309031304098', 'ext': 'mp4', 'title': '้Ÿ“ๅœ‹31ๆญฒ็ซฅ้ก็”ท ่ฒŒๅฆ‚ๅๅคšๆญฒๅฐๅญฉ', 'description': '่ถŠๆœ‰ๅนด็ด€็š„ไบบ๏ผŒ่ถŠๅธŒๆœ›็œ‹่ตทไพ†ๅนด่ผ•ไธ€้ปž๏ผŒ่€Œๅ—้Ÿ“ๅปๆœ‰ไธ€ไฝ31ๆญฒ็š„็”ทๅญ๏ผŒ็œ‹่ตทไพ†ๅƒๆ˜ฏ11ใ€12ๆญฒ็š„ๅฐๅญฉ๏ผŒ่บซ...', 'thumbnail': r're:^https?://.*\.jpg$', 'timestamp': 1378205880, 'upload_date': '20130903', } }, { # With Youtube embedded video 'url': 'http://news.cts.com.tw/cts/money/201501/201501291578003.html', 'md5': 'e4726b2ccd70ba2c319865e28f0a91d1', 'info_dict': { 'id': 'OVbfO7d0_hQ', 'ext': 'mp4', 'title': 'iPhone6็†ฑ้Šท ่˜‹ๆžœ่ฒกๅ ฑไบฎ็œผ', 'description': 'md5:f395d4f485487bb0f992ed2c4b07aa7d', 'thumbnail': r're:^https?://.*\.jpg$', 'upload_date': '20150128', 'uploader_id': 'TBSCTS', 'uploader': 'ไธญ่ฏ้›ป่ฆ–ๅ…ฌๅธ', }, 'add_ie': ['Youtube'], }] def _real_extract(self, url): news_id = self._match_id(url) page = self._download_webpage(url, news_id) news_id = self._hidden_inputs(page).get('get_id') if news_id: mp4_feed = self._download_json( 'http://news.cts.com.tw/action/test_mp4feed.php', news_id, note='Fetching 
feed', query={'news_id': news_id}) video_url = mp4_feed['source_url'] else: self.to_screen('Not CTSPlayer video, trying Youtube...') youtube_url = self._search_regex( r'src="(//www\.youtube\.com/embed/[^"]+)"', page, 'youtube url') return self.url_result(youtube_url, ie='Youtube') description = self._html_search_meta('description', page) title = self._html_search_meta('title', page, fatal=True) thumbnail = self._html_search_meta('image', page) datetime_str = self._html_search_regex( r'(\d{4}/\d{2}/\d{2} \d{2}:\d{2})', page, 'date and time', fatal=False) timestamp = None if datetime_str: timestamp = unified_timestamp(datetime_str) - 8 * 3600 return { 'id': news_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': thumbnail, 'timestamp': timestamp, }
gpl-3.0
abalkin/numpy
numpy/core/tests/test_function_base.py
6
13148
from numpy import ( logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan, ndarray, sqrt, nextafter, stack ) from numpy.testing import ( assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose, ) class PhysicalQuantity(float): def __new__(cls, value): return float.__new__(cls, value) def __add__(self, x): assert_(isinstance(x, PhysicalQuantity)) return PhysicalQuantity(float(x) + float(self)) __radd__ = __add__ def __sub__(self, x): assert_(isinstance(x, PhysicalQuantity)) return PhysicalQuantity(float(self) - float(x)) def __rsub__(self, x): assert_(isinstance(x, PhysicalQuantity)) return PhysicalQuantity(float(x) - float(self)) def __mul__(self, x): return PhysicalQuantity(float(x) * float(self)) __rmul__ = __mul__ def __div__(self, x): return PhysicalQuantity(float(self) / float(x)) def __rdiv__(self, x): return PhysicalQuantity(float(x) / float(self)) class PhysicalQuantity2(ndarray): __array_priority__ = 10 class TestLogspace: def test_basic(self): y = logspace(0, 6) assert_(len(y) == 50) y = logspace(0, 6, num=100) assert_(y[-1] == 10 ** 6) y = logspace(0, 6, endpoint=False) assert_(y[-1] < 10 ** 6) y = logspace(0, 6, num=7) assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) def test_start_stop_array(self): start = array([0., 1.]) stop = array([6., 7.]) t1 = logspace(start, stop, 6) t2 = stack([logspace(_start, _stop, 6) for _start, _stop in zip(start, stop)], axis=1) assert_equal(t1, t2) t3 = logspace(start, stop[0], 6) t4 = stack([logspace(_start, stop[0], 6) for _start in start], axis=1) assert_equal(t3, t4) t5 = logspace(start, stop, 6, axis=-1) assert_equal(t5, t2.T) def test_dtype(self): y = logspace(0, 6, dtype='float32') assert_equal(y.dtype, dtype('float32')) y = logspace(0, 6, dtype='float64') assert_equal(y.dtype, dtype('float64')) y = logspace(0, 6, dtype='int32') assert_equal(y.dtype, dtype('int32')) def test_physical_quantities(self): a = PhysicalQuantity(1.0) b = PhysicalQuantity(5.0) 
assert_equal(logspace(a, b), logspace(1.0, 5.0)) def test_subclass(self): a = array(1).view(PhysicalQuantity2) b = array(7).view(PhysicalQuantity2) ls = logspace(a, b) assert type(ls) is PhysicalQuantity2 assert_equal(ls, logspace(1.0, 7.0)) ls = logspace(a, b, 1) assert type(ls) is PhysicalQuantity2 assert_equal(ls, logspace(1.0, 7.0, 1)) class TestGeomspace: def test_basic(self): y = geomspace(1, 1e6) assert_(len(y) == 50) y = geomspace(1, 1e6, num=100) assert_(y[-1] == 10 ** 6) y = geomspace(1, 1e6, endpoint=False) assert_(y[-1] < 10 ** 6) y = geomspace(1, 1e6, num=7) assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) y = geomspace(8, 2, num=3) assert_allclose(y, [8, 4, 2]) assert_array_equal(y.imag, 0) y = geomspace(-1, -100, num=3) assert_array_equal(y, [-1, -10, -100]) assert_array_equal(y.imag, 0) y = geomspace(-100, -1, num=3) assert_array_equal(y, [-100, -10, -1]) assert_array_equal(y.imag, 0) def test_complex(self): # Purely imaginary y = geomspace(1j, 16j, num=5) assert_allclose(y, [1j, 2j, 4j, 8j, 16j]) assert_array_equal(y.real, 0) y = geomspace(-4j, -324j, num=5) assert_allclose(y, [-4j, -12j, -36j, -108j, -324j]) assert_array_equal(y.real, 0) y = geomspace(1+1j, 1000+1000j, num=4) assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j]) y = geomspace(-1+1j, -1000+1000j, num=4) assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j]) # Logarithmic spirals y = geomspace(-1, 1, num=3, dtype=complex) assert_allclose(y, [-1, 1j, +1]) y = geomspace(0+3j, -3+0j, 3) assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) y = geomspace(0+3j, 3+0j, 3) assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j]) y = geomspace(-3+0j, 0-3j, 3) assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j]) y = geomspace(0+3j, -3+0j, 3) assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j]) y = geomspace(-2-3j, 5+7j, 7) assert_allclose(y, [-2-3j, -0.29058977-4.15771027j, 2.08885354-4.34146838j, 4.58345529-3.16355218j, 6.41401745-0.55233457j, 6.75707386+3.11795092j, 
5+7j]) # Type promotion should prevent the -5 from becoming a NaN y = geomspace(3j, -5, 2) assert_allclose(y, [3j, -5]) y = geomspace(-5, 3j, 2) assert_allclose(y, [-5, 3j]) def test_dtype(self): y = geomspace(1, 1e6, dtype='float32') assert_equal(y.dtype, dtype('float32')) y = geomspace(1, 1e6, dtype='float64') assert_equal(y.dtype, dtype('float64')) y = geomspace(1, 1e6, dtype='int32') assert_equal(y.dtype, dtype('int32')) # Native types y = geomspace(1, 1e6, dtype=float) assert_equal(y.dtype, dtype('float_')) y = geomspace(1, 1e6, dtype=complex) assert_equal(y.dtype, dtype('complex')) def test_start_stop_array_scalar(self): lim1 = array([120, 100], dtype="int8") lim2 = array([-120, -100], dtype="int8") lim3 = array([1200, 1000], dtype="uint16") t1 = geomspace(lim1[0], lim1[1], 5) t2 = geomspace(lim2[0], lim2[1], 5) t3 = geomspace(lim3[0], lim3[1], 5) t4 = geomspace(120.0, 100.0, 5) t5 = geomspace(-120.0, -100.0, 5) t6 = geomspace(1200.0, 1000.0, 5) # t3 uses float32, t6 uses float64 assert_allclose(t1, t4, rtol=1e-2) assert_allclose(t2, t5, rtol=1e-2) assert_allclose(t3, t6, rtol=1e-5) def test_start_stop_array(self): # Try to use all special cases. 
start = array([1.e0, 32., 1j, -4j, 1+1j, -1]) stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1]) t1 = geomspace(start, stop, 5) t2 = stack([geomspace(_start, _stop, 5) for _start, _stop in zip(start, stop)], axis=1) assert_equal(t1, t2) t3 = geomspace(start, stop[0], 5) t4 = stack([geomspace(_start, stop[0], 5) for _start in start], axis=1) assert_equal(t3, t4) t5 = geomspace(start, stop, 5, axis=-1) assert_equal(t5, t2.T) def test_physical_quantities(self): a = PhysicalQuantity(1.0) b = PhysicalQuantity(5.0) assert_equal(geomspace(a, b), geomspace(1.0, 5.0)) def test_subclass(self): a = array(1).view(PhysicalQuantity2) b = array(7).view(PhysicalQuantity2) gs = geomspace(a, b) assert type(gs) is PhysicalQuantity2 assert_equal(gs, geomspace(1.0, 7.0)) gs = geomspace(a, b, 1) assert type(gs) is PhysicalQuantity2 assert_equal(gs, geomspace(1.0, 7.0, 1)) def test_bounds(self): assert_raises(ValueError, geomspace, 0, 10) assert_raises(ValueError, geomspace, 10, 0) assert_raises(ValueError, geomspace, 0, 0) class TestLinspace: def test_basic(self): y = linspace(0, 10) assert_(len(y) == 50) y = linspace(2, 10, num=100) assert_(y[-1] == 10) y = linspace(2, 10, endpoint=False) assert_(y[-1] < 10) assert_raises(ValueError, linspace, 0, 10, num=-1) def test_corner(self): y = list(linspace(0, 1, 1)) assert_(y == [0.0], y) assert_raises(TypeError, linspace, 0, 1, num=2.5) def test_type(self): t1 = linspace(0, 1, 0).dtype t2 = linspace(0, 1, 1).dtype t3 = linspace(0, 1, 2).dtype assert_equal(t1, t2) assert_equal(t2, t3) def test_dtype(self): y = linspace(0, 6, dtype='float32') assert_equal(y.dtype, dtype('float32')) y = linspace(0, 6, dtype='float64') assert_equal(y.dtype, dtype('float64')) y = linspace(0, 6, dtype='int32') assert_equal(y.dtype, dtype('int32')) def test_start_stop_array_scalar(self): lim1 = array([-120, 100], dtype="int8") lim2 = array([120, -100], dtype="int8") lim3 = array([1200, 1000], dtype="uint16") t1 = linspace(lim1[0], lim1[1], 5) t2 = 
linspace(lim2[0], lim2[1], 5) t3 = linspace(lim3[0], lim3[1], 5) t4 = linspace(-120.0, 100.0, 5) t5 = linspace(120.0, -100.0, 5) t6 = linspace(1200.0, 1000.0, 5) assert_equal(t1, t4) assert_equal(t2, t5) assert_equal(t3, t6) def test_start_stop_array(self): start = array([-120, 120], dtype="int8") stop = array([100, -100], dtype="int8") t1 = linspace(start, stop, 5) t2 = stack([linspace(_start, _stop, 5) for _start, _stop in zip(start, stop)], axis=1) assert_equal(t1, t2) t3 = linspace(start, stop[0], 5) t4 = stack([linspace(_start, stop[0], 5) for _start in start], axis=1) assert_equal(t3, t4) t5 = linspace(start, stop, 5, axis=-1) assert_equal(t5, t2.T) def test_complex(self): lim1 = linspace(1 + 2j, 3 + 4j, 5) t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j]) lim2 = linspace(1j, 10, 5) t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j]) assert_equal(lim1, t1) assert_equal(lim2, t2) def test_physical_quantities(self): a = PhysicalQuantity(0.0) b = PhysicalQuantity(1.0) assert_equal(linspace(a, b), linspace(0.0, 1.0)) def test_subclass(self): a = array(0).view(PhysicalQuantity2) b = array(1).view(PhysicalQuantity2) ls = linspace(a, b) assert type(ls) is PhysicalQuantity2 assert_equal(ls, linspace(0.0, 1.0)) ls = linspace(a, b, 1) assert type(ls) is PhysicalQuantity2 assert_equal(ls, linspace(0.0, 1.0, 1)) def test_array_interface(self): # Regression test for https://github.com/numpy/numpy/pull/6659 # Ensure that start/stop can be objects that implement # __array_interface__ and are convertible to numeric scalars class Arrayish: """ A generic object that supports the __array_interface__ and hence can in principle be converted to a numeric scalar, but is not otherwise recognized as numeric, but also happens to support multiplication by floats. Data should be an object that implements the buffer interface, and contains at least 4 bytes. 
""" def __init__(self, data): self._data = data @property def __array_interface__(self): return {'shape': (), 'typestr': '<i4', 'data': self._data, 'version': 3} def __mul__(self, other): # For the purposes of this test any multiplication is an # identity operation :) return self one = Arrayish(array(1, dtype='<i4')) five = Arrayish(array(5, dtype='<i4')) assert_equal(linspace(one, five), linspace(1, 5)) def test_denormal_numbers(self): # Regression test for gh-5437. Will probably fail when compiled # with ICC, which flushes denormals to zero for ftype in sctypes['float']: stop = nextafter(ftype(0), ftype(1)) * 5 # A denormal number assert_(any(linspace(0, stop, 10, endpoint=False, dtype=ftype))) def test_equivalent_to_arange(self): for j in range(1000): assert_equal(linspace(0, j, j+1, dtype=int), arange(j+1, dtype=int)) def test_retstep(self): for num in [0, 1, 2]: for ept in [False, True]: y = linspace(0, 1, num, endpoint=ept, retstep=True) assert isinstance(y, tuple) and len(y) == 2 if num == 2: y0_expect = [0.0, 1.0] if ept else [0.0, 0.5] assert_array_equal(y[0], y0_expect) assert_equal(y[1], y0_expect[1]) elif num == 1 and not ept: assert_array_equal(y[0], [0.0]) assert_equal(y[1], 1.0) else: assert_array_equal(y[0], [0.0][:num]) assert isnan(y[1]) def test_object(self): start = array(1, dtype='O') stop = array(2, dtype='O') y = linspace(start, stop, 3) assert_array_equal(y, array([1., 1.5, 2.]))
bsd-3-clause
ironman5366/W.I.L.L-Telegram
parser.py
1
1876
#Builtin imports import logging #External imports import spacy import dataset from spacy.symbols import nsubj, VERB from spacy.matcher import Matcher #Internal imports import plugin_handler log = logging.getLogger() nlp = None matcher = None def parse(bot, update ,job_queue, chat_data): '''Function that calls parsing''' db = dataset.connect('sqlite:///will.db') command = update.message.text username = update.message.from_user.username log.info( "Parsing command {0} from user {1}".format( command, username ) ) #Pull user data from database userdata_table = db['userdata'] user = userdata_table.find_one(username=username) user_first_name = user["first_name"] #Parse the command in spacy log.info("Running command through nlp") doc = nlp(unicode(command)) verbs = set() log.info("Parsing through dependencies") #Use synactic dependencies to look at the words for possible_subject in doc: if possible_subject.dep == nsubj and possible_subject.head.pos == VERB: verbs.add(possible_subject.head.lemma_.lower()) log.info("Finished parsing dependencies, parsing ents") ents = {} #Use spacy's ent recognition for ent in doc.ents: ents.update({ ent.label_:ent.text }) log.info("Finished parsing ents") command_data = { "command": command, "bot": bot, "update": update, "job_queue": job_queue, "chat_data": chat_data, "verbs": verbs, "ents": ents, "doc": doc } log.info("Finished parsing command_data, sending it into events queue") log.debug(command_data) plugin_handler.subscriptions().send_event(command_data) def initialize(): global nlp global matcher nlp = spacy.load('en') matcher = Matcher(nlp.vocab)
mit
ArtsiomCh/tensorflow
tensorflow/python/keras/_impl/keras/applications/mobilenet.py
12
28029
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """MobileNet v1 models for Keras. MobileNet is a general architecture and can be used for multiple use cases. Depending on the use case, it can use different input layer size and different width factors. This allows different width models to reduce the number of multiply-adds and thereby reduce inference cost on mobile devices. MobileNets support any input size greater than 32 x 32, with larger image sizes offering better performance. The number of parameters and number of multiply-adds can be modified by using the `alpha` parameter, which increases/decreases the number of filters in each layer. By altering the image size and `alpha` parameter, all 16 models from the paper can be built, with ImageNet weights provided. The paper demonstrates the performance of MobileNets using `alpha` values of 1.0 (also called 100 % MobileNet), 0.75, 0.5 and 0.25. For each of these `alpha` values, weights for 4 different input image sizes are provided (224, 192, 160, 128). 
The following table describes the size and accuracy of the 100% MobileNet on size 224 x 224: ---------------------------------------------------------------------------- Width Multiplier (alpha) | ImageNet Acc | Multiply-Adds (M) | Params (M) ---------------------------------------------------------------------------- | 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 | | 0.75 MobileNet-224 | 68.4 % | 325 | 2.6 | | 0.50 MobileNet-224 | 63.7 % | 149 | 1.3 | | 0.25 MobileNet-224 | 50.6 % | 41 | 0.5 | ---------------------------------------------------------------------------- The following table describes the performance of the 100 % MobileNet on various input sizes: ------------------------------------------------------------------------ Resolution | ImageNet Acc | Multiply-Adds (M) | Params (M) ------------------------------------------------------------------------ | 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 | | 1.0 MobileNet-192 | 69.1 % | 529 | 4.2 | | 1.0 MobileNet-160 | 67.2 % | 529 | 4.2 | | 1.0 MobileNet-128 | 64.4 % | 529 | 4.2 | ------------------------------------------------------------------------ The weights for all 16 models are obtained and translated from Tensorflow checkpoints found at https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md # Reference - [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/pdf/1704.04861.pdf)) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import warnings from tensorflow.python.keras._impl.keras import backend as K from tensorflow.python.keras._impl.keras import constraints from tensorflow.python.keras._impl.keras import initializers from tensorflow.python.keras._impl.keras import regularizers from tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape from tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions # 
# pylint: disable=unused-import
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine.topology import get_source_inputs
from tensorflow.python.keras._impl.keras.layers import Activation
from tensorflow.python.keras._impl.keras.layers import BatchNormalization
from tensorflow.python.keras._impl.keras.layers import Conv2D
from tensorflow.python.keras._impl.keras.layers import Dropout
from tensorflow.python.keras._impl.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras._impl.keras.layers import GlobalMaxPooling2D
from tensorflow.python.keras._impl.keras.layers import Input
from tensorflow.python.keras._impl.keras.layers import Reshape
from tensorflow.python.keras._impl.keras.models import Model
from tensorflow.python.keras._impl.keras.utils import conv_utils
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file

BASE_WEIGHT_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.6/'


def relu6(x):
  """ReLU activation capped at 6, as used throughout MobileNet."""
  return K.relu(x, max_value=6)


def preprocess_input(x):
  """Rescale pixel values from [0, 255] to [-1, 1].

  NOTE: mutates `x` in place (augmented assignment); pass a copy if the
  caller still needs the original array.

  Arguments:
      x: numeric array of pixel values in [0, 255].

  Returns:
      The rescaled array (the same object as `x`).
  """
  x /= 255.
  x -= 0.5
  x *= 2.
  return x


class DepthwiseConv2D(Conv2D):
  """Depthwise separable 2D convolution.

  Depthwise Separable convolutions consists in performing just the first
  step in a depthwise spatial convolution (which acts on each input channel
  separately). The `depth_multiplier` argument controls how many output
  channels are generated per input channel in the depthwise step.

  Arguments:
      kernel_size: An integer or tuple/list of 2 integers, specifying the
          width and height of the 2D convolution window. Can be a single
          integer to specify the same value for all spatial dimensions.
      strides: An integer or tuple/list of 2 integers, specifying the
          strides of the convolution along the width and height. Can be a
          single integer to specify the same value for all spatial
          dimensions. Specifying any stride value != 1 is incompatible with
          specifying any `dilation_rate` value != 1.
      padding: one of `"valid"` or `"same"` (case-insensitive).
      depth_multiplier: The number of depthwise convolution output channels
          for each input channel. The total number of depthwise convolution
          output channels will be equal to `filters_in * depth_multiplier`.
      data_format: A string, one of `channels_last` (default) or
          `channels_first`. The ordering of the dimensions in the inputs.
          `channels_last` corresponds to inputs with shape
          `(batch, height, width, channels)` while `channels_first`
          corresponds to inputs with shape
          `(batch, channels, height, width)`. It defaults to the
          `image_data_format` value found in your Keras config file at
          `~/.keras/keras.json`. If you never set it, then it will be
          "channels_last".
      activation: Activation function to use
          (see [activations](../activations.md)). If you don't specify
          anything, no activation is applied
          (ie. "linear" activation: `a(x) = x`).
      use_bias: Boolean, whether the layer uses a bias vector.
      depthwise_initializer: Initializer for the depthwise kernel matrix
          (see [initializers](../initializers.md)).
      bias_initializer: Initializer for the bias vector
          (see [initializers](../initializers.md)).
      depthwise_regularizer: Regularizer function applied to the depthwise
          kernel matrix (see [regularizer](../regularizers.md)).
      bias_regularizer: Regularizer function applied to the bias vector
          (see [regularizer](../regularizers.md)).
      activity_regularizer: Regularizer function applied to the output of
          the layer (its "activation")
          (see [regularizer](../regularizers.md)).
      depthwise_constraint: Constraint function applied to the depthwise
          kernel matrix (see [constraints](../constraints.md)).
      bias_constraint: Constraint function applied to the bias vector
          (see [constraints](../constraints.md)).

  Input shape:
      4D tensor with shape: `[batch, channels, rows, cols]` if
      data_format='channels_first' or 4D tensor with shape:
      `[batch, rows, cols, channels]` if data_format='channels_last'.

  Output shape:
      4D tensor with shape: `[batch, filters, new_rows, new_cols]` if
      data_format='channels_first' or 4D tensor with shape:
      `[batch, new_rows, new_cols, filters]` if data_format='channels_last'.
      `rows` and `cols` values might have changed due to padding.
  """

  def __init__(self,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               depth_multiplier=1,
               data_format=None,
               activation=None,
               use_bias=True,
               depthwise_initializer='glorot_uniform',
               bias_initializer='zeros',
               depthwise_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               depthwise_constraint=None,
               bias_constraint=None,
               **kwargs):
    # filters=None: the output channel count is derived from the input
    # channels and depth_multiplier in build(), not configured up front.
    super(DepthwiseConv2D, self).__init__(
        filters=None,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation=activation,
        use_bias=use_bias,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        bias_constraint=bias_constraint,
        **kwargs)
    self.depth_multiplier = depth_multiplier
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.bias_initializer = initializers.get(bias_initializer)

  def build(self, input_shape):
    """Create the depthwise kernel (and bias) once the input rank/channels are known."""
    if len(input_shape) < 4:
      raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
                       'Received input shape:', str(input_shape))
    if self.data_format == 'channels_first':
      channel_axis = 1
    else:
      channel_axis = 3
    if input_shape[channel_axis] is None:
      raise ValueError('The channel dimension of the inputs to '
                       '`DepthwiseConv2D` '
                       'should be defined. Found `None`.')
    input_dim = int(input_shape[channel_axis])
    depthwise_kernel_shape = (self.kernel_size[0],
                              self.kernel_size[1],
                              input_dim,
                              self.depth_multiplier)

    self.depthwise_kernel = self.add_weight(
        shape=depthwise_kernel_shape,
        initializer=self.depthwise_initializer,
        name='depthwise_kernel',
        regularizer=self.depthwise_regularizer,
        constraint=self.depthwise_constraint)

    if self.use_bias:
      # One bias per output channel (= input channels * depth multiplier).
      self.bias = self.add_weight(
          shape=(input_dim * self.depth_multiplier,),
          initializer=self.bias_initializer,
          name='bias',
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint)
    else:
      self.bias = None
    # Set input spec.
    self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
    self.built = True

  def call(self, inputs, training=None):
    outputs = K.depthwise_conv2d(
        inputs,
        self.depthwise_kernel,
        strides=self.strides,
        padding=self.padding,
        dilation_rate=self.dilation_rate,
        data_format=self.data_format)

    # BUG FIX: was `if self.bias:`, which truth-tests a TF variable and is
    # not allowed in graph mode; test the boolean config flag instead.
    if self.use_bias:
      outputs = K.bias_add(outputs, self.bias, data_format=self.data_format)

    if self.activation is not None:
      return self.activation(outputs)

    return outputs

  def compute_output_shape(self, input_shape):
    """Spatial dims follow standard conv arithmetic; channels scale by depth_multiplier."""
    if self.data_format == 'channels_first':
      rows = input_shape[2]
      cols = input_shape[3]
      out_filters = input_shape[1] * self.depth_multiplier
    elif self.data_format == 'channels_last':
      rows = input_shape[1]
      cols = input_shape[2]
      out_filters = input_shape[3] * self.depth_multiplier

    rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
                                         self.padding, self.strides[0])
    cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
                                         self.padding, self.strides[1])

    if self.data_format == 'channels_first':
      return (input_shape[0], out_filters, rows, cols)
    elif self.data_format == 'channels_last':
      return (input_shape[0], rows, cols, out_filters)

  def get_config(self):
    config = super(DepthwiseConv2D, self).get_config()
    # Drop Conv2D config keys that do not apply to a depthwise layer and
    # serialize the depthwise-specific ones instead.
    config.pop('filters')
    config.pop('kernel_initializer')
    config.pop('kernel_regularizer')
    config.pop('kernel_constraint')
    config['depth_multiplier'] = self.depth_multiplier
    config['depthwise_initializer'] = initializers.serialize(
        self.depthwise_initializer)
    config['depthwise_regularizer'] = regularizers.serialize(
        self.depthwise_regularizer)
    config['depthwise_constraint'] = constraints.serialize(
        self.depthwise_constraint)
    return config


def MobileNet(input_shape=None,  # pylint: disable=invalid-name
              alpha=1.0,
              depth_multiplier=1,
              dropout=1e-3,
              include_top=True,
              weights='imagenet',
              input_tensor=None,
              pooling=None,
              classes=1000):
  """Instantiates the MobileNet architecture.

  Note that only TensorFlow is supported for now, therefore it only works
  with the data format `image_data_format='channels_last'` in your Keras
  config at `~/.keras/keras.json`.

  To load a MobileNet model via `load_model`, import the custom objects
  `relu6` and `DepthwiseConv2D` and pass them to the `custom_objects`
  parameter. E.g.
  model = load_model('mobilenet.h5', custom_objects={
                     'relu6': mobilenet.relu6,
                     'DepthwiseConv2D': mobilenet.DepthwiseConv2D})

  Arguments:
      input_shape: optional shape tuple, only to be specified if
          `include_top` is False (otherwise the input shape has to be
          `(224, 224, 3)` (with `channels_last` data format) or
          (3, 224, 224) (with `channels_first` data format). It should
          have exactly 3 input channels, and width and height should be no
          smaller than 32. E.g. `(200, 200, 3)` would be one valid value.
      alpha: controls the width of the network.
          - If `alpha` < 1.0, proportionally decreases the number of
              filters in each layer.
          - If `alpha` > 1.0, proportionally increases the number of
              filters in each layer.
          - If `alpha` = 1, default number of filters from the paper are
              used at each layer.
      depth_multiplier: depth multiplier for depthwise convolution (also
          called the resolution multiplier)
      dropout: dropout rate
      include_top: whether to include the fully-connected layer at the top
          of the network.
      weights: `None` (random initialization) or `imagenet` (ImageNet
          weights)
      input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
          to use as image input for the model.
      pooling: Optional pooling mode for feature extraction when
          `include_top` is `False`.
          - `None` means that the output of the model will be the 4D
              tensor output of the last convolutional layer.
          - `avg` means that global average pooling will be applied to
              the output of the last convolutional layer, and thus the
              output of the model will be a 2D tensor.
          - `max` means that global max pooling will be applied.
      classes: optional number of classes to classify images into, only to
          be specified if `include_top` is True, and if no `weights`
          argument is specified.

  Returns:
      A Keras model instance.

  Raises:
      ValueError: in case of invalid argument for `weights`, or invalid
          input shape.
      RuntimeError: If attempting to run this model with a backend that
          does not support separable convolutions.
  """
  if K.backend() != 'tensorflow':
    raise RuntimeError('Only TensorFlow backend is currently supported, '
                       'as other backends do not support '
                       'depthwise convolution.')

  if weights not in {'imagenet', None}:
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization) or `imagenet` '
                     '(pre-training on ImageNet).')

  if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError('If using `weights` as ImageNet with `include_top` '
                     'as true, `classes` should be 1000')

  # Determine proper input shape.  When the user supplied a square size
  # that matches a pretrained resolution, keep it; otherwise fall back to
  # the canonical 224.
  if input_shape is None:
    default_size = 224
  else:
    if K.image_data_format() == 'channels_first':
      rows = input_shape[1]
      cols = input_shape[2]
    else:
      rows = input_shape[0]
      cols = input_shape[1]
    if rows == cols and rows in [128, 160, 192, 224]:
      default_size = rows
    else:
      default_size = 224
  input_shape = _obtain_input_shape(
      input_shape,
      default_size=default_size,
      min_size=32,
      data_format=K.image_data_format(),
      require_flatten=include_top,
      weights=weights)
  if K.image_data_format() == 'channels_last':
    row_axis, col_axis = (0, 1)
  else:
    row_axis, col_axis = (1, 2)
  rows = input_shape[row_axis]
  cols = input_shape[col_axis]

  if weights == 'imagenet':
    # Pretrained checkpoints only exist for a fixed grid of
    # (alpha, resolution) combinations with depth_multiplier == 1.
    if depth_multiplier != 1:
      raise ValueError('If imagenet weights are being loaded, '
                       'depth multiplier must be 1')

    if alpha not in [0.25, 0.50, 0.75, 1.0]:
      raise ValueError('If imagenet weights are being loaded, '
                       'alpha can be one of'
                       '`0.25`, `0.50`, `0.75` or `1.0` only.')

    if rows != cols or rows not in [128, 160, 192, 224]:
      raise ValueError('If imagenet weights are being loaded, '
                       'input must have a static square shape (one of '
                       '(128,128), (160,160), (192,192), or (224, 224)).'
                       ' Input shape provided = %s' % (input_shape,))

  if K.image_data_format() != 'channels_last':
    warnings.warn('The MobileNet family of models is only available '
                  'for the input data format "channels_last" '
                  '(width, height, channels). '
                  'However your settings specify the default '
                  'data format "channels_first" (channels, width, height).'
                  ' You should set `image_data_format="channels_last"` '
                  'in your Keras config located at ~/.keras/keras.json. '
                  'The model being returned right now will expect inputs '
                  'to follow the "channels_last" data format.')
    # Temporarily force channels_last; restored before returning.
    K.set_image_data_format('channels_last')
    old_data_format = 'channels_first'
  else:
    old_data_format = None

  if input_tensor is None:
    img_input = Input(shape=input_shape)
  else:
    if not K.is_keras_tensor(input_tensor):
      img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor

  # Stem + 13 depthwise-separable blocks (strided blocks downsample).
  x = _conv_block(img_input, 32, alpha, strides=(2, 2))
  x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)

  x = _depthwise_conv_block(
      x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
  x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)

  x = _depthwise_conv_block(
      x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
  x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)

  x = _depthwise_conv_block(
      x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
  x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
  x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
  x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
  x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
  x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)

  x = _depthwise_conv_block(
      x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
  x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)

  if include_top:
    # Classification head implemented as a 1x1 conv (as in the paper).
    if K.image_data_format() == 'channels_first':
      shape = (int(1024 * alpha), 1, 1)
    else:
      shape = (1, 1, int(1024 * alpha))

    x = GlobalAveragePooling2D()(x)
    x = Reshape(shape, name='reshape_1')(x)
    x = Dropout(dropout, name='dropout')(x)
    x = Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
    x = Activation('softmax', name='act_softmax')(x)
    x = Reshape((classes,), name='reshape_2')(x)
  else:
    if pooling == 'avg':
      x = GlobalAveragePooling2D()(x)
    elif pooling == 'max':
      x = GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = get_source_inputs(input_tensor)
  else:
    inputs = img_input

  # Create model.
  model = Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))

  # Load weights.
  if weights == 'imagenet':
    if K.image_data_format() == 'channels_first':
      raise ValueError('Weights for "channels_last" format '
                       'are not available.')
    if alpha == 1.0:
      alpha_text = '1_0'
    elif alpha == 0.75:
      alpha_text = '7_5'
    elif alpha == 0.50:
      alpha_text = '5_0'
    else:
      alpha_text = '2_5'

    # De-duplicated download logic (was copy-pasted per branch).
    if include_top:
      model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
    else:
      model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
    weight_path = BASE_WEIGHT_PATH + model_name
    weights_path = get_file(model_name, weight_path, cache_subdir='models')
    model.load_weights(weights_path)

  if old_data_format:
    K.set_image_data_format(old_data_format)
  return model


def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
  """Adds an initial convolution layer (with batch normalization and relu6).

  Arguments:
      inputs: Input tensor of shape `(rows, cols, 3)` (with `channels_last`
          data format) or (3, rows, cols) (with `channels_first` data
          format). It should have exactly 3 inputs channels, and width and
          height should be no smaller than 32. E.g. `(224, 224, 3)` would
          be one valid value.
      filters: Integer, the dimensionality of the output space (i.e. the
          number output of filters in the convolution).
      alpha: controls the width of the network.
          - If `alpha` < 1.0, proportionally decreases the number of
              filters in each layer.
          - If `alpha` > 1.0, proportionally increases the number of
              filters in each layer.
          - If `alpha` = 1, default number of filters from the paper are
              used at each layer.
      kernel: An integer or tuple/list of 2 integers, specifying the width
          and height of the 2D convolution window. Can be a single integer
          to specify the same value for all spatial dimensions.
      strides: An integer or tuple/list of 2 integers, specifying the
          strides of the convolution along the width and height. Can be a
          single integer to specify the same value for all spatial
          dimensions. Specifying any stride value != 1 is incompatible
          with specifying any `dilation_rate` value != 1.

  Input shape:
      4D tensor with shape: `(samples, channels, rows, cols)` if
      data_format='channels_first' or 4D tensor with shape:
      `(samples, rows, cols, channels)` if data_format='channels_last'.

  Output shape:
      4D tensor with shape: `(samples, filters, new_rows, new_cols)` if
      data_format='channels_first' or 4D tensor with shape:
      `(samples, new_rows, new_cols, filters)` if
      data_format='channels_last'. `rows` and `cols` values might have
      changed due to stride.

  Returns:
      Output tensor of block.
  """
  channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
  filters = int(filters * alpha)
  x = Conv2D(
      filters,
      kernel,
      padding='same',
      use_bias=False,
      strides=strides,
      name='conv1')(inputs)
  x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
  return Activation(relu6, name='conv1_relu')(x)


def _depthwise_conv_block(inputs,
                          pointwise_conv_filters,
                          alpha,
                          depth_multiplier=1,
                          strides=(1, 1),
                          block_id=1):
  """Adds a depthwise convolution block.

  A depthwise convolution block consists of a depthwise conv, batch
  normalization, relu6, pointwise convolution, batch normalization and
  relu6 activation.

  Arguments:
      inputs: Input tensor of shape `(rows, cols, channels)` (with
          `channels_last` data format) or (channels, rows, cols) (with
          `channels_first` data format).
      pointwise_conv_filters: Integer, the dimensionality of the output
          space (i.e. the number output of filters in the pointwise
          convolution).
      alpha: controls the width of the network.
          - If `alpha` < 1.0, proportionally decreases the number of
              filters in each layer.
          - If `alpha` > 1.0, proportionally increases the number of
              filters in each layer.
          - If `alpha` = 1, default number of filters from the paper are
              used at each layer.
      depth_multiplier: The number of depthwise convolution output channels
          for each input channel. The total number of depthwise convolution
          output channels will be equal to `filters_in * depth_multiplier`.
      strides: An integer or tuple/list of 2 integers, specifying the
          strides of the convolution along the width and height. Can be a
          single integer to specify the same value for all spatial
          dimensions. Specifying any stride value != 1 is incompatible
          with specifying any `dilation_rate` value != 1.
      block_id: Integer, a unique identification designating the block
          number.

  Input shape:
      4D tensor with shape: `(batch, channels, rows, cols)` if
      data_format='channels_first' or 4D tensor with shape:
      `(batch, rows, cols, channels)` if data_format='channels_last'.

  Output shape:
      4D tensor with shape: `(batch, filters, new_rows, new_cols)` if
      data_format='channels_first' or 4D tensor with shape:
      `(batch, new_rows, new_cols, filters)` if
      data_format='channels_last'. `rows` and `cols` values might have
      changed due to stride.

  Returns:
      Output tensor of block.
  """
  channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
  # alpha (width multiplier) scales only the pointwise output channels.
  pointwise_conv_filters = int(pointwise_conv_filters * alpha)

  x = DepthwiseConv2D(  # pylint: disable=not-callable
      (3, 3),
      padding='same',
      depth_multiplier=depth_multiplier,
      strides=strides,
      use_bias=False,
      name='conv_dw_%d' % block_id)(inputs)
  x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
  x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)

  x = Conv2D(
      pointwise_conv_filters, (1, 1),
      padding='same',
      use_bias=False,
      strides=(1, 1),
      name='conv_pw_%d' % block_id)(x)
  x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
  return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
apache-2.0
samthor/intellij-community
python/lib/Lib/rfc822.py
89
33167
"""RFC 2822 message manipulation. Note: This is only a very rough sketch of a full RFC-822 parser; in particular the tokenizing of addresses does not adhere to all the quoting rules. Note: RFC 2822 is a long awaited update to RFC 822. This module should conform to RFC 2822, and is thus mis-named (it's not worth renaming it). Some effort at RFC 2822 updates have been made, but a thorough audit has not been performed. Consider any RFC 2822 non-conformance to be a bug. RFC 2822: http://www.faqs.org/rfcs/rfc2822.html RFC 822 : http://www.faqs.org/rfcs/rfc822.html (obsolete) Directions for use: To create a Message object: first open a file, e.g.: fp = open(file, 'r') You can use any other legal way of getting an open file object, e.g. use sys.stdin or call os.popen(). Then pass the open file object to the Message() constructor: m = Message(fp) This class can work with any input object that supports a readline method. If the input object has seek and tell capability, the rewindbody method will work; also illegal lines will be pushed back onto the input stream. If the input object lacks seek but has an `unread' method that can push back a line of input, Message will use that to push back illegal lines. Thus this class can be used to parse messages coming from a buffered stream. The optional `seekable' argument is provided as a workaround for certain stdio libraries in which tell() discards buffered data before discovering that the lseek() system call doesn't work. For maximum portability, you should set the seekable argument to zero to prevent that initial \code{tell} when passing in an unseekable object such as a a file object created from a socket object. If it is 1 on entry -- which it is by default -- the tell() method of the open file object is called once; if this raises an exception, seekable is reset to 0. For other nonzero values of seekable, this test is not made. 
To get the text of a particular header there are several methods: str = m.getheader(name) str = m.getrawheader(name) where name is the name of the header, e.g. 'Subject'. The difference is that getheader() strips the leading and trailing whitespace, while getrawheader() doesn't. Both functions retain embedded whitespace (including newlines) exactly as they are specified in the header, and leave the case of the text unchanged. For addresses and address lists there are functions realname, mailaddress = m.getaddr(name) list = m.getaddrlist(name) where the latter returns a list of (realname, mailaddr) tuples. There is also a method time = m.getdate(name) which parses a Date-like field and returns a time-compatible tuple, i.e. a tuple such as returned by time.localtime() or accepted by time.mktime(). See the class definition for lower level access methods. There are also some utility functions here. """ # Cleanup and extensions by Eric S. Raymond <esr@thyrsus.com> import time __all__ = ["Message","AddressList","parsedate","parsedate_tz","mktime_tz"] _blanklines = ('\r\n', '\n') # Optimization for islast() class Message: """Represents a single RFC 2822-compliant message.""" def __init__(self, fp, seekable = 1): """Initialize the class instance and read the headers.""" if seekable == 1: # Exercise tell() to make sure it works # (and then assume seek() works, too) try: fp.tell() except (AttributeError, IOError): seekable = 0 self.fp = fp self.seekable = seekable self.startofheaders = None self.startofbody = None # if self.seekable: try: self.startofheaders = self.fp.tell() except IOError: self.seekable = 0 # self.readheaders() # if self.seekable: try: self.startofbody = self.fp.tell() except IOError: self.seekable = 0 def rewindbody(self): """Rewind the file to the start of the body (if seekable).""" if not self.seekable: raise IOError, "unseekable file" self.fp.seek(self.startofbody) def readheaders(self): """Read header lines. 
Read header lines up to the entirely blank line that terminates them. The (normally blank) line that ends the headers is skipped, but not included in the returned list. If a non-header line ends the headers, (which is an error), an attempt is made to backspace over it; it is never included in the returned list. The variable self.status is set to the empty string if all went well, otherwise it is an error message. The variable self.headers is a completely uninterpreted list of lines contained in the header (so printing them will reproduce the header exactly as it appears in the file). """ self.dict = {} self.unixfrom = '' self.headers = lst = [] self.status = '' headerseen = "" firstline = 1 startofline = unread = tell = None if hasattr(self.fp, 'unread'): unread = self.fp.unread elif self.seekable: tell = self.fp.tell while 1: if tell: try: startofline = tell() except IOError: startofline = tell = None self.seekable = 0 line = self.fp.readline() if not line: self.status = 'EOF in headers' break # Skip unix From name time lines if firstline and line.startswith('From '): self.unixfrom = self.unixfrom + line continue firstline = 0 if headerseen and line[0] in ' \t': # It's a continuation line. lst.append(line) x = (self.dict[headerseen] + "\n " + line.strip()) self.dict[headerseen] = x.strip() continue elif self.iscomment(line): # It's a comment. Ignore it. continue elif self.islast(line): # Note! No pushback here! The delimiter line gets eaten. break headerseen = self.isheader(line) if headerseen: # It's a legal header line, save it. lst.append(line) self.dict[headerseen] = line[len(headerseen)+1:].strip() continue else: # It's not a header line; throw it back and stop here. if not self.dict: self.status = 'No headers' else: self.status = 'Non-header line where header expected' # Try to undo the read. 
if unread: unread(line) elif tell: self.fp.seek(startofline) else: self.status = self.status + '; bad seek' break def isheader(self, line): """Determine whether a given line is a legal header. This method should return the header name, suitably canonicalized. You may override this method in order to use Message parsing on tagged data in RFC 2822-like formats with special header formats. """ i = line.find(':') if i > 0: return line[:i].lower() return None def islast(self, line): """Determine whether a line is a legal end of RFC 2822 headers. You may override this method if your application wants to bend the rules, e.g. to strip trailing whitespace, or to recognize MH template separators ('--------'). For convenience (e.g. for code reading from sockets) a line consisting of \r\n also matches. """ return line in _blanklines def iscomment(self, line): """Determine whether a line should be skipped entirely. You may override this method in order to use Message parsing on tagged data in RFC 2822-like formats that support embedded comments or free-text data. """ return False def getallmatchingheaders(self, name): """Find all header lines matching a given header name. Look through the list of headers and find all lines matching a given header name (and their continuation lines). A list of the lines is returned, without interpretation. If the header does not occur, an empty list is returned. If the header occurs multiple times, all occurrences are returned. Case is not important in the header name. """ name = name.lower() + ':' n = len(name) lst = [] hit = 0 for line in self.headers: if line[:n].lower() == name: hit = 1 elif not line[:1].isspace(): hit = 0 if hit: lst.append(line) return lst def getfirstmatchingheader(self, name): """Get the first header line matching name. This is similar to getallmatchingheaders, but it returns only the first matching header (and its continuation lines). 
""" name = name.lower() + ':' n = len(name) lst = [] hit = 0 for line in self.headers: if hit: if not line[:1].isspace(): break elif line[:n].lower() == name: hit = 1 if hit: lst.append(line) return lst def getrawheader(self, name): """A higher-level interface to getfirstmatchingheader(). Return a string containing the literal text of the header but with the keyword stripped. All leading, trailing and embedded whitespace is kept in the string, however. Return None if the header does not occur. """ lst = self.getfirstmatchingheader(name) if not lst: return None lst[0] = lst[0][len(name) + 1:] return ''.join(lst) def getheader(self, name, default=None): """Get the header value for a name. This is the normal interface: it returns a stripped version of the header value for a given header name, or None if it doesn't exist. This uses the dictionary version which finds the *last* such header. """ return self.dict.get(name.lower(), default) get = getheader def getheaders(self, name): """Get all values for a header. This returns a list of values for headers given more than once; each value in the result list is stripped in the same way as the result of getheader(). If the header is not given, return an empty list. """ result = [] current = '' have_header = 0 for s in self.getallmatchingheaders(name): if s[0].isspace(): if current: current = "%s\n %s" % (current, s.strip()) else: current = s.strip() else: if have_header: result.append(current) current = s[s.find(":") + 1:].strip() have_header = 1 if have_header: result.append(current) return result def getaddr(self, name): """Get a single address from a header, as a tuple. An example return value: ('Guido van Rossum', 'guido@cwi.nl') """ # New, by Ben Escoto alist = self.getaddrlist(name) if alist: return alist[0] else: return (None, None) def getaddrlist(self, name): """Get a list of addresses from a header. Retrieves a list of addresses from a header, where each address is a tuple as returned by getaddr(). 
Scans all named headers, so it works properly with multiple To: or Cc: headers for example. """ raw = [] for h in self.getallmatchingheaders(name): if h[0] in ' \t': raw.append(h) else: if raw: raw.append(', ') i = h.find(':') if i > 0: addr = h[i+1:] raw.append(addr) alladdrs = ''.join(raw) a = AddressList(alladdrs) return a.addresslist def getdate(self, name): """Retrieve a date field from a header. Retrieves a date field from the named header, returning a tuple compatible with time.mktime(). """ try: data = self[name] except KeyError: return None return parsedate(data) def getdate_tz(self, name): """Retrieve a date field from a header as a 10-tuple. The first 9 elements make up a tuple compatible with time.mktime(), and the 10th is the offset of the poster's time zone from GMT/UTC. """ try: data = self[name] except KeyError: return None return parsedate_tz(data) # Access as a dictionary (only finds *last* header of each type): def __len__(self): """Get the number of headers in a message.""" return len(self.dict) def __getitem__(self, name): """Get a specific header, as from a dictionary.""" return self.dict[name.lower()] def __setitem__(self, name, value): """Set the value of a header. Note: This is not a perfect inversion of __getitem__, because any changed headers get stuck at the end of the raw-headers list rather than where the altered header was. 
""" del self[name] # Won't fail if it doesn't exist self.dict[name.lower()] = value text = name + ": " + value for line in text.split("\n"): self.headers.append(line + "\n") def __delitem__(self, name): """Delete all occurrences of a specific header, if it is present.""" name = name.lower() if not name in self.dict: return del self.dict[name] name = name + ':' n = len(name) lst = [] hit = 0 for i in range(len(self.headers)): line = self.headers[i] if line[:n].lower() == name: hit = 1 elif not line[:1].isspace(): hit = 0 if hit: lst.append(i) for i in reversed(lst): del self.headers[i] def setdefault(self, name, default=""): lowername = name.lower() if lowername in self.dict: return self.dict[lowername] else: text = name + ": " + default for line in text.split("\n"): self.headers.append(line + "\n") self.dict[lowername] = default return default def has_key(self, name): """Determine whether a message contains the named header.""" return name.lower() in self.dict def __contains__(self, name): """Determine whether a message contains the named header.""" return name.lower() in self.dict def __iter__(self): return iter(self.dict) def keys(self): """Get all of a message's header field names.""" return self.dict.keys() def values(self): """Get all of a message's header field values.""" return self.dict.values() def items(self): """Get all of a message's headers. Returns a list of name, value tuples. """ return self.dict.items() def __str__(self): return ''.join(self.headers) # Utility functions # ----------------- # XXX Should fix unquote() and quote() to be really conformant. # XXX The inverses of the parse functions may also be useful. 
def unquote(s):
    """Remove quotes from a string.

    Strips one layer of surrounding double quotes (undoing backslash
    escapes) or surrounding angle brackets; anything else is returned
    unchanged.
    """
    if len(s) > 1:
        if s.startswith('"') and s.endswith('"'):
            # Unescape \\ and \" inside a quoted-string.
            return s[1:-1].replace('\\\\', '\\').replace('\\"', '"')
        if s.startswith('<') and s.endswith('>'):
            return s[1:-1]
    return s


def quote(s):
    """Add quotes around a string."""
    # Inverse of unquote() for the quoted-string form: escape backslashes
    # first, then double quotes.
    return s.replace('\\', '\\\\').replace('"', '\\"')


def parseaddr(address):
    """Parse an address into a (realname, mailaddr) tuple.

    Returns (None, None) if no address could be parsed out of the field.
    """
    a = AddressList(address)
    lst = a.addresslist
    if not lst:
        return (None, None)
    return lst[0]


class AddrlistClass:
    """Address parser class by Ben Escoto.

    To understand what this class does, it helps to have a copy of
    RFC 2822 in front of you.

    http://www.faqs.org/rfcs/rfc2822.html

    Note: this class interface is deprecated and may be removed in the future.
    Use rfc822.AddressList instead.
    """

    def __init__(self, field):
        """Initialize a new instance.

        `field' is an unparsed address header field, containing
        one or more addresses.
        """
        # Characters that terminate atoms / act as structural delimiters.
        self.specials = '()<>@,:;.\"[]'
        # self.pos is the cursor into self.field; all get*() methods
        # advance it as they consume characters.
        self.pos = 0
        self.LWS = ' \t'
        self.CR = '\r\n'
        self.atomends = self.specials + self.LWS + self.CR
        # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
        # is obsolete syntax.  RFC 2822 requires that we recognize obsolete
        # syntax, so allow dots in phrases.
        self.phraseends = self.atomends.replace('.', '')
        self.field = field
        self.commentlist = []

    def gotonext(self):
        """Parse up to the start of the next address."""
        # Skip whitespace/newlines; collect any parenthesized comments
        # encountered along the way into self.commentlist.
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS + '\n\r':
                self.pos = self.pos + 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            else:
                break

    def getaddrlist(self):
        """Parse all addresses.

        Returns a list containing all of the addresses.
        """
        result = []
        ad = self.getaddress()
        while ad:
            result += ad
            ad = self.getaddress()
        return result

    def getaddress(self):
        """Parse the next address.

        Returns a list of (realname, mailaddr) tuples (a list because a
        group address can expand to several addresses); empty list when
        nothing more can be parsed.
        """
        self.commentlist = []
        self.gotonext()

        # Remember where we are so the addrspec branch below can rewind.
        oldpos = self.pos
        oldcl = self.commentlist
        plist = self.getphraselist()

        self.gotonext()
        returnlist = []

        if self.pos >= len(self.field):
            # Bad email address technically, no domain.
            if plist:
                returnlist = [(' '.join(self.commentlist), plist[0])]

        elif self.field[self.pos] in '.@':
            # email address is just an addrspec
            # this isn't very efficient since we start over
            self.pos = oldpos
            self.commentlist = oldcl
            addrspec = self.getaddrspec()
            returnlist = [(' '.join(self.commentlist), addrspec)]

        elif self.field[self.pos] == ':':
            # address is a group
            returnlist = []

            fieldlen = len(self.field)
            self.pos += 1
            # Recursively parse the group members until the closing ';'.
            while self.pos < len(self.field):
                self.gotonext()
                if self.pos < fieldlen and self.field[self.pos] == ';':
                    self.pos += 1
                    break
                returnlist = returnlist + self.getaddress()

        elif self.field[self.pos] == '<':
            # Address is a phrase then a route addr
            routeaddr = self.getrouteaddr()

            if self.commentlist:
                returnlist = [(' '.join(plist) + ' (' + \
                               ' '.join(self.commentlist) + ')', routeaddr)]
            else:
                returnlist = [(' '.join(plist), routeaddr)]

        else:
            if plist:
                returnlist = [(' '.join(self.commentlist), plist[0])]
            elif self.field[self.pos] in self.specials:
                # Stray special character: skip it so we make progress.
                self.pos += 1

        self.gotonext()
        # Consume the comma separating this address from the next one.
        if self.pos < len(self.field) and self.field[self.pos] == ',':
            self.pos += 1
        return returnlist

    def getrouteaddr(self):
        """Parse a route address (Return-path value).

        This method just skips all the route stuff and returns the addrspec.
        """
        if self.field[self.pos] != '<':
            return

        expectroute = 0
        self.pos += 1
        self.gotonext()
        adlist = ""
        while self.pos < len(self.field):
            if expectroute:
                # Discard the route domain after an '@'.
                self.getdomain()
                expectroute = 0
            elif self.field[self.pos] == '>':
                self.pos += 1
                break
            elif self.field[self.pos] == '@':
                self.pos += 1
                expectroute = 1
            elif self.field[self.pos] == ':':
                self.pos += 1
            else:
                adlist = self.getaddrspec()
                self.pos += 1
                break
            self.gotonext()

        return adlist

    def getaddrspec(self):
        """Parse an RFC 2822 addr-spec."""
        aslist = []

        self.gotonext()
        # Accumulate the local part: atoms, dots, and quoted-strings.
        while self.pos < len(self.field):
            if self.field[self.pos] == '.':
                aslist.append('.')
                self.pos += 1
            elif self.field[self.pos] == '"':
                aslist.append('"%s"' % self.getquote())
            elif self.field[self.pos] in self.atomends:
                break
            else:
                aslist.append(self.getatom())
        self.gotonext()

        # Without an '@' there is no domain; return the bare local part.
        if self.pos >= len(self.field) or self.field[self.pos] != '@':
            return ''.join(aslist)

        aslist.append('@')
        self.pos += 1
        self.gotonext()
        return ''.join(aslist) + self.getdomain()

    def getdomain(self):
        """Get the complete domain name from an address."""
        sdlist = []
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS:
                self.pos += 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] == '[':
                sdlist.append(self.getdomainliteral())
            elif self.field[self.pos] == '.':
                self.pos += 1
                sdlist.append('.')
            elif self.field[self.pos] in self.atomends:
                break
            else:
                sdlist.append(self.getatom())
        return ''.join(sdlist)

    def getdelimited(self, beginchar, endchars, allowcomments = 1):
        """Parse a header fragment delimited by special characters.

        `beginchar' is the start character for the fragment.  If self is not
        looking at an instance of `beginchar' then getdelimited returns the
        empty string.

        `endchars' is a sequence of allowable end-delimiting characters.
        Parsing stops when one of these is encountered.

        If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
        within the parsed fragment.
        """
        if self.field[self.pos] != beginchar:
            return ''

        slist = ['']
        # quote == 1 means the previous character was a backslash, so the
        # current character is taken literally (even if it is an endchar).
        quote = 0
        self.pos += 1
        while self.pos < len(self.field):
            if quote == 1:
                slist.append(self.field[self.pos])
                quote = 0
            elif self.field[self.pos] in endchars:
                self.pos += 1
                break
            elif allowcomments and self.field[self.pos] == '(':
                slist.append(self.getcomment())
                continue        # have already advanced pos from getcomment
            elif self.field[self.pos] == '\\':
                quote = 1
            else:
                slist.append(self.field[self.pos])
            self.pos += 1

        return ''.join(slist)

    def getquote(self):
        """Get a quote-delimited fragment from self's field."""
        # '\r' as an end char guards against runaway parsing past EOL.
        return self.getdelimited('"', '"\r', 0)

    def getcomment(self):
        """Get a parenthesis-delimited fragment from self's field."""
        return self.getdelimited('(', ')\r', 1)

    def getdomainliteral(self):
        """Parse an RFC 2822 domain-literal."""
        return '[%s]' % self.getdelimited('[', ']\r', 0)

    def getatom(self, atomends=None):
        """Parse an RFC 2822 atom.

        Optional atomends specifies a different set of end token
        delimiters (the default is to use self.atomends).  This is used
        e.g. in getphraselist() since phrase endings must not
        include the `.' (which is legal in phrases)."""
        atomlist = ['']
        if atomends is None:
            atomends = self.atomends

        while self.pos < len(self.field):
            if self.field[self.pos] in atomends:
                break
            else:
                atomlist.append(self.field[self.pos])
            self.pos += 1

        return ''.join(atomlist)

    def getphraselist(self):
        """Parse a sequence of RFC 2822 phrases.

        A phrase is a sequence of words, which are in turn either RFC 2822
        atoms or quoted-strings.  Phrases are canonicalized by squeezing all
        runs of continuous whitespace into one space.
        """
        plist = []

        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS:
                self.pos += 1
            elif self.field[self.pos] == '"':
                plist.append(self.getquote())
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] in self.phraseends:
                break
            else:
                plist.append(self.getatom(self.phraseends))

        return plist


class AddressList(AddrlistClass):
    """An AddressList encapsulates a list of parsed RFC 2822 addresses."""

    def __init__(self, field):
        AddrlistClass.__init__(self, field)
        if field:
            self.addresslist = self.getaddrlist()
        else:
            # Passing a false field (e.g. None) creates an empty list; the
            # set-operation dunders below rely on this to build results.
            self.addresslist = []

    def __len__(self):
        return len(self.addresslist)

    def __str__(self):
        return ", ".join(map(dump_address_pair, self.addresslist))

    def __add__(self, other):
        # Set union
        newaddr = AddressList(None)
        newaddr.addresslist = self.addresslist[:]
        for x in other.addresslist:
            if not x in self.addresslist:
                newaddr.addresslist.append(x)
        return newaddr

    def __iadd__(self, other):
        # Set union, in-place
        for x in other.addresslist:
            if not x in self.addresslist:
                self.addresslist.append(x)
        return self

    def __sub__(self, other):
        # Set difference
        newaddr = AddressList(None)
        for x in self.addresslist:
            if not x in other.addresslist:
                newaddr.addresslist.append(x)
        return newaddr

    def __isub__(self, other):
        # Set difference, in-place
        for x in other.addresslist:
            if x in self.addresslist:
                self.addresslist.remove(x)
        return self

    def __getitem__(self, index):
        # Make indexing, slices, and 'in' work
        return self.addresslist[index]


def dump_address_pair(pair):
    """Dump a (name, address) pair in a canonicalized form.

    Produces '"realname" <addr>' when a realname is present, else just
    the bare address.
    """
    if pair[0]:
        return '"' + pair[0] + '" <' + pair[1] + '>'
    else:
        return pair[1]


# Parse a date field

# Both abbreviated and full month names are accepted (same index mod 12).
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
               'aug', 'sep', 'oct', 'nov', 'dec',
               'january', 'february', 'march', 'april', 'may', 'june', 'july',
               'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']

# The timezone table does not include the military time zones defined
# in RFC822, other than Z.  According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones.  RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.

# Offsets are in signed HHMM form (e.g. -500 == UTC-05:00), converted
# to seconds by parsedate_tz() below.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
              'AST': -400, 'ADT': -300,  # Atlantic (used in Canada)
              'EST': -500, 'EDT': -400,  # Eastern
              'CST': -600, 'CDT': -500,  # Central
              'MST': -700, 'MDT': -600,  # Mountain
              'PST': -800, 'PDT': -700   # Pacific
              }


def parsedate_tz(data):
    """Convert a date string to a time tuple.

    Accounts for military timezones.

    Returns a 10-tuple: the first 9 items are compatible with
    time.mktime() and the 10th is the timezone offset from UTC in
    seconds (or None if no usable zone was found).  Returns None when
    the string cannot be parsed.
    """
    if not data:
        return None
    data = data.split()
    if data[0][-1] in (',', '.') or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    else:
        # no space after the "weekday,"?
        i = data[0].rfind(',')
        if i >= 0:
            data[0] = data[0][i+1:]
    if len(data) == 3: # RFC 850 date, deprecated
        stuff = data[0].split('-')
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            # Time and zone were glued together, e.g. "08:49:37+0100".
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('') # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    mm = mm.lower()
    if not mm in _monthnames:
        # Day and month may be swapped (European order); try the other way.
        dd, mm = mm, dd.lower()
        if not mm in _monthnames:
            return None
    mm = _monthnames.index(mm)+1
    if mm > 12: mm = mm - 12    # full month name matched; fold to 1..12
    if dd[-1] == ',':
        dd = dd[:-1]
    i = yy.find(':')
    if i > 0:
        # Year and time fields were swapped; put them back.
        yy, tm = tm, yy
    if yy[-1] == ',':
        yy = yy[:-1]
    if not yy[0].isdigit():
        # Year and zone were swapped.
        yy, tz = tz, yy
    if tm[-1] == ',':
        tm = tm[:-1]
    tm = tm.split(':')
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = '0'
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except ValueError:
        return None
    tzoffset = None
    tz = tz.upper()
    if tz in _timezones:
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
    return (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset)


def parsedate(data):
    """Convert a time string to a time tuple."""
    t = parsedate_tz(data)
    if t is None:
        return t
    return t[:9]


def mktime_tz(data):
    """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
    if data[9] is None:
        # No zone info, so localtime is better assumption than GMT
        return time.mktime(data[:8] + (-1,))
    else:
        t = time.mktime(data[:8] + (0,))
        return t - data[9] - time.timezone


def formatdate(timeval=None):
    """Returns time format preferred for Internet standards.

    Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123

    According to RFC 1123, day and month names must always be in
    English.  If not for that, this code could use strftime().  It
    can't because strftime() honors the locale and could generate
    non-English names.
    """
    if timeval is None:
        timeval = time.time()
    timeval = time.gmtime(timeval)
    return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
            ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[timeval[6]],
            timeval[2],
            ("Jan", "Feb", "Mar", "Apr", "May", "Jun",
             "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")[timeval[1]-1],
            timeval[0], timeval[3], timeval[4], timeval[5])


# When used as script, run a small test program.
# The first command line argument must be a filename containing one
# message in RFC-822 format.
# Self-test harness (Python 2): parse one RFC-822 message from a file and
# print its headers, parsed addresses, date, and body line count.
if __name__ == '__main__':
    import sys, os
    # Default input; overridden by the first command line argument.
    file = os.path.join(os.environ['HOME'], 'Mail/inbox/1')
    if sys.argv[1:]: file = sys.argv[1]
    f = open(file, 'r')
    m = Message(f)
    print 'From:', m.getaddr('from')
    print 'To:', m.getaddrlist('to')
    print 'Subject:', m.getheader('subject')
    print 'Date:', m.getheader('date')
    date = m.getdate_tz('date')
    tz = date[-1]
    date = time.localtime(mktime_tz(date))
    if date:
        print 'ParsedDate:', time.asctime(date),
        # Render the zone offset (seconds) back as +HHMM[.SS].
        hhmmss = tz
        hhmm, ss = divmod(hhmmss, 60)
        hh, mm = divmod(hhmm, 60)
        print "%+03d%02d" % (hh, mm),
        if ss: print ".%02d" % ss,
        print
    else:
        print 'ParsedDate:', None
    m.rewindbody()
    # Count the body lines following the headers.
    n = 0
    while f.readline():
        n += 1
    print 'Lines:', n
    print '-'*70
    print 'len =', len(m)
    if 'Date' in m: print 'Date =', m['Date']
    if 'X-Nonsense' in m: pass
    print 'keys =', m.keys()
    print 'values =', m.values()
    print 'items =', m.items()
apache-2.0
tersmitten/ansible
lib/ansible/modules/network/cloudengine/ce_sflow.py
32
51332
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: ce_sflow version_added: "2.4" short_description: Manages sFlow configuration on HUAWEI CloudEngine switches. description: - Configure Sampled Flow (sFlow) to monitor traffic on an interface in real time, detect abnormal traffic, and locate the source of attack traffic, ensuring stable running of the network. author: QijunPan (@QijunPan) options: agent_ip: description: - Specifies the IPv4/IPv6 address of an sFlow agent. source_ip: description: - Specifies the source IPv4/IPv6 address of sFlow packets. collector_id: description: - Specifies the ID of an sFlow collector. This ID is used when you specify the collector in subsequent sFlow configuration. choices: ['1', '2'] collector_ip: description: - Specifies the IPv4/IPv6 address of the sFlow collector. collector_ip_vpn: description: - Specifies the name of a VPN instance. The value is a string of 1 to 31 case-sensitive characters, spaces not supported. When double quotation marks are used around the string, spaces are allowed in the string. The value C(_public_) is reserved and cannot be used as the VPN instance name. 
collector_datagram_size: description: - Specifies the maximum length of sFlow packets sent from an sFlow agent to an sFlow collector. The value is an integer, in bytes. It ranges from 1024 to 8100. The default value is 1400. collector_udp_port: description: - Specifies the UDP destination port number of sFlow packets. The value is an integer that ranges from 1 to 65535. The default value is 6343. collector_meth: description: - Configures the device to send sFlow packets through service interfaces, enhancing the sFlow packet forwarding capability. The enhanced parameter is optional. No matter whether you configure the enhanced mode, the switch determines to send sFlow packets through service cards or management port based on the routing information on the collector. When the value is meth, the device forwards sFlow packets at the control plane. When the value is enhanced, the device forwards sFlow packets at the forwarding plane to enhance the sFlow packet forwarding capacity. choices: ['meth', 'enhanced'] collector_description: description: - Specifies the description of an sFlow collector. The value is a string of 1 to 255 case-sensitive characters without spaces. sflow_interface: description: - Full name of interface for Flow Sampling or Counter. It must be a physical interface, Eth-Trunk, or Layer 2 subinterface. sample_collector: description: - Indicates the ID list of the collector. sample_rate: description: - Specifies the flow sampling rate in the format 1/rate. The value is an integer and ranges from 1 to 4294967295. The default value is 8192. sample_length: description: - Specifies the maximum length of sampled packets. The value is an integer and ranges from 18 to 512, in bytes. The default value is 128. sample_direction: description: - Enables flow sampling in the inbound or outbound direction. choices: ['inbound', 'outbound', 'both'] counter_collector: description: - Indicates the ID list of the counter collector. 
counter_interval: description: - Indicates the counter sampling interval. The value is an integer that ranges from 10 to 4294967295, in seconds. The default value is 20. export_route: description: - Configures the sFlow packets sent by the switch not to carry routing information. choices: ['enable', 'disable'] rate_limit: description: - Specifies the rate of sFlow packets sent from a card to the control plane. The value is an integer that ranges from 100 to 1500, in pps. rate_limit_slot: description: - Specifies the slot where the rate of output sFlow packets is limited. If this parameter is not specified, the rate of sFlow packets sent from all cards to the control plane is limited. The value is an integer or a string of characters. forward_enp_slot: description: - Enable the Embedded Network Processor (ENP) chip function. The switch uses the ENP chip to perform sFlow sampling, and the maximum sFlow sampling interval is 65535. If you set the sampling interval to be larger than 65535, the switch automatically restores it to 65535. The value is an integer or 'all'. state: description: - Determines whether the config should be present or not on the device. default: present choices: ['present', 'absent'] """ EXAMPLES = ''' --- - name: sflow module test hosts: ce128 connection: local gather_facts: no vars: cli: host: "{{ inventory_hostname }}" port: "{{ ansible_ssh_port }}" username: "{{ username }}" password: "{{ password }}" transport: cli tasks: - name: Configuring sFlow Agent ce_sflow: agent_ip: 6.6.6.6 provider: '{{ cli }}' - name: Configuring sFlow Collector ce_sflow: collector_id: 1 collector_ip: 7.7.7.7 collector_ip_vpn: vpn1 collector_description: Collector1 provider: '{{ cli }}' - name: Configure flow sampling. ce_sflow: sflow_interface: 10GE2/0/2 sample_collector: 1 sample_direction: inbound provider: '{{ cli }}' - name: Configure counter sampling. 
ce_sflow: sflow_interface: 10GE2/0/2 counter_collector: 1 counter_interval: 1000 provider: '{{ cli }}' ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: verbose mode type: dict sample: {"agent_ip": "6.6.6.6", "state": "present"} existing: description: k/v pairs of existing configuration returned: verbose mode type: dict sample: {"agent": {}} end_state: description: k/v pairs of configuration after module execution returned: verbose mode type: dict sample: {"agent": {"family": "ipv4", "ipv4Addr": "1.2.3.4", "ipv6Addr": null}} updates: description: commands sent to the device returned: always type: list sample: ["sflow agent ip 6.6.6.6"] changed: description: check to see if a change was made on the device returned: always type: bool sample: true ''' import re from xml.etree import ElementTree from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr from ansible.module_utils.network.cloudengine.ce import get_config, load_config CE_NC_GET_SFLOW = """ <filter type="subtree"> <sflow xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> <sources> <source> <family></family> <ipv4Addr></ipv4Addr> <ipv6Addr></ipv6Addr> </source> </sources> <agents> <agent> <family></family> <ipv4Addr></ipv4Addr> <ipv6Addr></ipv6Addr> </agent> </agents> <collectors> <collector> <collectorID></collectorID> <family></family> <ipv4Addr></ipv4Addr> <ipv6Addr></ipv6Addr> <vrfName></vrfName> <datagramSize></datagramSize> <port></port> <description></description> <meth></meth> </collector> </collectors> <samplings> <sampling> <ifName>%s</ifName> <collectorID></collectorID> <direction></direction> <length></length> <rate></rate> </sampling> </samplings> <counters> <counter> <ifName>%s</ifName> <collectorID></collectorID> <interval></interval> </counter> </counters> <exports> <export> <ExportRoute></ExportRoute> 
</export> </exports> </sflow> </filter> """ def is_config_exist(cmp_cfg, test_cfg): """is configuration exist?""" if not cmp_cfg or not test_cfg: return False return bool(test_cfg in cmp_cfg) def is_valid_ip_vpn(vpname): """check ip vpn""" if not vpname: return False if vpname == "_public_": return False if len(vpname) < 1 or len(vpname) > 31: return False return True def get_ip_version(address): """get ip version fast""" if not address: return None if address.count(':') >= 2 and address.count(":") <= 7: return "ipv6" elif address.count('.') == 3: return "ipv4" else: return None def get_interface_type(interface): """get the type of interface, such as 10GE, ETH-TRUNK, VLANIF...""" if interface is None: return None if interface.upper().startswith('GE'): iftype = 'ge' elif interface.upper().startswith('10GE'): iftype = '10ge' elif interface.upper().startswith('25GE'): iftype = '25ge' elif interface.upper().startswith('4X10GE'): iftype = '4x10ge' elif interface.upper().startswith('40GE'): iftype = '40ge' elif interface.upper().startswith('100GE'): iftype = '100ge' elif interface.upper().startswith('VLANIF'): iftype = 'vlanif' elif interface.upper().startswith('LOOPBACK'): iftype = 'loopback' elif interface.upper().startswith('METH'): iftype = 'meth' elif interface.upper().startswith('ETH-TRUNK'): iftype = 'eth-trunk' elif interface.upper().startswith('VBDIF'): iftype = 'vbdif' elif interface.upper().startswith('NVE'): iftype = 'nve' elif interface.upper().startswith('TUNNEL'): iftype = 'tunnel' elif interface.upper().startswith('ETHERNET'): iftype = 'ethernet' elif interface.upper().startswith('FCOE-PORT'): iftype = 'fcoe-port' elif interface.upper().startswith('FABRIC-PORT'): iftype = 'fabric-port' elif interface.upper().startswith('STACK-PORT'): iftype = 'stack-port' elif interface.upper().startswith('NULL'): iftype = 'null' else: return None return iftype.lower() def get_rate_limit(config): """get sflow management-plane export rate-limit info""" get = 
re.findall(r"sflow management-plane export rate-limit ([0-9]+) slot ([0-9]+)", config) if not get: get = re.findall(r"sflow management-plane export rate-limit ([0-9]+)", config) if not get: return None else: return dict(rate_limit=get[0]) else: limit = list() for slot in get: limit.append(dict(rate_limit=slot[0], slot_id=slot[1])) return limit def get_forward_enp(config): """get assign forward enp sflow enable slot info""" get = re.findall(r"assign forward enp sflow enable slot (\S+)", config) if not get: return None else: return list(get) class Sflow(object): """Manages sFlow""" def __init__(self, argument_spec): self.spec = argument_spec self.module = None self.__init_module__() # module input info self.agent_ip = self.module.params['agent_ip'] self.agent_version = None self.source_ip = self.module.params['source_ip'] self.source_version = None self.export_route = self.module.params['export_route'] self.rate_limit = self.module.params['rate_limit'] self.rate_limit_slot = self.module.params['rate_limit_slot'] self.forward_enp_slot = self.module.params['forward_enp_slot'] self.collector_id = self.module.params['collector_id'] self.collector_ip = self.module.params['collector_ip'] self.collector_version = None self.collector_ip_vpn = self.module.params['collector_ip_vpn'] self.collector_datagram_size = self.module.params['collector_datagram_size'] self.collector_udp_port = self.module.params['collector_udp_port'] self.collector_meth = self.module.params['collector_meth'] self.collector_description = self.module.params['collector_description'] self.sflow_interface = self.module.params['sflow_interface'] self.sample_collector = self.module.params['sample_collector'] or list() self.sample_rate = self.module.params['sample_rate'] self.sample_length = self.module.params['sample_length'] self.sample_direction = self.module.params['sample_direction'] self.counter_collector = self.module.params['counter_collector'] or list() self.counter_interval = 
self.module.params['counter_interval'] self.state = self.module.params['state'] # state self.config = "" # current config self.sflow_dict = dict() self.changed = False self.updates_cmd = list() self.commands = list() self.results = dict() self.proposed = dict() self.existing = dict() self.end_state = dict() def __init_module__(self): """init module""" required_together = [("collector_id", "collector_ip")] self.module = AnsibleModule( argument_spec=self.spec, required_together=required_together, supports_check_mode=True) def check_response(self, con_obj, xml_name): """Check if response message is already succeed""" xml_str = con_obj.xml if "<ok/>" not in xml_str: self.module.fail_json(msg='Error: %s failed.' % xml_name) def netconf_set_config(self, xml_str, xml_name): """netconf set config""" rcv_xml = set_nc_config(self.module, xml_str) if "<ok/>" not in rcv_xml: self.module.fail_json(msg='Error: %s failed.' % xml_name) def cli_load_config(self, commands): """load config by cli""" if not self.module.check_mode: load_config(self.module, commands) def get_current_config(self): """get current configuration""" flags = list() exp = "" if self.rate_limit: exp += "assign sflow management-plane export rate-limit %s" % self.rate_limit if self.rate_limit_slot: exp += " slot %s" % self.rate_limit_slot exp += "$" if self.forward_enp_slot: if exp: exp += "|" exp += "assign forward enp sflow enable slot %s$" % self.forward_enp_slot if exp: exp = " | ignore-case include " + exp flags.append(exp) return get_config(self.module, flags) else: return "" def cli_add_command(self, command, undo=False): """add command to self.update_cmd and self.commands""" if undo and command.lower() not in ["quit", "return"]: cmd = "undo " + command else: cmd = command self.commands.append(cmd) # set to device if command.lower() not in ["quit", "return"]: self.updates_cmd.append(cmd) # show updates result def get_sflow_dict(self): """ sflow config dict""" sflow_dict = dict(source=list(), agent=dict(), 
collector=list(), sampling=dict(), counter=dict(), export=dict()) conf_str = CE_NC_GET_SFLOW % ( self.sflow_interface, self.sflow_interface) if not self.collector_meth: conf_str = conf_str.replace("<meth></meth>", "") rcv_xml = get_nc_config(self.module, conf_str) if "<data/>" in rcv_xml: return sflow_dict xml_str = rcv_xml.replace('\r', '').replace('\n', '').\ replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\ replace('xmlns="http://www.huawei.com/netconf/vrp"', "") root = ElementTree.fromstring(xml_str) # get source info srcs = root.findall("data/sflow/sources/source") if srcs: for src in srcs: attrs = dict() for attr in src: if attr.tag in ["family", "ipv4Addr", "ipv6Addr"]: attrs[attr.tag] = attr.text sflow_dict["source"].append(attrs) # get agent info agent = root.find("data/sflow/agents/agent") if agent: for attr in agent: if attr.tag in ["family", "ipv4Addr", "ipv6Addr"]: sflow_dict["agent"][attr.tag] = attr.text # get collector info collectors = root.findall("data/sflow/collectors/collector") if collectors: for collector in collectors: attrs = dict() for attr in collector: if attr.tag in ["collectorID", "family", "ipv4Addr", "ipv6Addr", "vrfName", "datagramSize", "port", "description", "meth"]: attrs[attr.tag] = attr.text sflow_dict["collector"].append(attrs) # get sampling info sample = root.find("data/sflow/samplings/sampling") if sample: for attr in sample: if attr.tag in ["ifName", "collectorID", "direction", "length", "rate"]: sflow_dict["sampling"][attr.tag] = attr.text # get counter info counter = root.find("data/sflow/counters/counter") if counter: for attr in counter: if attr.tag in ["ifName", "collectorID", "interval"]: sflow_dict["counter"][attr.tag] = attr.text # get export info export = root.find("data/sflow/exports/export") if export: for attr in export: if attr.tag == "ExportRoute": sflow_dict["export"][attr.tag] = attr.text return sflow_dict def config_agent(self): """configures sFlow agent""" xml_str = '' if not self.agent_ip: 
return xml_str self.agent_version = get_ip_version(self.agent_ip) if not self.agent_version: self.module.fail_json(msg="Error: agent_ip is invalid.") if self.state == "present": if self.agent_ip != self.sflow_dict["agent"].get("ipv4Addr") \ and self.agent_ip != self.sflow_dict["agent"].get("ipv6Addr"): xml_str += '<agents><agent operation="merge">' xml_str += '<family>%s</family>' % self.agent_version if self.agent_version == "ipv4": xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.agent_ip self.updates_cmd.append("sflow agent ip %s" % self.agent_ip) else: xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.agent_ip self.updates_cmd.append("sflow agent ipv6 %s" % self.agent_ip) xml_str += '</agent></agents>' else: if self.agent_ip == self.sflow_dict["agent"].get("ipv4Addr") \ or self.agent_ip == self.sflow_dict["agent"].get("ipv6Addr"): xml_str += '<agents><agent operation="delete"></agent></agents>' self.updates_cmd.append("undo sflow agent") return xml_str def config_source(self): """configures the source IP address for sFlow packets""" xml_str = '' if not self.source_ip: return xml_str self.source_version = get_ip_version(self.source_ip) if not self.source_version: self.module.fail_json(msg="Error: source_ip is invalid.") src_dict = dict() for src in self.sflow_dict["source"]: if src.get("family") == self.source_version: src_dict = src break if self.state == "present": if self.source_ip != src_dict.get("ipv4Addr") \ and self.source_ip != src_dict.get("ipv6Addr"): xml_str += '<sources><source operation="merge">' xml_str += '<family>%s</family>' % self.source_version if self.source_version == "ipv4": xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.source_ip self.updates_cmd.append("sflow source ip %s" % self.source_ip) else: xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.source_ip self.updates_cmd.append( "sflow source ipv6 %s" % self.source_ip) xml_str += '</source ></sources>' else: if self.source_ip == src_dict.get("ipv4Addr"): xml_str += '<sources><source 
operation="delete"><family>ipv4</family></source ></sources>' self.updates_cmd.append("undo sflow source ip %s" % self.source_ip) elif self.source_ip == src_dict.get("ipv6Addr"): xml_str += '<sources><source operation="delete"><family>ipv6</family></source ></sources>' self.updates_cmd.append("undo sflow source ipv6 %s" % self.source_ip) return xml_str def config_collector(self): """creates an sFlow collector and sets or modifies optional parameters for the sFlow collector""" xml_str = '' if not self.collector_id: return xml_str if self.state == "present" and not self.collector_ip: return xml_str if self.collector_ip: self.collector_version = get_ip_version(self.collector_ip) if not self.collector_version: self.module.fail_json(msg="Error: collector_ip is invalid.") # get collector dict exist_dict = dict() for collector in self.sflow_dict["collector"]: if collector.get("collectorID") == self.collector_id: exist_dict = collector break change = False if self.state == "present": if not exist_dict: change = True elif self.collector_version != exist_dict.get("family"): change = True elif self.collector_version == "ipv4" and self.collector_ip != exist_dict.get("ipv4Addr"): change = True elif self.collector_version == "ipv6" and self.collector_ip != exist_dict.get("ipv6Addr"): change = True elif self.collector_ip_vpn and self.collector_ip_vpn != exist_dict.get("vrfName"): change = True elif not self.collector_ip_vpn and exist_dict.get("vrfName") != "_public_": change = True elif self.collector_udp_port and self.collector_udp_port != exist_dict.get("port"): change = True elif not self.collector_udp_port and exist_dict.get("port") != "6343": change = True elif self.collector_datagram_size and self.collector_datagram_size != exist_dict.get("datagramSize"): change = True elif not self.collector_datagram_size and exist_dict.get("datagramSize") != "1400": change = True elif self.collector_meth and self.collector_meth != exist_dict.get("meth"): change = True elif not 
self.collector_meth and exist_dict.get("meth") and exist_dict.get("meth") != "meth": change = True elif self.collector_description and self.collector_description != exist_dict.get("description"): change = True elif not self.collector_description and exist_dict.get("description"): change = True else: pass else: # absent # collector not exist if not exist_dict: return xml_str if self.collector_version and self.collector_version != exist_dict.get("family"): return xml_str if self.collector_version == "ipv4" and self.collector_ip != exist_dict.get("ipv4Addr"): return xml_str if self.collector_version == "ipv6" and self.collector_ip != exist_dict.get("ipv6Addr"): return xml_str if self.collector_ip_vpn and self.collector_ip_vpn != exist_dict.get("vrfName"): return xml_str if self.collector_udp_port and self.collector_udp_port != exist_dict.get("port"): return xml_str if self.collector_datagram_size and self.collector_datagram_size != exist_dict.get("datagramSize"): return xml_str if self.collector_meth and self.collector_meth != exist_dict.get("meth"): return xml_str if self.collector_description and self.collector_description != exist_dict.get("description"): return xml_str change = True if not change: return xml_str # update or delete if self.state == "absent": xml_str += '<collectors><collector operation="delete"><collectorID>%s</collectorID>' % self.collector_id self.updates_cmd.append("undo collector %s" % self.collector_id) else: xml_str += '<collectors><collector operation="merge"><collectorID>%s</collectorID>' % self.collector_id cmd = "sflow collector %s" % self.collector_id xml_str += '<family>%s</family>' % self.collector_version if self.collector_version == "ipv4": cmd += " ip %s" % self.collector_ip xml_str += '<ipv4Addr>%s</ipv4Addr>' % self.collector_ip else: cmd += " ipv6 %s" % self.collector_ip xml_str += '<ipv6Addr>%s</ipv6Addr>' % self.collector_ip if self.collector_ip_vpn: cmd += " vpn-instance %s" % self.collector_ip_vpn xml_str += 
'<vrfName>%s</vrfName>' % self.collector_ip_vpn if self.collector_datagram_size: cmd += " length %s" % self.collector_datagram_size xml_str += '<datagramSize>%s</datagramSize>' % self.collector_datagram_size if self.collector_udp_port: cmd += " udp-port %s" % self.collector_udp_port xml_str += '<port>%s</port>' % self.collector_udp_port if self.collector_description: cmd += " description %s" % self.collector_description xml_str += '<description>%s</description>' % self.collector_description else: xml_str += '<description></description>' if self.collector_meth: if self.collector_meth == "enhanced": cmd += " enhanced" xml_str += '<meth>%s</meth>' % self.collector_meth self.updates_cmd.append(cmd) xml_str += "</collector></collectors>" return xml_str def config_sampling(self): """configure sflow sampling on an interface""" xml_str = '' if not self.sflow_interface: return xml_str if not self.sflow_dict["sampling"] and self.state == "absent": return xml_str self.updates_cmd.append("interface %s" % self.sflow_interface) if self.state == "present": xml_str += '<samplings><sampling operation="merge"><ifName>%s</ifName>' % self.sflow_interface else: xml_str += '<samplings><sampling operation="delete"><ifName>%s</ifName>' % self.sflow_interface # sample_collector if self.sample_collector: if self.sflow_dict["sampling"].get("collectorID") \ and self.sflow_dict["sampling"].get("collectorID") != "invalid": existing = self.sflow_dict["sampling"].get("collectorID").split(',') else: existing = list() if self.state == "present": diff = list(set(self.sample_collector) - set(existing)) if diff: self.updates_cmd.append( "sflow sampling collector %s" % ' '.join(diff)) new_set = list(self.sample_collector + existing) xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(new_set))) else: same = list(set(self.sample_collector) & set(existing)) if same: self.updates_cmd.append( "undo sflow sampling collector %s" % ' '.join(same)) xml_str += '<collectorID>%s</collectorID>' % 
','.join(list(set(same))) # sample_rate if self.sample_rate: exist = bool(self.sample_rate == self.sflow_dict["sampling"].get("rate")) if self.state == "present" and not exist: self.updates_cmd.append( "sflow sampling rate %s" % self.sample_rate) xml_str += '<rate>%s</rate>' % self.sample_rate elif self.state == "absent" and exist: self.updates_cmd.append( "undo sflow sampling rate %s" % self.sample_rate) xml_str += '<rate>%s</rate>' % self.sample_rate # sample_length if self.sample_length: exist = bool(self.sample_length == self.sflow_dict["sampling"].get("length")) if self.state == "present" and not exist: self.updates_cmd.append( "sflow sampling length %s" % self.sample_length) xml_str += '<length>%s</length>' % self.sample_length elif self.state == "absent" and exist: self.updates_cmd.append( "undo sflow sampling length %s" % self.sample_length) xml_str += '<length>%s</length>' % self.sample_length # sample_direction if self.sample_direction: direction = list() if self.sample_direction == "both": direction = ["inbound", "outbound"] else: direction.append(self.sample_direction) existing = list() if self.sflow_dict["sampling"].get("direction"): if self.sflow_dict["sampling"].get("direction") == "both": existing = ["inbound", "outbound"] else: existing.append( self.sflow_dict["sampling"].get("direction")) if self.state == "present": diff = list(set(direction) - set(existing)) if diff: new_set = list(set(direction + existing)) self.updates_cmd.append( "sflow sampling %s" % ' '.join(diff)) if len(new_set) > 1: new_dir = "both" else: new_dir = new_set[0] xml_str += '<direction>%s</direction>' % new_dir else: same = list(set(existing) & set(direction)) if same: self.updates_cmd.append("undo sflow sampling %s" % ' '.join(same)) if len(same) > 1: del_dir = "both" else: del_dir = same[0] xml_str += '<direction>%s</direction>' % del_dir if xml_str.endswith("</ifName>"): self.updates_cmd.pop() return "" xml_str += '</sampling></samplings>' return xml_str def 
config_counter(self): """configures sflow counter on an interface""" xml_str = '' if not self.sflow_interface: return xml_str if not self.sflow_dict["counter"] and self.state == "absent": return xml_str self.updates_cmd.append("interface %s" % self.sflow_interface) if self.state == "present": xml_str += '<counters><counter operation="merge"><ifName>%s</ifName>' % self.sflow_interface else: xml_str += '<counters><counter operation="delete"><ifName>%s</ifName>' % self.sflow_interface # counter_collector if self.counter_collector: if self.sflow_dict["counter"].get("collectorID") \ and self.sflow_dict["counter"].get("collectorID") != "invalid": existing = self.sflow_dict["counter"].get("collectorID").split(',') else: existing = list() if self.state == "present": diff = list(set(self.counter_collector) - set(existing)) if diff: self.updates_cmd.append("sflow counter collector %s" % ' '.join(diff)) new_set = list(self.counter_collector + existing) xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(new_set))) else: same = list(set(self.counter_collector) & set(existing)) if same: self.updates_cmd.append( "undo sflow counter collector %s" % ' '.join(same)) xml_str += '<collectorID>%s</collectorID>' % ','.join(list(set(same))) # counter_interval if self.counter_interval: exist = bool(self.counter_interval == self.sflow_dict["counter"].get("interval")) if self.state == "present" and not exist: self.updates_cmd.append( "sflow counter interval %s" % self.counter_interval) xml_str += '<interval>%s</interval>' % self.counter_interval elif self.state == "absent" and exist: self.updates_cmd.append( "undo sflow counter interval %s" % self.counter_interval) xml_str += '<interval>%s</interval>' % self.counter_interval if xml_str.endswith("</ifName>"): self.updates_cmd.pop() return "" xml_str += '</counter></counters>' return xml_str def config_export(self): """configure sflow export""" xml_str = '' if not self.export_route: return xml_str if self.export_route == "enable": 
if self.sflow_dict["export"] and self.sflow_dict["export"].get("ExportRoute") == "disable": xml_str = '<exports><export operation="delete"><ExportRoute>disable</ExportRoute></export></exports>' self.updates_cmd.append("undo sflow export extended-route-data disable") else: # disable if not self.sflow_dict["export"] or self.sflow_dict["export"].get("ExportRoute") != "disable": xml_str = '<exports><export operation="create"><ExportRoute>disable</ExportRoute></export></exports>' self.updates_cmd.append("sflow export extended-route-data disable") return xml_str def config_assign(self): """configure assign""" # assign sflow management-plane export rate-limit rate-limit [ slot slot-id ] if self.rate_limit: cmd = "assign sflow management-plane export rate-limit %s" % self.rate_limit if self.rate_limit_slot: cmd += " slot %s" % self.rate_limit_slot exist = is_config_exist(self.config, cmd) if self.state == "present" and not exist: self.cli_add_command(cmd) elif self.state == "absent" and exist: self.cli_add_command(cmd, undo=True) # assign forward enp sflow enable slot { slot-id | all } if self.forward_enp_slot: cmd = "assign forward enp sflow enable slot %s" % self.forward_enp_slot exist = is_config_exist(self.config, cmd) if self.state == "present" and not exist: self.cli_add_command(cmd) elif self.state == "absent" and exist: self.cli_add_command(cmd, undo=True) def netconf_load_config(self, xml_str): """load sflow config by netconf""" if not xml_str: return xml_cfg = """ <config> <sflow xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0"> %s </sflow> </config>""" % xml_str self.netconf_set_config(xml_cfg, "SET_SFLOW") self.changed = True def check_params(self): """Check all input params""" # check agent_ip if self.agent_ip: self.agent_ip = self.agent_ip.upper() if not check_ip_addr(self.agent_ip): self.module.fail_json(msg="Error: agent_ip is invalid.") # check source_ip if self.source_ip: self.source_ip = self.source_ip.upper() if not 
check_ip_addr(self.source_ip): self.module.fail_json(msg="Error: source_ip is invalid.") # check collector if self.collector_id: # check collector_ip and collector_ip_vpn if self.collector_ip: self.collector_ip = self.collector_ip.upper() if not check_ip_addr(self.collector_ip): self.module.fail_json( msg="Error: collector_ip is invalid.") if self.collector_ip_vpn and not is_valid_ip_vpn(self.collector_ip_vpn): self.module.fail_json( msg="Error: collector_ip_vpn is invalid.") # check collector_datagram_size ranges from 1024 to 8100 if self.collector_datagram_size: if not self.collector_datagram_size.isdigit(): self.module.fail_json( msg="Error: collector_datagram_size is not digit.") if int(self.collector_datagram_size) < 1024 or int(self.collector_datagram_size) > 8100: self.module.fail_json( msg="Error: collector_datagram_size is not ranges from 1024 to 8100.") # check collector_udp_port ranges from 1 to 65535 if self.collector_udp_port: if not self.collector_udp_port.isdigit(): self.module.fail_json( msg="Error: collector_udp_port is not digit.") if int(self.collector_udp_port) < 1 or int(self.collector_udp_port) > 65535: self.module.fail_json( msg="Error: collector_udp_port is not ranges from 1 to 65535.") # check collector_description 1 to 255 case-sensitive characters if self.collector_description: if self.collector_description.count(" "): self.module.fail_json( msg="Error: collector_description should without spaces.") if len(self.collector_description) < 1 or len(self.collector_description) > 255: self.module.fail_json( msg="Error: collector_description is not ranges from 1 to 255.") # check sflow_interface if self.sflow_interface: intf_type = get_interface_type(self.sflow_interface) if not intf_type: self.module.fail_json(msg="Error: intf_type is invalid.") if intf_type not in ['ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', 'eth-trunk']: self.module.fail_json( msg="Error: interface %s is not support sFlow." 
% self.sflow_interface) # check sample_collector if self.sample_collector: self.sample_collector.sort() if self.sample_collector not in [["1"], ["2"], ["1", "2"]]: self.module.fail_json( msg="Error: sample_collector is invalid.") # check sample_rate ranges from 1 to 4294967295 if self.sample_rate: if not self.sample_rate.isdigit(): self.module.fail_json( msg="Error: sample_rate is not digit.") if int(self.sample_rate) < 1 or int(self.sample_rate) > 4294967295: self.module.fail_json( msg="Error: sample_rate is not ranges from 1 to 4294967295.") # check sample_length ranges from 18 to 512 if self.sample_length: if not self.sample_length.isdigit(): self.module.fail_json( msg="Error: sample_rate is not digit.") if int(self.sample_length) < 18 or int(self.sample_length) > 512: self.module.fail_json( msg="Error: sample_length is not ranges from 18 to 512.") # check counter_collector if self.counter_collector: self.counter_collector.sort() if self.counter_collector not in [["1"], ["2"], ["1", "2"]]: self.module.fail_json( msg="Error: counter_collector is invalid.") # counter_interval ranges from 10 to 4294967295 if self.counter_interval: if not self.counter_interval.isdigit(): self.module.fail_json( msg="Error: counter_interval is not digit.") if int(self.counter_interval) < 10 or int(self.counter_interval) > 4294967295: self.module.fail_json( msg="Error: sample_length is not ranges from 10 to 4294967295.") # check rate_limit ranges from 100 to 1500 and check rate_limit_slot if self.rate_limit: if not self.rate_limit.isdigit(): self.module.fail_json(msg="Error: rate_limit is not digit.") if int(self.rate_limit) < 100 or int(self.rate_limit) > 1500: self.module.fail_json( msg="Error: rate_limit is not ranges from 100 to 1500.") if self.rate_limit_slot and not self.rate_limit_slot.isdigit(): self.module.fail_json( msg="Error: rate_limit_slot is not digit.") # check forward_enp_slot if self.forward_enp_slot: self.forward_enp_slot.lower() if not 
self.forward_enp_slot.isdigit() and self.forward_enp_slot != "all": self.module.fail_json( msg="Error: forward_enp_slot is invalid.") def get_proposed(self): """get proposed info""" # base config if self.agent_ip: self.proposed["agent_ip"] = self.agent_ip if self.source_ip: self.proposed["source_ip"] = self.source_ip if self.export_route: self.proposed["export_route"] = self.export_route if self.rate_limit: self.proposed["rate_limit"] = self.rate_limit self.proposed["rate_limit_slot"] = self.rate_limit_slot if self.forward_enp_slot: self.proposed["forward_enp_slot"] = self.forward_enp_slot if self.collector_id: self.proposed["collector_id"] = self.collector_id if self.collector_ip: self.proposed["collector_ip"] = self.collector_ip self.proposed["collector_ip_vpn"] = self.collector_ip_vpn if self.collector_datagram_size: self.proposed[ "collector_datagram_size"] = self.collector_datagram_size if self.collector_udp_port: self.proposed["collector_udp_port"] = self.collector_udp_port if self.collector_meth: self.proposed["collector_meth"] = self.collector_meth if self.collector_description: self.proposed[ "collector_description"] = self.collector_description # sample and counter config if self.sflow_interface: self.proposed["sflow_interface"] = self.sflow_interface if self.sample_collector: self.proposed["sample_collector"] = self.sample_collector if self.sample_rate: self.proposed["sample_rate"] = self.sample_rate if self.sample_length: self.proposed["sample_length"] = self.sample_length if self.sample_direction: self.proposed["sample_direction"] = self.sample_direction if self.counter_collector: self.proposed["counter_collector"] = self.counter_collector if self.counter_interval: self.proposed["counter_interval"] = self.counter_interval self.proposed["state"] = self.state def get_existing(self): """get existing info""" if self.config: if self.rate_limit: self.existing["rate_limit"] = get_rate_limit(self.config) if self.forward_enp_slot: 
self.existing["forward_enp_slot"] = get_forward_enp( self.config) if not self.sflow_dict: return if self.agent_ip: self.existing["agent"] = self.sflow_dict["agent"] if self.source_ip: self.existing["source"] = self.sflow_dict["source"] if self.collector_id: self.existing["collector"] = self.sflow_dict["collector"] if self.export_route: self.existing["export"] = self.sflow_dict["export"] if self.sflow_interface: self.existing["sampling"] = self.sflow_dict["sampling"] self.existing["counter"] = self.sflow_dict["counter"] def get_end_state(self): """get end state info""" config = self.get_current_config() if config: if self.rate_limit: self.end_state["rate_limit"] = get_rate_limit(config) if self.forward_enp_slot: self.end_state["forward_enp_slot"] = get_forward_enp(config) sflow_dict = self.get_sflow_dict() if not sflow_dict: return if self.agent_ip: self.end_state["agent"] = sflow_dict["agent"] if self.source_ip: self.end_state["source"] = sflow_dict["source"] if self.collector_id: self.end_state["collector"] = sflow_dict["collector"] if self.export_route: self.end_state["export"] = sflow_dict["export"] if self.sflow_interface: self.end_state["sampling"] = sflow_dict["sampling"] self.end_state["counter"] = sflow_dict["counter"] def work(self): """worker""" self.check_params() self.sflow_dict = self.get_sflow_dict() self.config = self.get_current_config() self.get_existing() self.get_proposed() # deal present or absent xml_str = '' if self.export_route: xml_str += self.config_export() if self.agent_ip: xml_str += self.config_agent() if self.source_ip: xml_str += self.config_source() if self.state == "present": if self.collector_id and self.collector_ip: xml_str += self.config_collector() if self.sflow_interface: xml_str += self.config_sampling() xml_str += self.config_counter() else: if self.sflow_interface: xml_str += self.config_sampling() xml_str += self.config_counter() if self.collector_id: xml_str += self.config_collector() if self.rate_limit or 
self.forward_enp_slot: self.config_assign() if self.commands: self.cli_load_config(self.commands) self.changed = True if xml_str: self.netconf_load_config(xml_str) self.changed = True self.get_end_state() self.results['changed'] = self.changed self.results['proposed'] = self.proposed self.results['existing'] = self.existing self.results['end_state'] = self.end_state if self.changed: self.results['updates'] = self.updates_cmd else: self.results['updates'] = list() self.module.exit_json(**self.results) def main(): """Module main""" argument_spec = dict( agent_ip=dict(required=False, type='str'), source_ip=dict(required=False, type='str'), export_route=dict(required=False, type='str', choices=['enable', 'disable']), rate_limit=dict(required=False, type='str'), rate_limit_slot=dict(required=False, type='str'), forward_enp_slot=dict(required=False, type='str'), collector_id=dict(required=False, type='str', choices=['1', '2']), collector_ip=dict(required=False, type='str'), collector_ip_vpn=dict(required=False, type='str'), collector_datagram_size=dict(required=False, type='str'), collector_udp_port=dict(required=False, type='str'), collector_meth=dict(required=False, type='str', choices=['meth', 'enhanced']), collector_description=dict(required=False, type='str'), sflow_interface=dict(required=False, type='str'), sample_collector=dict(required=False, type='list'), sample_rate=dict(required=False, type='str'), sample_length=dict(required=False, type='str'), sample_direction=dict(required=False, type='str', choices=['inbound', 'outbound', 'both']), counter_collector=dict(required=False, type='list'), counter_interval=dict(required=False, type='str'), state=dict(required=False, default='present', choices=['present', 'absent']) ) argument_spec.update(ce_argument_spec) module = Sflow(argument_spec) module.work() if __name__ == '__main__': main()
gpl-3.0
raviperi/storm
dev-tools/report/formatter.py
28
3248
#!/usr/bin/python # -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def encode(obj, encoding='UTF-8'): """ Check if the object supports encode() method, and if so, encodes it. Encoding defaults to UTF-8. For example objects of type 'int' do not support encode """ return obj.encode(encoding) if 'encode' in dir(obj) else obj class Formatter: def __init__(self, fields_tuple=(), row_tuple=(), min_width_tuple=None): # Format to pass as first argument to the print function, e.g. '%s%s%s' self.format = "" # data_format will be of the form ['{!s:43}'],'{!s:39}','{!s:11}','{!s:25}'] # the widths are determined from the data in order to print output with nice format # Each entry of the data_format list will be used by the advanced string formatter: # "{!s:43}".format("Text") # Advanced string formatter as detailed in here: https://www.python.org/dev/peps/pep-3101/ self.data_format = [] Formatter._assert(fields_tuple, row_tuple, min_width_tuple) self._build_format_tuples(fields_tuple, row_tuple, min_width_tuple) @staticmethod def _assert(o1, o2, o3): if len(o1) != len(o2) and (o3 is not None and len(o2) != len(o3)): raise RuntimeError("Object collections must have the same length. 
" "len(o1)={0}, len(o2)={1}, len(o3)={2}" .format(len(o1), len(o2), -1 if o3 is None else len(o3))) # determines the widths from the data in order to print output with nice format @staticmethod def _find_sizes(fields_tuple, row_tuple, min_width_tuple): sizes = [] padding = 3 for i in range(0, len(row_tuple)): max_len = max(len(encode(fields_tuple[i])), len(str(encode(row_tuple[i])))) if min_width_tuple is not None: max_len = max(max_len, min_width_tuple[i]) sizes += [max_len + padding] return sizes def _build_format_tuples(self, fields_tuple, row_tuple, min_width_tuple): sizes = Formatter._find_sizes(fields_tuple, row_tuple, min_width_tuple) for i in range(0, len(row_tuple)): self.format += "%s" self.data_format += ["{!s:" + str(sizes[i]) + "}"] # Returns a tuple where each entry has a string that is the result of # statements with the pattern "{!s:43}".format("Text") def row_str_format(self, row_tuple): format_with_values = [str(self.data_format[0].format(encode(row_tuple[0])))] for i in range(1, len(row_tuple)): format_with_values += [str(self.data_format[i].format(encode(row_tuple[i])))] return tuple(format_with_values)
apache-2.0
telefar/stockEye
coursera-compinvest1-master/coursera-compinvest1-master/homework/Homework4.py
1
3966
''' (c) 2011, 2012 Georgia Tech Research Corporation This source code is released under the New BSD license. Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License for license details. ''' import pandas as pd import numpy as np import math import copy import QSTK.qstkutil.qsdateutil as du import datetime as dt import QSTK.qstkutil.DataAccess as da import QSTK.qstkutil.tsutil as tsu import csv import QSTK.qstkstudy.EventProfiler as ep """ Accepts a list of symbols along with start and end date Returns the Event Matrix which is a pandas Datamatrix Event matrix has the following structure : |IBM |GOOG|XOM |MSFT| GS | JP | (d1)|nan |nan | 1 |nan |nan | 1 | (d2)|nan | 1 |nan |nan |nan |nan | (d3)| 1 |nan | 1 |nan | 1 |nan | (d4)|nan | 1 |nan | 1 |nan |nan | ................................... ................................... Also, d1 = start date nan = no information about any event. 1 = status bit(positively confirms the event occurence) """ def find_events(ls_symbols, d_data): ''' Finding the event dataframe ''' df_close = d_data['actual_close'] ts_market = df_close['SPY'] print "Finding Events" # Creating an empty dataframe df_events = copy.deepcopy(df_close) df_events = df_events * np.NAN # Time stamps for the event range ldt_timestamps = df_close.index writer = csv.writer(open('orders.csv', 'wb'), delimiter=',') for s_sym in ls_symbols: for i in range(1, len(ldt_timestamps)): # Calculating the returns for this timestamp f_symprice_today = df_close[s_sym].ix[ldt_timestamps[i]] f_symprice_yest = df_close[s_sym].ix[ldt_timestamps[i - 1]] f_marketprice_today = ts_market.ix[ldt_timestamps[i]] f_marketprice_yest = ts_market.ix[ldt_timestamps[i - 1]] f_symreturn_today = (f_symprice_today / f_symprice_yest) - 1 f_marketreturn_today = (f_marketprice_today / f_marketprice_yest) - 1 i_shares = 100 # Event is found if the symbol is down more then 3% while the # market is up more then 2% # if f_symreturn_today <= -0.03 and f_marketreturn_today >= 0.02: # 
df_events[s_sym].ix[ldt_timestamps[i]] = 1 f_cutoff = 10.0 if f_symprice_today < f_cutoff and f_symprice_yest >= f_cutoff: df_events[s_sym].ix[ldt_timestamps[i]] = 1 row_to_enter = [str(ldt_timestamps[i].year), str(ldt_timestamps[i].month), \ str(ldt_timestamps[i].day), s_sym, 'Buy', i_shares] writer.writerow(row_to_enter) try: time_n = ldt_timestamps[i + 5] except: time_n = ldt_timestamps[-1] row_to_enter = [str(time_n.year), str(time_n.month), \ str(time_n.day), s_sym, 'Sell', i_shares] writer.writerow(row_to_enter) return df_events if __name__ == '__main__': dt_start = dt.datetime(2008, 1, 1) dt_end = dt.datetime(2009, 12, 31) ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16)) dataobj = da.DataAccess('Yahoo') ls_symbols = dataobj.get_symbols_from_list('sp5002012') ls_symbols.append('SPY') ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close'] ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys) d_data = dict(zip(ls_keys, ldf_data)) for s_key in ls_keys: d_data[s_key] = d_data[s_key].fillna(method = 'ffill') d_data[s_key] = d_data[s_key].fillna(method = 'bfill') d_data[s_key] = d_data[s_key].fillna(1.0) df_events = find_events(ls_symbols, d_data) # print "Creating Study" # ep.eventprofiler(df_events, d_data, i_lookback=20, i_lookforward=20, # s_filename='MyEventStudy.pdf', b_market_neutral=True, b_errorbars=True, # s_market_sym='SPY')
bsd-3-clause
gpfreitas/bokeh
bokeh/_legacy_charts/builder/scatter_builder.py
43
7792
"""This is the Bokeh charts interface. It gives you a high level API to build complex plot is a simple way. This is the Scatter class which lets you build your Scatter charts just passing the arguments to the Chart class and calling the proper functions. """ #----------------------------------------------------------------------------- # Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import absolute_import import numpy as np try: import pandas as pd except: pd = None from collections import OrderedDict from ..utils import chunk, cycle_colors, make_scatter from .._builder import create_and_build, Builder from .._data_adapter import DataAdapter from ...models import ColumnDataSource, Range1d from ...properties import String #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- def Scatter(values, **kws): """ Create a scatter chart using :class:`ScatterBuilder <bokeh.charts.builder.scatter_builder.ScatterBuilder>` to render the geometry from values. Args: values (iterable): iterable 2d representing the data series values matrix. In addition the the parameters specific to this chart, :ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters. Returns: a new :class:`Chart <bokeh.charts.Chart>` Examples: .. 
bokeh-plot:: :source-position: above from collections import OrderedDict from bokeh.charts import Scatter, output_file, show # (dict, OrderedDict, lists, arrays and DataFrames of (x, y) tuples are valid inputs) xyvalues = OrderedDict() xyvalues['python'] = [(1, 2), (3, 3), (4, 7), (5, 5), (8, 26)] xyvalues['pypy'] = [(1, 12), (2, 23), (4, 47), (5, 15), (8, 46)] xyvalues['jython'] = [(1, 22), (2, 43), (4, 10), (6, 25), (8, 26)] scatter = Scatter(xyvalues, title="Scatter", legend="top_left", ylabel='Languages') output_file('scatter.html') show(scatter) """ return create_and_build(ScatterBuilder, values, **kws) class ScatterBuilder(Builder): """This is the Scatter class and it is in charge of plotting Scatter charts in an easy and intuitive way. Essentially, we provide a way to ingest the data, make the proper calculations and push the references into a source object. We additionally make calculations for the ranges. And finally add the needed glyphs (markers) taking the references from the source. """ # TODO: (bev) should be an enumeration marker = String("circle", help=""" The marker type to use (default: ``circle``). """) def _process_data(self): """Take the scatter.values data to calculate the chart properties accordingly. Then build a dict containing references to all the calculated points to be used by the marker glyph inside the ``_yield_renderers`` method. 
""" self._data = dict() # list to save all the attributes we are going to create self._attr = [] # list to save all the groups available in the incoming input self._groups.extend(self._values.keys()) # Grouping self.parse_data() @property def parse_data(self): """Parse data received from self._values and create correct x, y series values checking if input is a pandas DataFrameGroupBy object or one of the stardard supported types (that can be converted to a DataAdapter) """ if pd is not None and \ isinstance(self._values, pd.core.groupby.DataFrameGroupBy): return self._parse_groupped_data else: return self._parse_data def _parse_groupped_data(self): """Parse data in self._values in case it's a pandas DataFrameGroupBy and create the data 'x_...' and 'y_...' values for all data series """ for i, val in enumerate(self._values.keys()): xy = self._values[val] self._set_and_get("x_", val, xy[:, 0]) self._set_and_get("y_", val, xy[:, 1]) def _parse_data(self): """Parse data in self._values in case it's an iterable (not a pandas DataFrameGroupBy) and create the data 'x_...' and 'y_...' 
values for all data series """ for i, val in enumerate(self._values.keys()): x_, y_ = [], [] xy = self._values[val] for value in self._values.index: x_.append(xy[value][0]) y_.append(xy[value][1]) self.set_and_get("x_", val, x_) self.set_and_get("y_", val, y_) def _set_sources(self): """Push the Scatter data into the ColumnDataSource and calculate the proper ranges.""" self._source = ColumnDataSource(self._data) x_names, y_names = self._attr[::2], self._attr[1::2] endx = max(max(self._data[i]) for i in x_names) startx = min(min(self._data[i]) for i in x_names) self.x_range = Range1d( start=startx - 0.1 * (endx - startx), end=endx + 0.1 * (endx - startx) ) endy = max(max(self._data[i]) for i in y_names) starty = min(min(self._data[i]) for i in y_names) self.y_range = Range1d( start=starty - 0.1 * (endy - starty), end=endy + 0.1 * (endy - starty) ) def _yield_renderers(self): """Use the marker glyphs to display the points. Takes reference points from data loaded at the ColumnDataSource. """ duplets = list(chunk(self._attr, 2)) colors = cycle_colors(duplets, self.palette) for i, duplet in enumerate(duplets, start=1): renderer = make_scatter( self._source, duplet[0], duplet[1], self.marker, colors[i - 1] ) self._legends.append((self._groups[i-1], [renderer])) yield renderer def _adapt_values(self): """Prepare context before main show method is invoked. 
Customize show preliminary actions by handling DataFrameGroupBy values in order to create the series values and labels.""" # check if pandas is installed if pd: # if it is we try to take advantage of it's data structures # asumming we get an groupby object if isinstance(self._values, pd.core.groupby.DataFrameGroupBy): pdict = OrderedDict() for i in self._values.groups.keys(): self._labels = self._values.get_group(i).columns xname = self._values.get_group(i).columns[0] yname = self._values.get_group(i).columns[1] x = getattr(self._values.get_group(i), xname) y = getattr(self._values.get_group(i), yname) pdict[i] = np.array([x.values, y.values]).T self._values = DataAdapter(pdict) self._labels = self._values.keys() else: self._values = DataAdapter(self._values) self._labels = self._values.keys() else: self._values = DataAdapter(self._values) self._labels = self._values.keys()
bsd-3-clause
dafei2015/hugular_cstolua
Client/tools/site-packages/stringconvServer.py
8
17336
# # Utility to convert strings from game2D's XML format to Excel and back. # # C.D. July 2006 # import xml.dom.minidom from pyExcelerator import * # stuff to handle text encodings import codecs enc, dec, read_wrap, write_wrap = codecs.lookup('utf-8') import struct import os RUSSIAN_MAPPING_UTF8_TO_ASCII = [ (1040,'A'), (1042,'B'), (1045,'E'), (1047,'3'), (1050,'K'), (1052,'M'), (1053,'H'), (1054,'O'), (1056,'P'), (1057,'C'), (1058,'T'), (1059,'Y'), (1061,'X') ] EXPORTED_SHEET_NAMES = [] def parseXMLStrings( inputfile, group_prefix): strings = [] langs = set() doc = xml.dom.minidom.parse( inputfile ).documentElement doc.normalize() for node in doc.getElementsByTagName('table') : section = {} sectionName = node.getAttribute('name') sectionName = sectionName.lstrip(group_prefix) section['name'] = sectionName for child in node.childNodes : if child.nodeType == child.ELEMENT_NODE : name = child.tagName langs.add(name) data = [] for item in child.childNodes : if item.nodeType == child.ELEMENT_NODE : data.append( item.getAttribute('value') ) section[name] = data strings.append( section ) langs.remove('ids') #put english first... 
langs.remove('english') langlist = ['english'] + list(langs) return (langlist,strings) # # # def parseXLSStrings( inputfile, group_prefix, sheet): # try: langs = [] sections = {} name,vals = parse_xls(inputfile, group_prefix)[sheet] #count languages col = 1 while vals.has_key( (0,col) ) : langs.append( vals[(0, col)] ) col += 1 numLangs = col - 1 #count strings row = 1 while vals.has_key( (row,0) ) : row += 1 numStrings = row for line in range(1,numStrings) : sectionName = vals[(line, 0)] sectionName = group_prefix + sectionName if not sections.has_key( sectionName ) : sections[sectionName] = { 'name' : sectionName, 'ids' : [] } for lang in langs : sections[sectionName][lang] = [] section = sections[sectionName] strId = vals[(line, 0)] section['ids'].append(strId) for i in range(numLangs) : strn = ' ' if vals.has_key( (line, i + 1) ) : strn = vals[(line, i + 1)] if isinstance(strn, float):##HACK: For value "5" excel return float.. so we don't want "5.0", is why we convert it in INT then STR strn = int(strn) strn = unicode(strn) # replace non-ascii characters strn = strn.replace( u'\u2026', u'...') section[langs[i]].append( strn ) # except Exception, exc: # raise Exception('Error Converting Strings: [' + name + '][' + sectionName + '][' + strId + '][' + langs[i] + ']') return ( list(langs), sections.values() ) # # # def stringsXML2XLS( inputfile, outputfile, group_prefix) : (langs,strings) = parseXMLStrings(inputfile, group_prefix) wb = Workbook() ws = wb.add_sheet('loc-base') columns = ['section','id'] + langs for i in range(len(columns)) : ws.col(i).width = 200 * 32 #ws.col(i).name = columns[i] # TODO: figure out how to write the column titles ws.write(0, i, columns[i]) line = 1 for section in strings : sectionName = section['name'] ids = section['ids'] for i in range(len(ids)) : idname = ids[i] ws.write(line, 0, sectionName) ws.write(line, 1, idname) for j in range(len(langs)) : transl = section[langs[j]] if i < len(transl) : ws.write(line, 2 + j, transl[i] ) 
line += 1 wb.save(outputfile) # # # def stringsXLS2XML(inputfile, outputfile, group_prefix, caseoption) : (langs,strings) = parseXLSStrings(inputfile, group_prefix, 0) dom = xml.dom.minidom.parseString( "<strings/>" ) doc = dom.documentElement groups = ['ids'] + langs for section in strings : sectionName = section['name'] sectionNode = dom.createElement('table') sectionNode.setAttribute('name',sectionName) doc.appendChild(sectionNode) # dump groups for group in groups : groupSection = dom.createElement(group) sectionNode.appendChild(groupSection) for item in section[group] : if group == 'ids' : elemName = 'id' else: elemName = 'string' if caseoption == 'up': item = item.upper() if caseoption == 'down': item = item.lower() itemnode = dom.createElement( elemName ) itemnode.setAttribute('value',item) groupSection.appendChild(itemnode) f = open(outputfile,'wb') f = write_wrap(f) f.write( dom.toprettyxml() ) def stringsXLSSheet2XML(inputfile, group_prefix, spec_name, ignoreList) : sheetcounter = 0 xlWorkBook = parse_xls(inputfile, group_prefix) if spec_name != "": #find the specified spec sheet specSheetIndex = 0 index = 0 for sheet_name, values in xlWorkBook: if sheet_name == spec_name: specSheetIndex = index index += 1 #organize redefined string in spec sheet (langs,strings) = parseXLSStrings(inputfile, group_prefix, specSheetIndex) redefinedString = {} for section in strings: redefinedString[section['name']] = section for sheet_name, values in xlWorkBook: (langs,strings) = parseXLSStrings(inputfile, group_prefix, sheetcounter) sheetcounter += 1 dom = xml.dom.minidom.parseString( "<strings/>" ) doc = dom.documentElement groups = ['ids'] + langs for section in strings : sectionName = section['name'] if spec_name != "": #overwrite texts redefined in spec sheet if redefinedString.has_key(sectionName): textToRemove = [] for textId in section['ids']: if textId in redefinedString[sectionName]['ids']: stringIndex = section['ids'].index(textId) newTextIndex = 
redefinedString[sectionName]['ids'].index(textId) isEmptyLine = True #update text for lang in langs: newText = redefinedString[sectionName][lang][newTextIndex] if newText.strip() != "": section[lang][stringIndex] = newText isEmptyLine = False if isEmptyLine: textToRemove.append(stringIndex) #remove text textToRemove.sort() textToRemove.reverse() for stringIndex in textToRemove: for group in groups: section[group].pop(stringIndex) sectionNode = dom.createElement('table') sectionNode.setAttribute('name',sectionName) doc.appendChild(sectionNode) # dump groups for group in groups : groupSection = dom.createElement(group) sectionNode.appendChild(groupSection) for item in section[group] : elemName = 'string' if group == 'ids' : elemName = 'id' itemnode = dom.createElement( elemName ) item = removeIgnored(item, ignoreList) itemnode.setAttribute('value',item) groupSection.appendChild(itemnode) f = open(outfile + sheet_name + '.xml','wb') f = write_wrap(f) f.write( dom.toprettyxml() ) def stringsXLSSheet2BIN(inputfile, group_prefix, spec_name, ignoreList): sheetcounter = 0 xlWorkBook = parse_xls(inputfile, group_prefix) stringsData = {} for sheet_name, values in xlWorkBook: (langs,strings) = parseXLSStrings(inputfile, group_prefix, sheetcounter) sheetcounter += 1 stringsData[sheet_name] = (langs, strings) ExportBinary(stringsData) ExportJavaConstants( "test.h", stringsData, "StrID") def ExportBinary( stringsData ): global EXPORTED_SHEET_NAMES for sheetname, data in stringsData.iteritems(): (langs, strings) = data filename = sheetname.lower() + "." 
EXPORTED_SHEET_NAMES.append( sheetname.lower() ) for section in strings: for lang in langs: completeFilename = filename + lang.lower() f = open(completeFilename,'wb') numString = len( section[lang] ) #write total number of strings f.write( struct.pack('I', numString) ) numString = len(section[lang]) for i in range(0,numString): item = section[lang][i] item = item.upper() item = item.replace("\\N", "\n") item = item.replace("\\N", '\n') item = item.replace( u'\u2026' ,'...') item = item.encode('ISO-8859-1') section[lang][i] = item #write offset table (stringlen) #for item in section[lang]: # if len(item) > 127: # real_l = (0x8000 | (len(item)& 0xFFFF)) # first_byte = (real_l>>8) & 0xFF # sec_byte = (real_l) & 0xFF # f.write( struct.pack('B', first_byte)) # f.write( struct.pack('B', sec_byte)) # else: # f.write( struct.pack('B', (len(item)&0xFF))) #write string data. for item in section[lang]: numByte = len(item) f.write( struct.pack('H', numByte) ) for i in range(0, numByte): f.write( item[i] ) f.close() def ExportStringPackage( languageToUse, outPackageFile, stringsData): global EXPORTED_SHEET_NAMES for sheetname, data in stringsData.iteritems(): EXPORTED_SHEET_NAMES.append( sheetname.lower() ) ExportLanguageArray( languageToUse, outPackageFile) def stringsLoadData( inputfile, group_prefix ): sheetcounter = 0 xlWorkBook = parse_xls(inputfile, group_prefix) stringsData = {} for sheet_name, values in xlWorkBook: (langs,strings) = parseXLSStrings(inputfile, group_prefix, sheetcounter) sheetcounter += 1 stringsData[sheet_name] = (langs, strings) return stringsData def stringsXLSSheet2BIN_UTF8(inputfile, group_prefix, spec_name, ignoreList): sheetcounter = 0 xlWorkBook = parse_xls(inputfile, group_prefix) stringsData = {} for sheet_name, values in xlWorkBook: (langs,strings) = parseXLSStrings(inputfile, group_prefix, sheetcounter) sheetcounter += 1 stringsData[sheet_name] = (langs, strings) ExportUTF8(stringsData) ExportJavaConstants( "test.h", stringsData, "StrID") def 
getBinaryFilename(outFolder,sheet,lang): #TODO replace 'bad' filecharacters filename = sheet.lower() + "." + lang.lower() return os.path.join( outFolder, filename) def getIdxMapFilename(outFolder, sheet): filename = sheet.lower() + ".idx" return os.path.join( outFolder, filename) def getAllIdxFilenames( stringsData, outFolder=""): targets = [] for sheetname, data in stringsData.iteritems(): targets.append(getIdxMapFilename(outFolder, sheetname)) return targets def getAllBinaryFilenames( stringsData, outFolder=""): targets = [] for sheetname, data in stringsData.iteritems(): (langs, strings) = data for section in strings: for lang in langs: targets.append(getBinaryFilename(outFolder,sheetname,lang)) return targets def OpenFileAsciiOrUnicode( filename, openMode, openType ): if openType == 'UTF-8': s = codecs.open(filename, openMode,'UTF-8') elif openType == 'UTF-16' : s = codecs.open(filename, openMode,'UTF-16') else: s = codecs.open(filename, openMode ) return s def ExportCharactersMap( outfile, stringsData ): f = OpenFileAsciiOrUnicode( outfile, 'w', 'UTF-16') charMap = {} outMap = [] for sheetname, data in stringsData.iteritems(): (langs, strings) = data for section in strings: for lang in langs: numStrings = len(section[lang]) for i in range(0, numStrings): item = section[lang][i] item = item.replace(u'\u2026', u'...') for c in item: charMap[c] = 1 for (k, _) in charMap.iteritems(): outMap.append(k) outMap.sort() #print("----------------------- Char Map") #print(outMap) for c in outMap: f.write(c) f.close() def ExportUTF8( stringsData, outFolder=""): global EXPORTED_SHEET_NAMES for sheetname, data in stringsData.iteritems(): (langs, strings) = data EXPORTED_SHEET_NAMES.append( sheetname.lower() ) numString = 0 for lang in langs: completeFilename = lang+'.yml'#getBinaryFilename(outFolder,sheetname,lang) f = open(completeFilename,'w') f.write( lang + ':\n') for section in strings: f.write(' ') for item in section['ids']: f.write( item ) f.write(': ') numString = 
len( section[lang] ) for i in range(0,numString): item = section[lang][i] item = item.replace(u'\u2026', u'...') item = item.encode('utf-8') section[lang][i] = item for item in section[lang]: numByte = len(item) f.write('"') for i in range(0, numByte): f.write( item[i] ) f.write('"\n') f.close() def ExportLuaConstants( outfile, stringsData, classname ): f = open(outfile,'w') f.write("-- File generated by " + sys.argv[0] + "\n") f.write("\n") f.write(classname + " = {\n") f.write("\n") sheetID = 0; for sheetname, data in stringsData.iteritems(): f.write("\n") f.write("\t--------------------------------------------------\n") f.write("\t-- sheet " + sheetname + "\n") f.write("\t--------------------------------------------------\n") f.write("\t" + sheetname.upper() + " = {\n") (langs, strings) = data for section in strings: for item in section['ids']: f.write("\t\t" + item.upper() + " = \"" + item.upper() + "\",\n") f.write("\t},\n") f.write("}\n"); f.close() ########################################################## def ExportLanguageArray( languageData, outputPack ): f = open("LanguagesArrays.h",'w') f.write("// File generated by " + sys.argv[0] + "\n") f.write("// File generated by " + sys.argv[0] + "\n") f.write("// File generated by " + sys.argv[0] + "\n") f.write("\n") f.write("final static short[] Languages = "+ "\n") f.write("{\n") i = 0 numLanguage = len(languageData) while i < numLanguage: (langID, name, strID ) = languageData[i] i = i + 1 f.write("\t" + strID) if i < numLanguage: f.write(",") f.write("\n") f.write("};\n") f.write("final static short[] s_LanguagePackNames = "+ "\n") f.write("{\n") i = 0 while i < numLanguage: (langID, name, strID ) = languageData[i] i = i + 1 f.write("\t" + "DATA.PACK_TEXTS_"+langID+"_ID") if i < numLanguage: f.write(",") f.write("\n") f.write("};\n") f.close() f = open(outputPack,'w') i = 0 print "hi " + str(numLanguage) while i < numLanguage: (langID, name, strID ) = languageData[i] i = i + 1 
f.write("-----------------------------------------------"+"\n") f.write("package:TEXTS_"+langID+"\n") for sheetname in EXPORTED_SHEET_NAMES: f.write("id:" + sheetname.upper() + "\t\t\t\t\t"+"file:" + sheetname.lower() + "."+ name + "\n") f.write("\n") f.close() def GetWantedLanguage( languageDataFile, outputPack ): global EXPORTED_SHEET_NAMES languageToAdd = [] fileToRead = file(languageDataFile) languageToAddLines = fileToRead.readlines() for line in languageToAddLines: line=line.replace('\n','') if line.find("//") == -1 : tmp = line.split(",") if len(tmp) != 3: print "Wrong number of value :"+line elif os.environ.get('USE_LANGUAGE_'+tmp[0]) == '1': print tmp[0] +"("+tmp[1]+") will be include in the build" languageToAdd.append( (tmp[0],tmp[1],tmp[2]) ) ExportLanguageArray( languageToAdd, outputPack ) def removeIgnored(item, ignoreList): if item == None: return for ignoreString in ignoreList: item = item.replace(ignoreString, '') return item # tests #stringsXML2XLS('strings.xml','strings.xls') #stringsXLS2XML('strings.xls','strings_out.xml') #wen xiao temp modify for this project if __name__ == '__main__': if len(sys.argv) < 2: print "Usage: stringconv.py (-xml2xls|-xls2xml|-xlssheets2xml|-xlssheets2bin|-xlssheets2binutf8) infile outfile configfile outputPack [group_prefix] [--spec xls_sheet_name] [--upcase|--downcase] [--ignore string]" else: infile = sys.argv[1] outfile = sys.argv[2] ExportUTF8(stringsLoadData(infile, ""), outfile) #if __name__ == '__main__': #if len(sys.argv) < 6: # print "Usage: stringconv.py (-xml2xls|-xls2xml|-xlssheets2xml|-xlssheets2bin|-xlssheets2binutf8) infile outfile configfile outputPack [group_prefix] [--spec xls_sheet_name] [--upcase|--downcase] [--ignore string]" #else: # knownoptions = [ '--spec', '--upcase', '--downcase', '--ignore' ] # infile = sys.argv[2] # outfile = sys.argv[3] # configfile = sys.argv[4] # outputpack = sys.argv[5] # # group_prefix = "" # ignoreList = [] # if len(sys.argv) > 6 and not (sys.argv[6] in 
knownoptions): # group_prefix = sys.argv[6] # # spec_name = "" # if '--spec' in sys.argv: # spec_name = sys.argv[sys.argv.index('--spec') + 1] # # caseoption = None # if '--upcase' in sys.argv: # caseoption = 'up' # if '--downcase' in sys.argv: # caseoption = 'down' # # try: # ignoreIndex = sys.argv.index('--ignore') # while ignoreIndex > -1: # ignoreList.append(sys.argv[ignoreIndex + 1]) # ignoreIndex = sys.argv.index('--ignore', ignoreIndex+2) # except: # pass # # if sys.argv[1] == '-xml2xls' : # stringsXML2XLS(infile, outfile, group_prefix) # elif sys.argv[1] == '-xls2xml' : # stringsXLS2XML(infile, outfile, group_prefix, caseoption) # elif sys.argv[1] == '-xlssheets2xml' : # stringsXLSSheet2XML(infile, group_prefix, spec_name, ignoreList) # elif sys.argv[1] == '-xlssheets2bin': # stringsXLSSheet2BIN(infile, group_prefix, spec_name, ignoreList) # elif sys.argv[1] == '-xlssheets2binutf8': # stringsXLSSheet2BIN_UTF8(infile, group_prefix, spec_name, ignoreList) # else : # print 'Invalid conversion requested :' + sys.argv[1] # # GetWantedLanguage(configfile, outputpack); # # print 'Done'
mit
cloudify-cosmo/cloudify-agent
cloudify_agent/installer/runners/fabric_runner.py
1
13089
######### # Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. import os import sys import logging from fabric import Connection from paramiko import RSAKey, ECDSAKey, Ed25519Key, SSHException from cloudify._compat import reraise, StringIO from cloudify.utils import CommandExecutionResponse from cloudify.utils import setup_logger from cloudify.exceptions import CommandExecutionException from cloudify.exceptions import CommandExecutionError from cloudify_agent.installer import exceptions from cloudify_agent.api import utils as api_utils from cloudify_rest_client.utils import is_kerberos_env DEFAULT_REMOTE_EXECUTION_PORT = 22 PRIVATE_KEY_PREFIX = '-----BEGIN' COMMON_ENV = { 'forward_agent': True, } class FabricRunner(object): def __init__(self, logger=None, host=None, user=None, key=None, port=None, password=None, validate_connection=True, fabric_env=None, tmpdir=None): # logger self.logger = logger or setup_logger('fabric_runner') # silence paramiko logging.getLogger('paramiko.transport').setLevel(logging.WARNING) # connection details self.port = port or DEFAULT_REMOTE_EXECUTION_PORT self.password = password self.user = user self.host = host self.key = key self.tmpdir = tmpdir # fabric environment self.env = self._set_env() self.env.update(fabric_env or {}) self._connection = None self._validate_ssh_config() if validate_connection: self.validate_connection() def _validate_ssh_config(self): if not self.host: 
raise exceptions.AgentInstallerConfigurationError('Missing host') if not self.user: raise exceptions.AgentInstallerConfigurationError('Missing user') if not is_kerberos_env() and not self.password and not self.key: raise exceptions.AgentInstallerConfigurationError( 'Must specify either key or password') def _load_private_key(self, key_contents): """Load the private key and return a paramiko PKey subclass. :param key_contents: the contents of a keyfile, as a string starting with "---BEGIN" :return: A paramiko PKey subclass - RSA, ECDSA or Ed25519 """ for cls in (RSAKey, ECDSAKey, Ed25519Key): try: return cls.from_private_key(StringIO(key_contents)) except SSHException: continue raise exceptions.AgentInstallerConfigurationError( 'Could not load the private key as an ' 'RSA, ECDSA, or Ed25519 key' ) def _set_env(self): env = { 'host': self.host, 'port': self.port, 'user': self.user, 'connect_kwargs': {} } if self.key: if self.key.startswith(PRIVATE_KEY_PREFIX): env['connect_kwargs']['pkey'] = \ self._load_private_key(self.key) else: env['connect_kwargs']['key_filename'] = self.key if self.password: env['connect_kwargs']['password'] = self.password if is_kerberos_env(): # For GSSAPI, the fabric env just needs to have # gss_auth and gss_kex set to True env['gss_auth'] = True env['gss_kex'] = True env.update(COMMON_ENV) return env def validate_connection(self): self.logger.debug('Validating SSH connection') self.ping() self.logger.debug('SSH connection is ready') def _ensure_connection(self): if self._connection is None: self._connection = Connection(**self.env) try: self._connection.open() except Exception as e: _, _, tb = sys.exc_info() reraise( FabricCommandExecutionError, FabricCommandExecutionError(str(e)), tb ) def run(self, command, execution_env=None, **attributes): """ Execute a command. :param command: The command to execute. 
:param execution_env: environment variables to be applied before running the command :param quiet: run the command silently :param attributes: custom attributes passed directly to fabric's run command :return: a response object containing information about the execution :rtype: FabricCommandExecutionResponse """ if execution_env is None: execution_env = {} self._ensure_connection() attributes.setdefault('hide', self.logger.isEnabledFor(logging.DEBUG)) attributes.setdefault('warn', True) r = self._connection.run(command, **attributes) if r.return_code != 0: raise FabricCommandExecutionException( command=command, error=r.stderr, output=r.stdout, code=r.return_code ) return FabricCommandExecutionResponse( command=command, std_out=r.stdout, std_err=None, return_code=r.return_code ) def sudo(self, command, **attributes): """ Execute a command under sudo. :param command: The command to execute. :param attributes: custom attributes passed directly to fabric's run command :return: a response object containing information about the execution :rtype: FabricCommandExecutionResponse """ return self.run('sudo {0}'.format(command), **attributes) def run_script(self, script): """ Execute a script. :param script: The path to the script to execute. :return: a response object containing information about the execution :rtype: FabricCommandExecutionResponse :raise: FabricCommandExecutionException """ remote_path = self.put_file(script) try: self.sudo('chmod +x {0}'.format(remote_path)) result = self.sudo(remote_path) finally: # The script is pushed to a remote directory created with mkdtemp. # Hence, to cleanup the whole directory has to be removed. self.delete(os.path.dirname(remote_path)) return result def put_file(self, src, dst=None, sudo=False, **attributes): """ Copies a file from the src path to the dst path. :param src: Path to a local file. :param dst: The remote path the file will copied to. 
:param sudo: indicates that this operation will require sudo permissions :param attributes: custom attributes passed directly to fabric's run command :return: the destination path """ if dst: self.verify_dir_exists(os.path.dirname(dst)) else: basename = os.path.basename(src) tempdir = self.mkdtemp() dst = os.path.join(tempdir, basename) self._ensure_connection() if dst is None: dst = os.path.basename(src) target_path = dst if sudo: dst = os.path.basename(dst) self._connection.put(src, dst) if sudo: self.sudo('sudo mv {0} {1}'.format(dst, target_path)) return target_path def ping(self, **attributes): """ Tests that the connection is working. :param attributes: custom attributes passed directly to fabric's run command :return: a response object containing information about the execution :rtype: FabricCommandExecutionResponse """ return self.run('echo', **attributes) def mktemp(self, create=True, directory=False, **attributes): """ Creates a temporary path. :param create: actually create the file or just construct the path :param directory: path should be a directory or not. :param attributes: custom attributes passed directly to fabric's run command :return: the temporary path """ flags = [] if not create: flags.append('-u') if directory: flags.append('-d') if self.tmpdir is not None: flags.append('-p "{0}"'.format(self.tmpdir)) return self.run('mktemp {0}' .format(' '.join(flags)), **attributes).std_out.rstrip() def mkdtemp(self, create=True, **attributes): """ Creates a temporary directory path. :param create: actually create the file or just construct the path :param attributes: custom attributes passed directly to fabric's run command :return: the temporary path """ return self.mktemp(create=create, directory=True, **attributes) def home_dir(self, username): """ Retrieve the path of the user's home directory. 
:param username: the username :return: path to the home directory """ return self.python( imports_line='import pwd', command='pwd.getpwnam(\'{0}\').pw_dir' .format(username)) def verify_dir_exists(self, dirname): self.run('mkdir -p {0}'.format(dirname)) def python(self, imports_line, command, **attributes): """ Run a python command and return the output. To overcome the situation where additional info is printed to stdout when a command execution occurs, a string is appended to the output. This will then search for the string and the following closing brackets to retrieve the original output. :param imports_line: The imports needed for the command. :param command: The python command to run. :param attributes: custom attributes passed directly to fabric's run command :return: the string representation of the return value of the python command """ python_bin = '$(command which python ' \ '|| command which python3 ' \ '|| echo "python")' start = '###CLOUDIFYCOMMANDOPEN' end = 'CLOUDIFYCOMMANDCLOSE###' stdout = self.run('{0} -c "import sys; {1}; ' 'sys.stdout.write(\'{2}{3}{4}\\n\'' '.format({5}))"' .format(python_bin, imports_line, start, '{0}', end, command), **attributes).std_out result = stdout[stdout.find(start) - 1 + len(end): stdout.find(end)] return result def machine_distribution(self, **attributes): """ Retrieves the distribution information of the host. :param attributes: custom attributes passed directly to fabric's run command :return: dictionary of the platform distribution as returned from 'platform.dist()' """ response = self.python( imports_line='import platform, json', command='json.dumps(platform.dist())', **attributes ) return api_utils.json_loads(response) def delete(self, path): self.run('rm -rf {0}'.format(path)) def close(self): if self._connection is not None: self._connection.close() class FabricCommandExecutionError(CommandExecutionError): """ Indicates a failure occurred while trying to execute the command. 
""" pass class FabricCommandExecutionException(CommandExecutionException): """ Indicates the command was executed but a failure occurred. """ pass class FabricCommandExecutionResponse(CommandExecutionResponse): """ Wrapper for indicating the command was originated with fabric api. """ pass
apache-2.0
derwebcoder/lead
lead/jobs/DockerJob.py
1
2448
import os, sys from lead.jobs.Job import Job from lead.helpers.path_helpers import get_cwd, compute_home_directory, compute_absolute_path from lead.helpers.logging import log, log_error from lead.helpers.DockerHelper import DockerHelper class DockerJob(Job): def __init__(self, name, function, image, **kwargs): super().__init__(name, function, **kwargs) self.workspace_volume = { get_cwd(): { 'bind': '/source', 'mode': 'rw' } } self.daemon_volume = { '/var/run/docker.sock': { 'bind': '/var/run/docker.sock', 'mode': 'ro' } } self.image = image self.volumes = self.__parse_volumes( kwargs.get('volumes', []), kwargs.get('mount_daemon', False)) use_host_user = kwargs.get('use_host_user', False) self.user = None if use_host_user is True: self.user = str(os.getuid())+":"+str(os.getgid()) self.dockerHelper = DockerHelper() def run(self, *args, **kwargs): container = self.dockerHelper.create_container(self.image, self.volumes, self.user) exec_func = self.dockerHelper.create_exec(container) self.function(exec=exec_func, *args, **kwargs) self.dockerHelper.kill_container(container) def __parse_volumes(self, volumes=None, mount_daemon=False): parsed_volumes = {} parsed_volumes.update(self.workspace_volume) if mount_daemon is True: parsed_volumes.update(self.daemon_volume) if volumes is None: return parsed_volumes for item in volumes: options = item.split(":") host_path = "" container_path = "" mode = "rw" if len(options) < 2 or len(options) > 3: log("ERROR | The volume \"" + item + "\" is not a valid volume description. " + "Try <HOST_PATH>:<CONTAINER_PATH>[:ro|rw].") sys.exit(8) if len(options) >= 2: host_path = compute_absolute_path(compute_home_directory(options[0])) container_path = options[1] if len(options) == 3: mode = options[2] parsed_volumes[host_path] = { 'bind': container_path, 'mode': mode } return parsed_volumes
apache-2.0
tempbottle/kbengine
kbe/res/scripts/common/Lib/encodings/iso2022_jp_2004.py
816
1073
# # iso2022_jp_2004.py: Python Unicode Codec for ISO2022_JP_2004 # # Written by Hye-Shik Chang <perky@FreeBSD.org> # import _codecs_iso2022, codecs import _multibytecodec as mbc codec = _codecs_iso2022.getcodec('iso2022_jp_2004') class Codec(codecs.Codec): encode = codec.encode decode = codec.decode class IncrementalEncoder(mbc.MultibyteIncrementalEncoder, codecs.IncrementalEncoder): codec = codec class IncrementalDecoder(mbc.MultibyteIncrementalDecoder, codecs.IncrementalDecoder): codec = codec class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader): codec = codec class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter): codec = codec def getregentry(): return codecs.CodecInfo( name='iso2022_jp_2004', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
lgpl-3.0
activitynet/ActivityNet
Evaluation/ava/np_box_mask_list_ops.py
1
15471
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Operations for np_box_mask_list.BoxMaskList. Example box operations that are supported: * Areas: compute bounding box areas * IOU: pairwise intersection-over-union scores """ import numpy as np import np_box_list_ops import np_box_mask_list import np_mask_ops def box_list_to_box_mask_list(boxlist): """Converts a BoxList containing 'masks' into a BoxMaskList. Args: boxlist: An np_box_list.BoxList object. Returns: An np_box_mask_list.BoxMaskList object. Raises: ValueError: If boxlist does not contain `masks` as a field. """ if not boxlist.has_field('masks'): raise ValueError('boxlist does not contain mask field.') box_mask_list = np_box_mask_list.BoxMaskList( box_data=boxlist.get(), mask_data=boxlist.get_field('masks')) extra_fields = boxlist.get_extra_fields() for key in extra_fields: if key != 'masks': box_mask_list.data[key] = boxlist.get_field(key) return box_mask_list def area(box_mask_list): """Computes area of masks. Args: box_mask_list: np_box_mask_list.BoxMaskList holding N boxes and masks Returns: a numpy array with shape [N*1] representing mask areas """ return np_mask_ops.area(box_mask_list.get_masks()) def intersection(box_mask_list1, box_mask_list2): """Compute pairwise intersection areas between masks. 
Args: box_mask_list1: BoxMaskList holding N boxes and masks box_mask_list2: BoxMaskList holding M boxes and masks Returns: a numpy array with shape [N*M] representing pairwise intersection area """ return np_mask_ops.intersection(box_mask_list1.get_masks(), box_mask_list2.get_masks()) def iou(box_mask_list1, box_mask_list2): """Computes pairwise intersection-over-union between box and mask collections. Args: box_mask_list1: BoxMaskList holding N boxes and masks box_mask_list2: BoxMaskList holding M boxes and masks Returns: a numpy array with shape [N, M] representing pairwise iou scores. """ return np_mask_ops.iou(box_mask_list1.get_masks(), box_mask_list2.get_masks()) def ioa(box_mask_list1, box_mask_list2): """Computes pairwise intersection-over-area between box and mask collections. Intersection-over-area (ioa) between two masks mask1 and mask2 is defined as their intersection area over mask2's area. Note that ioa is not symmetric, that is, IOA(mask1, mask2) != IOA(mask2, mask1). Args: box_mask_list1: np_box_mask_list.BoxMaskList holding N boxes and masks box_mask_list2: np_box_mask_list.BoxMaskList holding M boxes and masks Returns: a numpy array with shape [N, M] representing pairwise ioa scores. """ return np_mask_ops.ioa(box_mask_list1.get_masks(), box_mask_list2.get_masks()) def gather(box_mask_list, indices, fields=None): """Gather boxes from np_box_mask_list.BoxMaskList according to indices. By default, gather returns boxes corresponding to the input index list, as well as all additional fields stored in the box_mask_list (indexing into the first dimension). However one can optionally only gather from a subset of fields. Args: box_mask_list: np_box_mask_list.BoxMaskList holding N boxes indices: a 1-d numpy array of type int_ fields: (optional) list of fields to also gather from. If None (default), all fields are gathered from. Pass an empty fields list to only gather the box coordinates. 
Returns: subbox_mask_list: a np_box_mask_list.BoxMaskList corresponding to the subset of the input box_mask_list specified by indices Raises: ValueError: if specified field is not contained in box_mask_list or if the indices are not of type int_ """ if fields is not None: if 'masks' not in fields: fields.append('masks') return box_list_to_box_mask_list( np_box_list_ops.gather( boxlist=box_mask_list, indices=indices, fields=fields)) def sort_by_field(box_mask_list, field, order=np_box_list_ops.SortOrder.DESCEND): """Sort boxes and associated fields according to a scalar field. A common use case is reordering the boxes according to descending scores. Args: box_mask_list: BoxMaskList holding N boxes. field: A BoxMaskList field for sorting and reordering the BoxMaskList. order: (Optional) 'descend' or 'ascend'. Default is descend. Returns: sorted_box_mask_list: A sorted BoxMaskList with the field in the specified order. """ return box_list_to_box_mask_list( np_box_list_ops.sort_by_field( boxlist=box_mask_list, field=field, order=order)) def non_max_suppression(box_mask_list, max_output_size=10000, iou_threshold=1.0, score_threshold=-10.0): """Non maximum suppression. This op greedily selects a subset of detection bounding boxes, pruning away boxes that have high IOU (intersection over union) overlap (> thresh) with already selected boxes. In each iteration, the detected bounding box with highest score in the available pool is selected. Args: box_mask_list: np_box_mask_list.BoxMaskList holding N boxes. Must contain a 'scores' field representing detection scores. All scores belong to the same class. max_output_size: maximum number of retained boxes iou_threshold: intersection over union threshold. score_threshold: minimum score threshold. Remove the boxes with scores less than this value. Default value is set to -10. A very low threshold to pass pretty much all the boxes, unless the user sets a different score threshold. 
Returns: an np_box_mask_list.BoxMaskList holding M boxes where M <= max_output_size Raises: ValueError: if 'scores' field does not exist ValueError: if threshold is not in [0, 1] ValueError: if max_output_size < 0 """ if not box_mask_list.has_field('scores'): raise ValueError('Field scores does not exist') if iou_threshold < 0. or iou_threshold > 1.0: raise ValueError('IOU threshold must be in [0, 1]') if max_output_size < 0: raise ValueError('max_output_size must be bigger than 0.') box_mask_list = filter_scores_greater_than(box_mask_list, score_threshold) if box_mask_list.num_boxes() == 0: return box_mask_list box_mask_list = sort_by_field(box_mask_list, 'scores') # Prevent further computation if NMS is disabled. if iou_threshold == 1.0: if box_mask_list.num_boxes() > max_output_size: selected_indices = np.arange(max_output_size) return gather(box_mask_list, selected_indices) else: return box_mask_list masks = box_mask_list.get_masks() num_masks = box_mask_list.num_boxes() # is_index_valid is True only for all remaining valid boxes, is_index_valid = np.full(num_masks, 1, dtype=bool) selected_indices = [] num_output = 0 for i in range(num_masks): if num_output < max_output_size: if is_index_valid[i]: num_output += 1 selected_indices.append(i) is_index_valid[i] = False valid_indices = np.where(is_index_valid)[0] if valid_indices.size == 0: break intersect_over_union = np_mask_ops.iou( np.expand_dims(masks[i], axis=0), masks[valid_indices]) intersect_over_union = np.squeeze(intersect_over_union, axis=0) is_index_valid[valid_indices] = np.logical_and( is_index_valid[valid_indices], intersect_over_union <= iou_threshold) return gather(box_mask_list, np.array(selected_indices)) def multi_class_non_max_suppression(box_mask_list, score_thresh, iou_thresh, max_output_size): """Multi-class version of non maximum suppression. 
This op greedily selects a subset of detection bounding boxes, pruning away boxes that have high IOU (intersection over union) overlap (> thresh) with already selected boxes. It operates independently for each class for which scores are provided (via the scores field of the input box_list), pruning boxes with score less than a provided threshold prior to applying NMS. Args: box_mask_list: np_box_mask_list.BoxMaskList holding N boxes. Must contain a 'scores' field representing detection scores. This scores field is a tensor that can be 1 dimensional (in the case of a single class) or 2-dimensional, in which case we assume that it takes the shape [num_boxes, num_classes]. We further assume that this rank is known statically and that scores.shape[1] is also known (i.e., the number of classes is fixed and known at graph construction time). score_thresh: scalar threshold for score (low scoring boxes are removed). iou_thresh: scalar threshold for IOU (boxes that that high IOU overlap with previously selected boxes are removed). max_output_size: maximum number of retained boxes per class. Returns: a box_mask_list holding M boxes with a rank-1 scores field representing corresponding scores for each box with scores sorted in decreasing order and a rank-1 classes field representing a class label for each box. Raises: ValueError: if iou_thresh is not in [0, 1] or if input box_mask_list does not have a valid scores field. 
""" if not 0 <= iou_thresh <= 1.0: raise ValueError('thresh must be between 0 and 1') if not isinstance(box_mask_list, np_box_mask_list.BoxMaskList): raise ValueError('box_mask_list must be a box_mask_list') if not box_mask_list.has_field('scores'): raise ValueError('input box_mask_list must have \'scores\' field') scores = box_mask_list.get_field('scores') if len(scores.shape) == 1: scores = np.reshape(scores, [-1, 1]) elif len(scores.shape) == 2: if scores.shape[1] is None: raise ValueError('scores field must have statically defined second ' 'dimension') else: raise ValueError('scores field must be of rank 1 or 2') num_boxes = box_mask_list.num_boxes() num_scores = scores.shape[0] num_classes = scores.shape[1] if num_boxes != num_scores: raise ValueError('Incorrect scores field length: actual vs expected.') selected_boxes_list = [] for class_idx in range(num_classes): box_mask_list_and_class_scores = np_box_mask_list.BoxMaskList( box_data=box_mask_list.get(), mask_data=box_mask_list.get_masks()) class_scores = np.reshape(scores[0:num_scores, class_idx], [-1]) box_mask_list_and_class_scores.add_field('scores', class_scores) box_mask_list_filt = filter_scores_greater_than( box_mask_list_and_class_scores, score_thresh) nms_result = non_max_suppression( box_mask_list_filt, max_output_size=max_output_size, iou_threshold=iou_thresh, score_threshold=score_thresh) nms_result.add_field( 'classes', np.zeros_like(nms_result.get_field('scores')) + class_idx) selected_boxes_list.append(nms_result) selected_boxes = np_box_list_ops.concatenate(selected_boxes_list) sorted_boxes = np_box_list_ops.sort_by_field(selected_boxes, 'scores') return box_list_to_box_mask_list(boxlist=sorted_boxes) def prune_non_overlapping_masks(box_mask_list1, box_mask_list2, minoverlap=0.0): """Prunes the boxes in list1 that overlap less than thresh with list2. For each mask in box_mask_list1, we want its IOA to be more than minoverlap with at least one of the masks in box_mask_list2. 
If it does not, we remove it. If the masks are not full size image, we do the pruning based on boxes. Args: box_mask_list1: np_box_mask_list.BoxMaskList holding N boxes and masks. box_mask_list2: np_box_mask_list.BoxMaskList holding M boxes and masks. minoverlap: Minimum required overlap between boxes, to count them as overlapping. Returns: A pruned box_mask_list with size [N', 4]. """ intersection_over_area = ioa(box_mask_list2, box_mask_list1) # [M, N] tensor intersection_over_area = np.amax(intersection_over_area, axis=0) # [N] tensor keep_bool = np.greater_equal(intersection_over_area, np.array(minoverlap)) keep_inds = np.nonzero(keep_bool)[0] new_box_mask_list1 = gather(box_mask_list1, keep_inds) return new_box_mask_list1 def concatenate(box_mask_lists, fields=None): """Concatenate list of box_mask_lists. This op concatenates a list of input box_mask_lists into a larger box_mask_list. It also handles concatenation of box_mask_list fields as long as the field tensor shapes are equal except for the first dimension. Args: box_mask_lists: list of np_box_mask_list.BoxMaskList objects fields: optional list of fields to also concatenate. By default, all fields from the first BoxMaskList in the list are included in the concatenation. Returns: a box_mask_list with number of boxes equal to sum([box_mask_list.num_boxes() for box_mask_list in box_mask_list]) Raises: ValueError: if box_mask_lists is invalid (i.e., is not a list, is empty, or contains non box_mask_list objects), or if requested fields are not contained in all box_mask_lists """ if fields is not None: if 'masks' not in fields: fields.append('masks') return box_list_to_box_mask_list( np_box_list_ops.concatenate(boxlists=box_mask_lists, fields=fields)) def filter_scores_greater_than(box_mask_list, thresh): """Filter to keep only boxes and masks with score exceeding a given threshold. This op keeps the collection of boxes and masks whose corresponding scores are greater than the input threshold. 
Args: box_mask_list: BoxMaskList holding N boxes and masks. Must contain a 'scores' field representing detection scores. thresh: scalar threshold Returns: a BoxMaskList holding M boxes and masks where M <= N Raises: ValueError: if box_mask_list not a np_box_mask_list.BoxMaskList object or if it does not have a scores field """ if not isinstance(box_mask_list, np_box_mask_list.BoxMaskList): raise ValueError('box_mask_list must be a BoxMaskList') if not box_mask_list.has_field('scores'): raise ValueError('input box_mask_list must have \'scores\' field') scores = box_mask_list.get_field('scores') if len(scores.shape) > 2: raise ValueError('Scores should have rank 1 or 2') if len(scores.shape) == 2 and scores.shape[1] != 1: raise ValueError('Scores should have rank 1 or have shape ' 'consistent with [None, 1]') high_score_indices = np.reshape(np.where(np.greater(scores, thresh)), [-1]).astype(np.int32) return gather(box_mask_list, high_score_indices)
mit
InnovaLangues/DicoAPI
vendor/doctrine/orm/docs/en/_exts/configurationblock.py
2577
3506
#Copyright (c) 2010 Fabien Potencier # #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is furnished #to do so, subject to the following conditions: # #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. # #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #THE SOFTWARE. 
from docutils.parsers.rst import Directive, directives from docutils import nodes from string import upper class configurationblock(nodes.General, nodes.Element): pass class ConfigurationBlock(Directive): has_content = True required_arguments = 0 optional_arguments = 0 final_argument_whitespace = True option_spec = {} formats = { 'html': 'HTML', 'xml': 'XML', 'php': 'PHP', 'yaml': 'YAML', 'jinja': 'Twig', 'html+jinja': 'Twig', 'jinja+html': 'Twig', 'php+html': 'PHP', 'html+php': 'PHP', 'ini': 'INI', 'php-annotations': 'Annotations', } def run(self): env = self.state.document.settings.env node = nodes.Element() node.document = self.state.document self.state.nested_parse(self.content, self.content_offset, node) entries = [] for i, child in enumerate(node): if isinstance(child, nodes.literal_block): # add a title (the language name) before each block #targetid = "configuration-block-%d" % env.new_serialno('configuration-block') #targetnode = nodes.target('', '', ids=[targetid]) #targetnode.append(child) innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']]) para = nodes.paragraph() para += [innernode, child] entry = nodes.list_item('') entry.append(para) entries.append(entry) resultnode = configurationblock() resultnode.append(nodes.bullet_list('', *entries)) return [resultnode] def visit_configurationblock_html(self, node): self.body.append(self.starttag(node, 'div', CLASS='configuration-block')) def depart_configurationblock_html(self, node): self.body.append('</div>\n') def visit_configurationblock_latex(self, node): pass def depart_configurationblock_latex(self, node): pass def setup(app): app.add_node(configurationblock, html=(visit_configurationblock_html, depart_configurationblock_html), latex=(visit_configurationblock_latex, depart_configurationblock_latex)) app.add_directive('configuration-block', ConfigurationBlock)
mit
bguillot/OpenUpgrade
addons/mail/mail_group_menu.py
101
2718
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> # ############################################################################## from openerp import SUPERUSER_ID from openerp.osv import osv from openerp.osv import fields class ir_ui_menu(osv.osv): """ Override of ir.ui.menu class. When adding mail_thread module, each new mail.group will create a menu entry. This overrides checks that the current user is in the mail.group followers. If not, the menu entry is taken off the list of menu ids. This way the user will see menu entries for the mail.group he is following. """ _inherit = 'ir.ui.menu' _columns = { 'mail_group_id': fields.many2one('mail.group', 'Mail Group') } def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False): """ Override to take off menu entries (mail.group) the user is not following. Access are done using SUPERUSER_ID to avoid access rights issues for an internal back-end algorithm. 
""" ids = super(ir_ui_menu, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=False) partner_id = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0] follower_obj = self.pool.get('mail.followers') for menu in self.browse(cr, uid, ids, context=context): if menu.mail_group_id: sub_ids = follower_obj.search(cr, SUPERUSER_ID, [ ('partner_id', '=', partner_id), ('res_model', '=', 'mail.group'), ('res_id', '=', menu.mail_group_id.id) ], context=context) if not sub_ids: ids.remove(menu.id) if count: return len(ids) return ids
agpl-3.0
wtpayne/hiai
a3_src/h70_internal/da/check/schema/spec/spec_silcfg.py
1
1304
# -*- coding: utf-8 -*- """ Unit tests for the da.check.schema.silcfg module. --- type: python_module validation_level: v00_minimum protection: k00_public copyright: "Copyright 2016 High Integrity Artificial Intelligence Systems" license: "Licensed under the Apache License, Version 2.0 (the License); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License." ... """ # ============================================================================= class SpecifyGet: """ Specify the da.check.schema.silcfg.get() function. """ # ------------------------------------------------------------------------- def it_returns_a_callable(self): """ The get() function returns a callable. """ import da.check.schema.silcfg schema = da.check.schema.silcfg.get() assert callable(schema)
apache-2.0
gunicorn/gunicorn
gunicorn/app/pasterapp.py
10
6372
# -*- coding: utf-8 - # # This file is part of gunicorn released under the MIT license. # See the NOTICE for more information. import os import pkg_resources import sys try: import configparser as ConfigParser except ImportError: import ConfigParser from paste.deploy import loadapp, loadwsgi SERVER = loadwsgi.SERVER from gunicorn.app.base import Application from gunicorn.config import Config, get_default_config_file from gunicorn import util def _configure_logging_from_paste_config(paste_file): logger_cfg_file = paste_file.split(':')[1] cfg_parser = ConfigParser.ConfigParser() cfg_parser.read([logger_cfg_file]) if cfg_parser.has_section('loggers'): from logging.config import fileConfig config_file = os.path.abspath(logger_cfg_file) fileConfig(config_file, dict(__file__=config_file, here=os.path.dirname(config_file))) def paste_config(gconfig, config_url, relative_to, global_conf=None): # add entry to pkg_resources sys.path.insert(0, relative_to) pkg_resources.working_set.add_entry(relative_to) config_url = config_url.split('#')[0] cx = loadwsgi.loadcontext(SERVER, config_url, relative_to=relative_to, global_conf=global_conf) gc, lc = cx.global_conf.copy(), cx.local_conf.copy() cfg = {} host, port = lc.pop('host', ''), lc.pop('port', '') if host and port: cfg['bind'] = '%s:%s' % (host, port) elif host: cfg['bind'] = host.split(',') cfg['workers'] = int(lc.get('workers', 1)) cfg['umask'] = int(lc.get('umask', 0)) cfg['default_proc_name'] = gc.get('__file__') # init logging configuration _configure_logging_from_paste_config(config_url) for k, v in gc.items(): if k not in gconfig.settings: continue cfg[k] = v for k, v in lc.items(): if k not in gconfig.settings: continue cfg[k] = v return cfg def load_pasteapp(config_url, relative_to, global_conf=None): return loadapp(config_url, relative_to=relative_to, global_conf=global_conf) class PasterBaseApplication(Application): gcfg = None def app_config(self): return paste_config(self.cfg, self.cfgurl, self.relpath, 
global_conf=self.gcfg) def load_config(self): super(PasterBaseApplication, self).load_config() # reload logging conf if hasattr(self, "cfgfname"): parser = ConfigParser.ConfigParser() parser.read([self.cfgfname]) if parser.has_section('loggers'): from logging.config import fileConfig config_file = os.path.abspath(self.cfgfname) fileConfig(config_file, dict(__file__=config_file, here=os.path.dirname(config_file))) class PasterApplication(PasterBaseApplication): def init(self, parser, opts, args): if len(args) != 1: parser.error("No application name specified.") cwd = util.getcwd() cfgfname = os.path.normpath(os.path.join(cwd, args[0])) cfgfname = os.path.abspath(cfgfname) if not os.path.exists(cfgfname): parser.error("Config file not found: %s" % cfgfname) self.cfgurl = 'config:%s' % cfgfname self.relpath = os.path.dirname(cfgfname) self.cfgfname = cfgfname sys.path.insert(0, self.relpath) pkg_resources.working_set.add_entry(self.relpath) return self.app_config() def load(self): # chdir to the configured path before loading, # default is the current dir os.chdir(self.cfg.chdir) return load_pasteapp(self.cfgurl, self.relpath, global_conf=self.gcfg) class PasterServerApplication(PasterBaseApplication): def __init__(self, app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs): self.cfg = Config() self.gcfg = gcfg # need to hold this for app_config self.app = app self.callable = None gcfg = gcfg or {} cfgfname = gcfg.get("__file__") if cfgfname is not None: self.cfgurl = 'config:%s' % cfgfname self.relpath = os.path.dirname(cfgfname) self.cfgfname = cfgfname cfg = kwargs.copy() if port and not host.startswith("unix:"): bind = "%s:%s" % (host, port) else: bind = host cfg["bind"] = bind.split(',') if gcfg: for k, v in gcfg.items(): cfg[k] = v cfg["default_proc_name"] = cfg['__file__'] try: for k, v in cfg.items(): if k.lower() in self.cfg.settings and v is not None: self.cfg.set(k.lower(), v) except Exception as e: sys.stderr.write("\nConfig error: %s\n" % str(e)) 
sys.stderr.flush() sys.exit(1) if cfg.get("config"): self.load_config_from_file(cfg["config"]) else: default_config = get_default_config_file() if default_config is not None: self.load_config_from_file(default_config) def load(self): # chdir to the configured path before loading, # default is the current dir os.chdir(self.cfg.chdir) return self.app def run(): """\ The ``gunicorn_paster`` command for launching Paster compatible applications like Pylons or Turbogears2 """ util.warn("""This command is deprecated. You should now use the `--paste` option. Ex.: gunicorn --paste development.ini """) from gunicorn.app.pasterapp import PasterApplication PasterApplication("%(prog)s [OPTIONS] pasteconfig.ini").run() def paste_server(app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs): """\ A paster server. Then entry point in your paster ini file should looks like this: [server:main] use = egg:gunicorn#main host = 127.0.0.1 port = 5000 """ util.warn("""This command is deprecated. You should now use the `--paste` option. Ex.: gunicorn --paste development.ini """) from gunicorn.app.pasterapp import PasterServerApplication PasterServerApplication(app, gcfg=gcfg, host=host, port=port, *args, **kwargs).run()
mit
MaterialsDiscovery/PyChemia
pychemia/core/composition.py
1
16450
""" Chemical composition is just the description of the amount of atoms of each specie. In the case of clusters or molecules, ie a finite structure, it represents the complete set of atoms. For periodic structures it represents the species present on a cell. """ import re from numpy import array, argsort from math import gcd as _gcd from math import pi from pychemia.utils.periodic import atomic_symbols, electronegativity, atomic_number, covalent_radius from pychemia.utils.computing import deep_unicode from functools import reduce from collections.abc import Mapping class Composition(Mapping): """ A Composition is basically a mapping between a number of species and a integer indicating how many atoms of that specie are present in the structure. A composition object do not contain geometrical information or bonding. The main purpose of this class is to be able to parse formulas into compositions and return string formulas sorted in various ways. """ def __init__(self, value=None): """ Creates a new composition, currently only absolute formulas are supported. :param value: (str, dict) The input argument could be a string with a chemical formula or the actual dictionary of species and values. The order of species is not guaranteed to be preserved. A iterable of atomic symbols is also accepted to build a composition object. :rtype: Composition >>> cp = Composition({'Ba': 2, 'Cu': 3, 'O': 7, 'Y': 1}) >>> cp.formula 'Ba2Cu3O7Y' >>> cp = Composition('Ba2Cu3O7Y') >>> cp2 = Composition(cp) >>> len(cp2) 4 >>> cp.nspecies 4 >>> cp = Composition(['O', 'H', 'O']) >>> len(cp) 2 >>> cp['O'] 2 """ # The internal dictionary where atom species and numbers of atoms of each specie are stored. 
self._composition = {} # Convert strings and dictionaries into unicode if value is not None: value = deep_unicode(value) # Case 1: The input is a formula if isinstance(value, str): self._set_composition(self.formula_parser(value)) # Case 2: The input is a dictionary elif isinstance(value, dict): self._set_composition(value) # Case 3: The input is another composition object elif isinstance(value, Composition): self._set_composition(value.composition) # Case 4: The input is an iterable of atomic symbols elif hasattr(value, "__len__"): dvalue = {} for i in value: if i in dvalue: dvalue[i] += 1 else: dvalue[i] = 1 self._set_composition(dvalue) else: self._composition = {} def __len__(self): return len(self._composition) def __getitem__(self, specie): """ Returns the number of atoms of a given specie :param specie: Atomic Symbol for which the value will be returned :return: number of atoms of the given specie :rtype: int >>> comp = Composition('H2') >>> comp['H'] 2 >>> comp['He'] 0 """ if specie in self._composition: return self._composition[specie] else: return 0 def __repr__(self): """ Evaluable representation of Composition object :return: Text representation that can be evaluated :rtype: str >>> cp1 = Composition('H2O') >>> cp2 = eval(repr(cp1)) >>> cp2 == cp1 True """ return 'Composition(' + str(self.composition) + ')' def __str__(self): """ :return: String representation of the composition >>> cp = Composition('YBa2Cu3O7') >>> 'Cu' in str(cp) True """ ret = '' for i in self.species: ret += " %3s: %4d " % (i, self.composition[i]) return ret def __iter__(self): return iter(self.composition) def __contains__(self, specie): """True if 'specie' is present in composition :return: True if specie is present :param specie: atomic specie :rtype: bool >>> cp = Composition('H2O') >>> 'He' in cp False """ return specie in self._composition def _set_composition(self, value): """ Checks the values of a dictionary before setting the actual composition :param value: (dict) :rtype: 
None """ for i in value: assert (i in atomic_symbols) assert (isinstance(value[i], int)) self._composition = value.copy() @property def composition(self): """Dictionary with composition :return: The composition dictionary :rtype: dict >>> import pprint >>> cp = Composition('H2O') >>> pprint.pprint(cp.composition) {'H': 2, 'O': 1} """ return self._composition def covalent_volume(self, packing='cubes'): """ :param packing: The kind of packing could be 'cubes' or 'spheres' :type packing: str :return: The volume occupied by a given formula assuming a 'cubes' packing or 'spheres' packing :rtype: (float) >>> cp = Composition('C5H10') >>> cp.covalent_volume() 19.942320000000002 >>> cp.covalent_volume(packing='spheres') 10.441774334589468 """ if packing == 'cubes': factor = 8 elif packing == 'spheres': factor = 4 * pi / 3.0 else: raise ValueError('Non-valid packing: "%s"' % packing) # find volume of unit cell by adding cubes volume = 0.0 for specie in self: number_atoms_specie = self.composition[specie] # Pack each atom in a cube (2*r)^3 volume += factor * number_atoms_specie * covalent_radius(specie) ** 3 return volume @property def formula(self): """Chemical formula :return: The chemical formula with atoms sorted alphabetically :rtype: str >>> cp = Composition('NaCl') >>> cp.formula 'ClNa' """ return self.sorted_formula(sortby='alpha', reduced=True) @staticmethod def formula_parser(value): """Return a dictionary from a chemical formula :return: Convert an string representing a chemical formula into a dictionary with the species as keys and values as the number of atoms of that specie :param value: (str) Chemical formula :rtype: dict >>> import pprint >>> Composition.formula_parser('Au20') {'Au': 20} >>> ret = Composition.formula_parser('UutUupUusUuo') >>> pprint.pprint(ret) {'Uuo': 1, 'Uup': 1, 'Uus': 1, 'Uut': 1} """ ret = {} jump = False for i in range(len(value)): if jump > 0: # This char belongs to the current atom, move on jump -= 1 elif value[i].isupper(): # Atom 
Name starts with Uppercase if i + 1 < len(value) and value[i + 1].islower(): # Atom name has more than 1 char if i + 2 < len(value) and value[i + 2].islower(): # Atom name has more than 2 chars specie = value[i:i + 3] jump = 2 else: specie = value[i:i + 2] jump = 1 else: specie = value[i] jump = 0 j = 1 number = '' while True: if i + jump + j < len(value) and value[i + jump + j].isdigit(): number += value[i + jump + j] j += 1 else: break if number == '': ret[specie] = 1 else: ret[specie] = int(number) return ret @staticmethod def formula_to_list(formula, nunits=1): """ Reads a formula and returns a list of atomic symbols consistent with the formula and the number of formulas given by nunits :param formula: (str) Chemical formula as string :param nunits: (int) Number of formulas to apply :return: list of atomic symbols :rtype: list >>> Composition.formula_to_list('NaCl') ['Na', 'Cl'] >>> flist = Composition.formula_to_list(u'Uut2Uup3Uus4Uuo5') >>> len(flist) 14 >>> flist = Composition.formula_to_list('Uut2Uup3Uus4Uuo5', nunits=2) >>> len(flist) 28 """ # decompose composition a = re.findall(r"[A-Z][a-z0-9]*", formula) composition = [] for i in a: m = re.match(r"([A-Za-z]+)([0-9]*)", i) if m.group(2) == "": n = int(1) else: n = int(m.group(2)) for j in range(n * nunits): composition.append(m.group(1)) return composition @property def gcd(self): """ Number of minimal formulas on a given composition. :return: The number of formulas that can be extracted from a composition ie, the greatest common denominator for the composition. :rtype: int >>> cp = Composition('NaCl') >>> cp.gcd 1 >>> cp = Composition('Na2Cl2') >>> cp.gcd 2 >>> cp = Composition() >>> cp.gcd is None True """ if self.natom > 0: return reduce(_gcd, self.values) else: return None @staticmethod def get_species_from_hex(arg): """List of species encoded for hex string produced by species_hex :return: Return a set of species from the encoded species created by the output of "species_hex" method. 
:param arg: str String with hexadecimal representation of list of species. >>> Composition.get_species_from_hex('0x38271d08') [8, 29, 39, 56] """ num = int(arg, 16) ret = [] while num > 0: ret.append(num % 256) num = (num-ret[-1])//256 return ret @property def natom(self): """ :return: The number of atoms in the composition :rtype: int >>> cp = Composition('H2O') >>> cp.natom 3 """ return sum(self.values) @property def nspecies(self): """ :return: Number of species in the composition :rtype: int >>> cp = Composition('H2O') >>> cp.nspecies 2 """ return len(self.species) @property def symbols(self): """List of species on the composition :return: A list of atomic symbols :rtype: list >>> cp = Composition('H2O') >>> cp.symbols ['H', 'H', 'O'] """ ret = [] for specie in self: number_atoms_specie = self.composition[specie] for i in range(number_atoms_specie): ret.append(specie) return sorted(deep_unicode(ret)) @property def species(self): """List of species on the composition :return: The list of species, no particular order but atoms of the same specie are contiguous. :rtype: list >>> cp = Composition('H2O') >>> sorted(cp.species) ['H', 'O'] """ return [deep_unicode(x) for x in self._composition] def sorted_formula(self, sortby='alpha', reduced=True): """ :return: The chemical formula. It could be sorted alphabetically using sortby='alpha', by electronegativity using sortby='electronegativity' or using Hill System with sortby='Hill' Just the first 3 letters are unambiguous and case is not taken in account so you can use 'alp', 'hil' or 'ele' :param sortby: (str) 'alpha' : Alphabetically 'electronegativity' : Electronegativity 'hill' : Hill System :param reduced: (bool) If the formula should be normalized :rtype: str .. 
notes: Hill exceptions have not being implemented yet >>> cp = Composition('YBa2Cu3O7') >>> cp.sorted_formula() 'Ba2Cu3O7Y' >>> cp.sorted_formula(sortby='hill') 'Ba2Cu3O7Y' >>> cp.sorted_formula(sortby='electroneg') 'Ba2YCu3O7' >>> cp = Composition('H10C5') >>> cp.sorted_formula(sortby='hill', reduced=True) 'CH2' >>> cp = Composition('IBr') >>> cp.sorted_formula(sortby='hill', reduced=False) 'BrI' >>> cp = Composition('Cl4C') >>> cp.sorted_formula(sortby='hill', reduced=False) 'CCl4' >>> cp = Composition('IH3C') >>> cp.sorted_formula(sortby='hill', reduced=False) 'CH3I' >>> cp = Composition('BrH5C2') >>> cp.sorted_formula(sortby='hill', reduced=False) 'C2H5Br' >>> cp = Composition('S04H2') >>> cp.sorted_formula(sortby='hill', reduced=False) 'H2S4' >>> cp = Composition('SO4H2') >>> cp.sorted_formula(sortby='hill', reduced=False) 'H2O4S' """ if reduced and self.gcd > 1: comp = Composition(self.composition) for i in comp.composition: comp._composition[i] //= self.gcd else: comp = self if sortby.lower()[:3] == 'ele': electroneg = list(electronegativity(comp.species)) # Not longer needed as electronegativy will return 0 for 'None' values # for i in range(len(electroneg)): # if electroneg[i] is None: # electroneg[i] = -1 sortedspecies = array(comp.species)[argsort(electroneg)] elif sortby.lower()[:3] == "hil": # FIXME: Hill system exceptions not implemented sortedspecies = [] presortedspecies = sorted(comp.species) if 'C' in presortedspecies: sortedspecies.append('C') presortedspecies.pop(presortedspecies.index('C')) if 'H' in presortedspecies: sortedspecies.append('H') presortedspecies.pop(presortedspecies.index('H')) sortedspecies += presortedspecies else: sortedspecies = sorted(comp.species) ret = u'' for specie in sortedspecies: ret += '%s' % specie if comp.composition[specie] > 1: ret += "%d" % comp.composition[specie] return deep_unicode(ret) def species_encoded(self, base): """Encode the list of species with a number :return: Encodes the species as a number. 
:param base: Integer used as base for encoding. :rtype: int >>> cp = Composition('H2O') >>> cp.species_encoded(100) 801 """ ret = 0 i = 0 for atom_number in sorted(atomic_number(self.species)): ret += atom_number * (base ** i) i += 1 return ret def species_hex(self): """Encoding in hexadecimal with 2 bytes per specie (base 256) :return: Encodes the species into a hexadecimal representation where each specie is stored on a 2-Byte slot ordered by atomic number. The output produces a unique encoding where each 2 character from the hexadecimal will encode a single species and the species are ordered by atomic number making the codification unique. :rtype: str >>> cp = Composition('YBa2Cu3O7') >>> cp.species_hex() '0x38271d08' """ enc = self.species_encoded(256) return hex(enc) @property def values(self): """ :return: The number of atoms of each specie :rtype: list >>> cp = Composition('YBa2Cu3O7') >>> sorted(cp.values) [1, 2, 3, 7] """ return [self._composition[x] for x in self._composition]
mit
alfonsodev/ansible-modules-extras
monitoring/datadog_monitor.py
74
10398
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # import module snippets # Import Datadog try: from datadog import initialize, api HAS_DATADOG = True except: HAS_DATADOG = False DOCUMENTATION = ''' --- module: datadog_monitor short_description: Manages Datadog monitors description: - "Manages monitors within Datadog" - "Options like described on http://docs.datadoghq.com/api/" version_added: "2.0" author: "Sebastian Kornehl (@skornehl)" notes: [] requirements: [datadog] options: api_key: description: ["Your DataDog API key."] required: true app_key: description: ["Your DataDog app key."] required: true state: description: ["The designated state of the monitor."] required: true choices: ['present', 'absent', 'muted', 'unmuted'] type: description: ["The type of the monitor."] required: false default: null choices: ['metric alert', 'service check'] query: description: ["he monitor query to notify on with syntax varying depending on what type of monitor you are creating."] required: false default: null name: description: ["The name of the alert."] required: true message: description: ["A message to include with notifications for this monitor. 
Email notifications can be sent to specific users by using the same '@username' notation as events."] required: false default: null silenced: description: ["Dictionary of scopes to timestamps or None. Each scope will be muted until the given POSIX timestamp or forever if the value is None. "] required: false default: "" notify_no_data: description: ["A boolean indicating whether this monitor will notify when data stops reporting.."] required: false default: False no_data_timeframe: description: ["The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks."] required: false default: 2x timeframe for metric, 2 minutes for service timeout_h: description: ["The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state."] required: false default: null renotify_interval: description: ["The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's not resolved."] required: false default: null escalation_message: description: ["A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval is None"] required: false default: null notify_audit: description: ["A boolean indicating whether tagged users will be notified on changes to this monitor."] required: false default: False thresholds: description: ["A dictionary of thresholds by status. Because service checks can have multiple thresholds, we don't define them directly in the query."] required: false default: {'ok': 1, 'critical': 1, 'warning': 1} ''' EXAMPLES = ''' # Create a metric monitor datadog_monitor: type: "metric alert" name: "Test monitor" state: "present" query: "datadog.agent.up".over("host:host1").last(2).count_by_status()" message: "Some message." 
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" # Deletes a monitor datadog_monitor: name: "Test monitor" state: "absent" api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" # Mutes a monitor datadog_monitor: name: "Test monitor" state: "mute" silenced: '{"*":None}' api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" # Unmutes a monitor datadog_monitor: name: "Test monitor" state: "unmute" api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" ''' def main(): module = AnsibleModule( argument_spec=dict( api_key=dict(required=True), app_key=dict(required=True), state=dict(required=True, choises=['present', 'absent', 'mute', 'unmute']), type=dict(required=False, choises=['metric alert', 'service check']), name=dict(required=True), query=dict(required=False), message=dict(required=False, default=None), silenced=dict(required=False, default=None, type='dict'), notify_no_data=dict(required=False, default=False, choices=BOOLEANS), no_data_timeframe=dict(required=False, default=None), timeout_h=dict(required=False, default=None), renotify_interval=dict(required=False, default=None), escalation_message=dict(required=False, default=None), notify_audit=dict(required=False, default=False, choices=BOOLEANS), thresholds=dict(required=False, type='dict', default={'ok': 1, 'critical': 1, 'warning': 1}), ) ) # Prepare Datadog if not HAS_DATADOG: module.fail_json(msg='datadogpy required for this module') options = { 'api_key': module.params['api_key'], 'app_key': module.params['app_key'] } initialize(**options) if module.params['state'] == 'present': install_monitor(module) elif module.params['state'] == 'absent': delete_monitor(module) elif module.params['state'] == 'mute': mute_monitor(module) elif module.params['state'] == 'unmute': unmute_monitor(module) def _get_monitor(module): for monitor in 
api.Monitor.get_all(): if monitor['name'] == module.params['name']: return monitor return {} def _post_monitor(module, options): try: msg = api.Monitor.create(type=module.params['type'], query=module.params['query'], name=module.params['name'], message=module.params['message'], options=options) if 'errors' in msg: module.fail_json(msg=str(msg['errors'])) else: module.exit_json(changed=True, msg=msg) except Exception, e: module.fail_json(msg=str(e)) def _equal_dicts(a, b, ignore_keys): ka = set(a).difference(ignore_keys) kb = set(b).difference(ignore_keys) return ka == kb and all(a[k] == b[k] for k in ka) def _update_monitor(module, monitor, options): try: msg = api.Monitor.update(id=monitor['id'], query=module.params['query'], name=module.params['name'], message=module.params['message'], options=options) if 'errors' in msg: module.fail_json(msg=str(msg['errors'])) elif _equal_dicts(msg, monitor, ['creator', 'overall_state']): module.exit_json(changed=False, msg=msg) else: module.exit_json(changed=True, msg=msg) except Exception, e: module.fail_json(msg=str(e)) def install_monitor(module): options = { "silenced": module.params['silenced'], "notify_no_data": module.boolean(module.params['notify_no_data']), "no_data_timeframe": module.params['no_data_timeframe'], "timeout_h": module.params['timeout_h'], "renotify_interval": module.params['renotify_interval'], "escalation_message": module.params['escalation_message'], "notify_audit": module.boolean(module.params['notify_audit']), } if module.params['type'] == "service check": options["thresholds"] = module.params['thresholds'] monitor = _get_monitor(module) if not monitor: _post_monitor(module, options) else: _update_monitor(module, monitor, options) def delete_monitor(module): monitor = _get_monitor(module) if not monitor: module.exit_json(changed=False) try: msg = api.Monitor.delete(monitor['id']) module.exit_json(changed=True, msg=msg) except Exception, e: module.fail_json(msg=str(e)) def mute_monitor(module): 
monitor = _get_monitor(module) if not monitor: module.fail_json(msg="Monitor %s not found!" % module.params['name']) elif monitor['options']['silenced']: module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.") elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) - set(module.params['silenced'])) == 0): module.exit_json(changed=False) try: if module.params['silenced'] is None or module.params['silenced'] == "": msg = api.Monitor.mute(id=monitor['id']) else: msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced']) module.exit_json(changed=True, msg=msg) except Exception, e: module.fail_json(msg=str(e)) def unmute_monitor(module): monitor = _get_monitor(module) if not monitor: module.fail_json(msg="Monitor %s not found!" % module.params['name']) elif not monitor['options']['silenced']: module.exit_json(changed=False) try: msg = api.Monitor.unmute(monitor['id']) module.exit_json(changed=True, msg=msg) except Exception, e: module.fail_json(msg=str(e)) from ansible.module_utils.basic import * from ansible.module_utils.urls import * main()
gpl-3.0
yeKcim/warmux
old/wormux-0.8.1/tools/view_stat/view_stat.py
55
5046
#!/usr/bin/python # -*- coding: UTF-8 -*- import sys, os, types import pygtk pygtk.require ('2.0') import gtk, gtk.glade from xml.dom.minidom import parse class GUI: def __init__(self, glade_xml): xml = gtk.glade.XML(glade_xml, "main_window") self.window = xml.get_widget('main_window') self.stats = xml.get_widget('stats') xml.signal_autoconnect(self) self.window.set_size_request(600,400) self.build_stats_widget() def createTreeviewColumns(self, treeview, colums): num = 0 for name in colums: col = gtk.TreeViewColumn(name) treeview.append_column(col) cell = gtk.CellRendererText() col.pack_start(cell, True) col.add_attribute(cell, 'text', num) treeview.set_search_column(num) col.set_sort_column_id(num) num = num + 1 def build_stats_widget(self): self.stats_store = gtk.TreeStore(str, str, int, int, int, int, int) self.stats.set_model(self.stats_store) self.createTreeviewColumns(self.stats, \ ("Function", "Percent", "Count", "Total", "Average", "Min", "Max",)) def addStat(self, parent, stat): return self.stats_store.append(parent, \ (stat.function, "%.2f%%" % stat.percent, stat.count, \ stat.total, stat.average, stat.min, stat.max,)) def run(self): try: gtk.main() except KeyboardInterrupt: print "Interrupted (CTRL+C)." else: print "Quit." 
def on_main_window_destroy(self, widget): gtk.main_quit() class StatItem: def __init__(self, function, percent, count, total, min, max): self.function = function self.percent = percent self.count = count self.total = total self.min = min self.max = max self.average = total/count self.tree = None class StatTree: def __init__(self, name="root", parent=None): self.name = name self.parent = parent self.nodes = {} self.total = 0 def addNode(self, node): self._addNode(node) def _addNode(self, node): index = node.function.split(":") if 1<len(index): parent = index[0] index = index[1:] node.function = ":".join(index) if parent not in self.nodes: subtree = StatItem(parent, 0, 1, 0, 0, 0) subtree.function = parent subtree.tree = StatTree(parent, self) self.nodes[parent] = subtree self.nodes[parent].tree._addNode(node) else: self.nodes[node.function] = node self.total = self.total + node.total def getFullname(self): t = "" s = self while s != None: if t != "": t = s.name + ">" + t else: t = s.name s = s.parent return t def __getitem__(self, index): return self.nodes[index] class Stats: def __init__(self, filename): self.tree = StatTree() self.parents = {} xml = parse(filename) root = xml.documentElement self.total_time = int(root.getAttribute("total_time")) self.description = root.getAttribute("description") # Create stat tree for node in root.childNodes: if node.nodeType == node.ELEMENT_NODE and node.tagName == "item": # Load on item total = int(node.getAttribute("total")) function = node.getAttribute("function") item = StatItem( \ function, total * 100 / self.total_time, int(node.getAttribute("count")), total, int(node.getAttribute("min")), int(node.getAttribute("max"))) # Insert item in the tree self.tree.addNode(item) def updateGUI(self, gui, tree=None, gui_parent=None, parents=[]): if tree==None: tree = self.tree for parent in tree.nodes: item = tree[parent] if item.tree != None: item.total = item.tree.total item.percent = item.total*100/tree.total new_gui_parent = 
gui.addStat(gui_parent, item) self.updateGUI(gui, item.tree, new_gui_parent) else: item.percent = item.total*100/tree.total gui.addStat(gui_parent, item) def usage(): print "Usage: %s file.xml" % sys.argv[0] sys.exit(1) def main(): if len(sys.argv) < 2: usage() filename = sys.argv[1] # Load GUI xml= os.path.join(os.path.dirname(__file__), "view_stat.glade") gui = GUI(xml) # Load stats stats = Stats(filename) stats.updateGUI(gui) # Run GUI gui.run() if __name__=="__main__": main()
gpl-2.0
haikuginger/urllib3
urllib3/contrib/appengine.py
3
9630
""" This module provides a pool manager that uses Google App Engine's `URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_. Example usage:: from urllib3 import PoolManager from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox if is_appengine_sandbox(): # AppEngineManager uses AppEngine's URLFetch API behind the scenes http = AppEngineManager() else: # PoolManager uses a socket-level API behind the scenes http = PoolManager() r = http.request('GET', 'https://google.com/') There are `limitations <https://cloud.google.com/appengine/docs/python/\ urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be the best choice for your application. There are three options for using urllib3 on Google App Engine: 1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is cost-effective in many circumstances as long as your usage is within the limitations. 2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets. Sockets also have `limitations and restrictions <https://cloud.google.com/appengine/docs/python/sockets/\ #limitations-and-restrictions>`_ and have a lower free quota than URLFetch. To use sockets, be sure to specify the following in your ``app.yaml``:: env_variables: GAE_USE_SOCKETS_HTTPLIB : 'true' 3. If you are using `App Engine Flexible <https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard :class:`PoolManager` without any configuration or special environment variables. 
""" from __future__ import absolute_import import logging import os import warnings from ..exceptions import ( HTTPError, HTTPWarning, MaxRetryError, ProtocolError, TimeoutError, SSLError ) from ..packages.six import BytesIO from ..request import RequestMethods from ..response import HTTPResponse from ..util.timeout import Timeout from ..util.retry import Retry try: from google.appengine.api import urlfetch except ImportError: urlfetch = None log = logging.getLogger(__name__) class AppEnginePlatformWarning(HTTPWarning): pass class AppEnginePlatformError(HTTPError): pass class AppEngineManager(RequestMethods): """ Connection manager for Google App Engine sandbox applications. This manager uses the URLFetch service directly instead of using the emulated httplib, and is subject to URLFetch limitations as described in the App Engine documentation `here <https://cloud.google.com/appengine/docs/python/urlfetch>`_. Notably it will raise an :class:`AppEnginePlatformError` if: * URLFetch is not available. * If you attempt to use this on App Engine Flexible, as full socket support is available. * If a request size is more than 10 megabytes. * If a response size is more than 32 megabtyes. * If you use an unsupported request method such as OPTIONS. Beyond those cases, it will raise normal urllib3 errors. """ def __init__(self, headers=None, retries=None, validate_certificate=True): if not urlfetch: raise AppEnginePlatformError( "URLFetch is not available in this environment.") if is_prod_appengine_mvms(): raise AppEnginePlatformError( "Use normal urllib3.PoolManager instead of AppEngineManager" "on Managed VMs, as using URLFetch is not necessary in " "this environment.") warnings.warn( "urllib3 is using URLFetch on Google App Engine sandbox instead " "of sockets. 
To use sockets directly instead of URLFetch see " "https://urllib3.readthedocs.io/en/latest/contrib.html.", AppEnginePlatformWarning) RequestMethods.__init__(self, headers) self.validate_certificate = validate_certificate self.retries = retries or Retry.DEFAULT def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): # Return False to re-raise any potential exceptions return False def urlopen(self, method, url, body=None, headers=None, retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT, **response_kw): retries = self._get_retries(retries, redirect) try: response = urlfetch.fetch( url, payload=body, method=method, headers=headers or {}, allow_truncated=False, follow_redirects=( redirect and retries.redirect != 0 and retries.total), deadline=self._get_absolute_timeout(timeout), validate_certificate=self.validate_certificate, ) except urlfetch.DeadlineExceededError as e: raise TimeoutError(self, e) except urlfetch.InvalidURLError as e: if 'too large' in str(e): raise AppEnginePlatformError( "URLFetch request too large, URLFetch only " "supports requests up to 10mb in size.", e) raise ProtocolError(e) except urlfetch.DownloadError as e: if 'Too many redirects' in str(e): raise MaxRetryError(self, url, reason=e) raise ProtocolError(e) except urlfetch.ResponseTooLargeError as e: raise AppEnginePlatformError( "URLFetch response too large, URLFetch only supports" "responses up to 32mb in size.", e) except urlfetch.SSLCertificateError as e: raise SSLError(e) except urlfetch.InvalidMethodError as e: raise AppEnginePlatformError( "URLFetch does not support method: %s" % method, e) http_response = self._urlfetch_response_to_http_response( response, retries=retries, **response_kw) # Check for redirect response if (http_response.get_redirect_location() and retries.raise_on_redirect and redirect): raise MaxRetryError(self, url, "too many redirects") # Check if we should retry the HTTP response. 
if retries.is_forced_retry(method, status_code=http_response.status): retries = retries.increment( method, url, response=http_response, _pool=self) log.info("Forced retry: %s", url) retries.sleep() return self.urlopen( method, url, body=body, headers=headers, retries=retries, redirect=redirect, timeout=timeout, **response_kw) return http_response def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw): if is_prod_appengine(): # Production GAE handles deflate encoding automatically, but does # not remove the encoding header. content_encoding = urlfetch_resp.headers.get('content-encoding') if content_encoding == 'deflate': del urlfetch_resp.headers['content-encoding'] transfer_encoding = urlfetch_resp.headers.get('transfer-encoding') # We have a full response's content, # so let's make sure we don't report ourselves as chunked data. if transfer_encoding == 'chunked': encodings = transfer_encoding.split(",") encodings.remove('chunked') urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings) return HTTPResponse( # In order for decoding to work, we must present the content as # a file-like object. body=BytesIO(urlfetch_resp.content), headers=urlfetch_resp.headers, status=urlfetch_resp.status_code, **response_kw ) def _get_absolute_timeout(self, timeout): if timeout is Timeout.DEFAULT_TIMEOUT: return None # Defer to URLFetch's default. 
if isinstance(timeout, Timeout): if timeout._read is not None or timeout._connect is not None: warnings.warn( "URLFetch does not support granular timeout settings, " "reverting to total or default URLFetch timeout.", AppEnginePlatformWarning) return timeout.total return timeout def _get_retries(self, retries, redirect): if not isinstance(retries, Retry): retries = Retry.from_int( retries, redirect=redirect, default=self.retries) if retries.connect or retries.read or retries.redirect: warnings.warn( "URLFetch only supports total retries and does not " "recognize connect, read, or redirect retry parameters.", AppEnginePlatformWarning) return retries def is_appengine(): return (is_local_appengine() or is_prod_appengine() or is_prod_appengine_mvms()) def is_appengine_sandbox(): return is_appengine() and not is_prod_appengine_mvms() def is_local_appengine(): return ('APPENGINE_RUNTIME' in os.environ and 'Development/' in os.environ['SERVER_SOFTWARE']) def is_prod_appengine(): return ('APPENGINE_RUNTIME' in os.environ and 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and not is_prod_appengine_mvms()) def is_prod_appengine_mvms(): return os.environ.get('GAE_VM', False) == 'true'
mit
almarklein/scikit-image
skimage/transform/finite_radon_transform.py
3
3206
""" :author: Gary Ruben, 2009 :license: modified BSD """ __all__ = ["frt2", "ifrt2"] import numpy as np from numpy import roll, newaxis def frt2(a): """Compute the 2-dimensional finite radon transform (FRT) for an n x n integer array. Parameters ---------- a : array_like A 2-D square n x n integer array. Returns ------- FRT : 2-D ndarray Finite Radon Transform array of (n+1) x n integer coefficients. See Also -------- ifrt2 : The two-dimensional inverse FRT. Notes ----- The FRT has a unique inverse iff n is prime. [FRT] The idea for this algorithm is due to Vlad Negnevitski. Examples -------- Generate a test image: Use a prime number for the array dimensions >>> SIZE = 59 >>> img = np.tri(SIZE, dtype=np.int32) Apply the Finite Radon Transform: >>> f = frt2(img) References ---------- .. [FRT] A. Kingston and I. Svalbe, "Projective transforms on periodic discrete image arrays," in P. Hawkes (Ed), Advances in Imaging and Electron Physics, 139 (2006) """ if a.ndim != 2 or a.shape[0] != a.shape[1]: raise ValueError("Input must be a square, 2-D array") ai = a.copy() n = ai.shape[0] f = np.empty((n + 1, n), np.uint32) f[0] = ai.sum(axis=0) for m in range(1, n): # Roll the pth row of ai left by p places for row in range(1, n): ai[row] = roll(ai[row], -row) f[m] = ai.sum(axis=0) f[n] = ai.sum(axis=1) return f def ifrt2(a): """Compute the 2-dimensional inverse finite radon transform (iFRT) for an (n+1) x n integer array. Parameters ---------- a : array_like A 2-D (n+1) row x n column integer array. Returns ------- iFRT : 2-D n x n ndarray Inverse Finite Radon Transform array of n x n integer coefficients. See Also -------- frt2 : The two-dimensional FRT Notes ----- The FRT has a unique inverse iff n is prime. See [1]_ for an overview. The idea for this algorithm is due to Vlad Negnevitski. 
Examples -------- >>> SIZE = 59 >>> img = np.tri(SIZE, dtype=np.int32) Apply the Finite Radon Transform: >>> f = frt2(img) Apply the Inverse Finite Radon Transform to recover the input >>> fi = ifrt2(f) Check that it's identical to the original >>> assert len(np.nonzero(img-fi)[0]) == 0 References ---------- .. [1] A. Kingston and I. Svalbe, "Projective transforms on periodic discrete image arrays," in P. Hawkes (Ed), Advances in Imaging and Electron Physics, 139 (2006) """ if a.ndim != 2 or a.shape[0] != a.shape[1] + 1: raise ValueError("Input must be an (n+1) row x n column, 2-D array") ai = a.copy()[:-1] n = ai.shape[1] f = np.empty((n, n), np.uint32) f[0] = ai.sum(axis=0) for m in range(1, n): # Rolls the pth row of ai right by p places. for row in range(1, ai.shape[0]): ai[row] = roll(ai[row], row) f[m] = ai.sum(axis=0) f += a[-1][newaxis].T f = (f - ai[0].sum()) / n return f
bsd-3-clause
chriswardchrisward/blog-api
src/posts/views.py
9
4540
try: from urllib import quote_plus #python 2 except: pass try: from urllib.parse import quote_plus #python 3 except: pass from django.contrib import messages from django.contrib.contenttypes.models import ContentType from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from django.db.models import Q from django.http import HttpResponse, HttpResponseRedirect, Http404 from django.shortcuts import render, get_object_or_404, redirect from django.utils import timezone from comments.forms import CommentForm from comments.models import Comment from .forms import PostForm from .models import Post def post_create(request): if not request.user.is_staff or not request.user.is_superuser: raise Http404 form = PostForm(request.POST or None, request.FILES or None) if form.is_valid(): instance = form.save(commit=False) instance.user = request.user instance.save() # message success messages.success(request, "Successfully Created") return HttpResponseRedirect(instance.get_absolute_url()) context = { "form": form, } return render(request, "post_form.html", context) def post_detail(request, slug=None): instance = get_object_or_404(Post, slug=slug) if instance.publish > timezone.now().date() or instance.draft: if not request.user.is_staff or not request.user.is_superuser: raise Http404 share_string = quote_plus(instance.content) initial_data = { "content_type": instance.get_content_type, "object_id": instance.id } form = CommentForm(request.POST or None, initial=initial_data) if form.is_valid() and request.user.is_authenticated(): c_type = form.cleaned_data.get("content_type") content_type = ContentType.objects.get(model=c_type) obj_id = form.cleaned_data.get('object_id') content_data = form.cleaned_data.get("content") parent_obj = None try: parent_id = int(request.POST.get("parent_id")) except: parent_id = None if parent_id: parent_qs = Comment.objects.filter(id=parent_id) if parent_qs.exists() and parent_qs.count() == 1: parent_obj = parent_qs.first() new_comment, 
created = Comment.objects.get_or_create( user = request.user, content_type= content_type, object_id = obj_id, content = content_data, parent = parent_obj, ) return HttpResponseRedirect(new_comment.content_object.get_absolute_url()) comments = instance.comments context = { "title": instance.title, "instance": instance, "share_string": share_string, "comments": comments, "comment_form":form, } return render(request, "post_detail.html", context) def post_list(request): today = timezone.now().date() queryset_list = Post.objects.active() #.order_by("-timestamp") if request.user.is_staff or request.user.is_superuser: queryset_list = Post.objects.all() query = request.GET.get("q") if query: queryset_list = queryset_list.filter( Q(title__icontains=query)| Q(content__icontains=query)| Q(user__first_name__icontains=query) | Q(user__last_name__icontains=query) ).distinct() paginator = Paginator(queryset_list, 8) # Show 25 contacts per page page_request_var = "page" page = request.GET.get(page_request_var) try: queryset = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. queryset = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. 
queryset = paginator.page(paginator.num_pages) context = { "object_list": queryset, "title": "List", "page_request_var": page_request_var, "today": today, } return render(request, "post_list.html", context) def post_update(request, slug=None): if not request.user.is_staff or not request.user.is_superuser: raise Http404 instance = get_object_or_404(Post, slug=slug) form = PostForm(request.POST or None, request.FILES or None, instance=instance) if form.is_valid(): instance = form.save(commit=False) instance.save() messages.success(request, "<a href='#'>Item</a> Saved", extra_tags='html_safe') return HttpResponseRedirect(instance.get_absolute_url()) context = { "title": instance.title, "instance": instance, "form":form, } return render(request, "post_form.html", context) def post_delete(request, slug=None): if not request.user.is_staff or not request.user.is_superuser: raise Http404 instance = get_object_or_404(Post, slug=slug) instance.delete() messages.success(request, "Successfully deleted") return redirect("posts:list")
mit
rasata/ansible
lib/ansible/executor/stats.py
251
1716
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type class AggregateStats: ''' holds stats about per-host activity during playbook runs ''' def __init__(self): self.processed = {} self.failures = {} self.ok = {} self.dark = {} self.changed = {} self.skipped = {} def increment(self, what, host): ''' helper function to bump a statistic ''' self.processed[host] = 1 prev = (getattr(self, what)).get(host, 0) getattr(self, what)[host] = prev+1 def summarize(self, host): ''' return information about a particular host ''' return dict( ok = self.ok.get(host, 0), failures = self.failures.get(host, 0), unreachable = self.dark.get(host,0), changed = self.changed.get(host, 0), skipped = self.skipped.get(host, 0) )
gpl-3.0
ojengwa/odoo
addons/hr_holidays/wizard/hr_holidays_summary_department.py
337
2335
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # $Id: account.py 1005 2005-07-25 08:41:42Z nicoe $ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import fields, osv from openerp.tools.translate import _ class hr_holidays_summary_dept(osv.osv_memory): _name = 'hr.holidays.summary.dept' _description = 'HR Leaves Summary Report By Department' _columns = { 'date_from': fields.date('From', required=True), 'depts': fields.many2many('hr.department', 'summary_dept_rel', 'sum_id', 'dept_id', 'Department(s)'), 'holiday_type': fields.selection([('Approved','Approved'),('Confirmed','Confirmed'),('both','Both Approved and Confirmed')], 'Leave Type', required=True) } _defaults = { 'date_from': lambda *a: time.strftime('%Y-%m-01'), 'holiday_type': 'Approved' } def print_report(self, cr, uid, ids, context=None): data = self.read(cr, uid, ids, context=context)[0] if not data['depts']: raise osv.except_osv(_('Error!'), _('You have to select at least one Department. 
And try again.')) datas = { 'ids': [], 'model': 'ir.ui.menu', 'form': data } return { 'type': 'ir.actions.report.xml', 'report_name': 'holidays.summary', 'datas': datas, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
fx2003/tensorflow-study
TensorFlowๅฎžๆˆ˜/models/object_detection/matchers/argmax_matcher_test.py
21
9704
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.matchers.argmax_matcher.""" import numpy as np import tensorflow as tf from object_detection.matchers import argmax_matcher class ArgMaxMatcherTest(tf.test.TestCase): def test_return_correct_matches_with_default_thresholds(self): similarity = np.array([[1., 1, 1, 3, 1], [2, -1, 2, 0, 4], [3, 0, -1, 0, 0]]) matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None) expected_matched_rows = np.array([2, 0, 1, 0, 1]) sim = tf.constant(similarity) match = matcher.match(sim) matched_cols = match.matched_column_indices() matched_rows = match.matched_row_indices() unmatched_cols = match.unmatched_column_indices() with self.test_session() as sess: res_matched_cols = sess.run(matched_cols) res_matched_rows = sess.run(matched_rows) res_unmatched_cols = sess.run(unmatched_cols) self.assertAllEqual(res_matched_rows, expected_matched_rows) self.assertAllEqual(res_matched_cols, np.arange(similarity.shape[1])) self.assertEmpty(res_unmatched_cols) def test_return_correct_matches_with_empty_rows(self): matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None) sim = 0.2*tf.ones([0, 5]) match = matcher.match(sim) unmatched_cols = match.unmatched_column_indices() with self.test_session() as sess: res_unmatched_cols = sess.run(unmatched_cols) 
self.assertAllEqual(res_unmatched_cols, np.arange(5)) def test_return_correct_matches_with_matched_threshold(self): similarity = np.array([[1, 1, 1, 3, 1], [2, -1, 2, 0, 4], [3, 0, -1, 0, 0]], dtype=np.int32) matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3) expected_matched_cols = np.array([0, 3, 4]) expected_matched_rows = np.array([2, 0, 1]) expected_unmatched_cols = np.array([1, 2]) sim = tf.constant(similarity) match = matcher.match(sim) matched_cols = match.matched_column_indices() matched_rows = match.matched_row_indices() unmatched_cols = match.unmatched_column_indices() init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) res_matched_cols = sess.run(matched_cols) res_matched_rows = sess.run(matched_rows) res_unmatched_cols = sess.run(unmatched_cols) self.assertAllEqual(res_matched_rows, expected_matched_rows) self.assertAllEqual(res_matched_cols, expected_matched_cols) self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols) def test_return_correct_matches_with_matched_and_unmatched_threshold(self): similarity = np.array([[1, 1, 1, 3, 1], [2, -1, 2, 0, 4], [3, 0, -1, 0, 0]], dtype=np.int32) matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3, unmatched_threshold=2) expected_matched_cols = np.array([0, 3, 4]) expected_matched_rows = np.array([2, 0, 1]) expected_unmatched_cols = np.array([1]) # col 2 has too high maximum val sim = tf.constant(similarity) match = matcher.match(sim) matched_cols = match.matched_column_indices() matched_rows = match.matched_row_indices() unmatched_cols = match.unmatched_column_indices() with self.test_session() as sess: res_matched_cols = sess.run(matched_cols) res_matched_rows = sess.run(matched_rows) res_unmatched_cols = sess.run(unmatched_cols) self.assertAllEqual(res_matched_rows, expected_matched_rows) self.assertAllEqual(res_matched_cols, expected_matched_cols) self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols) def 
test_return_correct_matches_negatives_lower_than_unmatched_false(self): similarity = np.array([[1, 1, 1, 3, 1], [2, -1, 2, 0, 4], [3, 0, -1, 0, 0]], dtype=np.int32) matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3, unmatched_threshold=2, negatives_lower_than_unmatched=False) expected_matched_cols = np.array([0, 3, 4]) expected_matched_rows = np.array([2, 0, 1]) expected_unmatched_cols = np.array([2]) # col 1 has too low maximum val sim = tf.constant(similarity) match = matcher.match(sim) matched_cols = match.matched_column_indices() matched_rows = match.matched_row_indices() unmatched_cols = match.unmatched_column_indices() with self.test_session() as sess: res_matched_cols = sess.run(matched_cols) res_matched_rows = sess.run(matched_rows) res_unmatched_cols = sess.run(unmatched_cols) self.assertAllEqual(res_matched_rows, expected_matched_rows) self.assertAllEqual(res_matched_cols, expected_matched_cols) self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols) def test_return_correct_matches_unmatched_row_not_using_force_match(self): similarity = np.array([[1, 1, 1, 3, 1], [-1, 0, -2, -2, -1], [3, 0, -1, 2, 0]], dtype=np.int32) matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3, unmatched_threshold=2) expected_matched_cols = np.array([0, 3]) expected_matched_rows = np.array([2, 0]) expected_unmatched_cols = np.array([1, 2, 4]) sim = tf.constant(similarity) match = matcher.match(sim) matched_cols = match.matched_column_indices() matched_rows = match.matched_row_indices() unmatched_cols = match.unmatched_column_indices() with self.test_session() as sess: res_matched_cols = sess.run(matched_cols) res_matched_rows = sess.run(matched_rows) res_unmatched_cols = sess.run(unmatched_cols) self.assertAllEqual(res_matched_rows, expected_matched_rows) self.assertAllEqual(res_matched_cols, expected_matched_cols) self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols) def test_return_correct_matches_unmatched_row_while_using_force_match(self): 
similarity = np.array([[1, 1, 1, 3, 1], [-1, 0, -2, -2, -1], [3, 0, -1, 2, 0]], dtype=np.int32) matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=3, unmatched_threshold=2, force_match_for_each_row=True) expected_matched_cols = np.array([0, 1, 3]) expected_matched_rows = np.array([2, 1, 0]) expected_unmatched_cols = np.array([2, 4]) # col 2 has too high max val sim = tf.constant(similarity) match = matcher.match(sim) matched_cols = match.matched_column_indices() matched_rows = match.matched_row_indices() unmatched_cols = match.unmatched_column_indices() with self.test_session() as sess: res_matched_cols = sess.run(matched_cols) res_matched_rows = sess.run(matched_rows) res_unmatched_cols = sess.run(unmatched_cols) self.assertAllEqual(res_matched_rows, expected_matched_rows) self.assertAllEqual(res_matched_cols, expected_matched_cols) self.assertAllEqual(res_unmatched_cols, expected_unmatched_cols) def test_valid_arguments_corner_case(self): argmax_matcher.ArgMaxMatcher(matched_threshold=1, unmatched_threshold=1) def test_invalid_arguments_corner_case_negatives_lower_than_thres_false(self): with self.assertRaises(ValueError): argmax_matcher.ArgMaxMatcher(matched_threshold=1, unmatched_threshold=1, negatives_lower_than_unmatched=False) def test_invalid_arguments_no_matched_threshold(self): with self.assertRaises(ValueError): argmax_matcher.ArgMaxMatcher(matched_threshold=None, unmatched_threshold=4) def test_invalid_arguments_unmatched_thres_larger_than_matched_thres(self): with self.assertRaises(ValueError): argmax_matcher.ArgMaxMatcher(matched_threshold=1, unmatched_threshold=2) def test_set_values_using_indicator(self): input_a = np.array([3, 4, 5, 1, 4, 3, 2]) expected_b = np.array([3, 0, 0, 1, 0, 3, 2]) # Set a>3 to 0 expected_c = np.array( [3., 4., 5., -1., 4., 3., -1.]) # Set a<3 to -1. 
Float32 idxb_ = input_a > 3 idxc_ = input_a < 3 matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=None) a = tf.constant(input_a) idxb = tf.constant(idxb_) idxc = tf.constant(idxc_) b = matcher._set_values_using_indicator(a, idxb, 0) c = matcher._set_values_using_indicator(tf.cast(a, tf.float32), idxc, -1) with self.test_session() as sess: res_b = sess.run(b) res_c = sess.run(c) self.assertAllEqual(res_b, expected_b) self.assertAllEqual(res_c, expected_c) if __name__ == '__main__': tf.test.main()
mit
azlanismail/prismgames
examples/games/car/networkx/algorithms/tests/test_swap.py
1
1302
#!/usr/bin/env python from nose.tools import * from networkx import * def test_double_edge_swap(): graph = barabasi_albert_graph(200,1) degrees = sorted(graph.degree().values()) G = double_edge_swap(graph, 40) assert_equal(degrees, sorted(graph.degree().values())) def test_connected_double_edge_swap(): graph = barabasi_albert_graph(200,1) degrees = sorted(graph.degree().values()) G = connected_double_edge_swap(graph, 40) assert_true(is_connected(graph)) assert_equal(degrees, sorted(graph.degree().values())) @raises(NetworkXError) def test_double_edge_swap_small(): G = nx.double_edge_swap(nx.path_graph(3)) @raises(NetworkXError) def test_double_edge_swap_tries(): G = nx.double_edge_swap(nx.path_graph(10),nswap=1,max_tries=0) @raises(NetworkXError) def test_connected_double_edge_swap_small(): G = nx.connected_double_edge_swap(nx.path_graph(3)) @raises(NetworkXError) def test_connected_double_edge_swap_not_connected(): G = nx.path_graph(3) G.add_path([10,11,12]) G = nx.connected_double_edge_swap(G) def test_degree_seq_c4(): G = cycle_graph(4) degrees = sorted(G.degree().values()) G = double_edge_swap(G,1,100) assert_equal(degrees, sorted(G.degree().values()))
gpl-2.0
timorieber/wagtail
wagtail/core/management/commands/replace_text.py
18
2068
from django.core.management.base import BaseCommand from django.db import models from modelcluster.models import get_all_child_relations from wagtail.core.models import PageRevision, get_page_models def replace_in_model(model, from_text, to_text): text_field_names = [field.name for field in model._meta.fields if ( isinstance(field, models.TextField) or isinstance(field, models.CharField) ) ] updated_fields = [] for field in text_field_names: field_value = getattr(model, field) if field_value and (from_text in field_value): updated_fields.append(field) setattr(model, field, field_value.replace(from_text, to_text)) if updated_fields: model.save(update_fields=updated_fields) class Command(BaseCommand): def add_arguments(self, parser): # Positional arguments parser.add_argument('from_text') parser.add_argument('to_text') def handle(self, *args, **options): from_text = options['from_text'] to_text = options['to_text'] for revision in PageRevision.objects.filter(content_json__contains=from_text): revision.content_json = revision.content_json.replace(from_text, to_text) revision.save(update_fields=['content_json']) for page_class in get_page_models(): self.stdout.write("scanning %s" % page_class._meta.verbose_name) child_relation_names = [rel.get_accessor_name() for rel in get_all_child_relations(page_class)] # Find all pages of this exact type; exclude subclasses, as they will # appear in the get_page_models() list in their own right, and this # ensures that replacement happens only once for page in page_class.objects.exact_type(page_class): replace_in_model(page, from_text, to_text) for child_rel in child_relation_names: for child in getattr(page, child_rel).all(): replace_in_model(child, from_text, to_text)
bsd-3-clause
mollstam/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/auth_tests/test_forms.py
12
21296
from __future__ import unicode_literals import re from django import forms from django.contrib.auth.forms import ( AuthenticationForm, PasswordChangeForm, PasswordResetForm, ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget, SetPasswordForm, UserChangeForm, UserCreationForm, ) from django.contrib.auth.models import User from django.contrib.sites.models import Site from django.core import mail from django.core.mail import EmailMultiAlternatives from django.forms.fields import CharField, Field from django.test import TestCase, override_settings from django.utils import translation from django.utils.encoding import force_text from django.utils.text import capfirst from django.utils.translation import ugettext as _ from .settings import AUTH_TEMPLATES @override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher']) class UserCreationFormTest(TestCase): fixtures = ['authtestdata.json'] def test_user_already_exists(self): data = { 'username': 'testclient', 'password1': 'test123', 'password2': 'test123', } form = UserCreationForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form["username"].errors, [force_text(User._meta.get_field('username').error_messages['unique'])]) def test_invalid_data(self): data = { 'username': 'jsmith!', 'password1': 'test123', 'password2': 'test123', } form = UserCreationForm(data) self.assertFalse(form.is_valid()) validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid') self.assertEqual(form["username"].errors, [force_text(validator.message)]) def test_password_verification(self): # The verification password is incorrect. 
data = { 'username': 'jsmith', 'password1': 'test123', 'password2': 'test', } form = UserCreationForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form["password2"].errors, [force_text(form.error_messages['password_mismatch'])]) def test_both_passwords(self): # One (or both) passwords weren't given data = {'username': 'jsmith'} form = UserCreationForm(data) required_error = [force_text(Field.default_error_messages['required'])] self.assertFalse(form.is_valid()) self.assertEqual(form['password1'].errors, required_error) self.assertEqual(form['password2'].errors, required_error) data['password2'] = 'test123' form = UserCreationForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form['password1'].errors, required_error) self.assertEqual(form['password2'].errors, []) def test_success(self): # The success case. data = { 'username': 'jsmith@example.com', 'password1': 'test123', 'password2': 'test123', } form = UserCreationForm(data) self.assertTrue(form.is_valid()) u = form.save() self.assertEqual(repr(u), '<User: jsmith@example.com>') @override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher']) class AuthenticationFormTest(TestCase): fixtures = ['authtestdata.json'] def test_invalid_username(self): # The user submits an invalid username. data = { 'username': 'jsmith_does_not_exist', 'password': 'test123', } form = AuthenticationForm(None, data) self.assertFalse(form.is_valid()) self.assertEqual(form.non_field_errors(), [force_text(form.error_messages['invalid_login'] % { 'username': User._meta.get_field('username').verbose_name })]) def test_inactive_user(self): # The user is inactive. 
data = { 'username': 'inactive', 'password': 'password', } form = AuthenticationForm(None, data) self.assertFalse(form.is_valid()) self.assertEqual(form.non_field_errors(), [force_text(form.error_messages['inactive'])]) def test_inactive_user_i18n(self): with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True): # The user is inactive. data = { 'username': 'inactive', 'password': 'password', } form = AuthenticationForm(None, data) self.assertFalse(form.is_valid()) self.assertEqual(form.non_field_errors(), [force_text(form.error_messages['inactive'])]) def test_custom_login_allowed_policy(self): # The user is inactive, but our custom form policy allows them to log in. data = { 'username': 'inactive', 'password': 'password', } class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm): def confirm_login_allowed(self, user): pass form = AuthenticationFormWithInactiveUsersOkay(None, data) self.assertTrue(form.is_valid()) # If we want to disallow some logins according to custom logic, # we should raise a django.forms.ValidationError in the form. 
class PickyAuthenticationForm(AuthenticationForm): def confirm_login_allowed(self, user): if user.username == "inactive": raise forms.ValidationError("This user is disallowed.") raise forms.ValidationError("Sorry, nobody's allowed in.") form = PickyAuthenticationForm(None, data) self.assertFalse(form.is_valid()) self.assertEqual(form.non_field_errors(), ['This user is disallowed.']) data = { 'username': 'testclient', 'password': 'password', } form = PickyAuthenticationForm(None, data) self.assertFalse(form.is_valid()) self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."]) def test_success(self): # The success case data = { 'username': 'testclient', 'password': 'password', } form = AuthenticationForm(None, data) self.assertTrue(form.is_valid()) self.assertEqual(form.non_field_errors(), []) def test_username_field_label(self): class CustomAuthenticationForm(AuthenticationForm): username = CharField(label="Name", max_length=75) form = CustomAuthenticationForm() self.assertEqual(form['username'].label, "Name") def test_username_field_label_not_set(self): class CustomAuthenticationForm(AuthenticationForm): username = CharField() form = CustomAuthenticationForm() username_field = User._meta.get_field(User.USERNAME_FIELD) self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name)) def test_username_field_label_empty_string(self): class CustomAuthenticationForm(AuthenticationForm): username = CharField(label='') form = CustomAuthenticationForm() self.assertEqual(form.fields['username'].label, "") @override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher']) class SetPasswordFormTest(TestCase): fixtures = ['authtestdata.json'] def test_password_verification(self): # The two new passwords do not match. 
user = User.objects.get(username='testclient') data = { 'new_password1': 'abc123', 'new_password2': 'abc', } form = SetPasswordForm(user, data) self.assertFalse(form.is_valid()) self.assertEqual(form["new_password2"].errors, [force_text(form.error_messages['password_mismatch'])]) def test_success(self): user = User.objects.get(username='testclient') data = { 'new_password1': 'abc123', 'new_password2': 'abc123', } form = SetPasswordForm(user, data) self.assertTrue(form.is_valid()) @override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher']) class PasswordChangeFormTest(TestCase): fixtures = ['authtestdata.json'] def test_incorrect_password(self): user = User.objects.get(username='testclient') data = { 'old_password': 'test', 'new_password1': 'abc123', 'new_password2': 'abc123', } form = PasswordChangeForm(user, data) self.assertFalse(form.is_valid()) self.assertEqual(form["old_password"].errors, [force_text(form.error_messages['password_incorrect'])]) def test_password_verification(self): # The two new passwords do not match. user = User.objects.get(username='testclient') data = { 'old_password': 'password', 'new_password1': 'abc123', 'new_password2': 'abc', } form = PasswordChangeForm(user, data) self.assertFalse(form.is_valid()) self.assertEqual(form["new_password2"].errors, [force_text(form.error_messages['password_mismatch'])]) def test_success(self): # The success case. 
user = User.objects.get(username='testclient') data = { 'old_password': 'password', 'new_password1': 'abc123', 'new_password2': 'abc123', } form = PasswordChangeForm(user, data) self.assertTrue(form.is_valid()) def test_field_order(self): # Regression test - check the order of fields: user = User.objects.get(username='testclient') self.assertEqual(list(PasswordChangeForm(user, {}).fields), ['old_password', 'new_password1', 'new_password2']) @override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher']) class UserChangeFormTest(TestCase): fixtures = ['authtestdata.json'] def test_username_validity(self): user = User.objects.get(username='testclient') data = {'username': 'not valid'} form = UserChangeForm(data, instance=user) self.assertFalse(form.is_valid()) validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid') self.assertEqual(form["username"].errors, [force_text(validator.message)]) def test_bug_14242(self): # A regression test, introduce by adding an optimization for the # UserChangeForm. 
class MyUserForm(UserChangeForm): def __init__(self, *args, **kwargs): super(MyUserForm, self).__init__(*args, **kwargs) self.fields['groups'].help_text = 'These groups give users different permissions' class Meta(UserChangeForm.Meta): fields = ('groups',) # Just check we can create it MyUserForm({}) def test_unsuable_password(self): user = User.objects.get(username='empty_password') user.set_unusable_password() user.save() form = UserChangeForm(instance=user) self.assertIn(_("No password set."), form.as_table()) def test_bug_17944_empty_password(self): user = User.objects.get(username='empty_password') form = UserChangeForm(instance=user) self.assertIn(_("No password set."), form.as_table()) def test_bug_17944_unmanageable_password(self): user = User.objects.get(username='unmanageable_password') form = UserChangeForm(instance=user) self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table()) def test_bug_17944_unknown_password_algorithm(self): user = User.objects.get(username='unknown_password') form = UserChangeForm(instance=user) self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table()) def test_bug_19133(self): "The change form does not return the password value" # Use the form to construct the POST data user = User.objects.get(username='testclient') form_for_data = UserChangeForm(instance=user) post_data = form_for_data.initial # The password field should be readonly, so anything # posted here should be ignored; the form will be # valid, and give back the 'initial' value for the # password field. 
post_data['password'] = 'new password' form = UserChangeForm(instance=user, data=post_data) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161') def test_bug_19349_bound_password_field(self): user = User.objects.get(username='testclient') form = UserChangeForm(data={}, instance=user) # When rendering the bound password field, # ReadOnlyPasswordHashWidget needs the initial # value to render correctly self.assertEqual(form.initial['password'], form['password'].value()) @override_settings( PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',), TEMPLATES=AUTH_TEMPLATES, USE_TZ=False, ) class PasswordResetFormTest(TestCase): fixtures = ['authtestdata.json'] @classmethod def setUpClass(cls): super(PasswordResetFormTest, cls).setUpClass() # This cleanup is necessary because contrib.sites cache # makes tests interfere with each other, see #11505 Site.objects.clear_cache() def create_dummy_user(self): """ Create a user and return a tuple (user_object, username, email). """ username = 'jsmith' email = 'jsmith@example.com' user = User.objects.create_user(username, email, 'test123') return (user, username, email) def test_invalid_email(self): data = {'email': 'not valid'} form = PasswordResetForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form['email'].errors, [_('Enter a valid email address.')]) def test_nonexistent_email(self): """ Test nonexistent email address. This should not fail because it would expose information about registered users. 
""" data = {'email': 'foo@bar.com'} form = PasswordResetForm(data) self.assertTrue(form.is_valid()) self.assertEqual(len(mail.outbox), 0) def test_cleaned_data(self): (user, username, email) = self.create_dummy_user() data = {'email': email} form = PasswordResetForm(data) self.assertTrue(form.is_valid()) form.save(domain_override='example.com') self.assertEqual(form.cleaned_data['email'], email) self.assertEqual(len(mail.outbox), 1) def test_custom_email_subject(self): data = {'email': 'testclient@example.com'} form = PasswordResetForm(data) self.assertTrue(form.is_valid()) # Since we're not providing a request object, we must provide a # domain_override to prevent the save operation from failing in the # potential case where contrib.sites is not installed. Refs #16412. form.save(domain_override='example.com') self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com') def test_custom_email_constructor(self): data = {'email': 'testclient@example.com'} class CustomEmailPasswordResetForm(PasswordResetForm): def send_mail(self, subject_template_name, email_template_name, context, from_email, to_email, html_email_template_name=None): EmailMultiAlternatives( "Forgot your password?", "Sorry to hear you forgot your password.", None, [to_email], ['site_monitor@example.com'], headers={'Reply-To': 'webmaster@example.com'}, alternatives=[("Really sorry to hear you forgot your password.", "text/html")]).send() form = CustomEmailPasswordResetForm(data) self.assertTrue(form.is_valid()) # Since we're not providing a request object, we must provide a # domain_override to prevent the save operation from failing in the # potential case where contrib.sites is not installed. Refs #16412. 
form.save(domain_override='example.com') self.assertEqual(len(mail.outbox), 1) self.assertEqual(mail.outbox[0].subject, 'Forgot your password?') self.assertEqual(mail.outbox[0].bcc, ['site_monitor@example.com']) self.assertEqual(mail.outbox[0].content_subtype, "plain") def test_preserve_username_case(self): """ Preserve the case of the user name (before the @ in the email address) when creating a user (#5605). """ user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test') self.assertEqual(user.email, 'tesT@example.com') user = User.objects.create_user('forms_test3', 'tesT', 'test') self.assertEqual(user.email, 'tesT') def test_inactive_user(self): """ Test that inactive user cannot receive password reset email. """ (user, username, email) = self.create_dummy_user() user.is_active = False user.save() form = PasswordResetForm({'email': email}) self.assertTrue(form.is_valid()) form.save() self.assertEqual(len(mail.outbox), 0) def test_unusable_password(self): user = User.objects.create_user('testuser', 'test@example.com', 'test') data = {"email": "test@example.com"} form = PasswordResetForm(data) self.assertTrue(form.is_valid()) user.set_unusable_password() user.save() form = PasswordResetForm(data) # The form itself is valid, but no email is sent self.assertTrue(form.is_valid()) form.save() self.assertEqual(len(mail.outbox), 0) def test_save_plaintext_email(self): """ Test the PasswordResetForm.save() method with no html_email_template_name parameter passed in. Test to ensure original behavior is unchanged after the parameter was added. 
""" (user, username, email) = self.create_dummy_user() form = PasswordResetForm({"email": email}) self.assertTrue(form.is_valid()) form.save() self.assertEqual(len(mail.outbox), 1) message = mail.outbox[0].message() self.assertFalse(message.is_multipart()) self.assertEqual(message.get_content_type(), 'text/plain') self.assertEqual(message.get('subject'), 'Custom password reset on example.com') self.assertEqual(len(mail.outbox[0].alternatives), 0) self.assertEqual(message.get_all('to'), [email]) self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload())) def test_save_html_email_template_name(self): """ Test the PasswordResetFOrm.save() method with html_email_template_name parameter specified. Test to ensure that a multipart email is sent with both text/plain and text/html parts. """ (user, username, email) = self.create_dummy_user() form = PasswordResetForm({"email": email}) self.assertTrue(form.is_valid()) form.save(html_email_template_name='registration/html_password_reset_email.html') self.assertEqual(len(mail.outbox), 1) self.assertEqual(len(mail.outbox[0].alternatives), 1) message = mail.outbox[0].message() self.assertEqual(message.get('subject'), 'Custom password reset on example.com') self.assertEqual(len(message.get_payload()), 2) self.assertTrue(message.is_multipart()) self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain') self.assertEqual(message.get_payload(1).get_content_type(), 'text/html') self.assertEqual(message.get_all('to'), [email]) self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload())) self.assertTrue( re.match(r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$', message.get_payload(1).get_payload()) ) class ReadOnlyPasswordHashTest(TestCase): def test_bug_19349_render_with_none_value(self): # Rendering the widget with value set to None # mustn't raise an exception. 
widget = ReadOnlyPasswordHashWidget() html = widget.render(name='password', value=None, attrs={}) self.assertIn(_("No password set."), html) def test_readonly_field_has_changed(self): field = ReadOnlyPasswordHashField() self.assertFalse(field.has_changed('aaa', 'bbb'))
mit
nandhp/youtube-dl
youtube_dl/extractor/ndr.py
20
14258
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( determine_ext, int_or_none, parse_iso8601, qualities, ) class NDRBaseIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) display_id = next(group for group in mobj.groups() if group) webpage = self._download_webpage(url, display_id) return self._extract_embed(webpage, display_id) class NDRIE(NDRBaseIE): IE_NAME = 'ndr' IE_DESC = 'NDR.de - Norddeutscher Rundfunk' _VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)*(?P<id>[^/?#]+),[\da-z]+\.html' _TESTS = [{ # httpVideo, same content id 'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html', 'md5': '6515bc255dc5c5f8c85bbc38e035a659', 'info_dict': { 'id': 'hafengeburtstag988', 'display_id': 'Party-Poette-und-Parade', 'ext': 'mp4', 'title': 'Party, Pรถtte und Parade', 'description': 'md5:ad14f9d2f91d3040b6930c697e5f6b4c', 'uploader': 'ndrtv', 'timestamp': 1431108900, 'upload_date': '20150510', 'duration': 3498, }, 'params': { 'skip_download': True, }, }, { # httpVideo, different content id 'url': 'http://www.ndr.de/sport/fussball/40-Osnabrueck-spielt-sich-in-einen-Rausch,osna270.html', 'md5': '1043ff203eab307f0c51702ec49e9a71', 'info_dict': { 'id': 'osna272', 'display_id': '40-Osnabrueck-spielt-sich-in-einen-Rausch', 'ext': 'mp4', 'title': 'Osnabrรผck - Wehen Wiesbaden: Die Highlights', 'description': 'md5:32e9b800b3d2d4008103752682d5dc01', 'uploader': 'ndrtv', 'timestamp': 1442059200, 'upload_date': '20150912', 'duration': 510, }, 'params': { 'skip_download': True, }, }, { # httpAudio, same content id 'url': 'http://www.ndr.de/info/La-Valette-entgeht-der-Hinrichtung,audio51535.html', 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8', 'info_dict': { 'id': 'audio51535', 'display_id': 'La-Valette-entgeht-der-Hinrichtung', 'ext': 'mp3', 'title': 'La Valette entgeht der Hinrichtung', 'description': 'md5:22f9541913a40fe50091d5cdd7c9f536', 'uploader': 
'ndrinfo', 'timestamp': 1290626100, 'upload_date': '20140729', 'duration': 884, }, 'params': { 'skip_download': True, }, }, { 'url': 'https://www.ndr.de/Fettes-Brot-Ferris-MC-und-Thees-Uhlmann-live-on-stage,festivalsommer116.html', 'only_matching': True, }] def _extract_embed(self, webpage, display_id): embed_url = self._html_search_meta( 'embedURL', webpage, 'embed URL', fatal=True) description = self._search_regex( r'<p[^>]+itemprop="description">([^<]+)</p>', webpage, 'description', default=None) or self._og_search_description(webpage) timestamp = parse_iso8601( self._search_regex( r'<span[^>]+itemprop="(?:datePublished|uploadDate)"[^>]+content="([^"]+)"', webpage, 'upload date', fatal=False)) return { '_type': 'url_transparent', 'url': embed_url, 'display_id': display_id, 'description': description, 'timestamp': timestamp, } class NJoyIE(NDRBaseIE): IE_NAME = 'njoy' IE_DESC = 'N-JOY' _VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)*(?:(?P<display_id>[^/?#]+),)?(?P<id>[\da-z]+)\.html' _TESTS = [{ # httpVideo, same content id 'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html', 'md5': 'cb63be60cd6f9dd75218803146d8dc67', 'info_dict': { 'id': 'comedycontest2480', 'display_id': 'Benaissa-beim-NDR-Comedy-Contest', 'ext': 'mp4', 'title': 'Benaissa beim NDR Comedy Contest', 'description': 'md5:f057a6c4e1c728b10d33b5ffd36ddc39', 'uploader': 'ndrtv', 'upload_date': '20141129', 'duration': 654, }, 'params': { 'skip_download': True, }, }, { # httpVideo, different content id 'url': 'http://www.n-joy.de/musik/Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-,felixjaehn168.html', 'md5': '417660fffa90e6df2fda19f1b40a64d8', 'info_dict': { 'id': 'dockville882', 'display_id': 'Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-', 'ext': 'mp4', 'title': '"Ich hab noch nie" mit Felix Jaehn', 'description': 'md5:85dd312d53be1b99e1f998a16452a2f3', 'uploader': 'njoy', 'upload_date': '20150822', 'duration': 
211, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.n-joy.de/radio/webradio/morningshow209.html', 'only_matching': True, }] def _extract_embed(self, webpage, display_id): video_id = self._search_regex( r'<iframe[^>]+id="pp_([\da-z]+)"', webpage, 'embed id') description = self._search_regex( r'<div[^>]+class="subline"[^>]*>[^<]+</div>\s*<p>([^<]+)</p>', webpage, 'description', fatal=False) return { '_type': 'url_transparent', 'ie_key': 'NDREmbedBase', 'url': 'ndr:%s' % video_id, 'display_id': display_id, 'description': description, } class NDREmbedBaseIE(InfoExtractor): IE_NAME = 'ndr:embed:base' _VALID_URL = r'(?:ndr:(?P<id_s>[\da-z]+)|https?://www\.ndr\.de/(?P<id>[\da-z]+)-ppjson\.json)' _TESTS = [{ 'url': 'ndr:soundcheck3366', 'only_matching': True, }, { 'url': 'http://www.ndr.de/soundcheck3366-ppjson.json', 'only_matching': True, }] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') or mobj.group('id_s') ppjson = self._download_json( 'http://www.ndr.de/%s-ppjson.json' % video_id, video_id) playlist = ppjson['playlist'] formats = [] quality_key = qualities(('xs', 's', 'm', 'l', 'xl')) for format_id, f in playlist.items(): src = f.get('src') if not src: continue ext = determine_ext(src, None) if ext == 'f4m': formats.extend(self._extract_f4m_formats( src + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id, f4m_id='hds')) elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( src, video_id, 'mp4', m3u8_id='hls', entry_protocol='m3u8_native')) else: quality = f.get('quality') ff = { 'url': src, 'format_id': quality or format_id, 'quality': quality_key(quality), } type_ = f.get('type') if type_ and type_.split('/')[0] == 'audio': ff['vcodec'] = 'none' ff['ext'] = ext or 'mp3' formats.append(ff) self._sort_formats(formats) config = playlist['config'] live = playlist.get('config', {}).get('streamType') in ['httpVideoLive', 'httpAudioLive'] title = config['title'] if live: title = 
self._live_title(title) uploader = ppjson.get('config', {}).get('branding') upload_date = ppjson.get('config', {}).get('publicationDate') duration = int_or_none(config.get('duration')) thumbnails = [{ 'id': thumbnail.get('quality') or thumbnail_id, 'url': thumbnail['src'], 'preference': quality_key(thumbnail.get('quality')), } for thumbnail_id, thumbnail in config.get('poster', {}).items() if thumbnail.get('src')] return { 'id': video_id, 'title': title, 'is_live': live, 'uploader': uploader if uploader != '-' else None, 'upload_date': upload_date[0:8] if upload_date else None, 'duration': duration, 'thumbnails': thumbnails, 'formats': formats, } class NDREmbedIE(NDREmbedBaseIE): IE_NAME = 'ndr:embed' _VALID_URL = r'https?://www\.ndr\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)\.html' _TESTS = [{ 'url': 'http://www.ndr.de/fernsehen/sendungen/ndr_aktuell/ndraktuell28488-player.html', 'md5': '8b9306142fe65bbdefb5ce24edb6b0a9', 'info_dict': { 'id': 'ndraktuell28488', 'ext': 'mp4', 'title': 'Norddeutschland begrรผรŸt Flรผchtlinge', 'is_live': False, 'uploader': 'ndrtv', 'upload_date': '20150907', 'duration': 132, }, }, { 'url': 'http://www.ndr.de/ndr2/events/soundcheck/soundcheck3366-player.html', 'md5': '002085c44bae38802d94ae5802a36e78', 'info_dict': { 'id': 'soundcheck3366', 'ext': 'mp4', 'title': 'Ella Henderson braucht Vergleiche nicht zu scheuen', 'is_live': False, 'uploader': 'ndr2', 'upload_date': '20150912', 'duration': 3554, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ndr.de/info/audio51535-player.html', 'md5': 'bb3cd38e24fbcc866d13b50ca59307b8', 'info_dict': { 'id': 'audio51535', 'ext': 'mp3', 'title': 'La Valette entgeht der Hinrichtung', 'is_live': False, 'uploader': 'ndrinfo', 'upload_date': '20140729', 'duration': 884, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/visite/visite11010-externalPlayer.html', 'md5': 'ae57f80511c1e1f2fd0d0d3d31aeae7c', 'info_dict': { 'id': 
'visite11010', 'ext': 'mp4', 'title': 'Visite - die ganze Sendung', 'is_live': False, 'uploader': 'ndrtv', 'upload_date': '20150902', 'duration': 3525, }, 'params': { 'skip_download': True, }, }, { # httpVideoLive 'url': 'http://www.ndr.de/fernsehen/livestream/livestream217-externalPlayer.html', 'info_dict': { 'id': 'livestream217', 'ext': 'flv', 'title': 're:^NDR Fernsehen Niedersachsen \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', 'is_live': True, 'upload_date': '20150910', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.ndr.de/ndrkultur/audio255020-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/nordtour/nordtour7124-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/kultur/film/videos/videoimport10424-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/hamburg_journal/hamj43006-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/sendungen/weltbilder/weltbilder4518-player.html', 'only_matching': True, }, { 'url': 'http://www.ndr.de/fernsehen/doku952-player.html', 'only_matching': True, }] class NJoyEmbedIE(NDREmbedBaseIE): IE_NAME = 'njoy:embed' _VALID_URL = r'https?://www\.n-joy\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html' _TESTS = [{ # httpVideo 'url': 'http://www.n-joy.de/events/reeperbahnfestival/doku948-player_image-bc168e87-5263-4d6d-bd27-bb643005a6de_theme-n-joy.html', 'md5': '8483cbfe2320bd4d28a349d62d88bd74', 'info_dict': { 'id': 'doku948', 'ext': 'mp4', 'title': 'Zehn Jahre Reeperbahn Festival - die Doku', 'is_live': False, 'upload_date': '20150807', 'duration': 1011, }, }, { # httpAudio 'url': 'http://www.n-joy.de/news_wissen/stefanrichter100-player_image-d5e938b1-f21a-4b9a-86b8-aaba8bca3a13_theme-n-joy.html', 'md5': 'd989f80f28ac954430f7b8a48197188a', 'info_dict': { 'id': 'stefanrichter100', 'ext': 'mp3', 'title': 'Interview mit einem Augenzeugen', 'is_live': False, 'uploader': 'njoy', 'upload_date': 
'20150909', 'duration': 140, }, 'params': { 'skip_download': True, }, }, { # httpAudioLive, no explicit ext 'url': 'http://www.n-joy.de/news_wissen/webradioweltweit100-player_image-3fec0484-2244-4565-8fb8-ed25fd28b173_theme-n-joy.html', 'info_dict': { 'id': 'webradioweltweit100', 'ext': 'mp3', 'title': 're:^N-JOY Weltweit \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', 'is_live': True, 'uploader': 'njoy', 'upload_date': '20150810', }, 'params': { 'skip_download': True, }, }, { 'url': 'http://www.n-joy.de/musik/dockville882-player_image-3905259e-0803-4764-ac72-8b7de077d80a_theme-n-joy.html', 'only_matching': True, }, { 'url': 'http://www.n-joy.de/radio/sendungen/morningshow/urlaubsfotos190-player_image-066a5df1-5c95-49ec-a323-941d848718db_theme-n-joy.html', 'only_matching': True, }, { 'url': 'http://www.n-joy.de/entertainment/comedy/krudetv290-player_image-ab261bfe-51bf-4bf3-87ba-c5122ee35b3d_theme-n-joy.html', 'only_matching': True, }]
unlicense
propellerhead/blog
gdata/tlslite/utils/jython_compat.py
358
5270
"""Miscellaneous functions to mask Python/Jython differences.""" import os import sha if os.name != "java": BaseException = Exception from sets import Set import array import math def createByteArraySequence(seq): return array.array('B', seq) def createByteArrayZeros(howMany): return array.array('B', [0] * howMany) def concatArrays(a1, a2): return a1+a2 def bytesToString(bytes): return bytes.tostring() def stringToBytes(s): bytes = createByteArrayZeros(0) bytes.fromstring(s) return bytes def numBits(n): if n==0: return 0 return int(math.floor(math.log(n, 2))+1) class CertChainBase: pass class SelfTestBase: pass class ReportFuncBase: pass #Helper functions for working with sets (from Python 2.3) def iterSet(set): return iter(set) def getListFromSet(set): return list(set) #Factory function for getting a SHA1 object def getSHA1(s): return sha.sha(s) import sys import traceback def formatExceptionTrace(e): newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) return newStr else: #Jython 2.1 is missing lots of python 2.3 stuff, #which we have to emulate here: import java import jarray BaseException = java.lang.Exception def createByteArraySequence(seq): if isinstance(seq, type("")): #If it's a string, convert seq = [ord(c) for c in seq] return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed def createByteArrayZeros(howMany): return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed def concatArrays(a1, a2): l = list(a1)+list(a2) return createByteArraySequence(l) #WAY TOO SLOW - MUST BE REPLACED------------ def bytesToString(bytes): return "".join([chr(b) for b in bytes]) def stringToBytes(s): bytes = createByteArrayZeros(len(s)) for count, c in enumerate(s): bytes[count] = ord(c) return bytes #WAY TOO SLOW - MUST BE REPLACED------------ def numBits(n): if n==0: return 0 n= 1L * n; #convert to long, if it isn't already return n.__tojava__(java.math.BigInteger).bitLength() #This 
properly creates static methods for Jython class staticmethod: def __init__(self, anycallable): self.__call__ = anycallable #Properties are not supported for Jython class property: def __init__(self, anycallable): pass #True and False have to be specially defined False = 0 True = 1 class StopIteration(Exception): pass def enumerate(collection): return zip(range(len(collection)), collection) class Set: def __init__(self, seq=None): self.values = {} if seq: for e in seq: self.values[e] = None def add(self, e): self.values[e] = None def discard(self, e): if e in self.values.keys(): del(self.values[e]) def union(self, s): ret = Set() for e in self.values.keys(): ret.values[e] = None for e in s.values.keys(): ret.values[e] = None return ret def issubset(self, other): for e in self.values.keys(): if e not in other.values.keys(): return False return True def __nonzero__( self): return len(self.values.keys()) def __contains__(self, e): return e in self.values.keys() def iterSet(set): return set.values.keys() def getListFromSet(set): return set.values.keys() """ class JCE_SHA1: def __init__(self, s=None): self.md = java.security.MessageDigest.getInstance("SHA1") if s: self.update(s) def update(self, s): self.md.update(s) def copy(self): sha1 = JCE_SHA1() sha1.md = self.md.clone() return sha1 def digest(self): digest = self.md.digest() bytes = jarray.zeros(20, 'h') for count in xrange(20): x = digest[count] if x < 0: x += 256 bytes[count] = x return bytes """ #Factory function for getting a SHA1 object #The JCE_SHA1 class is way too slow... 
#the sha.sha object we use instead is broken in the jython 2.1 #release, and needs to be patched def getSHA1(s): #return JCE_SHA1(s) return sha.sha(s) #Adjust the string to an array of bytes def stringToJavaByteArray(s): bytes = jarray.zeros(len(s), 'b') for count, c in enumerate(s): x = ord(c) if x >= 128: x -= 256 bytes[count] = x return bytes import sys import traceback def formatExceptionTrace(e): newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) return newStr
mit
direvus/ansible
lib/ansible/modules/cloud/google/gc_storage.py
18
16892
#!/usr/bin/python # # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gc_storage version_added: "1.4" short_description: This module manages objects/buckets in Google Cloud Storage. description: - This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for information about setting the default project. options: bucket: description: - Bucket name. required: true object: description: - Keyname of the object inside the bucket. Can be also be used to create "virtual directories" (see examples). src: description: - The source file path when performing a PUT operation. dest: description: - The destination file path when downloading an object/key with a GET operation. force: description: - Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. type: bool default: 'yes' aliases: [ 'overwrite' ] permission: description: - This option let's the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'authenticated-read'. default: private headers: version_added: "2.0" description: - Headers to attach to object. default: {} expiration: description: - Time limit (in seconds) for the URL generated and returned by GCA when performing a mode=put or mode=get_url operation. 
This url is only available when public-read is the acl for the object. mode: description: - Switches the module behaviour between upload, download, get_url (return download url) , get_str (download object as string), create (bucket) and delete (bucket). required: true choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ] gs_secret_key: description: - GS secret key. If not set then the value of the GS_SECRET_ACCESS_KEY environment variable is used. required: true gs_access_key: description: - GS access key. If not set then the value of the GS_ACCESS_KEY_ID environment variable is used. required: true region: version_added: "2.4" description: - The gs region to use. If not defined then the value 'US' will be used. See U(https://cloud.google.com/storage/docs/bucket-locations) default: 'US' versioning: version_added: "2.4" description: - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended) type: bool requirements: - "python >= 2.6" - "boto >= 2.9" author: - Benno Joy (@bennojoy) - Lukas Beumer (@nitaco) ''' EXAMPLES = ''' - name: Upload some content gc_storage: bucket: mybucket object: key.txt src: /usr/local/myfile.txt mode: put permission: public-read - name: Upload some headers gc_storage: bucket: mybucket object: key.txt src: /usr/local/myfile.txt headers: '{"Content-Encoding": "gzip"}' - name: Download some content gc_storage: bucket: mybucket object: key.txt dest: /usr/local/myfile.txt mode: get - name: Download an object as a string to use else where in your playbook gc_storage: bucket: mybucket object: key.txt mode: get_str - name: Create an empty bucket gc_storage: bucket: mybucket mode: create - name: Create a bucket with key as directory gc_storage: bucket: mybucket object: /my/directory/path mode: create - name: Delete a bucket and all contents gc_storage: bucket: mybucket mode: delete - name: Create a bucket with versioning enabled gc_storage: bucket: "mybucket" versioning: yes mode: 
create - name: Create a bucket located in the eu gc_storage: bucket: "mybucket" region: "europe-west3" mode: create ''' import os try: import boto HAS_BOTO = True except ImportError: HAS_BOTO = False from ansible.module_utils.basic import AnsibleModule def grant_check(module, gs, obj): try: acp = obj.get_acl() if module.params.get('permission') == 'public-read': grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllUsers'] if not grant: obj.set_acl('public-read') module.exit_json(changed=True, result="The objects permission as been set to public-read") if module.params.get('permission') == 'authenticated-read': grant = [x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers'] if not grant: obj.set_acl('authenticated-read') module.exit_json(changed=True, result="The objects permission as been set to authenticated-read") except gs.provider.storage_response_error as e: module.fail_json(msg=str(e)) return True def key_check(module, gs, bucket, obj): try: bucket = gs.lookup(bucket) key_check = bucket.get_key(obj) except gs.provider.storage_response_error as e: module.fail_json(msg=str(e)) if key_check: grant_check(module, gs, key_check) return True else: return False def keysum(module, gs, bucket, obj): bucket = gs.lookup(bucket) key_check = bucket.get_key(obj) if not key_check: return None md5_remote = key_check.etag[1:-1] etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5 if etag_multipart is True: module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.") return md5_remote def bucket_check(module, gs, bucket): try: result = gs.lookup(bucket) except gs.provider.storage_response_error as e: module.fail_json(msg=str(e)) if result: grant_check(module, gs, result) return True else: return False def create_bucket(module, gs, bucket): try: bucket = gs.create_bucket(bucket, transform_headers(module.params.get('headers')), module.params.get('region')) 
bucket.set_acl(module.params.get('permission')) bucket.configure_versioning(module.params.get('versioning')) except gs.provider.storage_response_error as e: module.fail_json(msg=str(e)) if bucket: return True def delete_bucket(module, gs, bucket): try: bucket = gs.lookup(bucket) bucket_contents = bucket.list() for key in bucket_contents: bucket.delete_key(key.name) bucket.delete() return True except gs.provider.storage_response_error as e: module.fail_json(msg=str(e)) def delete_key(module, gs, bucket, obj): try: bucket = gs.lookup(bucket) bucket.delete_key(obj) module.exit_json(msg="Object deleted from bucket ", changed=True) except gs.provider.storage_response_error as e: module.fail_json(msg=str(e)) def create_dirkey(module, gs, bucket, obj): try: bucket = gs.lookup(bucket) key = bucket.new_key(obj) key.set_contents_from_string('') module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True) except gs.provider.storage_response_error as e: module.fail_json(msg=str(e)) def path_check(path): if os.path.exists(path): return True else: return False def transform_headers(headers): """ Boto url-encodes values unless we convert the value to `str`, so doing this prevents 'max-age=100000' from being converted to "max-age%3D100000". 
:param headers: Headers to convert :type headers: dict :rtype: dict """ for key, value in headers.items(): headers[key] = str(value) return headers def upload_gsfile(module, gs, bucket, obj, src, expiry): try: bucket = gs.lookup(bucket) key = bucket.new_key(obj) key.set_contents_from_filename( filename=src, headers=transform_headers(module.params.get('headers')) ) key.set_acl(module.params.get('permission')) url = key.generate_url(expiry) module.exit_json(msg="PUT operation complete", url=url, changed=True) except gs.provider.storage_copy_error as e: module.fail_json(msg=str(e)) def download_gsfile(module, gs, bucket, obj, dest): try: bucket = gs.lookup(bucket) key = bucket.lookup(obj) key.get_contents_to_filename(dest) module.exit_json(msg="GET operation complete", changed=True) except gs.provider.storage_copy_error as e: module.fail_json(msg=str(e)) def download_gsstr(module, gs, bucket, obj): try: bucket = gs.lookup(bucket) key = bucket.lookup(obj) contents = key.get_contents_as_string() module.exit_json(msg="GET operation complete", contents=contents, changed=True) except gs.provider.storage_copy_error as e: module.fail_json(msg=str(e)) def get_download_url(module, gs, bucket, obj, expiry): try: bucket = gs.lookup(bucket) key = bucket.lookup(obj) url = key.generate_url(expiry) module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True) except gs.provider.storage_response_error as e: module.fail_json(msg=str(e)) def handle_get(module, gs, bucket, obj, overwrite, dest): md5_remote = keysum(module, gs, bucket, obj) md5_local = module.md5(dest) if md5_local == md5_remote: module.exit_json(changed=False) if md5_local != md5_remote and not overwrite: module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True) else: download_gsfile(module, gs, bucket, obj, dest) def handle_put(module, gs, bucket, obj, overwrite, src, expiration): # Lets check to see if bucket exists to get ground truth. 
bucket_rc = bucket_check(module, gs, bucket) key_rc = key_check(module, gs, bucket, obj) # Lets check key state. Does it exist and if it does, compute the etag md5sum. if bucket_rc and key_rc: md5_remote = keysum(module, gs, bucket, obj) md5_local = module.md5(src) if md5_local == md5_remote: module.exit_json(msg="Local and remote object are identical", changed=False) if md5_local != md5_remote and not overwrite: module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True) else: upload_gsfile(module, gs, bucket, obj, src, expiration) if not bucket_rc: create_bucket(module, gs, bucket) upload_gsfile(module, gs, bucket, obj, src, expiration) # If bucket exists but key doesn't, just upload. if bucket_rc and not key_rc: upload_gsfile(module, gs, bucket, obj, src, expiration) def handle_delete(module, gs, bucket, obj): if bucket and not obj: if bucket_check(module, gs, bucket): module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=delete_bucket(module, gs, bucket)) else: module.exit_json(msg="Bucket does not exist.", changed=False) if bucket and obj: if bucket_check(module, gs, bucket): if key_check(module, gs, bucket, obj): module.exit_json(msg="Object has been deleted.", changed=delete_key(module, gs, bucket, obj)) else: module.exit_json(msg="Object does not exist.", changed=False) else: module.exit_json(msg="Bucket does not exist.", changed=False) else: module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True) def handle_create(module, gs, bucket, obj): if bucket and not obj: if bucket_check(module, gs, bucket): module.exit_json(msg="Bucket already exists.", changed=False) else: module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket)) if bucket and obj: if obj.endswith('/'): dirobj = obj else: dirobj = obj + "/" if bucket_check(module, gs, bucket): if key_check(module, gs, bucket, dirobj): module.exit_json(msg="Bucket %s and 
key %s already exists." % (bucket, obj), changed=False) else: create_dirkey(module, gs, bucket, dirobj) else: create_bucket(module, gs, bucket) create_dirkey(module, gs, bucket, dirobj) def main(): module = AnsibleModule( argument_spec=dict( bucket=dict(required=True), object=dict(default=None, type='path'), src=dict(default=None), dest=dict(default=None, type='path'), expiration=dict(type='int', default=600, aliases=['expiry']), mode=dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True), permission=dict(choices=['private', 'public-read', 'authenticated-read'], default='private'), headers=dict(type='dict', default={}), gs_secret_key=dict(no_log=True, required=True), gs_access_key=dict(required=True), overwrite=dict(default=True, type='bool', aliases=['force']), region=dict(default='US', type='str'), versioning=dict(default='no', type='bool') ), ) if not HAS_BOTO: module.fail_json(msg='`boto` 2.9+ is required for this module. Try: pip install `boto` --upgrade') bucket = module.params.get('bucket') obj = module.params.get('object') src = module.params.get('src') dest = module.params.get('dest') mode = module.params.get('mode') expiry = module.params.get('expiration') gs_secret_key = module.params.get('gs_secret_key') gs_access_key = module.params.get('gs_access_key') overwrite = module.params.get('overwrite') if mode == 'put': if not src or not object: module.fail_json(msg="When using PUT, src, bucket, object are mandatory parameters") if mode == 'get': if not dest or not object: module.fail_json(msg="When using GET, dest, bucket, object are mandatory parameters") try: gs = boto.connect_gs(gs_access_key, gs_secret_key) except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) if mode == 'get': if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj): module.fail_json(msg="Target bucket/key cannot be found", failed=True) if not path_check(dest): download_gsfile(module, gs, bucket, obj, dest) 
else: handle_get(module, gs, bucket, obj, overwrite, dest) if mode == 'put': if not path_check(src): module.fail_json(msg="Local object for PUT does not exist", failed=True) handle_put(module, gs, bucket, obj, overwrite, src, expiry) # Support for deleting an object if we have both params. if mode == 'delete': handle_delete(module, gs, bucket, obj) if mode == 'create': handle_create(module, gs, bucket, obj) if mode == 'get_url': if bucket and obj: if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj): get_download_url(module, gs, bucket, obj, expiry) else: module.fail_json(msg="Key/Bucket does not exist", failed=True) else: module.fail_json(msg="Bucket and Object parameters must be set", failed=True) # --------------------------- Get the String contents of an Object ------------------------- if mode == 'get_str': if bucket and obj: if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj): download_gsstr(module, gs, bucket, obj) else: module.fail_json(msg="Key/Bucket does not exist", failed=True) else: module.fail_json(msg="Bucket and Object parameters must be set", failed=True) if __name__ == '__main__': main()
gpl-3.0
rimsleur/QSLime
interpreter/PropositionTree.py
2
12197
# coding: utf8 """ ะ”ั€ะตะฒะพะฒะธะดะฝะพะต ะฟั€ะตะดัั‚ะฐะฒะปะตะฝะธะต ััƒะถะดะตะฝะธั """ from PropositionTreeNode import PropositionTreeNode from PropositionTreeNodeType import PropositionTreeNodeType from PropositionTreeNodeSide import PropositionTreeNodeSide from LanguageHelper import LanguageHelper from TriggerProvider import TriggerProvider from ContextProvider import ContextProvider from TreeNodeConcept import TreeNodeConcept from DatabaseTriad import DatabaseTriad from DatabaseSequence import DatabaseSequence from DatabaseConcept import DatabaseConcept from MemoryProvider import MemoryProvider from TreeNodeConceptType import TreeNodeConceptType from ConditionProvider import ConditionProvider from ErrorHelper import ErrorHelper class PropositionTree (): def __init__ (self): self.root_node = None self.__node_stack = [] self.__stack_index = 0 self.is_totally_parsed = False def push_node (self, node): self.__node_stack.append (node) self.__stack_index += 1 def pop_node (self): if self.__stack_index > 0: self.__stack_index -= 1 return self.__node_stack.pop () else: return None @classmethod def print_tree (cls, tree): node = tree.root_node node.child_index = 0 k = 0 print "<ะŸะตั‡ะฐั‚ัŒ ะดั€ะตะฒะพะฒะธะดะฝะพะน ัั‚ั€ัƒะบั‚ัƒั€ั‹ ััƒะถะดะตะฝะธั>" while node != None: if node.child_index == 0: if node.type == PropositionTreeNodeType.concept: if node.concept.subroot == True: print " "*k + "+" + node.text elif node.concept.sublink == True: print " "*k + "=" + node.text else: print " "*k + node.text else: print " "*k + node.text if node.child_index < len (node.children): idx = node.child_index node.child_index += 1 tree.push_node (node) node = node.children[idx] node.child_index = 0 k += 1 else: node = tree.pop_node () k -= 1 print "</ะŸะตั‡ะฐั‚ัŒ ะดั€ะตะฒะพะฒะธะดะฝะพะน ัั‚ั€ัƒะบั‚ัƒั€ั‹ ััƒะถะดะตะฝะธั>" @classmethod def get_actor_and_actant (cls, root_node): idx = 0 actor = None actant = None while idx < len (root_node.children): child = root_node.children[idx] if 
child.type == PropositionTreeNodeType.linkage: if child.linkage.name == LanguageHelper.translate ("who") or child.linkage.name == LanguageHelper.translate ("what"): parent = child child = child.children[0] if child.type == PropositionTreeNodeType.concept: if child.side == PropositionTreeNodeSide.left: actor = child if root_node.concept.subroot != True: ContextProvider.set_actor_node (actor) elif child.side == PropositionTreeNodeSide.right: actant = child if actor != None and actant != None: break idx += 1 if actor == None: actor = ContextProvider.get_actor_node () return actor, actant @classmethod def replace_subtree (cls, root_node, side, is_new, cursor): actor, actant = PropositionTree.get_actor_and_actant (root_node) result_node = PropositionTreeNode () result_node.type = PropositionTreeNodeType.concept result_node.side = side result_node.concept = TreeNodeConcept () is_memobject = False error_text = "" if root_node.concept.name == LanguageHelper.translate ("to-have"): if actant.concept.name == LanguageHelper.translate ("name"): if actor.concept.name == LanguageHelper.translate ("module"): child1 = actant.children[0] if child1.type == PropositionTreeNodeType.linkage: if child1.linkage.name == LanguageHelper.translate ("which"): child2 = child1.children[0] if child2.type == PropositionTreeNodeType.concept: result_node.concept.type = TreeNodeConceptType.module result_node.concept.name = child2.concept.name result_node.text = result_node.concept.name is_memobject = True elif actor.concept.name == LanguageHelper.translate ("field"): child1 = actant.children[0] if child1.type == PropositionTreeNodeType.linkage: if child1.linkage.name == LanguageHelper.translate ("which"): child2 = child1.children[0] if child2.type == PropositionTreeNodeType.code_object: if is_new == True: result_node.concept.id = MemoryProvider.create_field (child2.text) is_memobject = True result_node.concept.type = TreeNodeConceptType.field result_node.concept.name = "$" + str 
(result_node.concept.id) result_node.text = result_node.concept.name ContextProvider.set_field_node (result_node) else: result_node.concept.id = MemoryProvider.get_field_id (child2.text) is_memobject = True result_node.concept.type = TreeNodeConceptType.field result_node.concept.name = "$" + str (result_node.concept.id) result_node.text = result_node.concept.name elif actor.concept.name == LanguageHelper.translate ("constant"): child1 = actant.children[0] if child1.type == PropositionTreeNodeType.linkage: if child1.linkage.name == LanguageHelper.translate ("which"): child2 = child1.children[0] if child2.type == PropositionTreeNodeType.code_object: if is_new == True: result_node.concept.id = MemoryProvider.create_constant (child2.text) is_memobject = True result_node.concept.type = TreeNodeConceptType.constant result_node.concept.name = "$" + str (result_node.concept.id) result_node.text = result_node.concept.name ContextProvider.set_constant_node (result_node) else: result_node.concept.id = MemoryProvider.get_constant_id (child2.text) is_memobject = True result_node.concept.type = TreeNodeConceptType.constant result_node.concept.name = "$" + str (result_node.concept.id) result_node.text = result_node.concept.name elif actor.concept.name == LanguageHelper.translate ("list"): child1 = actant.children[0] if child1.type == PropositionTreeNodeType.linkage: if child1.linkage.name == LanguageHelper.translate ("which"): child2 = child1.children[0] if child2.type == PropositionTreeNodeType.code_object: if is_new == True: result_node.concept.id = MemoryProvider.create_list (child2.text) is_memobject = True result_node.concept.type = TreeNodeConceptType.memlist result_node.concept.name = "$" + str (result_node.concept.id) result_node.text = result_node.concept.name ContextProvider.set_list_node (result_node) else: result_node.concept.id = MemoryProvider.get_list_id (child2.text) is_memobject = True result_node.concept.type = TreeNodeConceptType.memlist 
result_node.concept.name = "$" + str (result_node.concept.id) result_node.text = result_node.concept.name elif actor.concept.name == LanguageHelper.translate ("trigger"): child1 = actant.children[0] if child1.type == PropositionTreeNodeType.linkage: if child1.linkage.name == LanguageHelper.translate ("which"): child2 = child1.children[0] if child2.type == PropositionTreeNodeType.code_object: if is_new == True: result_node.concept.id = TriggerProvider.create_trigger (child2.text) result_node.concept.type = TreeNodeConceptType.trigger result_node.concept.name = "$" + str (result_node.concept.id) result_node.text = result_node.concept.name ContextProvider.set_trigger_node (result_node) elif actor.concept.name == LanguageHelper.translate ("condition"): child1 = actant.children[0] if child1.type == PropositionTreeNodeType.linkage: if child1.linkage.name == LanguageHelper.translate ("which"): child2 = child1.children[0] if child2.type == PropositionTreeNodeType.code_object: if is_new == True: result_node.concept.id = ConditionProvider.create_condition (child2.text) result_node.concept.type = TreeNodeConceptType.condition result_node.concept.name = "$" + str (result_node.concept.id) result_node.text = result_node.concept.name ContextProvider.set_condition_node (result_node) elif child2.type == PropositionTreeNodeType.concept: if is_new == True: result_node.concept.id = ConditionProvider.create_condition (child2.text) result_node.concept.type = TreeNodeConceptType.condition result_node.concept.name = "$" + str (result_node.concept.id) result_node.text = result_node.concept.name ContextProvider.set_condition_node (result_node) elif actor.concept.name == LanguageHelper.translate ("procedure"): child1 = actant.children[0] if child1.type == PropositionTreeNodeType.linkage: if child1.linkage.name == LanguageHelper.translate ("which"): child2 = child1.children[0] if child2.type == PropositionTreeNodeType.concept: database_triad = DatabaseTriad.read (cursor, actant.concept.id, 
child1.linkage.id, child2.concept.id) if database_triad == None: error_text = ErrorHelper.get_text (105) return None, error_text database_sequense1 = DatabaseSequence.read (cursor, 0, 0, database_triad.id) if database_sequense1 == None: error_text = ErrorHelper.get_text (105) return None, error_text database_triad = DatabaseTriad.read_by_id (cursor, database_sequense1.left_triad_id) if database_triad == None: error_text = ErrorHelper.get_text (105) return None, error_text if database_triad.left_concept_id == root_node.concept.id: database_sequense2 = DatabaseSequence.read (cursor, database_sequense1.proposition_id, 0, database_triad.id) if database_sequense2 == None: error_text = ErrorHelper.get_text (105) return None, error_text database_triad = DatabaseTriad.read_by_id (cursor, database_sequense2.left_triad_id) if database_triad == None: error_text = ErrorHelper.get_text (105) return None, error_text result_node.concept.id = database_triad.left_concept_id database_concept = DatabaseConcept.read_by_name (cursor, LanguageHelper.translate ("to-be")) if database_concept == None: error_text = ErrorHelper.get_text (104) return None, error_text database_triad1 = DatabaseTriad.read (cursor, result_node.concept.id, 0, database_concept.id) if database_triad1 == None: error_text = ErrorHelper.get_text (104) return None, error_text database_triad2 = DatabaseTriad.read (cursor, database_concept.id, 0, actor.concept.id) if database_triad2 == None: error_text = ErrorHelper.get_text (104) return None, error_text database_sequense3 = DatabaseSequence.read (cursor, 0, database_triad1.id, database_triad2.id) if database_sequense3 == None: error_text = ErrorHelper.get_text (104) return None, error_text else: error_text = ErrorHelper.get_text (105) return None, error_text elif actant.concept.name == LanguageHelper.translate ("class"): if actor.concept.name == LanguageHelper.translate ("trigger"): child1 = actant.children[0] if child1.type == PropositionTreeNodeType.linkage: if 
child1.linkage.name == LanguageHelper.translate ("which"): child2 = child1.children[0] if child2.type == PropositionTreeNodeType.string: result_node.concept.id = TriggerProvider.get_id_by_class (child2.text) result_node.concept.type = TreeNodeConceptType.trigger result_node.concept.name = "$" + str (result_node.concept.id) result_node.text = result_node.concept.name is_memobject = True if is_memobject != True: if result_node.concept.id != 0: database_concept = DatabaseConcept.read_by_id (cursor, result_node.concept.id) result_node.concept.type = database_concept.type result_node.concept.name = database_concept.name result_node.text = result_node.concept.name else: return None, error_text return result_node, error_text
gpl-2.0
dlazz/ansible
test/units/modules/network/netscaler/test_netscaler_gslb_site.py
68
24193
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
from units.compat.mock import patch, Mock, MagicMock, call
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher

import sys

if sys.version_info[:2] != (2, 6):
    import requests


# Dotted path of the module under test; every patch target hangs off it.
MODULE_PATH = 'ansible.modules.network.netscaler.netscaler_gslb_site'


class TestNetscalerGSLBSiteModule(TestModule):
    """Unit tests for the netscaler_gslb_site module.

    The nitro SDK is replaced wholesale by mocks (see setUpClass), so these
    tests exercise only the module's control flow: graceful handling of
    import/login/connection errors, create/update/delete dispatch, and the
    save_config behaviour.
    """

    @classmethod
    def setUpClass(cls):
        class MockException(Exception):
            pass

        cls.MockException = MockException
        m = MagicMock()
        # Stub out the nitro SDK packages the module imports at runtime so
        # the tests run without the real SDK installed.
        nssrc_modules_mock = {
            'nssrc.com.citrix.netscaler.nitro.resource.config.gslb': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite.gslbsite': m,
        }
        cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
        cls.nitro_base_patcher = nitro_base_patcher

    @classmethod
    def tearDownClass(cls):
        cls.nitro_base_patcher.stop()
        cls.nitro_specific_patcher.stop()

    def setUp(self):
        super(TestNetscalerGSLBSiteModule, self).setUp()
        self.nitro_base_patcher.start()
        self.nitro_specific_patcher.start()
        # Setup minimal required arguments to pass AnsibleModule argument parsing

    def tearDown(self):
        super(TestNetscalerGSLBSiteModule, self).tearDown()
        self.nitro_base_patcher.stop()
        self.nitro_specific_patcher.stop()

    # ------------------------------------------------------------------ #
    # Helpers shared by the individual tests.                            #
    # ------------------------------------------------------------------ #

    def _set_args(self, state, **extra):
        """Set the minimal module arguments plus any per-test overrides."""
        args = dict(
            nitro_user='user',
            nitro_pass='pass',
            nsip='192.0.2.1',
            state=state,
        )
        args.update(extra)
        set_module_args(args)

    @staticmethod
    def _import_module():
        """Import the module under test after patching is in place."""
        from ansible.modules.network.netscaler import netscaler_gslb_site
        return netscaler_gslb_site

    @staticmethod
    def _diffable_proxy():
        """Return a ConfigProxy stand-in whose diff_object() is empty."""
        proxy = Mock()
        proxy.configure_mock(**{'diff_object.return_value': {}})
        return proxy

    def _exercise(self, state, extra_patches, save_config=None, outcome='exit'):
        """Run the module with standard mocks plus *extra_patches*.

        Returns ``(client_mock, proxy_mock, result)`` where *result* is the
        exit json (``outcome='exit'``) or the failure json
        (``outcome='fail'``).
        """
        overrides = {} if save_config is None else {'save_config': save_config}
        self._set_args(state, **overrides)
        module = self._import_module()
        client_mock = Mock()
        proxy_mock = self._diffable_proxy()
        patches = dict(
            get_nitro_client=Mock(return_value=client_mock),
            ensure_feature_is_enabled=Mock(),
            ConfigProxy=Mock(return_value=proxy_mock),
        )
        patches.update(extra_patches)
        with patch.multiple(MODULE_PATH, **patches):
            self.module = module
            result = self.exited() if outcome == 'exit' else self.failed()
        return client_mock, proxy_mock, result

    # ------------------------------------------------------------------ #
    # Error-handling tests.                                              #
    # ------------------------------------------------------------------ #

    def test_graceful_nitro_api_import_error(self):
        # Stop nitro api patching to cause ImportError
        self._set_args('present')
        self.nitro_base_patcher.stop()
        self.nitro_specific_patcher.stop()
        self.module = self._import_module()
        result = self.failed()
        self.assertEqual(result['msg'], 'Could not load nitro python sdk')

    def test_graceful_nitro_error_on_login(self):
        self._set_args('present')
        module = self._import_module()

        class MockException(Exception):
            # Mimics the attributes of nitro_exception that the module's
            # error handler reads.
            def __init__(self, *args, **kwargs):
                self.errorcode = 0
                self.message = ''

        client_mock = Mock()
        client_mock.login = Mock(side_effect=MockException)
        with patch.multiple(
            MODULE_PATH,
            get_nitro_client=Mock(return_value=client_mock),
            nitro_exception=MockException,
        ):
            self.module = module
            result = self.failed()
            self.assertTrue(result['msg'].startswith('nitro exception'),
                            msg='nitro exception during login not handled properly')

    def test_graceful_no_connection_error(self):
        if sys.version_info[:2] == (2, 6):
            self.skipTest('requests library not available under python2.6')
        self._set_args('present')
        module = self._import_module()

        client_mock = Mock()
        client_mock.configure_mock(**{'login.side_effect': requests.exceptions.ConnectionError})
        with patch.multiple(
            MODULE_PATH,
            get_nitro_client=Mock(return_value=client_mock),
            nitro_exception=self.MockException,
        ):
            self.module = module
            result = self.failed()
            self.assertTrue(result['msg'].startswith('Connection error'),
                            msg='Connection error was not handled gracefully')

    def test_graceful_login_error(self):
        if sys.version_info[:2] == (2, 6):
            self.skipTest('requests library not available under python2.6')
        self._set_args('present')
        module = self._import_module()

        client_mock = Mock()
        client_mock.configure_mock(**{'login.side_effect': requests.exceptions.SSLError})
        with patch.multiple(
            MODULE_PATH,
            get_nitro_client=Mock(return_value=client_mock),
            nitro_exception=self.MockException,
        ):
            self.module = module
            result = self.failed()
            self.assertTrue(result['msg'].startswith('SSL Error'),
                            msg='SSL Error was not handled gracefully')

    # ------------------------------------------------------------------ #
    # Feature-enable and save_config behaviour.                          #
    # ------------------------------------------------------------------ #

    def test_ensure_feature_is_enabled_called(self):
        feature_mock = Mock()
        client_mock, _, _ = self._exercise(
            'present',
            dict(
                gslb_site_exists=Mock(side_effect=[False, True]),
                gslb_site_identical=Mock(side_effect=[True]),
                nitro_exception=self.MockException,
                ensure_feature_is_enabled=feature_mock,
            ),
        )
        feature_mock.assert_called_with(client_mock, 'GSLB')

    def test_save_config_called_on_state_present(self):
        client_mock, _, _ = self._exercise(
            'present',
            dict(
                gslb_site_exists=Mock(side_effect=[False, True]),
                gslb_site_identical=Mock(side_effect=[True]),
                nitro_exception=self.MockException,
            ),
        )
        self.assertIn(call.save_config(), client_mock.mock_calls)

    def test_save_config_called_on_state_absent(self):
        client_mock, _, _ = self._exercise(
            'absent',
            dict(
                gslb_site_exists=Mock(side_effect=[True, False]),
                nitro_exception=self.MockException,
            ),
        )
        self.assertIn(call.save_config(), client_mock.mock_calls)

    def test_save_config_not_called_on_state_present(self):
        client_mock, _, _ = self._exercise(
            'present',
            dict(
                gslb_site_exists=Mock(side_effect=[False, True]),
                gslb_site_identical=Mock(side_effect=[True]),
                nitro_exception=self.MockException,
            ),
            save_config=False,
        )
        self.assertNotIn(call.save_config(), client_mock.mock_calls)

    def test_save_config_not_called_on_state_absent(self):
        client_mock, _, _ = self._exercise(
            'absent',
            dict(
                gslb_site_exists=Mock(side_effect=[True, False]),
                nitro_exception=self.MockException,
            ),
            save_config=False,
        )
        self.assertNotIn(call.save_config(), client_mock.mock_calls)

    # ------------------------------------------------------------------ #
    # Create / update / delete dispatch.                                 #
    # ------------------------------------------------------------------ #

    def test_new_gslb_site_execution_flow(self):
        _, proxy_mock, _ = self._exercise(
            'present',
            dict(
                gslb_site_exists=Mock(side_effect=[False, True]),
                gslb_site_identical=Mock(side_effect=[True]),
                nitro_exception=self.MockException,
            ),
        )
        proxy_mock.assert_has_calls([call.add()])

    def test_modified_gslb_site_execution_flow(self):
        _, proxy_mock, _ = self._exercise(
            'present',
            dict(
                diff_list=Mock(return_value={}),
                get_immutables_intersection=Mock(return_value=[]),
                gslb_site_exists=Mock(side_effect=[True, True]),
                gslb_site_identical=Mock(side_effect=[False, True]),
                nitro_exception=self.MockException,
            ),
        )
        proxy_mock.assert_has_calls([call.update()])

    def test_absent_gslb_site_execution_flow(self):
        _, proxy_mock, _ = self._exercise(
            'absent',
            dict(
                diff_list=Mock(return_value={}),
                get_immutables_intersection=Mock(return_value=[]),
                gslb_site_exists=Mock(side_effect=[True, False]),
                gslb_site_identical=Mock(side_effect=[False, True]),
            ),
        )
        proxy_mock.assert_has_calls([call.delete()])

    def test_present_gslb_site_identical_flow(self):
        _, proxy_mock, _ = self._exercise(
            'present',
            dict(
                diff_list=Mock(return_value={}),
                get_immutables_intersection=Mock(return_value=[]),
                gslb_site_exists=Mock(side_effect=[True, True]),
                gslb_site_identical=Mock(side_effect=[True, True]),
                nitro_exception=self.MockException,
            ),
        )
        proxy_mock.assert_not_called()

    def test_absent_gslb_site_noop_flow(self):
        _, proxy_mock, _ = self._exercise(
            'absent',
            dict(
                diff_list=Mock(return_value={}),
                get_immutables_intersection=Mock(return_value=[]),
                gslb_site_exists=Mock(side_effect=[False, False]),
                gslb_site_identical=Mock(side_effect=[False, False]),
                nitro_exception=self.MockException,
            ),
        )
        proxy_mock.assert_not_called()

    # ------------------------------------------------------------------ #
    # Failure paths.                                                     #
    # ------------------------------------------------------------------ #

    def test_present_gslb_site_failed_update(self):
        _, _, result = self._exercise(
            'present',
            dict(
                nitro_exception=self.MockException,
                diff_list=Mock(return_value={}),
                get_immutables_intersection=Mock(return_value=[]),
                gslb_site_exists=Mock(side_effect=[True, True]),
                gslb_site_identical=Mock(side_effect=[False, False]),
            ),
            outcome='fail',
        )
        self.assertEqual(result['msg'], 'GSLB site differs from configured')
        self.assertTrue(result['failed'])

    def test_present_gslb_site_failed_create(self):
        _, _, result = self._exercise(
            'present',
            dict(
                nitro_exception=self.MockException,
                diff_list=Mock(return_value={}),
                get_immutables_intersection=Mock(return_value=[]),
                gslb_site_exists=Mock(side_effect=[False, False]),
                gslb_site_identical=Mock(side_effect=[False, False]),
            ),
            outcome='fail',
        )
        self.assertEqual(result['msg'], 'GSLB site does not exist')
        self.assertTrue(result['failed'])

    def test_present_gslb_site_update_immutable_attribute(self):
        _, _, result = self._exercise(
            'present',
            dict(
                nitro_exception=self.MockException,
                diff_list=Mock(return_value={}),
                get_immutables_intersection=Mock(return_value=['domain']),
                gslb_site_exists=Mock(side_effect=[True, True]),
                gslb_site_identical=Mock(side_effect=[False, False]),
            ),
            outcome='fail',
        )
        self.assertEqual(result['msg'], 'Cannot update immutable attributes [\'domain\']')
        self.assertTrue(result['failed'])

    def test_absent_gslb_site_failed_delete(self):
        _, _, result = self._exercise(
            'absent',
            dict(
                nitro_exception=self.MockException,
                diff_list=Mock(return_value={}),
                get_immutables_intersection=Mock(return_value=[]),
                gslb_site_exists=Mock(side_effect=[True, True]),
                gslb_site_identical=Mock(side_effect=[False, False]),
            ),
            outcome='fail',
        )
        self.assertEqual(result['msg'], 'GSLB site still exists')
        self.assertTrue(result['failed'])

    def _check_nitro_exception_graceful(self, state):
        """Shared body for the state=present/absent nitro-exception tests."""
        self._set_args(state)
        module = self._import_module()

        class MockException(Exception):
            def __init__(self, *args, **kwargs):
                self.errorcode = 0
                self.message = ''

        with patch.multiple(
            MODULE_PATH,
            gslb_site_exists=Mock(side_effect=MockException),
            ensure_feature_is_enabled=Mock(),
            nitro_exception=MockException,
        ):
            self.module = module
            result = self.failed()
            self.assertTrue(
                result['msg'].startswith('nitro exception'),
                # Fixed copy/paste defect: the state=present variant used to
                # report 'operation absent' in its failure message.
                msg='Nitro exception not caught on operation %s' % state,
            )

    def test_graceful_nitro_exception_state_present(self):
        self._check_nitro_exception_graceful('present')

    def test_graceful_nitro_exception_state_absent(self):
        self._check_nitro_exception_graceful('absent')
gpl-3.0
g-k/servo
tests/wpt/web-platform-tests/tools/html5lib/html5lib/tests/test_parser.py
451
3612
"""Tree-construction test runner for html5lib.

Loads the shared ``tree-construction`` test data files and yields one
parser test per (test case, tree builder, namespacing) combination.
"""
from __future__ import absolute_import, division, unicode_literals

import os
import sys
import traceback
import warnings
import re

# Promote warnings to errors globally so unexpected warnings fail loudly;
# runParserTest temporarily relaxes this to record them instead.
warnings.simplefilter("error")

from .support import get_data_files
from .support import TestData, convert, convertExpected, treeTypes
from html5lib import html5parser, constants

# Run the parse error checks
checkParseErrors = False

# XXX - There should just be one function here but for some reason the testcase
# format differs from the treedump format by a single space character


def convertTreeDump(data):
    """Normalize a tree dump: re-indent via convert(3) and drop the first line."""
    return "\n".join(convert(3)(data).split("\n")[1:])


# Substitution callable that prefixes element names with "html " when the
# expected output must be namespaced (applied to expected trees below).
namespaceExpected = re.compile(r"^(\s*)<(\S+)>", re.M).sub


def runParserTest(innerHTML, input, expected, errors, treeClass,
                  namespaceHTMLElements):
    """Parse one test input and assert the serialized tree matches *expected*.

    innerHTML, when truthy, names the fragment context element; *errors* is
    the list of expected parse-error descriptions (only counted when
    checkParseErrors is set).
    """
    with warnings.catch_warnings(record=True) as caughtWarnings:
        warnings.simplefilter("always")
        p = html5parser.HTMLParser(tree=treeClass,
                                   namespaceHTMLElements=namespaceHTMLElements)
        try:
            if innerHTML:
                document = p.parseFragment(input, innerHTML)
            else:
                document = p.parse(input)
        except:
            # Re-raise any parser crash as an assertion carrying full context.
            errorMsg = "\n".join(["\n\nInput:", input, "\nExpected:", expected,
                                  "\nTraceback:", traceback.format_exc()])
            assert False, errorMsg

    # Only DataLossWarning is tolerated; anything else is a test failure.
    otherWarnings = [x for x in caughtWarnings
                     if not issubclass(x.category, constants.DataLossWarning)]
    assert len(otherWarnings) == 0, [(x.category, x.message) for x in otherWarnings]
    # Data-loss warnings mean the tree builder cannot represent this case
    # faithfully, so the output comparison is skipped.
    if len(caughtWarnings):
        return

    output = convertTreeDump(p.tree.testSerializer(document))

    expected = convertExpected(expected)
    if namespaceHTMLElements:
        expected = namespaceExpected(r"\1<html \2>", expected)

    errorMsg = "\n".join(["\n\nInput:", input, "\nExpected:", expected,
                          "\nReceived:", output])
    assert expected == output, errorMsg

    # Format the parser's recorded errors for the (optional) count check.
    errStr = []
    for (line, col), errorcode, datavars in p.errors:
        assert isinstance(datavars, dict), "%s, %s" % (errorcode, repr(datavars))
        errStr.append("Line: %i Col: %i %s" % (line, col,
                                               constants.E[errorcode] % datavars))

    errorMsg2 = "\n".join(["\n\nInput:", input,
                           "\nExpected errors (" + str(len(errors)) + "):\n" + "\n".join(errors),
                           "\nActual errors (" + str(len(p.errors)) + "):\n" + "\n".join(errStr)])
    if checkParseErrors:
        assert len(p.errors) == len(errors), errorMsg2


def test_parser():
    """Generator test: yield one runParserTest per case/tree-builder combo."""
    sys.stderr.write('Testing tree builders ' + " ".join(list(treeTypes.keys())) + "\n")
    files = get_data_files('tree-construction')

    for filename in files:
        testName = os.path.basename(filename).replace(".dat", "")
        # Template tests require dedicated support; skipped here.
        if testName in ("template",):
            continue

        tests = TestData(filename, "data")

        for index, test in enumerate(tests):
            input, errors, innerHTML, expected = [test[key] for key in
                                                  ('data', 'errors',
                                                   'document-fragment',
                                                   'document')]
            if errors:
                errors = errors.split("\n")
            for treeName, treeCls in treeTypes.items():
                for namespaceHTMLElements in (True, False):
                    yield (runParserTest, innerHTML, input, expected, errors,
                           treeCls, namespaceHTMLElements)
mpl-2.0
ziaa/pelican-plugins
tag_cloud/test_tag_cloud.py
37
3222
"""Tests for the tag_cloud plugin's cloud generation and sorting modes."""
import os
import sys
import unittest

import six

import tag_cloud
from pelican.generators import ArticlesGenerator
from pelican.tests.support import get_settings
from pelican.urlwrappers import Tag

CUR_DIR = os.path.dirname(__file__)
CONTENT_DIR = os.path.join(CUR_DIR, 'test_data')


class TestTagCloudGeneration(unittest.TestCase):
    """Exercise tag_cloud.generate_tag_cloud under each TAG_CLOUD_SORTING mode."""

    @classmethod
    def setUpClass(cls):
        # Build a real ArticlesGenerator over the bundled test content so the
        # plugin operates on genuine Tag objects with real article counts.
        cls._settings = get_settings(filenames={})
        cls._settings['DEFAULT_CATEGORY'] = 'Default'
        cls._settings['DEFAULT_DATE'] = (1970, 1, 1)
        cls._settings['READERS'] = {'asc': None}
        cls._settings['CACHE_CONTENT'] = False
        tag_cloud.set_default_settings(cls._settings)
        cls.generator = ArticlesGenerator(
            context=cls._settings.copy(), settings=cls._settings,
            path=CONTENT_DIR, theme=cls._settings['THEME'], output_path=None)
        cls.generator.generate_context()

    def _tag(self, name):
        # Shorthand for building a Tag bound to the shared settings.
        return Tag(name, self._settings)

    def _cloud_for(self, sorting):
        # Apply the requested sorting mode, regenerate, and return the cloud.
        self.generator.settings['TAG_CLOUD_SORTING'] = sorting
        tag_cloud.generate_tag_cloud(self.generator)
        return self.generator.tag_cloud

    def test_tag_cloud_random(self):
        # Default sorting: order is unspecified, so compare as multisets.
        tag_cloud.generate_tag_cloud(self.generator)
        expected = [
            (self._tag('plugins'), 1),
            (self._tag('fun'), 4),
            (self._tag('python'), 4),
            (self._tag('pelican'), 1),
        ]
        six.assertCountEqual(self, self.generator.tag_cloud, expected)

    def test_tag_cloud_alphabetical(self):
        expected = [
            (self._tag('fun'), 4),
            (self._tag('pelican'), 1),
            (self._tag('plugins'), 1),
            (self._tag('python'), 4),
        ]
        self.assertEqual(self._cloud_for('alphabetically'), expected)

    def test_tag_cloud_alphabetical_rev(self):
        expected = [
            (self._tag('python'), 4),
            (self._tag('plugins'), 1),
            (self._tag('pelican'), 1),
            (self._tag('fun'), 4),
        ]
        self.assertEqual(self._cloud_for('alphabetically-rev'), expected)

    def test_tag_cloud_size(self):
        expected = [
            (self._tag('pelican'), 1),
            (self._tag('plugins'), 1),
            (self._tag('fun'), 4),
            (self._tag('python'), 4),
        ]
        self.assertEqual(self._cloud_for('size'), expected)

    def test_tag_cloud_size_rev(self):
        expected = [
            (self._tag('fun'), 4),
            (self._tag('python'), 4),
            (self._tag('pelican'), 1),
            (self._tag('plugins'), 1),
        ]
        self.assertEqual(self._cloud_for('size-rev'), expected)


if __name__ == "__main__":
    unittest.main()
agpl-3.0
jwu/exlibs-cpp
ext/scintilla/test/XiteWin.py
1
16871
# -*- coding: utf-8 -*- from __future__ import with_statement import os, sys, unittest import ctypes from ctypes import wintypes from ctypes import c_int, c_ulong, c_char_p, c_wchar_p, c_ushort user32=ctypes.windll.user32 gdi32=ctypes.windll.gdi32 kernel32=ctypes.windll.kernel32 from MessageNumbers import msgs, sgsm import XiteMenu scintillaDirectory = ".." scintillaIncludeDirectory = os.path.join(scintillaDirectory, "include") sys.path.append(scintillaIncludeDirectory) import Face scintillaBinDirectory = os.path.join(scintillaDirectory, "bin") os.environ['PATH'] = os.environ['PATH'] + ";" + scintillaBinDirectory #print(os.environ['PATH']) WFUNC = ctypes.WINFUNCTYPE(c_int, c_int, c_int, c_int, c_int) WS_CHILD = 0x40000000 WS_CLIPCHILDREN = 0x2000000 WS_OVERLAPPEDWINDOW = 0xcf0000 WS_VISIBLE = 0x10000000 WS_HSCROLL = 0x100000 WS_VSCROLL = 0x200000 WA_INACTIVE = 0 MF_POPUP = 16 MF_SEPARATOR = 0x800 IDYES = 6 OFN_HIDEREADONLY = 4 MB_OK = 0 MB_YESNOCANCEL = 3 MF_CHECKED = 8 MF_UNCHECKED = 0 SW_SHOW = 5 PM_REMOVE = 1 VK_SHIFT = 16 VK_CONTROL = 17 VK_MENU = 18 class OPENFILENAME(ctypes.Structure): _fields_ = (("lStructSize", c_int), ("hwndOwner", c_int), ("hInstance", c_int), ("lpstrFilter", c_wchar_p), ("lpstrCustomFilter", c_char_p), ("nMaxCustFilter", c_int), ("nFilterIndex", c_int), ("lpstrFile", c_wchar_p), ("nMaxFile", c_int), ("lpstrFileTitle", c_wchar_p), ("nMaxFileTitle", c_int), ("lpstrInitialDir", c_wchar_p), ("lpstrTitle", c_wchar_p), ("flags", c_int), ("nFileOffset", c_ushort), ("nFileExtension", c_ushort), ("lpstrDefExt", c_char_p), ("lCustData", c_int), ("lpfnHook", c_char_p), ("lpTemplateName", c_char_p), ("pvReserved", c_char_p), ("dwReserved", c_int), ("flagsEx", c_int)) def __init__(self, win, title): ctypes.Structure.__init__(self) self.lStructSize = ctypes.sizeof(OPENFILENAME) self.nMaxFile = 1024 self.hwndOwner = win self.lpstrTitle = title self.Flags = OFN_HIDEREADONLY trace = False #~ trace = True def WindowSize(w): rc = ctypes.wintypes.RECT() 
user32.GetClientRect(w, ctypes.byref(rc)) return rc.right - rc.left, rc.bottom - rc.top def IsKeyDown(key): return (user32.GetKeyState(key) & 0x8000) != 0 def KeyTranslate(w): tr = { 9: "Tab", 0xD:"Enter", 0x1B: "Esc" } if w in tr: return tr[w] elif ord("A") <= w <= ord("Z"): return chr(w) elif 0x70 <= w <= 0x7b: return "F" + str(w-0x70+1) else: return "Unknown_" + hex(w) class WNDCLASS(ctypes.Structure): _fields_= (\ ('style', c_int), ('lpfnWndProc', WFUNC), ('cls_extra', c_int), ('wnd_extra', c_int), ('hInst', c_int), ('hIcon', c_int), ('hCursor', c_int), ('hbrBackground', c_int), ('menu_name', c_wchar_p), ('lpzClassName', c_wchar_p), ) class XTEXTRANGE(ctypes.Structure): _fields_= (\ ('cpMin', c_int), ('cpMax', c_int), ('lpstrText', c_char_p), ) class TEXTRANGE(ctypes.Structure): _fields_= (\ ('cpMin', c_int), ('cpMax', c_int), ('lpstrText', ctypes.POINTER(ctypes.c_char)), ) class FINDTEXT(ctypes.Structure): _fields_= (\ ('cpMin', c_int), ('cpMax', c_int), ('lpstrText', c_char_p), ('cpMinText', c_int), ('cpMaxText', c_int), ) hinst = ctypes.windll.kernel32.GetModuleHandleW(0) def RegisterClass(name, func, background = 0): # register a window class for toplevel windows. 
wc = WNDCLASS() wc.style = 0 wc.lpfnWndProc = func wc.cls_extra = 0 wc.wnd_extra = 0 wc.hInst = hinst wc.hIcon = 0 wc.hCursor = 0 wc.hbrBackground = background wc.menu_name = 0 wc.lpzClassName = name user32.RegisterClassW(ctypes.byref(wc)) class SciCall: def __init__(self, fn, ptr, msg): self._fn = fn self._ptr = ptr self._msg = msg def __call__(self, w=0, l=0): return self._fn(self._ptr, self._msg, w, l) class Scintilla: def __init__(self, face, hwndParent, hinstance): self.__dict__["face"] = face self.__dict__["used"] = set() self.__dict__["all"] = set() # The k member is for accessing constants as a dictionary self.__dict__["k"] = {} for f in face.features: self.all.add(f) if face.features[f]["FeatureType"] == "val": self.k[f] = int(self.face.features[f]["Value"], 0) elif face.features[f]["FeatureType"] == "evt": self.k["SCN_"+f] = int(self.face.features[f]["Value"], 0) # Get the function first as that also loads the DLL self.__dict__["_scifn"] = ctypes.windll.SciLexer.Scintilla_DirectFunction self.__dict__["_hwnd"] = user32.CreateWindowExW(0, "Scintilla", "Source", WS_CHILD | WS_VSCROLL | WS_HSCROLL | WS_CLIPCHILDREN, 0, 0, 100, 100, hwndParent, 0, hinstance, 0) self.__dict__["_sciptr"] = user32.SendMessageW(self._hwnd, int(self.face.features["GetDirectPointer"]["Value"], 0), 0,0) user32.ShowWindow(self._hwnd, SW_SHOW) def __getattr__(self, name): if name in self.face.features: self.used.add(name) feature = self.face.features[name] value = int(feature["Value"], 0) #~ print("Feature", name, feature) if feature["FeatureType"] == "val": self.__dict__[name] = value return value else: return SciCall(self._scifn, self._sciptr, value) elif ("Get" + name) in self.face.features: self.used.add("Get" + name) feature = self.face.features["Get" + name] value = int(feature["Value"], 0) if feature["FeatureType"] == "get" and \ not name.startswith("Get") and \ not feature["Param1Type"] and \ not feature["Param2Type"] and \ feature["ReturnType"] in ["bool", "int", "position"]: 
#~ print("property", feature) return self._scifn(self._sciptr, value, 0, 0) elif name.startswith("SCN_") and name in self.k: self.used.add(name) feature = self.face.features[name[4:]] value = int(feature["Value"], 0) #~ print("Feature", name, feature) if feature["FeatureType"] == "val": return value raise AttributeError(name) def __setattr__(self, name, val): if ("Set" + name) in self.face.features: self.used.add("Set" + name) feature = self.face.features["Set" + name] value = int(feature["Value"], 0) #~ print("setproperty", feature) if feature["FeatureType"] == "set" and not name.startswith("Set"): if feature["Param1Type"] in ["bool", "int", "position"]: return self._scifn(self._sciptr, value, val, 0) elif feature["Param2Type"] in ["string"]: return self._scifn(self._sciptr, value, 0, val) raise AttributeError(name) raise AttributeError(name) def getvalue(self, name): if name in self.face.features: feature = self.face.features[name] if feature["FeatureType"] != "evt": try: return int(feature["Value"], 0) except ValueError: return -1 return -1 def ByteRange(self, start, end): tr = TEXTRANGE() tr.cpMin = start tr.cpMax = end length = end - start tr.lpstrText = ctypes.create_string_buffer(length + 1) self.GetTextRange(0, ctypes.byref(tr)) text = tr.lpstrText[:length] text += b"\0" * (length - len(text)) return text def StyledTextRange(self, start, end): tr = TEXTRANGE() tr.cpMin = start tr.cpMax = end length = 2 * (end - start) tr.lpstrText = ctypes.create_string_buffer(length + 2) self.GetStyledText(0, ctypes.byref(tr)) styledText = tr.lpstrText[:length] styledText += b"\0" * (length - len(styledText)) return styledText def FindBytes(self, start, end, s, flags): ft = FINDTEXT() ft.cpMin = start ft.cpMax = end ft.lpstrText = s ft.cpMinText = 0 ft.cpMaxText = 0 pos = self.FindText(flags, ctypes.byref(ft)) #~ print(start, end, ft.cpMinText, ft.cpMaxText) return pos def Contents(self): return self.ByteRange(0, self.Length) def SizeTo(self, width, height): 
user32.SetWindowPos(self._hwnd, 0, 0, 0, width, height, 0) def FocusOn(self): user32.SetFocus(self._hwnd) class XiteWin(): def __init__(self, test=""): self.face = Face.Face() self.face.ReadFromFile(os.path.join(scintillaIncludeDirectory, "Scintilla.iface")) self.titleDirty = True self.fullPath = "" self.test = test self.appName = "xite" self.cmds = {} self.windowName = "XiteWindow" self.wfunc = WFUNC(self.WndProc) RegisterClass(self.windowName, self.wfunc) user32.CreateWindowExW(0, self.windowName, self.appName, \ WS_VISIBLE | WS_OVERLAPPEDWINDOW | WS_CLIPCHILDREN, \ 0, 0, 500, 700, 0, 0, hinst, 0) args = sys.argv[1:] self.SetMenus() if args: self.GrabFile(args[0]) self.ed.FocusOn() self.ed.GotoPos(self.ed.Length) print(self.test) if self.test: for k in self.cmds: if self.cmds[k] == "Test": user32.PostMessageW(self.win, msgs["WM_COMMAND"], k, 0) def OnSize(self): width, height = WindowSize(self.win) self.ed.SizeTo(width, height) user32.InvalidateRect(self.win, 0, 0) def OnCreate(self, hwnd): self.win = hwnd self.ed = Scintilla(self.face, hwnd, hinst) self.ed.FocusOn() def Invalidate(self): user32.InvalidateRect(self.win, 0, 0) def WndProc(self, h, m, w, l): ms = sgsm.get(m, "XXX") if trace: print("%s %s %s %s" % (hex(h)[2:],ms,w,l)) if ms == "WM_CLOSE": user32.PostQuitMessage(0) elif ms == "WM_CREATE": self.OnCreate(h) return 0 elif ms == "WM_SIZE": # Work out size if w != 1: self.OnSize() return 0 elif ms == "WM_COMMAND": cmdCode = w & 0xffff if cmdCode in self.cmds: self.Command(self.cmds[cmdCode]) return 0 elif ms == "WM_ACTIVATE": if w != WA_INACTIVE: self.ed.FocusOn() return 0 else: return user32.DefWindowProcW(h, m, w, l) return 0 def Command(self, name): name = name.replace(" ", "") method = "Cmd" + name cmd = None try: cmd = getattr(self, method) except AttributeError: return if cmd: cmd() def KeyDown(self, w, prefix = ""): keyName = prefix if IsKeyDown(VK_CONTROL): keyName += "<control>" if IsKeyDown(VK_SHIFT): keyName += "<shift>" keyName += 
KeyTranslate(w) if trace: print("Key:", keyName) if keyName in self.keys: method = "Cmd" + self.keys[keyName] getattr(self, method)() return True #~ print("UKey:", keyName) return False def Accelerator(self, msg): ms = sgsm.get(msg.message, "XXX") if ms == "WM_KEYDOWN": return self.KeyDown(msg.wParam) elif ms == "WM_SYSKEYDOWN": return self.KeyDown(msg.wParam, "<alt>") return False def AppLoop(self): msg = ctypes.wintypes.MSG() lpmsg = ctypes.byref(msg) while user32.GetMessageW(lpmsg, 0, 0, 0): if trace and msg.message != msgs["WM_TIMER"]: print('mm', hex(msg.hWnd)[2:],sgsm.get(msg.message, "XXX")) if not self.Accelerator(msg): user32.TranslateMessage(lpmsg) user32.DispatchMessageW(lpmsg) def DoEvents(self): msg = ctypes.wintypes.MSG() lpmsg = ctypes.byref(msg) cont = True while cont: cont = user32.PeekMessageW(lpmsg, 0, 0, 0, PM_REMOVE) if cont: if not self.Accelerator(msg): user32.TranslateMessage(lpmsg) user32.DispatchMessageW(lpmsg) def SetTitle(self, changePath): if changePath or self.titleDirty != self.ed.Modify: self.titleDirty = self.ed.Modify self.title = self.fullPath if self.titleDirty: self.title += " * " else: self.title += " - " self.title += self.appName if self.win: user32.SetWindowTextW(self.win, self.title) def Open(self): ofx = OPENFILENAME(self.win, "Open File") opath = "\0" * 1024 ofx.lpstrFile = opath filters = ["Python (.py;.pyw)|*.py;*.pyw|All|*.*"] filterText = "\0".join([f.replace("|", "\0") for f in filters])+"\0\0" ofx.lpstrFilter = filterText if ctypes.windll.comdlg32.GetOpenFileNameW(ctypes.byref(ofx)): absPath = opath.replace("\0", "") self.GrabFile(absPath) self.ed.FocusOn() self.ed.LexerLanguage = "python" self.ed.Lexer = self.ed.SCLEX_PYTHON self.ed.SetKeyWords(0, "class def else for if import print return while") for style in [k for k in self.ed.k if k.startswith("SCE_P_")]: self.ed.StyleSetFont(self.ed.k[style], "Verdana") if "COMMENT" in style: self.ed.StyleSetFore(self.ed.k[style], 127 * 256) 
self.ed.StyleSetFont(self.ed.k[style], "Comic Sans MS") elif "OPERATOR" in style: print(style, self.ed.k[style]) self.ed.StyleSetBold(self.ed.k[style], 1) self.ed.StyleSetFore(self.ed.k[style], 127 * 256 * 256) elif "WORD" in style: print(style, self.ed.k[style]) self.ed.StyleSetItalic(self.ed.k[style], 255) self.ed.StyleSetFore(self.ed.k[style], 255 * 256 * 256) else: self.ed.StyleSetFore(self.ed.k[style], 0) def SaveAs(self): ofx = OPENFILENAME(self.win, "Save File") opath = "\0" * 1024 ofx.lpstrFile = opath if ctypes.windll.comdlg32.GetSaveFileNameW(ctypes.byref(ofx)): self.fullPath = opath.replace("\0", "") self.Save() self.SetTitle(1) self.ed.FocusOn() def SetMenus(self): ui = XiteMenu.MenuStructure self.cmds = {} self.keys = {} cmdId = 0 self.menuBar = user32.CreateMenu() for name, contents in ui: cmdId += 1 menu = user32.CreateMenu() for item in contents: text, key = item cmdText = text.replace("&", "") cmdText = cmdText.replace("...", "") cmdText = cmdText.replace(" ", "") cmdId += 1 if key: keyText = key.replace("<control>", "Ctrl+") keyText = keyText.replace("<shift>", "Shift+") text += "\t" + keyText if text == "-": user32.AppendMenuW(menu, MF_SEPARATOR, cmdId, text) else: user32.AppendMenuW(menu, 0, cmdId, text) self.cmds[cmdId] = cmdText self.keys[key] = cmdText #~ print(cmdId, item) user32.AppendMenuW(self.menuBar, MF_POPUP, menu, name) user32.SetMenu(self.win, self.menuBar) self.CheckMenuItem("Wrap", True) user32.ShowWindow(self.win, SW_SHOW) def CheckMenuItem(self, name, val): #~ print(name, val) if self.cmds: for k,v in self.cmds.items(): if v == name: #~ print(name, k) user32.CheckMenuItem(user32.GetMenu(self.win), \ k, [MF_UNCHECKED, MF_CHECKED][val]) def Exit(self): sys.exit(0) def DisplayMessage(self, msg, ask): return IDYES == user32.MessageBoxW(self.win, \ msg, self.appName, [MB_OK, MB_YESNOCANCEL][ask]) def NewDocument(self): self.ed.ClearAll() self.ed.EmptyUndoBuffer() self.ed.SetSavePoint() def SaveIfUnsure(self): if self.ed.Modify: msg = 
"Save changes to \"" + self.fullPath + "\"?" print(msg) decision = self.DisplayMessage(msg, True) if decision: self.CmdSave() return decision return True def New(self): if self.SaveIfUnsure(): self.fullPath = "" self.overrideMode = None self.NewDocument() self.SetTitle(1) self.Invalidate() def CheckMenus(self): pass def MoveSelection(self, caret, anchor=-1): if anchor == -1: anchor = caret self.ed.SetSelectionStart(caret) self.ed.SetSelectionEnd(anchor) self.ed.ScrollCaret() self.Invalidate() def GrabFile(self, name): self.fullPath = name self.overrideMode = None self.NewDocument() fsr = open(name, "rb") data = fsr.read() fsr.close() self.ed.AddText(len(data), data) self.ed.EmptyUndoBuffer() self.MoveSelection(0) self.SetTitle(1) def Save(self): fos = open(self.fullPath, "wb") blockSize = 1024 length = self.ed.Length i = 0 while i < length: grabSize = length - i if grabSize > blockSize: grabSize = blockSize #~ print(i, grabSize, length) data = self.ed.ByteRange(i, i + grabSize) fos.write(data) i += grabSize fos.close() self.ed.SetSavePoint() self.SetTitle(0) # Command handlers are called by menu actions def CmdNew(self): self.New() def CmdOpen(self): self.Open() def CmdSave(self): if (self.fullPath == None) or (len(self.fullPath) == 0): self.SaveAs() else: self.Save() def CmdSaveAs(self): self.SaveAs() def CmdTest(self): runner = unittest.TextTestRunner() if self.test: tests = unittest.defaultTestLoader.loadTestsFromName(self.test) else: tests = unittest.defaultTestLoader.loadTestsFromName("simpleTests") results = runner.run(tests) #~ print(results) if self.test: user32.PostQuitMessage(0) def CmdExercised(self): print() unused = sorted(self.ed.all.difference(self.ed.used)) print("Unused", len(unused)) print() print("\n".join(unused)) print() print("Used", len(self.ed.used)) print() print("\n".join(sorted(self.ed.used))) def Uncalled(self): print() unused = sorted(self.ed.all.difference(self.ed.used)) uu = {} for u in unused: v = self.ed.getvalue(u) if v > 2000: 
uu[v] = u #~ for x in sorted(uu.keys())[150:]: return uu def CmdExit(self): self.Exit() def CmdUndo(self): self.ed.Undo() def CmdRedo(self): self.ed.Redo() def CmdCut(self): self.ed.Cut() def CmdCopy(self): self.ed.Copy() def CmdPaste(self): self.ed.Paste() def CmdDelete(self): self.ed.Clear() xiteFrame = None def main(test): global xiteFrame xiteFrame = XiteWin(test) xiteFrame.AppLoop() #~ xiteFrame.CmdExercised() return xiteFrame.Uncalled()
lgpl-3.0
microsoft/EconML
econml/tests/test_shap.py
1
10941
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import numpy as np import unittest import shap from shap.plots import scatter, heatmap, bar, beeswarm, waterfall, force from econml.dml import * from econml.orf import * from econml.dr import * from econml.metalearners import * from sklearn.linear_model import LinearRegression, LogisticRegression, Lasso from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier from sklearn.preprocessing import PolynomialFeatures class TestShap(unittest.TestCase): def test_continuous_t(self): n = 100 d_x = 3 d_w = 2 X = np.random.normal(size=(n, d_x)) W = np.random.normal(size=(n, d_w)) for d_t in [2, 1]: for d_y in [3, 1]: Y = np.random.normal(size=(n, d_y)) Y = Y.flatten() if d_y == 1 else Y T = np.random.normal(size=(n, d_t)) T = T.flatten() if d_t == 1 else T for featurizer in [None, PolynomialFeatures(degree=2, include_bias=False)]: est_list = [LinearDML(model_y=LinearRegression(), model_t=LinearRegression(), featurizer=featurizer), CausalForestDML(model_y=LinearRegression(), model_t=LinearRegression())] if d_t == 1: est_list += [ NonParamDML(model_y=LinearRegression( ), model_t=LinearRegression(), model_final=RandomForestRegressor(), featurizer=featurizer), ] for est in est_list: with self.subTest(est=est, featurizer=featurizer, d_y=d_y, d_t=d_t): est.fit(Y, T, X, W) shap_values = est.shap_values(X[:10], feature_names=["a", "b", "c"], background_samples=None) for i, output in enumerate(est.cate_output_names()): for j, treat in enumerate(est.cate_treatment_names()): # test base values equals to mean of constant marginal effect if not isinstance(est, (CausalForestDML, DMLOrthoForest)): mean_cate = est.const_marginal_effect(X[:10]).mean(axis=0) mean_cate = np.array(mean_cate).reshape((d_y, d_t))[i, j] self.assertAlmostEqual( shap_values[output][treat].base_values[0], mean_cate, delta=1e-2) # test shape of shap values output is as expected 
self.assertEqual(len(shap_values[output]), d_t) self.assertEqual(len(shap_values), d_y) # test shape of attribute of explanation object is as expected self.assertEqual(shap_values[output][treat].values.shape[0], 10) self.assertEqual(shap_values[output][treat].data.shape[0], 10) self.assertEqual(shap_values[output][treat].base_values.shape, (10,)) # test length of feature names equals to shap values shape self.assertEqual( len(shap_values[output][treat].feature_names), shap_values[output][treat].values.shape[1]) def test_discrete_t(self): n = 100 d_x = 3 d_w = 2 X = np.random.normal(size=(n, d_x)) W = np.random.normal(size=(n, d_w)) for d_t in [3, 2]: for d_y in [3, 1]: Y = np.random.normal(size=(n, d_y)) Y = Y.flatten() if d_y == 1 else Y T = np.random.choice(range(d_t), size=(n,)) for featurizer in [None, PolynomialFeatures(degree=2, include_bias=False)]: est_list = [LinearDML(featurizer=featurizer, discrete_treatment=True), TLearner(models=RandomForestRegressor()), SLearner(overall_model=RandomForestRegressor()), XLearner(models=RandomForestRegressor()), DomainAdaptationLearner(models=RandomForestRegressor(), final_models=RandomForestRegressor()), CausalForestDML(model_y=LinearRegression(), model_t=LogisticRegression(), discrete_treatment=True) ] if d_t == 2: est_list += [ NonParamDML(model_y=LinearRegression( ), model_t=LogisticRegression(), model_final=RandomForestRegressor(), featurizer=featurizer, discrete_treatment=True)] if d_y == 1: est_list += [DRLearner(multitask_model_final=True, featurizer=featurizer), DRLearner(multitask_model_final=False, featurizer=featurizer), ForestDRLearner()] for est in est_list: with self.subTest(est=est, featurizer=featurizer, d_y=d_y, d_t=d_t): if isinstance(est, (TLearner, SLearner, XLearner, DomainAdaptationLearner)): est.fit(Y, T, X) else: est.fit(Y, T, X, W) shap_values = est.shap_values(X[:10], feature_names=["a", "b", "c"], background_samples=None) for i, output in enumerate(est.cate_output_names()): for j, treat in 
enumerate(est.cate_treatment_names()): # test base values equals to mean of constant marginal effect if not isinstance(est, (CausalForestDML, ForestDRLearner, DROrthoForest)): mean_cate = est.const_marginal_effect(X[:10]).mean(axis=0) mean_cate = np.array(mean_cate).reshape((d_y, d_t - 1))[i, j] self.assertAlmostEqual( shap_values[output][treat].base_values[0], mean_cate, delta=1e-2) # test shape of shap values output is as expected self.assertEqual(len(shap_values[output]), d_t - 1) self.assertEqual(len(shap_values), d_y) # test shape of attribute of explanation object is as expected self.assertEqual(shap_values[output][treat].values.shape[0], 10) self.assertEqual(shap_values[output][treat].data.shape[0], 10) self.assertEqual(shap_values[output][treat].base_values.shape, (10,)) # test length of feature names equals to shap values shape self.assertEqual( len(shap_values[output][treat].feature_names), shap_values[output][treat].values.shape[1]) def test_identical_output(self): # Treatment effect function def exp_te(x): return np.exp(2 * x[0]) n = 500 n_w = 10 support_size = 5 n_x = 2 # Outcome support support_Y = np.random.choice(range(n_w), size=support_size, replace=False) coefs_Y = np.random.uniform(0, 1, size=(support_size,)) def epsilon_sample(n): return np.random.uniform(-1, 1, size=(n,)) # Treatment support support_T = support_Y coefs_T = np.random.uniform(0, 1, size=support_size) def eta_sample(n): return np.random.uniform(-1, 1, size=n) # Generate controls, covariates, treatments and outcomes W = np.random.normal(0, 1, size=(n, n_w)) X = np.random.uniform(0, 1, size=(n, n_x)) # Heterogeneous treatment effects TE = np.array([np.exp(2 * x_i[0]) for x_i in X]).flatten() T = np.dot(W[:, support_T], coefs_T) + eta_sample(n) Y = (TE * T) + np.dot(W[:, support_Y], coefs_Y) + epsilon_sample(n) Y = np.tile(Y.reshape(-1, 1), (1, 2)) est = LinearDML(model_y=Lasso(), model_t=Lasso(), random_state=123, fit_cate_intercept=True, featurizer=PolynomialFeatures(degree=2, 
include_bias=False)) est.fit(Y, T, X=X, W=W) shap_values1 = est.shap_values(X[:10], feature_names=["A", "B"], treatment_names=["orange"], background_samples=None) est = LinearDML(model_y=Lasso(), model_t=Lasso(), random_state=123, fit_cate_intercept=True, featurizer=PolynomialFeatures(degree=2, include_bias=False)) est.fit(Y[:, 0], T, X=X, W=W) shap_values2 = est.shap_values(X[:10], feature_names=["A", "B"], treatment_names=["orange"], background_samples=None) np.testing.assert_allclose(shap_values1["Y0"]["orange"].data, shap_values2["Y0"]["orange"].data) np.testing.assert_allclose(shap_values1["Y0"]["orange"].values, shap_values2["Y0"]["orange"].values) # TODO There is a matrix dimension mismatch between multiple outcome and single outcome, should solve that # through shap package. np.testing.assert_allclose(shap_values1["Y0"]["orange"].main_effects, shap_values2["Y0"]["orange"].main_effects) np.testing.assert_allclose(shap_values1["Y0"]["orange"].base_values, shap_values2["Y0"]["orange"].base_values) # test shap could generate the plot from the shap_values heatmap(shap_values1["Y0"]["orange"], show=False) waterfall(shap_values1["Y0"]["orange"][6], show=False) scatter(shap_values1["Y0"]["orange"][:, "A"], show=False) bar(shap_values1["Y0"]["orange"], show=False) beeswarm(shap_values1["Y0"]["orange"], show=False)
mit
shubhamchopra/spark
python/pyspark/ml/param/_shared_params_code_gen.py
14
10161
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function header = """# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #""" # Code generator for shared params (shared.py). 
Run under this folder with: # python _shared_params_code_gen.py > shared.py def _gen_param_header(name, doc, defaultValueStr, typeConverter): """ Generates the header part for shared variables :param name: param name :param doc: param doc """ template = '''class Has$Name(Params): """ Mixin for param $name: $doc """ $name = Param(Params._dummy(), "$name", "$doc", typeConverter=$typeConverter) def __init__(self): super(Has$Name, self).__init__()''' if defaultValueStr is not None: template += ''' self._setDefault($name=$defaultValueStr)''' Name = name[0].upper() + name[1:] if typeConverter is None: typeConverter = str(None) return template \ .replace("$name", name) \ .replace("$Name", Name) \ .replace("$doc", doc) \ .replace("$defaultValueStr", str(defaultValueStr)) \ .replace("$typeConverter", typeConverter) def _gen_param_code(name, doc, defaultValueStr): """ Generates Python code for a shared param class. :param name: param name :param doc: param doc :param defaultValueStr: string representation of the default value :return: code string """ # TODO: How to correctly inherit instance attributes? template = ''' def set$Name(self, value): """ Sets the value of :py:attr:`$name`. """ return self._set($name=value) def get$Name(self): """ Gets the value of $name or its default value. """ return self.getOrDefault(self.$name)''' Name = name[0].upper() + name[1:] return template \ .replace("$name", name) \ .replace("$Name", Name) \ .replace("$doc", doc) \ .replace("$defaultValueStr", str(defaultValueStr)) if __name__ == "__main__": print(header) print("\n# DO NOT MODIFY THIS FILE! 
It was generated by _shared_params_code_gen.py.\n") print("from pyspark.ml.param import *\n\n") shared = [ ("maxIter", "max number of iterations (>= 0).", None, "TypeConverters.toInt"), ("regParam", "regularization parameter (>= 0).", None, "TypeConverters.toFloat"), ("featuresCol", "features column name.", "'features'", "TypeConverters.toString"), ("labelCol", "label column name.", "'label'", "TypeConverters.toString"), ("predictionCol", "prediction column name.", "'prediction'", "TypeConverters.toString"), ("probabilityCol", "Column name for predicted class conditional probabilities. " + "Note: Not all models output well-calibrated probability estimates! These probabilities " + "should be treated as confidences, not precise probabilities.", "'probability'", "TypeConverters.toString"), ("rawPredictionCol", "raw prediction (a.k.a. confidence) column name.", "'rawPrediction'", "TypeConverters.toString"), ("inputCol", "input column name.", None, "TypeConverters.toString"), ("inputCols", "input column names.", None, "TypeConverters.toListString"), ("outputCol", "output column name.", "self.uid + '__output'", "TypeConverters.toString"), ("numFeatures", "number of features.", None, "TypeConverters.toInt"), ("checkpointInterval", "set checkpoint interval (>= 1) or disable checkpoint (-1). " + "E.g. 10 means that the cache will get checkpointed every 10 iterations.", None, "TypeConverters.toInt"), ("seed", "random seed.", "hash(type(self).__name__)", "TypeConverters.toInt"), ("tol", "the convergence tolerance for iterative algorithms (>= 0).", None, "TypeConverters.toFloat"), ("stepSize", "Step size to be used for each iteration of optimization (>= 0).", None, "TypeConverters.toFloat"), ("handleInvalid", "how to handle invalid entries. Options are skip (which will filter " + "out rows with bad values), or error (which will throw an error). 
More options may be " + "added later.", None, "TypeConverters.toString"), ("elasticNetParam", "the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, " + "the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.", "0.0", "TypeConverters.toFloat"), ("fitIntercept", "whether to fit an intercept term.", "True", "TypeConverters.toBoolean"), ("standardization", "whether to standardize the training features before fitting the " + "model.", "True", "TypeConverters.toBoolean"), ("thresholds", "Thresholds in multi-class classification to adjust the probability of " + "predicting each class. Array must have length equal to the number of classes, with " + "values > 0, excepting that at most one value may be 0. " + "The class with largest value p/t is predicted, where p is the original " + "probability of that class and t is the class's threshold.", None, "TypeConverters.toListFloat"), ("threshold", "threshold in binary classification prediction, in range [0, 1]", "0.5", "TypeConverters.toFloat"), ("weightCol", "weight column name. If this is not set or empty, we treat " + "all instance weights as 1.0.", None, "TypeConverters.toString"), ("solver", "the solver algorithm for optimization. If this is not set or empty, " + "default value is 'auto'.", "'auto'", "TypeConverters.toString"), ("varianceCol", "column name for the biased sample variance of prediction.", None, "TypeConverters.toString"), ("aggregationDepth", "suggested depth for treeAggregate (>= 2).", "2", "TypeConverters.toInt"), ("parallelism", "the number of threads to use when running parallel algorithms (>= 1).", "1", "TypeConverters.toInt")] code = [] for name, doc, defaultValueStr, typeConverter in shared: param_code = _gen_param_header(name, doc, defaultValueStr, typeConverter) code.append(param_code + "\n" + _gen_param_code(name, doc, defaultValueStr)) decisionTreeParams = [ ("maxDepth", "Maximum depth of the tree. 
(>= 0) E.g., depth 0 means 1 leaf node; " + "depth 1 means 1 internal node + 2 leaf nodes.", "TypeConverters.toInt"), ("maxBins", "Max number of bins for" + " discretizing continuous features. Must be >=2 and >= number of categories for any" + " categorical feature.", "TypeConverters.toInt"), ("minInstancesPerNode", "Minimum number of instances each child must have after split. " + "If a split causes the left or right child to have fewer than minInstancesPerNode, the " + "split will be discarded as invalid. Should be >= 1.", "TypeConverters.toInt"), ("minInfoGain", "Minimum information gain for a split to be considered at a tree node.", "TypeConverters.toFloat"), ("maxMemoryInMB", "Maximum memory in MB allocated to histogram aggregation. If too small," + " then 1 node will be split per iteration, and its aggregates may exceed this size.", "TypeConverters.toInt"), ("cacheNodeIds", "If false, the algorithm will pass trees to executors to match " + "instances with nodes. If true, the algorithm will cache node IDs for each instance. " + "Caching can speed up training of deeper trees. Users can set how often should the " + "cache be checkpointed or disable it by setting checkpointInterval.", "TypeConverters.toBoolean")] decisionTreeCode = '''class DecisionTreeParams(Params): """ Mixin for Decision Tree parameters. 
""" $dummyPlaceHolders def __init__(self): super(DecisionTreeParams, self).__init__()''' dtParamMethods = "" dummyPlaceholders = "" paramTemplate = """$name = Param($owner, "$name", "$doc", typeConverter=$typeConverterStr)""" for name, doc, typeConverterStr in decisionTreeParams: if typeConverterStr is None: typeConverterStr = str(None) variable = paramTemplate.replace("$name", name).replace("$doc", doc) \ .replace("$typeConverterStr", typeConverterStr) dummyPlaceholders += variable.replace("$owner", "Params._dummy()") + "\n " dtParamMethods += _gen_param_code(name, doc, None) + "\n" code.append(decisionTreeCode.replace("$dummyPlaceHolders", dummyPlaceholders) + "\n" + dtParamMethods) print("\n\n\n".join(code))
apache-2.0
bdh1011/wau
venv/lib/python2.7/site-packages/requests/models.py
410
29176
# -*- coding: utf-8 -*- """ requests.models ~~~~~~~~~~~~~~~ This module contains the primary objects that power Requests. """ import collections import datetime from io import BytesIO, UnsupportedOperation from .hooks import default_hooks from .structures import CaseInsensitiveDict from .auth import HTTPBasicAuth from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar from .packages.urllib3.fields import RequestField from .packages.urllib3.filepost import encode_multipart_formdata from .packages.urllib3.util import parse_url from .packages.urllib3.exceptions import ( DecodeError, ReadTimeoutError, ProtocolError, LocationParseError) from .exceptions import ( HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError, ContentDecodingError, ConnectionError, StreamConsumedError) from .utils import ( guess_filename, get_auth_from_url, requote_uri, stream_decode_response_unicode, to_key_val_list, parse_header_links, iter_slices, guess_json_utf, super_len, to_native_string) from .compat import ( cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO, is_py2, chardet, json, builtin_str, basestring) from .status_codes import codes #: The set of HTTP status codes that indicate an automatically #: processable redirect. REDIRECT_STATI = ( codes.moved, # 301 codes.found, # 302 codes.other, # 303 codes.temporary_redirect, # 307 codes.permanent_redirect, # 308 ) DEFAULT_REDIRECT_LIMIT = 30 CONTENT_CHUNK_SIZE = 10 * 1024 ITER_CHUNK_SIZE = 512 json_dumps = json.dumps class RequestEncodingMixin(object): @property def path_url(self): """Build the path URL to use.""" url = [] p = urlsplit(self.url) path = p.path if not path: path = '/' url.append(path) query = p.query if query: url.append('?') url.append(query) return ''.join(url) @staticmethod def _encode_params(data): """Encode parameters in a piece of data. Will successfully encode parameters when passed as a dict or a list of 2-tuples. 
Order is retained if data is a list of 2-tuples but arbitrary if parameters are supplied as a dict. """ if isinstance(data, (str, bytes)): return data elif hasattr(data, 'read'): return data elif hasattr(data, '__iter__'): result = [] for k, vs in to_key_val_list(data): if isinstance(vs, basestring) or not hasattr(vs, '__iter__'): vs = [vs] for v in vs: if v is not None: result.append( (k.encode('utf-8') if isinstance(k, str) else k, v.encode('utf-8') if isinstance(v, str) else v)) return urlencode(result, doseq=True) else: return data @staticmethod def _encode_files(files, data): """Build the body for a multipart/form-data request. Will successfully encode files when passed as a dict or a list of 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary if parameters are supplied as a dict. """ if (not files): raise ValueError("Files must be provided.") elif isinstance(data, basestring): raise ValueError("Data must not be a string.") new_fields = [] fields = to_key_val_list(data or {}) files = to_key_val_list(files or {}) for field, val in fields: if isinstance(val, basestring) or not hasattr(val, '__iter__'): val = [val] for v in val: if v is not None: # Don't call str() on bytestrings: in Py3 it all goes wrong. 
if not isinstance(v, bytes): v = str(v) new_fields.append( (field.decode('utf-8') if isinstance(field, bytes) else field, v.encode('utf-8') if isinstance(v, str) else v)) for (k, v) in files: # support for explicit filename ft = None fh = None if isinstance(v, (tuple, list)): if len(v) == 2: fn, fp = v elif len(v) == 3: fn, fp, ft = v else: fn, fp, ft, fh = v else: fn = guess_filename(v) or k fp = v if isinstance(fp, (str, bytes, bytearray)): fdata = fp else: fdata = fp.read() rf = RequestField(name=k, data=fdata, filename=fn, headers=fh) rf.make_multipart(content_type=ft) new_fields.append(rf) body, content_type = encode_multipart_formdata(new_fields) return body, content_type class RequestHooksMixin(object): def register_hook(self, event, hook): """Properly register a hook.""" if event not in self.hooks: raise ValueError('Unsupported event specified, with event name "%s"' % (event)) if isinstance(hook, collections.Callable): self.hooks[event].append(hook) elif hasattr(hook, '__iter__'): self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable)) def deregister_hook(self, event, hook): """Deregister a previously registered hook. Returns True if the hook existed, False if not. """ try: self.hooks[event].remove(hook) return True except ValueError: return False class Request(RequestHooksMixin): """A user-created :class:`Request <Request>` object. Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server. :param method: HTTP method to use. :param url: URL to send. :param headers: dictionary of headers to send. :param files: dictionary of {filename: fileobject} files to multipart upload. :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place. :param json: json for the body to attach to the request (if data is not specified). :param params: dictionary of URL parameters to append to the URL. :param auth: Auth handler or (user, pass) tuple. 
:param cookies: dictionary or CookieJar of cookies to attach to this request. :param hooks: dictionary of callback hooks, for internal usage. Usage:: >>> import requests >>> req = requests.Request('GET', 'http://httpbin.org/get') >>> req.prepare() <PreparedRequest [GET]> """ def __init__(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None): # Default empty dicts for dict params. data = [] if data is None else data files = [] if files is None else files headers = {} if headers is None else headers params = {} if params is None else params hooks = {} if hooks is None else hooks self.hooks = default_hooks() for (k, v) in list(hooks.items()): self.register_hook(event=k, hook=v) self.method = method self.url = url self.headers = headers self.files = files self.data = data self.json = json self.params = params self.auth = auth self.cookies = cookies def __repr__(self): return '<Request [%s]>' % (self.method) def prepare(self): """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it.""" p = PreparedRequest() p.prepare( method=self.method, url=self.url, headers=self.headers, files=self.files, data=self.data, json=self.json, params=self.params, auth=self.auth, cookies=self.cookies, hooks=self.hooks, ) return p class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): """The fully mutable :class:`PreparedRequest <PreparedRequest>` object, containing the exact bytes that will be sent to the server. Generated from either a :class:`Request <Request>` object or manually. Usage:: >>> import requests >>> req = requests.Request('GET', 'http://httpbin.org/get') >>> r = req.prepare() <PreparedRequest [GET]> >>> s = requests.Session() >>> s.send(r) <Response [200]> """ def __init__(self): #: HTTP verb to send to the server. self.method = None #: HTTP URL to send the request to. self.url = None #: dictionary of HTTP headers. 
self.headers = None # The `CookieJar` used to create the Cookie header will be stored here # after prepare_cookies is called self._cookies = None #: request body to send to the server. self.body = None #: dictionary of callback hooks, for internal usage. self.hooks = default_hooks() def prepare(self, method=None, url=None, headers=None, files=None, data=None, params=None, auth=None, cookies=None, hooks=None, json=None): """Prepares the entire request with the given parameters.""" self.prepare_method(method) self.prepare_url(url, params) self.prepare_headers(headers) self.prepare_cookies(cookies) self.prepare_body(data, files, json) self.prepare_auth(auth, url) # Note that prepare_auth must be last to enable authentication schemes # such as OAuth to work on a fully prepared request. # This MUST go after prepare_auth. Authenticators could add a hook self.prepare_hooks(hooks) def __repr__(self): return '<PreparedRequest [%s]>' % (self.method) def copy(self): p = PreparedRequest() p.method = self.method p.url = self.url p.headers = self.headers.copy() if self.headers is not None else None p._cookies = _copy_cookie_jar(self._cookies) p.body = self.body p.hooks = self.hooks return p def prepare_method(self, method): """Prepares the given HTTP method.""" self.method = method if self.method is not None: self.method = self.method.upper() def prepare_url(self, url, params): """Prepares the given HTTP URL.""" #: Accept objects that have string representations. #: We're unable to blindy call unicode/str functions #: as this will include the bytestring indicator (b'') #: on python 3.x. #: https://github.com/kennethreitz/requests/pull/2238 if isinstance(url, bytes): url = url.decode('utf8') else: url = unicode(url) if is_py2 else str(url) # Don't do any URL preparation for non-HTTP schemes like `mailto`, # `data` etc to work around exceptions from `url_parse`, which # handles RFC 3986 only. 
if ':' in url and not url.lower().startswith('http'): self.url = url return # Support for unicode domain names and paths. try: scheme, auth, host, port, path, query, fragment = parse_url(url) except LocationParseError as e: raise InvalidURL(*e.args) if not scheme: raise MissingSchema("Invalid URL {0!r}: No schema supplied. " "Perhaps you meant http://{0}?".format( to_native_string(url, 'utf8'))) if not host: raise InvalidURL("Invalid URL %r: No host supplied" % url) # Only want to apply IDNA to the hostname try: host = host.encode('idna').decode('utf-8') except UnicodeError: raise InvalidURL('URL has an invalid label.') # Carefully reconstruct the network location netloc = auth or '' if netloc: netloc += '@' netloc += host if port: netloc += ':' + str(port) # Bare domains aren't valid URLs. if not path: path = '/' if is_py2: if isinstance(scheme, str): scheme = scheme.encode('utf-8') if isinstance(netloc, str): netloc = netloc.encode('utf-8') if isinstance(path, str): path = path.encode('utf-8') if isinstance(query, str): query = query.encode('utf-8') if isinstance(fragment, str): fragment = fragment.encode('utf-8') enc_params = self._encode_params(params) if enc_params: if query: query = '%s&%s' % (query, enc_params) else: query = enc_params url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment])) self.url = url def prepare_headers(self, headers): """Prepares the given HTTP headers.""" if headers: self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items()) else: self.headers = CaseInsensitiveDict() def prepare_body(self, data, files, json=None): """Prepares the given HTTP body data.""" # Check if file, fo, generator, iterator. # If not, run through normal process. # Nottin' on you. 
body = None content_type = None length = None if json is not None: content_type = 'application/json' body = json_dumps(json) is_stream = all([ hasattr(data, '__iter__'), not isinstance(data, (basestring, list, tuple, dict)) ]) try: length = super_len(data) except (TypeError, AttributeError, UnsupportedOperation): length = None if is_stream: body = data if files: raise NotImplementedError('Streamed bodies and files are mutually exclusive.') if length is not None: self.headers['Content-Length'] = builtin_str(length) else: self.headers['Transfer-Encoding'] = 'chunked' else: # Multi-part file uploads. if files: (body, content_type) = self._encode_files(files, data) else: if data and json is None: body = self._encode_params(data) if isinstance(data, basestring) or hasattr(data, 'read'): content_type = None else: content_type = 'application/x-www-form-urlencoded' self.prepare_content_length(body) # Add content-type if it wasn't explicitly provided. if content_type and ('content-type' not in self.headers): self.headers['Content-Type'] = content_type self.body = body def prepare_content_length(self, body): if hasattr(body, 'seek') and hasattr(body, 'tell'): body.seek(0, 2) self.headers['Content-Length'] = builtin_str(body.tell()) body.seek(0, 0) elif body is not None: l = super_len(body) if l: self.headers['Content-Length'] = builtin_str(l) elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None): self.headers['Content-Length'] = '0' def prepare_auth(self, auth, url=''): """Prepares the given HTTP auth data.""" # If no Auth is explicitly provided, extract it from the URL first. if auth is None: url_auth = get_auth_from_url(self.url) auth = url_auth if any(url_auth) else None if auth: if isinstance(auth, tuple) and len(auth) == 2: # special-case basic HTTP auth auth = HTTPBasicAuth(*auth) # Allow auth to make its changes. r = auth(self) # Update self to reflect the auth changes. 
self.__dict__.update(r.__dict__) # Recompute Content-Length self.prepare_content_length(self.body) def prepare_cookies(self, cookies): """Prepares the given HTTP cookie data. This function eventually generates a ``Cookie`` header from the given cookies using cookielib. Due to cookielib's design, the header will not be regenerated if it already exists, meaning this function can only be called once for the life of the :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls to ``prepare_cookies`` will have no actual effect, unless the "Cookie" header is removed beforehand.""" if isinstance(cookies, cookielib.CookieJar): self._cookies = cookies else: self._cookies = cookiejar_from_dict(cookies) cookie_header = get_cookie_header(self._cookies, self) if cookie_header is not None: self.headers['Cookie'] = cookie_header def prepare_hooks(self, hooks): """Prepares the given hooks.""" # hooks can be passed as None to the prepare method and to this # method. To prevent iterating over None, simply use an empty list # if hooks is False-y hooks = hooks or [] for event in hooks: self.register_hook(event, hooks[event]) class Response(object): """The :class:`Response <Response>` object, which contains a server's response to an HTTP request. """ __attrs__ = [ '_content', 'status_code', 'headers', 'url', 'history', 'encoding', 'reason', 'cookies', 'elapsed', 'request', ] def __init__(self): super(Response, self).__init__() self._content = False self._content_consumed = False #: Integer Code of responded HTTP Status, e.g. 404 or 200. self.status_code = None #: Case-insensitive Dictionary of Response Headers. #: For example, ``headers['content-encoding']`` will return the #: value of a ``'Content-Encoding'`` response header. self.headers = CaseInsensitiveDict() #: File-like object representation of response (for advanced usage). #: Use of ``raw`` requires that ``stream=True`` be set on the request. # This requirement does not apply for use internally to Requests. 
self.raw = None #: Final URL location of Response. self.url = None #: Encoding to decode with when accessing r.text. self.encoding = None #: A list of :class:`Response <Response>` objects from #: the history of the Request. Any redirect responses will end #: up here. The list is sorted from the oldest to the most recent request. self.history = [] #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK". self.reason = None #: A CookieJar of Cookies the server sent back. self.cookies = cookiejar_from_dict({}) #: The amount of time elapsed between sending the request #: and the arrival of the response (as a timedelta). #: This property specifically measures the time taken between sending #: the first byte of the request and finishing parsing the headers. It #: is therefore unaffected by consuming the response content or the #: value of the ``stream`` keyword argument. self.elapsed = datetime.timedelta(0) #: The :class:`PreparedRequest <PreparedRequest>` object to which this #: is a response. self.request = None def __getstate__(self): # Consume everything; accessing the content attribute makes # sure the content has been fully read. 
if not self._content_consumed: self.content return dict( (attr, getattr(self, attr, None)) for attr in self.__attrs__ ) def __setstate__(self, state): for name, value in state.items(): setattr(self, name, value) # pickled objects do not have .raw setattr(self, '_content_consumed', True) setattr(self, 'raw', None) def __repr__(self): return '<Response [%s]>' % (self.status_code) def __bool__(self): """Returns true if :attr:`status_code` is 'OK'.""" return self.ok def __nonzero__(self): """Returns true if :attr:`status_code` is 'OK'.""" return self.ok def __iter__(self): """Allows you to use a response as an iterator.""" return self.iter_content(128) @property def ok(self): try: self.raise_for_status() except HTTPError: return False return True @property def is_redirect(self): """True if this Response is a well-formed HTTP redirect that could have been processed automatically (by :meth:`Session.resolve_redirects`). """ return ('location' in self.headers and self.status_code in REDIRECT_STATI) @property def is_permanent_redirect(self): """True if this Response one of the permanant versions of redirect""" return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect)) @property def apparent_encoding(self): """The apparent encoding, provided by the chardet library""" return chardet.detect(self.content)['encoding'] def iter_content(self, chunk_size=1, decode_unicode=False): """Iterates over the response data. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. The chunk size is the number of bytes it should read into memory. This is not necessarily the length of each item returned as decoding can take place. If decode_unicode is True, content will be decoded using the best available encoding based on the response. """ def generate(): try: # Special case for urllib3. 
try: for chunk in self.raw.stream(chunk_size, decode_content=True): yield chunk except ProtocolError as e: raise ChunkedEncodingError(e) except DecodeError as e: raise ContentDecodingError(e) except ReadTimeoutError as e: raise ConnectionError(e) except AttributeError: # Standard file-like object. while True: chunk = self.raw.read(chunk_size) if not chunk: break yield chunk self._content_consumed = True if self._content_consumed and isinstance(self._content, bool): raise StreamConsumedError() # simulate reading small chunks of the content reused_chunks = iter_slices(self._content, chunk_size) stream_chunks = generate() chunks = reused_chunks if self._content_consumed else stream_chunks if decode_unicode: chunks = stream_decode_response_unicode(chunks, self) return chunks def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None): """Iterates over the response data, one line at a time. When stream=True is set on the request, this avoids reading the content at once into memory for large responses. .. note:: This method is not reentrant safe. """ pending = None for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode): if pending is not None: chunk = pending + chunk if delimiter: lines = chunk.split(delimiter) else: lines = chunk.splitlines() if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: pending = lines.pop() else: pending = None for line in lines: yield line if pending is not None: yield pending @property def content(self): """Content of the response, in bytes.""" if self._content is False: # Read the contents. 
try: if self._content_consumed: raise RuntimeError( 'The content for this response was already consumed') if self.status_code == 0: self._content = None else: self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes() except AttributeError: self._content = None self._content_consumed = True # don't need to release the connection; that's been handled by urllib3 # since we exhausted the data. return self._content @property def text(self): """Content of the response, in unicode. If Response.encoding is None, encoding will be guessed using ``chardet``. The encoding of the response content is determined based solely on HTTP headers, following RFC 2616 to the letter. If you can take advantage of non-HTTP knowledge to make a better guess at the encoding, you should set ``r.encoding`` appropriately before accessing this property. """ # Try charset from content-type content = None encoding = self.encoding if not self.content: return str('') # Fallback to auto-detected encoding. if self.encoding is None: encoding = self.apparent_encoding # Decode unicode from given encoding. try: content = str(self.content, encoding, errors='replace') except (LookupError, TypeError): # A LookupError is raised if the encoding was not found which could # indicate a misspelling or similar mistake. # # A TypeError can be raised if encoding is None # # So we try blindly encoding. content = str(self.content, errors='replace') return content def json(self, **kwargs): """Returns the json-encoded content of a response, if any. :param \*\*kwargs: Optional arguments that ``json.loads`` takes. """ if not self.encoding and len(self.content) > 3: # No encoding set. JSON RFC 4627 section 3 states we should expect # UTF-8, -16 or -32. Detect which one to use; If the detection or # decoding fails, fall back to `self.text` (using chardet to make # a best guess). 
encoding = guess_json_utf(self.content) if encoding is not None: try: return json.loads(self.content.decode(encoding), **kwargs) except UnicodeDecodeError: # Wrong UTF codec detected; usually because it's not UTF-8 # but some other 8-bit codec. This is an RFC violation, # and the server didn't bother to tell us what codec *was* # used. pass return json.loads(self.text, **kwargs) @property def links(self): """Returns the parsed header links of the response, if any.""" header = self.headers.get('link') # l = MultiDict() l = {} if header: links = parse_header_links(header) for link in links: key = link.get('rel') or link.get('url') l[key] = link return l def raise_for_status(self): """Raises stored :class:`HTTPError`, if one occurred.""" http_error_msg = '' if 400 <= self.status_code < 500: http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason) elif 500 <= self.status_code < 600: http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason) if http_error_msg: raise HTTPError(http_error_msg, response=self) def close(self): """Releases the connection back to the pool. Once this method has been called the underlying ``raw`` object must not be accessed again. *Note: Should not normally need to be called explicitly.* """ return self.raw.release_conn()
mit
lawrence34/python-social-auth
social/apps/django_app/views.py
76
2181
from django.conf import settings from django.contrib.auth import login, REDIRECT_FIELD_NAME from django.contrib.auth.decorators import login_required from django.views.decorators.csrf import csrf_exempt, csrf_protect from django.views.decorators.http import require_POST from django.views.decorators.cache import never_cache from social.utils import setting_name from social.actions import do_auth, do_complete, do_disconnect from social.apps.django_app.utils import psa NAMESPACE = getattr(settings, setting_name('URL_NAMESPACE'), None) or 'social' @never_cache @psa('{0}:complete'.format(NAMESPACE)) def auth(request, backend): return do_auth(request.backend, redirect_name=REDIRECT_FIELD_NAME) @never_cache @csrf_exempt @psa('{0}:complete'.format(NAMESPACE)) def complete(request, backend, *args, **kwargs): """Authentication complete view""" return do_complete(request.backend, _do_login, request.user, redirect_name=REDIRECT_FIELD_NAME, *args, **kwargs) @never_cache @login_required @psa() @require_POST @csrf_protect def disconnect(request, backend, association_id=None): """Disconnects given backend from current logged in user.""" return do_disconnect(request.backend, request.user, association_id, redirect_name=REDIRECT_FIELD_NAME) def _do_login(backend, user, social_user): user.backend = '{0}.{1}'.format(backend.__module__, backend.__class__.__name__) login(backend.strategy.request, user) if backend.setting('SESSION_EXPIRATION', False): # Set session expiration date if present and enabled # by setting. Use last social-auth instance for current # provider, users can associate several accounts with # a same provider. expiration = social_user.expiration_datetime() if expiration: try: backend.strategy.request.session.set_expiry( expiration.seconds + expiration.days * 86400 ) except OverflowError: # Handle django time zone overflow backend.strategy.request.session.set_expiry(None)
bsd-3-clause
renner/spacewalk
client/rhel/rhnlib/test/02-callbacks.py
36
1613
#!/usr/bin/python # # # import sys sys.path.append('..') from rhn.rpclib import Server, GETServer SERVER = "http://xmlrpc.rhn.redhat.com/XMLRPC" system_id_file = "/etc/sysconfig/rhn/systemid" try: SERVER = "http://%s/XMLRPC" % sys.argv[1] system_id_file = sys.argv[2] except: pass print "SERVER = %s" % SERVER print "system_id_file = %s" % system_id_file def refreshCallback(*args, **kwargs): print "Called refreshCallback, args %s, kwargs %s" % (args, kwargs) def progressCallback(*args, **kwargs): print "Called progressCallback, args %s, kwargs %s" % (args, kwargs) if __name__ == '__main__': sysid = open(system_id_file).read() s = Server(SERVER) s.set_refresh_callback(refreshCallback) s.set_progress_callback(progressCallback) dict = s.up2date.login(sysid) gs = GETServer(SERVER, headers=dict) gs.set_refresh_callback(refreshCallback) gs.set_progress_callback(progressCallback, 16384) channels = dict['X-RHN-Auth-Channels'] cn, cv = channels[0][:2] print "Calling listPackages" l = gs.listPackages(cn, cv) for p in l: if p[0] == 'kernel': package = p break else: raise Exception("Package not found") print "PACKAGE TO DOWNLOAD: %s %s %s %s" % (package[0], package[1], package[2], package[4]) filename = "%s-%s-%s.%s.rpm" % (package[0], package[1], package[2], package[4]) print "Calling getPackages" fd = gs.getPackage(cn, filename) data_name = "/tmp/foobar" data = open(data_name, "w+").write(fd.read()) print "PACKAGE DOWNLOADED AS: %s" % data_name
gpl-2.0
tarthy6/dozer-thesis
examples/old/concrete/uniax-post.py
5
1677
#!/usr/bin/python # -*- coding: utf-8 -*- # # demonstration of the woo.post2d module (see its documentation for details) # from woo import post2d import pylab # the matlab-like interface of matplotlib loadFile='/tmp/uniax-tension.woo.gz' if not os.path.exists(loadFile): raise RuntimeError("Run uniax.py first so that %s is created"%loadFile) O.load(loadFile) # flattener that project to the xz plane flattener=post2d.AxisFlatten(useRef=False,axis=1) # return scalar given a Body instance extractDmg=lambda b: b.state.normDmg # will call flattener.planar implicitly # the same as: extractVelocity=lambda b: flattener.planar(b,b.state['vel']) extractVelocity=lambda b: b.state.vel # create new figure pylab.figure() # plot raw damage post2d.plot(post2d.data(extractDmg,flattener)) pylab.suptitle('damage') # plot smooth damage into new figure pylab.figure(); ax,map=post2d.plot(post2d.data(extractDmg,flattener,stDev=2e-3)) pylab.suptitle('smooth damage') # show color scale pylab.colorbar(map,orientation='horizontal') # shear stress pylab.figure() post2d.plot(post2d.data(lambda b: b.state.sigma,flattener)) pylab.suptitle('sigma') pylab.figure() post2d.plot(post2d.data(lambda b: b.state.tau,flattener,stDev=2e-3)) pylab.suptitle('smooth tau (in grid)') # raw velocity (vector field) plot pylab.figure(); post2d.plot(post2d.data(extractVelocity,flattener)) pylab.suptitle('velocity') # smooth velocity plot; data are sampled at regular grid pylab.figure(); ax,map=post2d.plot(post2d.data(extractVelocity,flattener,stDev=1e-3)) pylab.suptitle('smooth velocity') # save last (current) figure to file pylab.gcf().savefig('/tmp/foo.png') # show the figures pylab.show()
gpl-2.0
silveringsea/urllib3
urllib3/connectionpool.py
62
31251
import errno import logging import sys import warnings from socket import error as SocketError, timeout as SocketTimeout import socket try: # Python 3 from queue import LifoQueue, Empty, Full except ImportError: from Queue import LifoQueue, Empty, Full import Queue as _ # Platform-specific: Windows from .exceptions import ( ClosedPoolError, ProtocolError, EmptyPoolError, HeaderParsingError, HostChangedError, LocationValueError, MaxRetryError, ProxyError, ConnectTimeoutError, ReadTimeoutError, SSLError, TimeoutError, InsecureRequestWarning, NewConnectionError, ) from .packages.ssl_match_hostname import CertificateError from .packages import six from .connection import ( port_by_scheme, DummyConnection, HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, HTTPException, BaseSSLError, ConnectionError ) from .request import RequestMethods from .response import HTTPResponse from .util.connection import is_connection_dropped from .util.response import assert_header_parsing from .util.retry import Retry from .util.timeout import Timeout from .util.url import get_host, Url xrange = six.moves.xrange log = logging.getLogger(__name__) _Default = object() ## Pool objects class ConnectionPool(object): """ Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. """ scheme = None QueueCls = LifoQueue def __init__(self, host, port=None): if not host: raise LocationValueError("No host specified.") # httplib doesn't like it when we include brackets in ipv6 addresses self.host = host.strip('[]') self.port = port def __str__(self): return '%s(host=%r, port=%r)' % (type(self).__name__, self.host, self.port) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() # Return False to re-raise any potential exceptions return False def close(): """ Close all pooled connections and disable the pool. 
""" pass # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK]) class HTTPConnectionPool(ConnectionPool, RequestMethods): """ Thread-safe connection pool for one host. :param host: Host used for this HTTP Connection (e.g. "localhost"), passed into :class:`httplib.HTTPConnection`. :param port: Port used for this HTTP Connection (None is equivalent to 80), passed into :class:`httplib.HTTPConnection`. :param strict: Causes BadStatusLine to be raised if the status line can't be parsed as a valid HTTP/1.0 or 1.1 status line, passed into :class:`httplib.HTTPConnection`. .. note:: Only works in Python 2. This parameter is ignored in Python 3. :param timeout: Socket timeout in seconds for each individual connection. This can be a float or integer, which sets the timeout for the HTTP request, or an instance of :class:`urllib3.util.Timeout` which gives you more fine-grained control over request timeouts. After the constructor has been parsed, this is always a `urllib3.util.Timeout` object. :param maxsize: Number of connections to save that can be reused. More than 1 is useful in multithreaded situations. If ``block`` is set to False, more connections will be created but they will not be saved once they've been used. :param block: If set to True, no more than ``maxsize`` connections will be used at a time. When no free connections are available, the call will block until a connection has been released. This is a useful side effect for particular multithreaded situations where one does not want to use more than maxsize connections per host to prevent flooding. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param retries: Retry configuration to use by default with requests in this pool. 
:param _proxy: Parsed proxy URL, should not be used directly, instead, see :class:`urllib3.connectionpool.ProxyManager`" :param _proxy_headers: A dictionary with proxy headers, should not be used directly, instead, see :class:`urllib3.connectionpool.ProxyManager`" :param \**conn_kw: Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, :class:`urllib3.connection.HTTPSConnection` instances. """ scheme = 'http' ConnectionCls = HTTPConnection def __init__(self, host, port=None, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, headers=None, retries=None, _proxy=None, _proxy_headers=None, **conn_kw): ConnectionPool.__init__(self, host, port) RequestMethods.__init__(self, headers) self.strict = strict if not isinstance(timeout, Timeout): timeout = Timeout.from_float(timeout) if retries is None: retries = Retry.DEFAULT self.timeout = timeout self.retries = retries self.pool = self.QueueCls(maxsize) self.block = block self.proxy = _proxy self.proxy_headers = _proxy_headers or {} # Fill the queue up so that doing get() on it will block properly for _ in xrange(maxsize): self.pool.put(None) # These are mostly for testing and debugging purposes. self.num_connections = 0 self.num_requests = 0 self.conn_kw = conn_kw if self.proxy: # Enable Nagle's algorithm for proxies, to avoid packet fragmentation. # We cannot know if the user has added default socket options, so we cannot replace the # list. self.conn_kw.setdefault('socket_options', []) def _new_conn(self): """ Return a fresh :class:`HTTPConnection`. """ self.num_connections += 1 log.info("Starting new HTTP connection (%d): %s" % (self.num_connections, self.host)) conn = self.ConnectionCls(host=self.host, port=self.port, timeout=self.timeout.connect_timeout, strict=self.strict, **self.conn_kw) return conn def _get_conn(self, timeout=None): """ Get a connection. Will return a pooled connection if one is available. 
If no connections are available and :prop:`.block` is ``False``, then a fresh connection is returned. :param timeout: Seconds to wait before giving up and raising :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and :prop:`.block` is ``True``. """ conn = None try: conn = self.pool.get(block=self.block, timeout=timeout) except AttributeError: # self.pool is None raise ClosedPoolError(self, "Pool is closed.") except Empty: if self.block: raise EmptyPoolError(self, "Pool reached maximum size and no more " "connections are allowed.") pass # Oh well, we'll create a new connection then # If this is a persistent connection, check if it got disconnected if conn and is_connection_dropped(conn): log.info("Resetting dropped connection: %s" % self.host) conn.close() if getattr(conn, 'auto_open', 1) == 0: # This is a proxied connection that has been mutated by # httplib._tunnel() and cannot be reused (since it would # attempt to bypass the proxy) conn = None return conn or self._new_conn() def _put_conn(self, conn): """ Put a connection back into the pool. :param conn: Connection object for the current host and port as returned by :meth:`._new_conn` or :meth:`._get_conn`. If the pool is already full, the connection is closed and discarded because we exceeded maxsize. If connections are discarded frequently, then maxsize should be increased. If the pool is closed, then the connection will be closed and discarded. """ try: self.pool.put(conn, block=False) return # Everything is dandy, done. except AttributeError: # self.pool is None. pass except Full: # This should never happen if self.block == True log.warning( "Connection pool is full, discarding connection: %s" % self.host) # Connection never got put back into the pool, close it. if conn: conn.close() def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ pass def _prepare_proxy(self, conn): # Nothing to do for HTTP connections. 
pass def _get_timeout(self, timeout): """ Helper that always returns a :class:`urllib3.util.Timeout` """ if timeout is _Default: return self.timeout.clone() if isinstance(timeout, Timeout): return timeout.clone() else: # User passed us an int/float. This is for backwards compatibility, # can be removed later return Timeout.from_float(timeout) def _raise_timeout(self, err, url, timeout_value): """Is the error actually a timeout? Will raise a ReadTimeout or pass""" if isinstance(err, SocketTimeout): raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) # See the above comment about EAGAIN in Python 3. In Python 2 we have # to specifically catch it and throw the timeout error if hasattr(err, 'errno') and err.errno in _blocking_errnos: raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) # Catch possible read timeouts thrown as SSL errors. If not the # case, rethrow the original. We need to do this because of: # http://bugs.python.org/issue10272 if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6 raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value) def _make_request(self, conn, method, url, timeout=_Default, **httplib_request_kw): """ Perform a request on a given urllib connection object taken from our pool. :param conn: a connection from one of our connection pools :param timeout: Socket timeout in seconds for the request. This can be a float or integer, which will set the same timeout value for the socket connect and the socket read, or an instance of :class:`urllib3.util.Timeout`, which gives you more fine-grained control over your timeouts. """ self.num_requests += 1 timeout_obj = self._get_timeout(timeout) timeout_obj.start_connect() conn.timeout = timeout_obj.connect_timeout # Trigger any extra validation we need to do. 
try: self._validate_conn(conn) except (SocketTimeout, BaseSSLError) as e: # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout. self._raise_timeout(err=e, url=url, timeout_value=conn.timeout) raise # conn.request() calls httplib.*.request, not the method in # urllib3.request. It also calls makefile (recv) on the socket. conn.request(method, url, **httplib_request_kw) # Reset the timeout for the recv() on the socket read_timeout = timeout_obj.read_timeout # App Engine doesn't have a sock attr if getattr(conn, 'sock', None): # In Python 3 socket.py will catch EAGAIN and return None when you # try and read into the file pointer created by http.client, which # instead raises a BadStatusLine exception. Instead of catching # the exception and assuming all BadStatusLine exceptions are read # timeouts, check for a zero timeout before making the request. if read_timeout == 0: raise ReadTimeoutError( self, url, "Read timed out. (read timeout=%s)" % read_timeout) if read_timeout is Timeout.DEFAULT_TIMEOUT: conn.sock.settimeout(socket.getdefaulttimeout()) else: # None or a value conn.sock.settimeout(read_timeout) # Receive the response from the server try: try: # Python 2.7, use buffering of HTTP responses httplib_response = conn.getresponse(buffering=True) except TypeError: # Python 2.6 and older httplib_response = conn.getresponse() except (SocketTimeout, BaseSSLError, SocketError) as e: self._raise_timeout(err=e, url=url, timeout_value=read_timeout) raise # AppEngine doesn't have a version attr. 
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?') log.debug("\"%s %s %s\" %s %s" % (method, url, http_version, httplib_response.status, httplib_response.length)) try: assert_header_parsing(httplib_response.msg) except HeaderParsingError as hpe: # Platform-specific: Python 3 log.warning( 'Failed to parse headers (url=%s): %s', self._absolute_url(url), hpe, exc_info=True) return httplib_response def _absolute_url(self, path): return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url def close(self): """ Close all pooled connections and disable the pool. """ # Disable access to the pool old_pool, self.pool = self.pool, None try: while True: conn = old_pool.get(block=False) if conn: conn.close() except Empty: pass # Done. def is_same_host(self, url): """ Check if the given ``url`` is a member of the same host as this connection pool. """ if url.startswith('/'): return True # TODO: Add optional support for socket.gethostbyname checking. scheme, host, port = get_host(url) # Use explicit default port for comparison when none is given if self.port and not port: port = port_by_scheme.get(scheme) elif not self.port and port == port_by_scheme.get(scheme): port = None return (scheme, host, port) == (self.scheme, self.host, self.port) def urlopen(self, method, url, body=None, headers=None, retries=None, redirect=True, assert_same_host=True, timeout=_Default, pool_timeout=None, release_conn=None, **response_kw): """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all the raw details. .. note:: More commonly, it's appropriate to use a convenience method provided by :class:`.RequestMethods`, such as :meth:`request`. .. note:: `release_conn` will only behave as expected if `preload_content=False` because we want to make `preload_content=False` the default behaviour someday soon without breaking backwards compatibility. 
:param method: HTTP request method (such as GET, POST, PUT, etc.) :param body: Data to send in the request body (useful for creating POST requests, see HTTPConnectionPool.post_url for more convenience). :param headers: Dictionary of custom headers to send, such as User-Agent, If-None-Match, etc. If None, pool headers are used. If provided, these headers completely replace any pool-specific headers. :param retries: Configure the number of retries to allow before raising a :class:`~urllib3.exceptions.MaxRetryError` exception. Pass ``None`` to retry until you receive a response. Pass a :class:`~urllib3.util.retry.Retry` object for fine-grained control over different types of retries. Pass an integer number to retry connection errors that many times, but no other types of errors. Pass zero to never retry. If ``False``, then retries are disabled and any exception is raised immediately. Also, instead of raising a MaxRetryError on redirects, the redirect response will be returned. :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int. :param redirect: If True, automatically handle redirects (status codes 301, 302, 303, 307, 308). Each redirect counts as a retry. Disabling retries will disable redirect, too. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is consistent else will raise HostChangedError. When False, you can use the pool on an HTTP proxy and request foreign hosts. :param timeout: If specified, overrides the default timeout for this one request. It may be a float (in seconds) or an instance of :class:`urllib3.util.Timeout`. :param pool_timeout: If set and the pool is set to block=True, then this method will block for ``pool_timeout`` seconds and raise EmptyPoolError if no connection is available within the time period. 
:param release_conn: If False, then the urlopen call will not release the connection back into the pool once a response is received (but will release if you read the entire contents of the response such as when `preload_content=True`). This is useful if you're not preloading the response's content immediately. You will need to call ``r.release_conn()`` on the response ``r`` to return the connection back into the pool. If None, it takes the value of ``response_kw.get('preload_content', True)``. :param \**response_kw: Additional parameters are passed to :meth:`urllib3.response.HTTPResponse.from_httplib` """ if headers is None: headers = self.headers if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect, default=self.retries) if release_conn is None: release_conn = response_kw.get('preload_content', True) # Check host if assert_same_host and not self.is_same_host(url): raise HostChangedError(self, url, retries) conn = None # Merge the proxy headers. Only do this in HTTP. We have to copy the # headers dict so we can safely change it without those changes being # reflected in anyone else's copy. if self.scheme == 'http': headers = headers.copy() headers.update(self.proxy_headers) # Must keep the exception bound to a separate variable or else Python 3 # complains about UnboundLocalError. err = None try: # Request a connection from the queue. timeout_obj = self._get_timeout(timeout) conn = self._get_conn(timeout=pool_timeout) conn.timeout = timeout_obj.connect_timeout is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None) if is_new_proxy_conn: self._prepare_proxy(conn) # Make the request on the httplib connection object. httplib_response = self._make_request(conn, method, url, timeout=timeout_obj, body=body, headers=headers) # If we're going to release the connection in ``finally:``, then # the request doesn't need to know about the connection. 
Otherwise # it will also try to release it and we'll have a double-release # mess. response_conn = not release_conn and conn # Import httplib's response into our own wrapper object response = HTTPResponse.from_httplib(httplib_response, pool=self, connection=response_conn, **response_kw) # else: # The connection will be put back into the pool when # ``response.release_conn()`` is called (implicitly by # ``response.read()``) except Empty: # Timed out by queue. raise EmptyPoolError(self, "No pool connections are available.") except (BaseSSLError, CertificateError) as e: # Close the connection. If a connection is reused on which there # was a Certificate error, the next request will certainly raise # another Certificate error. conn = conn and conn.close() release_conn = True raise SSLError(e) except SSLError: # Treat SSLError separately from BaseSSLError to preserve # traceback. conn = conn and conn.close() release_conn = True raise except (TimeoutError, HTTPException, SocketError, ProtocolError) as e: # Discard the connection for these exceptions. It will be # be replaced during the next _get_conn() call. conn = conn and conn.close() release_conn = True if isinstance(e, (SocketError, NewConnectionError)) and self.proxy: e = ProxyError('Cannot connect to proxy.', e) elif isinstance(e, (SocketError, HTTPException)): e = ProtocolError('Connection aborted.', e) retries = retries.increment(method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]) retries.sleep() # Keep track of the error for the retry warning. err = e finally: if release_conn: # Put the connection back to be reused. If the connection is # expired then it will be None, which will get replaced with a # fresh connection during _get_conn. 
self._put_conn(conn) if not conn: # Try again log.warning("Retrying (%r) after connection " "broken by '%r': %s" % (retries, err, url)) return self.urlopen(method, url, body, headers, retries, redirect, assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, **response_kw) # Handle redirect? redirect_location = redirect and response.get_redirect_location() if redirect_location: if response.status == 303: method = 'GET' try: retries = retries.increment(method, url, response=response, _pool=self) except MaxRetryError: if retries.raise_on_redirect: # Release the connection for this response, since we're not # returning it to be released manually. response.release_conn() raise return response log.info("Redirecting %s -> %s" % (url, redirect_location)) return self.urlopen(method, redirect_location, body, headers, retries=retries, redirect=redirect, assert_same_host=assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, **response_kw) # Check if we should retry the HTTP response. if retries.is_forced_retry(method, status_code=response.status): retries = retries.increment(method, url, response=response, _pool=self) retries.sleep() log.info("Forced retry: %s" % url) return self.urlopen(method, url, body, headers, retries=retries, redirect=redirect, assert_same_host=assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, **response_kw) return response class HTTPSConnectionPool(HTTPConnectionPool): """ Same as :class:`.HTTPConnectionPool`, but HTTPS. When Python is compiled with the :mod:`ssl` module, then :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates, instead of :class:`.HTTPSConnection`. :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``, ``assert_hostname`` and ``host`` in this order to verify connections. If ``assert_hostname`` is False, no verification is done. 
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket into an SSL socket. """ scheme = 'https' ConnectionCls = HTTPSConnection def __init__(self, host, port=None, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, headers=None, retries=None, _proxy=None, _proxy_headers=None, key_file=None, cert_file=None, cert_reqs=None, ca_certs=None, ssl_version=None, assert_hostname=None, assert_fingerprint=None, ca_cert_dir=None, **conn_kw): HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, block, headers, retries, _proxy, _proxy_headers, **conn_kw) if ca_certs and cert_reqs is None: cert_reqs = 'CERT_REQUIRED' self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.ca_certs = ca_certs self.ca_cert_dir = ca_cert_dir self.ssl_version = ssl_version self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint def _prepare_conn(self, conn): """ Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` and establish the tunnel if proxy is used. """ if isinstance(conn, VerifiedHTTPSConnection): conn.set_cert(key_file=self.key_file, cert_file=self.cert_file, cert_reqs=self.cert_reqs, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, assert_hostname=self.assert_hostname, assert_fingerprint=self.assert_fingerprint) conn.ssl_version = self.ssl_version return conn def _prepare_proxy(self, conn): """ Establish tunnel connection early, because otherwise httplib would improperly set Host: header to proxy's IP:port. 
""" # Python 2.7+ try: set_tunnel = conn.set_tunnel except AttributeError: # Platform-specific: Python 2.6 set_tunnel = conn._set_tunnel if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older set_tunnel(self.host, self.port) else: set_tunnel(self.host, self.port, self.proxy_headers) conn.connect() def _new_conn(self): """ Return a fresh :class:`httplib.HTTPSConnection`. """ self.num_connections += 1 log.info("Starting new HTTPS connection (%d): %s" % (self.num_connections, self.host)) if not self.ConnectionCls or self.ConnectionCls is DummyConnection: raise SSLError("Can't connect to HTTPS URL because the SSL " "module is not available.") actual_host = self.host actual_port = self.port if self.proxy is not None: actual_host = self.proxy.host actual_port = self.proxy.port conn = self.ConnectionCls(host=actual_host, port=actual_port, timeout=self.timeout.connect_timeout, strict=self.strict, **self.conn_kw) return self._prepare_conn(conn) def _validate_conn(self, conn): """ Called right before a request is made, after the socket is created. """ super(HTTPSConnectionPool, self)._validate_conn(conn) # Force connect early to allow us to validate the connection. if not getattr(conn, 'sock', None): # AppEngine might not have `.sock` conn.connect() if not conn.is_verified: warnings.warn(( 'Unverified HTTPS request is being made. ' 'Adding certificate verification is strongly advised. See: ' 'https://urllib3.readthedocs.org/en/latest/security.html'), InsecureRequestWarning) def connection_from_url(url, **kw): """ Given a url, return an :class:`.ConnectionPool` instance of its host. This is a shortcut for not having to parse out the scheme, host, and port of the url before creating an :class:`.ConnectionPool` instance. :param url: Absolute URL string that must include the scheme. Port is optional. :param \**kw: Passes additional parameters to the constructor of the appropriate :class:`.ConnectionPool`. 
Useful for specifying things like timeout, maxsize, headers, etc. Example:: >>> conn = connection_from_url('http://google.com/') >>> r = conn.request('GET', '/') """ scheme, host, port = get_host(url) if scheme == 'https': return HTTPSConnectionPool(host, port=port, **kw) else: return HTTPConnectionPool(host, port=port, **kw)
mit
sungkim11/mhargadh
django/contrib/gis/tests/relatedapp/tests.py
27
14331
from datetime import date from django.test import TestCase from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint from django.contrib.gis.db.models import Collect, Count, Extent, F, Union from django.contrib.gis.geometry.backend import Geometry from django.contrib.gis.tests.utils import mysql, oracle, no_mysql, no_oracle, no_spatialite from models import City, Location, DirectoryEntry, Parcel, Book, Author, Article class RelatedGeoModelTest(TestCase): def test02_select_related(self): "Testing `select_related` on geographic models (see #7126)." qs1 = City.objects.all() qs2 = City.objects.select_related() qs3 = City.objects.select_related('location') # Reference data for what's in the fixtures. cities = ( ('Aurora', 'TX', -97.516111, 33.058333), ('Roswell', 'NM', -104.528056, 33.387222), ('Kecksburg', 'PA', -79.460734, 40.18476), ) for qs in (qs1, qs2, qs3): for ref, c in zip(cities, qs): nm, st, lon, lat = ref self.assertEqual(nm, c.name) self.assertEqual(st, c.state) self.assertEqual(Point(lon, lat), c.location.point) @no_mysql def test03_transform_related(self): "Testing the `transform` GeoQuerySet method on related geographic models." # All the transformations are to state plane coordinate systems using # US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot). tol = 0 def check_pnt(ref, pnt): self.assertAlmostEqual(ref.x, pnt.x, tol) self.assertAlmostEqual(ref.y, pnt.y, tol) self.assertEqual(ref.srid, pnt.srid) # Each city transformed to the SRID of their state plane coordinate system. transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'), ('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'), ('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'), ) for name, srid, wkt in transformed: # Doing this implicitly sets `select_related` select the location. # TODO: Fix why this breaks on Oracle. 
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point')) check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point) @no_mysql @no_spatialite def test04a_related_extent_aggregate(self): "Testing the `extent` GeoQuerySet aggregates on related geographic models." # This combines the Extent and Union aggregates into one query aggs = City.objects.aggregate(Extent('location__point')) # One for all locations, one that excludes New Mexico (Roswell). all_extent = (-104.528056, 29.763374, -79.460734, 40.18476) txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476) e1 = City.objects.extent(field_name='location__point') e2 = City.objects.exclude(state='NM').extent(field_name='location__point') e3 = aggs['location__point__extent'] # The tolerance value is to four decimal places because of differences # between the Oracle and PostGIS spatial backends on the extent calculation. tol = 4 for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]: for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol) @no_mysql def test04b_related_union_aggregate(self): "Testing the `unionagg` GeoQuerySet aggregates on related geographic models." # This combines the Extent and Union aggregates into one query aggs = City.objects.aggregate(Union('location__point')) # These are the points that are components of the aggregate geographic # union that is returned. Each point # corresponds to City PK. p1 = Point(-104.528056, 33.387222) p2 = Point(-97.516111, 33.058333) p3 = Point(-79.460734, 40.18476) p4 = Point(-96.801611, 32.782057) p5 = Point(-95.363151, 29.763374) # Creating the reference union geometry depending on the spatial backend, # as Oracle will have a different internal ordering of the component # geometries than PostGIS. The second union aggregate is for a union # query that includes limiting information in the WHERE clause (in other # words a `.filter()` precedes the call to `.unionagg()`). 
if oracle: ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326) ref_u2 = MultiPoint(p3, p2, srid=4326) else: # Looks like PostGIS points by longitude value. ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326) ref_u2 = MultiPoint(p2, p3, srid=4326) u1 = City.objects.unionagg(field_name='location__point') u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point') u3 = aggs['location__point__union'] self.assertEqual(ref_u1, u1) self.assertEqual(ref_u2, u2) self.assertEqual(ref_u1, u3) def test05_select_related_fk_to_subclass(self): "Testing that calling select_related on a query over a model with an FK to a model subclass works" # Regression test for #9752. l = list(DirectoryEntry.objects.all().select_related()) def test06_f_expressions(self): "Testing F() expressions on GeometryFields." # Constructing a dummy parcel border and getting the City instance for # assigning the FK. b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326) pcity = City.objects.get(name='Aurora') # First parcel has incorrect center point that is equal to the City; # it also has a second border that is different from the first as a # 100ft buffer around the City. c1 = pcity.location.point c2 = c1.transform(2276, clone=True) b2 = c2.buffer(100) p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2) # Now creating a second Parcel where the borders are the same, just # in different coordinate systems. The center points are also the # the same (but in different coordinate systems), and this time they # actually correspond to the centroid of the border. c1 = b1.centroid c2 = c1.transform(2276, clone=True) p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1) # Should return the second Parcel, which has the center within the # border. 
qs = Parcel.objects.filter(center1__within=F('border1')) self.assertEqual(1, len(qs)) self.assertEqual('P2', qs[0].name) if not mysql: # This time center2 is in a different coordinate system and needs # to be wrapped in transformation SQL. qs = Parcel.objects.filter(center2__within=F('border1')) self.assertEqual(1, len(qs)) self.assertEqual('P2', qs[0].name) # Should return the first Parcel, which has the center point equal # to the point in the City ForeignKey. qs = Parcel.objects.filter(center1=F('city__location__point')) self.assertEqual(1, len(qs)) self.assertEqual('P1', qs[0].name) if not mysql: # This time the city column should be wrapped in transformation SQL. qs = Parcel.objects.filter(border2__contains=F('city__location__point')) self.assertEqual(1, len(qs)) self.assertEqual('P1', qs[0].name) def test07_values(self): "Testing values() and values_list() and GeoQuerySets." # GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively. gqs = Location.objects.all() gvqs = Location.objects.values() gvlqs = Location.objects.values_list() # Incrementing through each of the models, dictionaries, and tuples # returned by the different types of GeoQuerySets. for m, d, t in zip(gqs, gvqs, gvlqs): # The values should be Geometry objects and not raw strings returned # by the spatial database. self.assertTrue(isinstance(d['point'], Geometry)) self.assertTrue(isinstance(t[1], Geometry)) self.assertEqual(m.point, d['point']) self.assertEqual(m.point, t[1]) def test08_defer_only(self): "Testing defer() and only() on Geographic models." qs = Location.objects.all() def_qs = Location.objects.defer('point') for loc, def_loc in zip(qs, def_qs): self.assertEqual(loc.point, def_loc.point) def test09_pk_relations(self): "Ensuring correct primary key column is selected across relations. See #10757." # The expected ID values -- notice the last two location IDs # are out of order. 
Dallas and Houston have location IDs that differ # from their PKs -- this is done to ensure that the related location # ID column is selected instead of ID column for the city. city_ids = (1, 2, 3, 4, 5) loc_ids = (1, 2, 3, 5, 4) ids_qs = City.objects.order_by('id').values('id', 'location__id') for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids): self.assertEqual(val_dict['id'], c_id) self.assertEqual(val_dict['location__id'], l_id) def test10_combine(self): "Testing the combination of two GeoQuerySets. See #10807." buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1) buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1) qs1 = City.objects.filter(location__point__within=buf1) qs2 = City.objects.filter(location__point__within=buf2) combined = qs1 | qs2 names = [c.name for c in combined] self.assertEqual(2, len(names)) self.assertTrue('Aurora' in names) self.assertTrue('Kecksburg' in names) def test11_geoquery_pickle(self): "Ensuring GeoQuery objects are unpickled correctly. See #10839." import pickle from django.contrib.gis.db.models.sql import GeoQuery qs = City.objects.all() q_str = pickle.dumps(qs.query) q = pickle.loads(q_str) self.assertEqual(GeoQuery, q.__class__) # TODO: fix on Oracle -- get the following error because the SQL is ordered # by a geometry object, which Oracle apparently doesn't like: # ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type @no_oracle def test12a_count(self): "Testing `Count` aggregate use with the `GeoManager` on geo-fields." # The City, 'Fort Worth' uses the same location as Dallas. dallas = City.objects.get(name='Dallas') # Count annotation should be 2 for the Dallas location now. loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id) self.assertEqual(2, loc.num_cities) def test12b_count(self): "Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087." 
# Should only be one author (Trevor Paglen) returned by this query, and # the annotation should have 3 for the number of books, see #11087. # Also testing with a `GeoValuesQuerySet`, see #11489. qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1) vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1) self.assertEqual(1, len(qs)) self.assertEqual(3, qs[0].num_books) self.assertEqual(1, len(vqs)) self.assertEqual(3, vqs[0]['num_books']) # TODO: The phantom model does appear on Oracle. @no_oracle def test13_select_related_null_fk(self): "Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381." no_author = Book.objects.create(title='Without Author') b = Book.objects.select_related('author').get(title='Without Author') # Should be `None`, and not a 'dummy' model. self.assertEqual(None, b.author) @no_mysql @no_oracle @no_spatialite def test14_collect(self): "Testing the `collect` GeoQuerySet method and `Collect` aggregate." # Reference query: # SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN # "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id") # WHERE "relatedapp_city"."state" = 'TX'; ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)') c1 = City.objects.filter(state='TX').collect(field_name='location__point') c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect'] for coll in (c1, c2): # Even though Dallas and Ft. Worth share same point, Collect doesn't # consolidate -- that's why 4 points in MultiPoint. self.assertEqual(4, len(coll)) self.assertEqual(ref_geom, coll) def test15_invalid_select_related(self): "Testing doing select_related on the related name manager of a unique FK. See #13934." 
qs = Article.objects.select_related('author__article') # This triggers TypeError when `get_default_columns` has no `local_only` # keyword. The TypeError is swallowed if QuerySet is actually # evaluated as list generation swallows TypeError in CPython. sql = str(qs.query) def test16_annotated_date_queryset(self): "Ensure annotated date querysets work if spatial backend is used. See #14648." birth_years = [dt.year for dt in list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))] birth_years.sort() self.assertEqual([1950, 1974], birth_years) # TODO: Related tests for KML, GML, and distance lookups.
bsd-3-clause
rjsmethurst/starpy
fluxes.py
2
2701
import numpy as N import scipy as S from scipy.integrate import simps from scipy import interpolate import pylab as P """ Functions to calculate the colours at every time step in a defined SFH using a specified extracted SPS model. """ model = str(raw_input('Tell me again the location of the extracted (.ised_ASCII) SPS model to use to predict the u-r and NUV-u colours, e.g. ~/extracted_bc2003_lr_m62_chab_ssp.ised_ASCII :')) data = N.loadtxt(model) model_ages = data[0,1:] model_lambda = data[1:,0] model_fluxes = data[1:,1:] time = N.arange(0, 0.01, 0.003) t = N.linspace(0,14.0,100) time_steps = N.append(time, t[1:])*1E9 #First mask the ages of the very young stars hidden in birth clouds mask = model_ages[model_ages<4E6] model_fluxes[:,0:len(mask)] = 0.0 # Calculate the fluxes at the ages specified by the time steps rather than in the models using numpy/scipy array manipulations rather than a for loop f = interpolate.interp2d(model_ages, model_lambda, model_fluxes) interp_fluxes_sim = f(time_steps, model_lambda) def assign_total_flux(model_ages, model_lambda, model_fluxes, time_steps, sim_SFR): ## Produce the array to keep track of the ages of the fractional SFR at each time step frac_sfr = sim_SFR/sim_SFR[0] fraction_array = S.linalg.toeplitz(frac_sfr, N.zeros_like(frac_sfr)).T # Produce the array to keep track of the ages of the mass fraction of stars formed at each time step m_array = (sim_SFR.T)*(N.append(1, N.diff(time_steps))) mass_array = S.linalg.toeplitz(m_array, N.zeros_like(frac_sfr)).T # Produce the array to keep track of the fraction of flux produced at each timestep frac_flux_array = fraction_array*mass_array # Calculate the total flux contributed by all of the fractions at each time step by summing across all wavelength values flux_lambda = frac_flux_array*(N.split(interp_fluxes_sim.T, len(model_lambda), axis=1)) total_flux = (N.sum(flux_lambda, axis=1)).T # Array of dimensions (len(timesteps), len(model_lambda)) return total_flux def 
calculate_AB_mag(time_steps, model_lambda, sim_flux, wave, trans): lambda_filter1 = [i for i in model_lambda if i > wave[0] and i < wave[len(wave)-1]] lambda_filter2 = N.append(wave[0], lambda_filter1) lambda_filter = N.append(lambda_filter2, wave[len(wave)-1]) f = interpolate.interp2d(model_lambda, time_steps, sim_flux) flux_filter = (f(lambda_filter, time_steps)) trans_filter = N.interp(lambda_filter, wave, trans) top = N.trapz((lambda_filter*flux_filter*trans_filter), lambda_filter, axis=1) bottom = N.trapz(trans_filter/lambda_filter, lambda_filter) m_ab = -2.41 - 2.5*N.log10(top/bottom) return m_ab
apache-2.0
ProjectSWGCore/NGECore2
scripts/mobiles/talus/sleemo_delinquent.py
2
1533
import sys

from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector


def addTemplate(core):
    """Build and register the 'sleemo_delinquent' NPC template.

    Called by the server's script loader with the core service registry;
    registers the finished template with core.spawnService under the key
    'sleemo_delinquent' and returns nothing.
    """
    mobileTemplate = MobileTemplate()
    # Basic creature identity and combat tuning.
    mobileTemplate.setCreatureName('sleemo_delinquent')
    mobileTemplate.setLevel(4)
    mobileTemplate.setDifficulty(Difficulty.NORMAL)
    # Spawn placement: 4-8 m from the spawner location.
    mobileTemplate.setMinSpawnDistance(4)
    mobileTemplate.setMaxSpawnDistance(8)
    mobileTemplate.setDeathblow(False)
    mobileTemplate.setScale(1)
    # Members of the same social group assist each other within 6 m.
    mobileTemplate.setSocialGroup("sleemo gang")
    mobileTemplate.setAssistRange(6)
    mobileTemplate.setStalker(True)
    # NOTE(review): 128 is presumably a flag from Options (e.g. attackable);
    # confirm against resources.datatables.Options before changing.
    mobileTemplate.setOptionsBitmask(128)

    # Visual appearance: single Rodian mercenary model.
    templates = Vector()
    templates.add('object/mobile/shared_dressed_talus_sif_mercenary_rodian_01.iff')
    mobileTemplate.setTemplates(templates)

    # Weapon loadout: one E-11 carbine dealing energy damage.
    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_e11.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)

    # No special attacks; only the default ranged shot.
    attacks = Vector()
    mobileTemplate.setDefaultAttack('rangedShot')
    mobileTemplate.setAttacks(attacks)

    # Loot: always (100%) roll once on the 'Junk' pool.
    lootPoolNames_1 = ['Junk']
    lootPoolChances_1 = [100]
    lootGroupChance_1 = 100
    mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)

    core.spawnService.addMobileTemplate('sleemo_delinquent', mobileTemplate)
    return
lgpl-3.0
davidobrien1985/ansible-modules-extras
database/vertica/vertica_facts.py
148
9176
#!/usr/bin/python # -*- coding: utf-8 -*- # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = """ --- module: vertica_facts version_added: '2.0' short_description: Gathers Vertica database facts. description: - Gathers Vertica database facts. options: cluster: description: - Name of the cluster running the schema. required: false default: localhost port: description: Database port to connect to. required: false default: 5433 db: description: - Name of the database running the schema. required: false default: null login_user: description: - The username used to authenticate with. required: false default: dbadmin login_password: description: - The password used to authenticate with. required: false default: null notes: - The default authentication assumes that you are either logging in as or sudo'ing to the C(dbadmin) account on the host. - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure that C(unixODBC) and C(pyodbc) is installed on the host and properly configured. - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so) to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini) and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16) to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini). 
requirements: [ 'unixODBC', 'pyodbc' ] author: "Dariusz Owczarek (@dareko)" """ EXAMPLES = """ - name: gathering vertica facts vertica_facts: db=db_name """ try: import pyodbc except ImportError: pyodbc_found = False else: pyodbc_found = True class NotSupportedError(Exception): pass # module specific functions def get_schema_facts(cursor, schema=''): facts = {} cursor.execute(""" select schema_name, schema_owner, create_time from schemata where not is_system_schema and schema_name not in ('public') and (? = '' or schema_name ilike ?) """, schema, schema) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: facts[row.schema_name.lower()] = { 'name': row.schema_name, 'owner': row.schema_owner, 'create_time': str(row.create_time), 'usage_roles': [], 'create_roles': []} cursor.execute(""" select g.object_name as schema_name, r.name as role_name, lower(g.privileges_description) privileges_description from roles r join grants g on g.grantee = r.name and g.object_type='SCHEMA' and g.privileges_description like '%USAGE%' and g.grantee not in ('public', 'dbadmin') and (? = '' or g.object_name ilike ?) """, schema, schema) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: schema_key = row.schema_name.lower() if 'create' in row.privileges_description: facts[schema_key]['create_roles'].append(row.role_name) else: facts[schema_key]['usage_roles'].append(row.role_name) return facts def get_user_facts(cursor, user=''): facts = {} cursor.execute(""" select u.user_name, u.is_locked, u.lock_time, p.password, p.acctexpired as is_expired, u.profile_name, u.resource_pool, u.all_roles, u.default_roles from users u join password_auditor p on p.user_id = u.user_id where not u.is_super_user and (? = '' or u.user_name ilike ?) 
""", user, user) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: user_key = row.user_name.lower() facts[user_key] = { 'name': row.user_name, 'locked': str(row.is_locked), 'password': row.password, 'expired': str(row.is_expired), 'profile': row.profile_name, 'resource_pool': row.resource_pool, 'roles': [], 'default_roles': []} if row.is_locked: facts[user_key]['locked_time'] = str(row.lock_time) if row.all_roles: facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',') if row.default_roles: facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',') return facts def get_role_facts(cursor, role=''): facts = {} cursor.execute(""" select r.name, r.assigned_roles from roles r where (? = '' or r.name ilike ?) """, role, role) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: role_key = row.name.lower() facts[role_key] = { 'name': row.name, 'assigned_roles': []} if row.assigned_roles: facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',') return facts def get_configuration_facts(cursor, parameter=''): facts = {} cursor.execute(""" select c.parameter_name, c.current_value, c.default_value from configuration_parameters c where c.node_name = 'ALL' and (? = '' or c.parameter_name ilike ?) 
""", parameter, parameter) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: facts[row.parameter_name.lower()] = { 'parameter_name': row.parameter_name, 'current_value': row.current_value, 'default_value': row.default_value} return facts def get_node_facts(cursor, schema=''): facts = {} cursor.execute(""" select node_name, node_address, export_address, node_state, node_type, catalog_path from nodes """) while True: rows = cursor.fetchmany(100) if not rows: break for row in rows: facts[row.node_address] = { 'node_name': row.node_name, 'export_address': row.export_address, 'node_state': row.node_state, 'node_type': row.node_type, 'catalog_path': row.catalog_path} return facts # module logic def main(): module = AnsibleModule( argument_spec=dict( cluster=dict(default='localhost'), port=dict(default='5433'), db=dict(default=None), login_user=dict(default='dbadmin'), login_password=dict(default=None), ), supports_check_mode = True) if not pyodbc_found: module.fail_json(msg="The python pyodbc module is required.") db = '' if module.params['db']: db = module.params['db'] changed = False try: dsn = ( "Driver=Vertica;" "Server={0};" "Port={1};" "Database={2};" "User={3};" "Password={4};" "ConnectionLoadBalance={5}" ).format(module.params['cluster'], module.params['port'], db, module.params['login_user'], module.params['login_password'], 'true') db_conn = pyodbc.connect(dsn, autocommit=True) cursor = db_conn.cursor() except Exception, e: module.fail_json(msg="Unable to connect to database: {0}.".format(e)) try: schema_facts = get_schema_facts(cursor) user_facts = get_user_facts(cursor) role_facts = get_role_facts(cursor) configuration_facts = get_configuration_facts(cursor) node_facts = get_node_facts(cursor) module.exit_json(changed=False, ansible_facts={'vertica_schemas': schema_facts, 'vertica_users': user_facts, 'vertica_roles': role_facts, 'vertica_configuration': configuration_facts, 'vertica_nodes': node_facts}) except NotSupportedError, e: 
module.fail_json(msg=str(e)) except SystemExit: # avoid catching this on python 2.4 raise except Exception, e: module.fail_json(msg=e) # import ansible utilities from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
sametmax/Django--an-app-at-a-time
ignore_this_directory/django/conf/locale/ru/formats.py
79
1202
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
#
# Russian (ru) locale formats.  The year abbreviation in the display formats
# is the Cyrillic 'г.' ("год"); an earlier mis-encoding had turned it into
# Thai characters ('ะณ'), which is repaired here.
DATE_FORMAT = 'j E Y г.'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j E Y г. G:i'
YEAR_MONTH_FORMAT = 'F Y г.'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see https://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y',  # '25.10.2006'
    '%d.%m.%y',  # '25.10.06'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S',     # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',        # '25.10.2006 14:30'
    '%d.%m.%Y',              # '25.10.2006'
    '%d.%m.%y %H:%M:%S',     # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',  # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',        # '25.10.06 14:30'
    '%d.%m.%y',              # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
mit
CTSRD-SOAAP/chromium-42.0.2311.135
native_client/pnacl/driver/elftools.py
7
5431
#!/usr/bin/python # Copyright (c) 2013 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Tools for parsing ELF headers. import struct from collections import namedtuple from driver_log import DriverOpen, DriverClose, Log, FixArch class ELFHeader(object): ELF_MAGIC = '\x7fELF' ELF_TYPES = { 1: 'REL', # .o 2: 'EXEC', # .exe 3: 'DYN' } # .so ELF_MACHINES = { 3: '386', 8: 'MIPS', 40: 'ARM', 62: 'X86_64' } ELF_OSABI = { 0: 'UNIX', 3: 'LINUX', 123: 'NACL' } ELF_ABI_VER = { 0: 'NONE', 7: 'NACL' } # A list of tuples of pack format and name. 'P' in pack formats will # be replaced by 'I' for 32bit ELF and 'Q' for 64bit ELF. ELF_HEADER_FORMAT = [ ('16s', 'e_ident'), ('H', 'e_type'), ('H', 'e_machine'), ('I', 'e_version'), ('P', 'e_entry'), ('P', 'e_phoff'), ('P', 'e_shoff'), ('I', 'e_flags'), ('H', 'e_ehsize'), ('H', 'e_phentsize'), ('H', 'e_phnum'), ('H', 'e_shentsize'), ('H', 'e_shnum'), ('H', 'e_shstrndx') ] ELFCLASS32 = 1 ELFCLASS64 = 2 Ehdr = namedtuple('Ehdr', ' '.join(name for _, name in ELF_HEADER_FORMAT)) def __init__(self, header, filename): pack_format = ''.join(fmt for fmt, _ in self.ELF_HEADER_FORMAT) e_class = ord(header[4]) if e_class == ELFHeader.ELFCLASS32: pack_format = pack_format.replace('P', 'I') elif e_class == ELFHeader.ELFCLASS64: pack_format = pack_format.replace('P', 'Q') else: Log.Fatal('%s: ELF file has unknown class (%d)', filename, e_class) ehdr = self.Ehdr(*struct.unpack_from(pack_format, header)) e_osabi = ord(header[7]) e_abiver = ord(header[8]) if e_osabi not in ELFHeader.ELF_OSABI: Log.Fatal('%s: ELF file has unknown OS ABI (%d)', filename, e_osabi) if e_abiver not in ELFHeader.ELF_ABI_VER: Log.Fatal('%s: ELF file has unknown ABI version (%d)', filename, e_abiver) if ehdr.e_type not in ELFHeader.ELF_TYPES: Log.Fatal('%s: ELF file has unknown type (%d)', filename, ehdr.e_type) if ehdr.e_machine not in ELFHeader.ELF_MACHINES: Log.Fatal('%s: 
ELF file has unknown machine type (%d)', filename, ehdr.e_machine) self.type = self.ELF_TYPES[ehdr.e_type] self.machine = self.ELF_MACHINES[ehdr.e_machine] self.osabi = self.ELF_OSABI[e_osabi] self.abiver = self.ELF_ABI_VER[e_abiver] self.arch = FixArch(self.machine) # For convenience self.phoff = ehdr.e_phoff self.phnum = ehdr.e_phnum self.phentsize = ehdr.e_phentsize class ProgramHeader(object): # Note we cannot use the P => I/Q trick we used for ELF header # because the order of members of Elf32_Phdr is different from # Elf64_Phdr's. PROGRAM_HEADER_FORMAT_32 = [ ('I', 'p_type'), ('I', 'p_offset'), ('I', 'p_vaddr'), ('I', 'p_paddr'), ('I', 'p_filesz'), ('I', 'p_memsz'), ('I', 'p_flags'), ('I', 'p_align'), ] PROGRAM_HEADER_FORMAT_64 = [ ('I', 'p_type'), ('I', 'p_flags'), ('Q', 'p_offset'), ('Q', 'p_vaddr'), ('Q', 'p_paddr'), ('Q', 'p_filesz'), ('Q', 'p_memsz'), ('Q', 'p_align'), ] PT_NULL = 0 PT_LOAD = 1 PT_DYNAMIC = 2 PT_INTERP = 3 def __init__(self, header, filename): pack_format = None for program_header_format in [self.PROGRAM_HEADER_FORMAT_32, self.PROGRAM_HEADER_FORMAT_64]: pack_format = ''.join(fmt for fmt, _ in program_header_format) if len(header) == struct.calcsize(pack_format): Phdr = namedtuple( 'Phdr', ' '.join(name for _, name in program_header_format)) phdr = Phdr(*struct.unpack(pack_format, header)) break else: Log.Fatal('%s: Invalid program header size (%d)', filename, len(header)) self.type = phdr.p_type self.offset = phdr.p_offset self.vaddr = phdr.p_vaddr self.paddr = phdr.p_paddr self.filesz = phdr.p_filesz self.memsz = phdr.p_memsz self.flags = phdr.p_flags self.align = phdr.p_align # If the file is not ELF, returns None. # Otherwise, returns an ELFHeader object. def GetELFHeader(filename): fp = DriverOpen(filename, 'rb') # Read max(sizeof(Elf64_Ehdr), sizeof(Elf32_Ehdr)), which is 64 bytes. 
header = fp.read(64) DriverClose(fp) return DecodeELFHeader(header, filename) def DecodeELFHeader(header, filename): # Pull e_ident, e_type, e_machine if header[0:4] != ELFHeader.ELF_MAGIC: return None return ELFHeader(header, filename) # If the file is not ELF, returns None. # Otherwise, returns a tuple of ELFHeader and list of ProgramHeader objects. def GetELFAndProgramHeaders(filename): ehdr = GetELFHeader(filename) if not ehdr: return None phdrs = [] fp = open(filename, 'rb') fp.seek(ehdr.phoff) for i in xrange(ehdr.phnum): phdrs.append(ProgramHeader(fp.read(ehdr.phentsize), filename)) return (ehdr, phdrs) # filetype.IsELF calls this IsElf. Top-level tools should prefer filetype.IsELF, # both for consistency (i.e., all checks for file type come from that library), # and because its results are cached. def IsELF(filename): return GetELFHeader(filename) is not None
bsd-3-clause
chouseknecht/ansible
test/units/modules/storage/netapp/test_na_ontap_object_store.py
23
7128
# (c) 2019, NetApp, Inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """ unit tests for Ansible module: na_ontap_object_store """ from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json import pytest from units.compat import unittest from units.compat.mock import patch, Mock from ansible.module_utils import basic from ansible.module_utils._text import to_bytes import ansible.module_utils.netapp as netapp_utils from ansible.modules.storage.netapp.na_ontap_object_store \ import NetAppOntapObjectStoreConfig as my_module # module under test if not netapp_utils.has_netapp_lib(): pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') def set_module_args(args): """prepare arguments so that they will be picked up during module creation""" args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access class AnsibleExitJson(Exception): """Exception class to be raised by module.exit_json and caught by the test case""" pass class AnsibleFailJson(Exception): """Exception class to be raised by module.fail_json and caught by the test case""" pass def exit_json(*args, **kwargs): # pylint: disable=unused-argument """function to patch over exit_json; package return data into an exception""" if 'changed' not in kwargs: kwargs['changed'] = False raise AnsibleExitJson(kwargs) def fail_json(*args, **kwargs): # pylint: disable=unused-argument """function to patch over fail_json; package return data into an exception""" kwargs['failed'] = True raise AnsibleFailJson(kwargs) class MockONTAPConnection(object): ''' mock server connection to ONTAP host ''' def __init__(self, kind=None): ''' save arguments ''' self.type = kind self.xml_in = None self.xml_out = None def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument ''' mock invoke_successfully returning xml data ''' self.xml_in = xml if 
self.type == 'object_store': xml = self.build_object_store_info() elif self.type == 'object_store_fail': raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") self.xml_out = xml return xml @staticmethod def build_object_store_info(): ''' build xml data for object store ''' xml = netapp_utils.zapi.NaElement('xml') data = {'attributes': {'aggr-object-store-config-info': {'object-store-name': 'ansible'} } } xml.translate_struct(data) print(xml.to_string()) return xml class TestMyModule(unittest.TestCase): ''' a group of related Unit Tests ''' def setUp(self): self.mock_module_helper = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) self.mock_module_helper.start() self.addCleanup(self.mock_module_helper.stop) self.server = MockONTAPConnection() # whether to use a mock or a simulator self.onbox = False def set_default_args(self): if self.onbox: hostname = '10.10.10.10' username = 'admin' password = 'password' name = 'ansible' else: hostname = 'hostname' username = 'username' password = 'password' name = 'ansible' return dict({ 'hostname': hostname, 'username': username, 'password': password, 'name': name }) def call_command(self, module_args): ''' utility function to call apply ''' module_args.update(self.set_default_args()) set_module_args(module_args) my_obj = my_module() my_obj.asup_log_for_cserver = Mock(return_value=None) if not self.onbox: # mock the connection my_obj.server = MockONTAPConnection('object_store') with pytest.raises(AnsibleExitJson) as exc: my_obj.apply() return exc.value.args[0]['changed'] def test_module_fail_when_required_args_missing(self): ''' required arguments are reported as errors ''' with pytest.raises(AnsibleFailJson) as exc: set_module_args({}) my_module() print('Info: %s' % exc.value.args[0]['msg']) def test_ensure_object_store_get_called(self): ''' fetching details of object store ''' set_module_args(self.set_default_args()) my_obj = my_module() my_obj.server = 
self.server assert my_obj.get_aggr_object_store() is not None def test_ensure_get_called_existing(self): ''' test for existing object store''' set_module_args(self.set_default_args()) my_obj = my_module() my_obj.server = MockONTAPConnection(kind='object_store') assert my_obj.get_aggr_object_store() def test_object_store_create(self): ''' test for creating object store''' module_args = { 'provider_type': 'abc', 'server': 'abc', 'container': 'abc', 'access_key': 'abc', 'secret_password': 'abc' } module_args.update(self.set_default_args()) set_module_args(module_args) my_obj = my_module() my_obj.asup_log_for_cserver = Mock(return_value=None) if not self.onbox: # mock the connection my_obj.server = MockONTAPConnection(kind='object_store') with pytest.raises(AnsibleExitJson) as exc: my_obj.apply() assert not exc.value.args[0]['changed'] def test_object_store_delete(self): ''' test for deleting object store''' module_args = { 'state': 'absent', } changed = self.call_command(module_args) assert changed def test_if_all_methods_catch_exception(self): module_args = { 'provider_type': 'abc', 'server': 'abc', 'container': 'abc', 'access_key': 'abc', 'secret_password': 'abc' } module_args.update(self.set_default_args()) set_module_args(module_args) my_obj = my_module() if not self.onbox: my_obj.server = MockONTAPConnection('object_store_fail') with pytest.raises(AnsibleFailJson) as exc: my_obj.get_aggr_object_store() assert '' in exc.value.args[0]['msg'] with pytest.raises(AnsibleFailJson) as exc: my_obj.create_aggr_object_store() assert 'Error provisioning object store config ' in exc.value.args[0]['msg'] with pytest.raises(AnsibleFailJson) as exc: my_obj.delete_aggr_object_store() assert 'Error removing object store config ' in exc.value.args[0]['msg']
gpl-3.0
emilkjer/django-model-utils
setup.py
1
1491
from setuptools import setup, find_packages import subprocess import os.path try: # don't get confused if our sdist is unzipped in a subdir of some # other hg repo if os.path.isdir('.hg'): p = subprocess.Popen(['hg', 'parents', r'--template={rev}\n'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) if not p.returncode: fh = open('HGREV', 'w') fh.write(p.communicate()[0].splitlines()[0]) fh.close() except (OSError, IndexError): pass try: hgrev = open('HGREV').read() except IOError: hgrev = '' long_description = (open('README.rst').read() + open('CHANGES.rst').read() + open('TODO.rst').read()) setup( name='django-model-utils', version='1.1.0.post%s' % hgrev, description='Django model mixins and utilities', long_description=long_description, author='Carl Meyer', author_email='carl@dirtcircle.com', url='http://bitbucket.org/carljm/django-model-utils/', packages=find_packages(), classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Framework :: Django', ], zip_safe=False, tests_require=["Django>=1.1"], test_suite='runtests.runtests' )
bsd-3-clause
nzavagli/UnrealPy
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/django/utils/tzinfo.py
82
3932
"Implementation of tzinfo classes for use with datetime.datetime." from __future__ import unicode_literals import time import warnings from datetime import timedelta, tzinfo from django.utils.deprecation import RemovedInDjango19Warning from django.utils.encoding import ( DEFAULT_LOCALE_ENCODING, force_str, force_text, ) warnings.warn( "django.utils.tzinfo will be removed in Django 1.9. " "Use django.utils.timezone instead.", RemovedInDjango19Warning, stacklevel=2) # Python's doc say: "A tzinfo subclass must have an __init__() method that can # be called with no arguments". FixedOffset and LocalTimezone don't honor this # requirement. Defining __getinitargs__ is sufficient to fix copy/deepcopy as # well as pickling/unpickling. class FixedOffset(tzinfo): "Fixed offset in minutes east from UTC." def __init__(self, offset): warnings.warn( "django.utils.tzinfo.FixedOffset will be removed in Django 1.9. " "Use django.utils.timezone.get_fixed_timezone instead.", RemovedInDjango19Warning) if isinstance(offset, timedelta): self.__offset = offset offset = self.__offset.seconds // 60 else: self.__offset = timedelta(minutes=offset) sign = '-' if offset < 0 else '+' self.__name = "%s%02d%02d" % (sign, abs(offset) / 60., abs(offset) % 60) def __repr__(self): return self.__name def __getinitargs__(self): return self.__offset, def utcoffset(self, dt): return self.__offset def tzname(self, dt): return self.__name def dst(self, dt): return timedelta(0) # This implementation is used for display purposes. It uses an approximation # for DST computations on dates >= 2038. # A similar implementation exists in django.utils.timezone. It's used for # timezone support (when USE_TZ = True) and focuses on correctness. class LocalTimezone(tzinfo): "Proxy timezone information from time module." def __init__(self, dt): warnings.warn( "django.utils.tzinfo.LocalTimezone will be removed in Django 1.9. 
" "Use django.utils.timezone.get_default_timezone instead.", RemovedInDjango19Warning) tzinfo.__init__(self) self.__dt = dt self._tzname = self.tzname(dt) def __repr__(self): return force_str(self._tzname) def __getinitargs__(self): return self.__dt, def utcoffset(self, dt): if self._isdst(dt): return timedelta(seconds=-time.altzone) else: return timedelta(seconds=-time.timezone) def dst(self, dt): if self._isdst(dt): return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone) else: return timedelta(0) def tzname(self, dt): is_dst = False if dt is None else self._isdst(dt) try: return force_text(time.tzname[is_dst], DEFAULT_LOCALE_ENCODING) except UnicodeDecodeError: return None def _isdst(self, dt): tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, 0) try: stamp = time.mktime(tt) except (OverflowError, ValueError): # 32 bit systems can't handle dates after Jan 2038, and certain # systems can't handle dates before ~1901-12-01: # # >>> time.mktime((1900, 1, 13, 0, 0, 0, 0, 0, 0)) # OverflowError: mktime argument out of range # >>> time.mktime((1850, 1, 13, 0, 0, 0, 0, 0, 0)) # ValueError: year out of range # # In this case, we fake the date, because we only care about the # DST flag. tt = (2037,) + tt[1:] stamp = time.mktime(tt) tt = time.localtime(stamp) return tt.tm_isdst > 0
mit
fAntel/pygroot
src/pygroot.py
1
5085
#!/usr/bin/python # # pygroot.py # # Author: # keldzh <keldzh@gmail.com> # # Copyright (c) 2015 Anton Kovalyov # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http:#www.gnu.org/licenses/>. import sys import string import argparse from config import NAME from config import VERSION lex = None def print_error(msg, is_exit): print(NAME + ": ", filename, "(", lineno, "): ", msg) if is_exit: sys.exit(1) class lexer: def __init__(self, filename=None): self.tokens = { "iamgroot": "inc", "IamGroot": "dec", "IAMGROOOT": "out", "IAMGROOT": "right", "Iamgroot": "left", "Iamgrooot": "inp", "I'mGroot": "jump", "WeareGroot": "jump_back"} self.lineno = 0 self.filename = filename if filename is not None: self.f = open(filename) def get_token(self): while True: lexema = self.f.readline() self.lineno += 1 if lexema == '': return None lexema = lexema.split('#', 2)[0] if ''.join(lexema.split()) != '': break token = self.tokens[''.join(lexema.split())] if token is None: raise KeyError("unknown command '" + lexema + "'") return token class executor: def __init__(self): self.ptr = int() self.memory = [0] self.conveyor = None def inc(self): self.memory[self.ptr] += 1 def dec(self): self.memory[self.ptr] -= 1 def out(self): print(chr(self.memory[self.ptr])) def right(self): self.ptr += 1 if self.ptr >= len(self.memory): self.memory.append(int()) def left(self): if self.ptr > 0: self.ptr -= 1 else: print_error( "warning: program is trying to move 
pointer in position befor beginning of memory", False) def inp(self): self.memory[self.ptr] = ord(sys.stdin.read(1)) def jump(self): if self.memory[self.ptr] == 0: self.conveyor.next_jump_back() def jump_back(self): if self.memory[self.ptr] != 0: if self.conveyor is not None: self.conveyor.prev_jump() else: raise SyntaxError("There is no 'I'm Groot' previously than 'We are Groot'") def run(self): try: token = lex.get_token() while token is not None: if token == "jump": self.conveyor = conveyor() while not self.conveyor.is_end(): (getattr(self, self.conveyor.get_command()))() self.conveyor.next() del self.conveyor self.conveyor = None else: (getattr(self, token))() token = lex.get_token() except SyntaxError as e: print_error(str(e), True) except KeyError as e: print_error(str(e), True) except BaseException as e: print_error(str(e), True) class conveyor: def __init__(self, test=False): # for testing if test is True: self.cmds = None self.cmd_ptr = None return self.cmds = ["jump"] self.cmd_ptr = 0 level = 0 token = lex.get_token() while token is not None: self.cmds.append(token) if token == "jump": level += 1 elif token == "jump_back": if level > 0: level -= 1 else: break token = lex.get_token() if token is None: raise SyntaxError("there is no closing 'We are Groot'") def next(self): self.cmd_ptr += 1 def get_command(self): return self.cmds[self.cmd_ptr] def is_end(self): return True if self.cmd_ptr >= len(self.cmds) else False def next_jump_back(self): self.cmd_ptr += 1 level = 0 while self.cmds[self.cmd_ptr] != "jump_back" or level != 0: if self.cmds[self.cmd_ptr] == "jump": level += 1 elif self.cmds[self.cmd_ptr] == "jump_back": level -= 1 self.cmd_ptr += 1 def prev_jump(self): level = 0 self.cmd_ptr -= 1 while self.cmd_ptr >= 0: if self.cmds[self.cmd_ptr] == "jump_back": level += 1 elif self.cmds[self.cmd_ptr] == "jump": if level == 0: return else: level -= 1 self.cmd_ptr -= 1 if __name__ == "__main__": parser = argparse.ArgumentParser( prog=NAME, description="The 
Groot Programming Language interpreter", epilog="Report bugs to: <keldzh@gmail.com>", formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument("FILE", help="path to the file with program on Groot") parser.add_argument( "--version", action="version", version=NAME + " " + VERSION + """ Copyright (C) 2015 Anton Kovalyov License GPLv3: GNU GPL version 3 or later <http://www.gnu.org/licenses/gpl-3.0.html> This program comes with ABSOLUTELY NO WARRANTY, to the extent permitted by law. This is free software, and you are welcome to redistribute it under certain conditions.""") args = parser.parse_args() lex = lexer(args.FILE) e = executor() e.run()
gpl-3.0
springcoil/pymc3
pymc3/tests/test_variational_inference.py
1
28177
import pytest import functools import operator import numpy as np from theano import theano, tensor as tt import pymc3 as pm import pymc3.util from pymc3.theanof import change_flags from pymc3.variational.approximations import ( MeanFieldGroup, FullRankGroup, NormalizingFlowGroup, EmpiricalGroup, MeanField, FullRank, NormalizingFlow, Empirical ) from pymc3.variational.inference import ( ADVI, FullRankADVI, SVGD, NFVI, ASVGD, fit ) from pymc3.variational import flows from pymc3.variational.opvi import Approximation, Group from . import models from .helpers import not_raises pytestmark = pytest.mark.usefixtures( 'strict_float32', 'seeded_test' ) @pytest.mark.parametrize( 'diff', [ 'relative', 'absolute' ] ) @pytest.mark.parametrize( 'ord', [1, 2, np.inf] ) def test_callbacks_convergence(diff, ord): cb = pm.variational.callbacks.CheckParametersConvergence(every=1, diff=diff, ord=ord) class _approx: params = (theano.shared(np.asarray([1, 2, 3])), ) approx = _approx() with pytest.raises(StopIteration): cb(approx, None, 1) cb(approx, None, 10) def test_tracker_callback(): import time tracker = pm.callbacks.Tracker( ints=lambda *t: t[-1], ints2=lambda ap, h, j: j, time=time.time, ) for i in range(10): tracker(None, None, i) assert 'time' in tracker.hist assert 'ints' in tracker.hist assert 'ints2' in tracker.hist assert (len(tracker['ints']) == len(tracker['ints2']) == len(tracker['time']) == 10) assert tracker['ints'] == tracker['ints2'] == list(range(10)) tracker = pm.callbacks.Tracker( bad=lambda t: t # bad signature ) with pytest.raises(TypeError): tracker(None, None, 1) @pytest.fixture('module') def three_var_model(): with pm.Model() as model: pm.HalfNormal('one', shape=(10, 2), total_size=100) pm.Normal('two', shape=(10, )) pm.Normal('three', shape=(10, 1, 2)) return model @pytest.mark.parametrize( ['raises', 'grouping'], [ (not_raises(), {MeanFieldGroup: None}), (not_raises(), {FullRankGroup: None, MeanFieldGroup: ['one']}), (not_raises(), {MeanFieldGroup: ['one'], 
FullRankGroup: ['two'], NormalizingFlowGroup: ['three']}), (pytest.raises(TypeError, match='Found duplicates'), {MeanFieldGroup: ['one'], FullRankGroup: ['two', 'one'], NormalizingFlowGroup: ['three']}), (pytest.raises(TypeError, match='No approximation is specified'), {MeanFieldGroup: ['one', 'two']}), (not_raises(), {MeanFieldGroup: ['one'], FullRankGroup: ['two', 'three']}), ] ) def test_init_groups(three_var_model, raises, grouping): with raises, three_var_model: approxes, groups = zip(*grouping.items()) groups = [list(map(functools.partial(getattr, three_var_model), g)) if g is not None else None for g in groups] inited_groups = [a(group=g) for a, g in zip(approxes, groups)] approx = Approximation(inited_groups) for ig, g in zip(inited_groups, groups): if g is None: pass else: assert set(pm.util.get_transformed(z) for z in g) == set(ig.group) else: assert approx.ndim == three_var_model.ndim @pytest.fixture(params=[ ({}, {MeanFieldGroup: (None, {})}), ({}, {FullRankGroup: (None, {}), MeanFieldGroup: (['one'], {})}), ({}, {MeanFieldGroup: (['one'], {}), FullRankGroup: (['two'], {}), NormalizingFlowGroup: (['three'], {'flow': 'scale-hh*2-planar-radial-loc'})}), ({}, {MeanFieldGroup: (['one'], {}), FullRankGroup: (['two', 'three'], {})}), ({}, {MeanFieldGroup: (['one'], {}), EmpiricalGroup: (['two', 'three'], {'size': 100})}) ], ids=lambda t: ', '.join('%s: %s' % (k.__name__, v[0]) for k, v in t[1].items()) ) def three_var_groups(request, three_var_model): kw, grouping = request.param approxes, groups = zip(*grouping.items()) groups, gkwargs = zip(*groups) groups = [list(map(functools.partial(getattr, three_var_model), g)) if g is not None else None for g in groups] inited_groups = [a(group=g, model=three_var_model, **gk) for a, g, gk in zip(approxes, groups, gkwargs)] return inited_groups @pytest.fixture def three_var_approx(three_var_model, three_var_groups): approx = Approximation(three_var_groups, model=three_var_model) return approx def 
test_sample_simple(three_var_approx): trace = three_var_approx.sample(500) assert set(trace.varnames) == {'one', 'one_log__', 'three', 'two'} assert len(trace) == 500 assert trace[0]['one'].shape == (10, 2) assert trace[0]['two'].shape == (10, ) assert trace[0]['three'].shape == (10, 1, 2) @pytest.fixture def aevb_initial(): return theano.shared(np.random.rand(3, 7).astype('float32')) @pytest.fixture( params=[ (MeanFieldGroup, {}), (FullRankGroup, {}), (NormalizingFlowGroup, {'flow': 'scale'}), (NormalizingFlowGroup, {'flow': 'loc'}), (NormalizingFlowGroup, {'flow': 'hh'}), (NormalizingFlowGroup, {'flow': 'planar'}), (NormalizingFlowGroup, {'flow': 'radial'}), (NormalizingFlowGroup, {'flow': 'radial-loc'}) ], ids=lambda t: '{c} : {d}'.format(c=t[0].__name__, d=t[1]) ) def parametric_grouped_approxes(request): return request.param @pytest.fixture def three_var_aevb_groups(parametric_grouped_approxes, three_var_model, aevb_initial): dsize = np.prod(pymc3.util.get_transformed(three_var_model.one).dshape[1:]) cls, kw = parametric_grouped_approxes spec = cls.get_param_spec_for(d=dsize, **kw) params = dict() for k, v in spec.items(): if isinstance(k, int): params[k] = dict() for k_i, v_i in v.items(): params[k][k_i] = aevb_initial.dot(np.random.rand(7, *v_i).astype('float32')) else: params[k] = aevb_initial.dot(np.random.rand(7, *v).astype('float32')) aevb_g = cls([three_var_model.one], params=params, model=three_var_model, local=True) return [aevb_g, MeanFieldGroup(None, model=three_var_model)] @pytest.fixture def three_var_aevb_approx(three_var_model, three_var_aevb_groups): approx = Approximation(three_var_aevb_groups, model=three_var_model) return approx def test_sample_aevb(three_var_aevb_approx, aevb_initial): pm.KLqp(three_var_aevb_approx).fit(1, more_replacements={ aevb_initial: np.zeros_like(aevb_initial.get_value())[:1] }) aevb_initial.set_value(np.random.rand(7, 7).astype('float32')) trace = three_var_aevb_approx.sample(500) assert set(trace.varnames) == 
{'one', 'one_log__', 'two', 'three'} assert len(trace) == 500 assert trace[0]['one'].shape == (7, 2) assert trace[0]['two'].shape == (10, ) assert trace[0]['three'].shape == (10, 1, 2) aevb_initial.set_value(np.random.rand(13, 7).astype('float32')) trace = three_var_aevb_approx.sample(500) assert set(trace.varnames) == {'one', 'one_log__', 'two', 'three'} assert len(trace) == 500 assert trace[0]['one'].shape == (13, 2) assert trace[0]['two'].shape == (10,) assert trace[0]['three'].shape == (10, 1, 2) def test_replacements_in_sample_node_aevb(three_var_aevb_approx, aevb_initial): inp = tt.matrix(dtype='float32') three_var_aevb_approx.sample_node( three_var_aevb_approx.model.one, 2, more_replacements={aevb_initial: inp}).eval({inp: np.random.rand(7, 7).astype('float32')}) three_var_aevb_approx.sample_node( three_var_aevb_approx.model.one, None, more_replacements={aevb_initial: inp}).eval({inp: np.random.rand(7, 7).astype('float32')}) def test_vae(): minibatch_size = 10 data = pm.floatX(np.random.rand(100)) x_mini = pm.Minibatch(data, minibatch_size) x_inp = tt.vector() x_inp.tag.test_value = data[:minibatch_size] ae = theano.shared(pm.floatX([.1, .1])) be = theano.shared(pm.floatX(1.)) ad = theano.shared(pm.floatX(1.)) bd = theano.shared(pm.floatX(1.)) enc = x_inp.dimshuffle(0, 'x') * ae.dimshuffle('x', 0) + be mu, rho = enc[:, 0], enc[:, 1] with pm.Model(): # Hidden variables zs = pm.Normal('zs', mu=0, sd=1, shape=minibatch_size) dec = zs * ad + bd # Observation model pm.Normal('xs_', mu=dec, sd=0.1, observed=x_inp) pm.fit(1, local_rv={zs: dict(mu=mu, rho=rho)}, more_replacements={x_inp: x_mini}, more_obj_params=[ae, be, ad, bd]) def test_logq_mini_1_sample_1_var(parametric_grouped_approxes, three_var_model): cls, kw = parametric_grouped_approxes approx = cls([three_var_model.one], model=three_var_model, **kw) logq = approx.logq logq = approx.set_size_and_deterministic(logq, 1, 0) logq.eval() def test_logq_mini_2_sample_2_var(parametric_grouped_approxes, 
three_var_model): cls, kw = parametric_grouped_approxes approx = cls([three_var_model.one, three_var_model.two], model=three_var_model, **kw) logq = approx.logq logq = approx.set_size_and_deterministic(logq, 2, 0) logq.eval() def test_logq_mini_sample_aevb(three_var_aevb_groups): approx = three_var_aevb_groups[0] logq, symbolic_logq = approx.set_size_and_deterministic([approx.logq, approx.symbolic_logq], 3, 0) e = logq.eval() es = symbolic_logq.eval() assert e.shape == () assert es.shape == (3,) def test_logq_aevb(three_var_aevb_approx): approx = three_var_aevb_approx logq, symbolic_logq = approx.set_size_and_deterministic([approx.logq, approx.symbolic_logq], 1, 0) e = logq.eval() es = symbolic_logq.eval() assert e.shape == () assert es.shape == (1,) logq, symbolic_logq = approx.set_size_and_deterministic([approx.logq, approx.symbolic_logq], 2, 0) e = logq.eval() es = symbolic_logq.eval() assert e.shape == () assert es.shape == (2,) def test_logq_globals(three_var_approx): if not three_var_approx.has_logq: pytest.skip('%s does not implement logq' % three_var_approx) approx = three_var_approx logq, symbolic_logq = approx.set_size_and_deterministic([approx.logq, approx.symbolic_logq], 1, 0) e = logq.eval() es = symbolic_logq.eval() assert e.shape == () assert es.shape == (1,) logq, symbolic_logq = approx.set_size_and_deterministic([approx.logq, approx.symbolic_logq], 2, 0) e = logq.eval() es = symbolic_logq.eval() assert e.shape == () assert es.shape == (2,) @pytest.mark.parametrize( 'raises, vfam, type_, kw', [ (not_raises(), 'mean_field', MeanFieldGroup, {}), (not_raises(), 'mf', MeanFieldGroup, {}), (not_raises(), 'full_rank', FullRankGroup, {}), (not_raises(), 'fr', FullRankGroup, {}), (not_raises(), 'FR', FullRankGroup, {}), (not_raises(), 'loc', NormalizingFlowGroup, {}), (not_raises(), 'scale', NormalizingFlowGroup, {}), (not_raises(), 'hh', NormalizingFlowGroup, {}), (not_raises(), 'planar', NormalizingFlowGroup, {}), (not_raises(), 'radial', 
NormalizingFlowGroup, {}), (not_raises(), 'scale-loc', NormalizingFlowGroup, {}), (pytest.raises(ValueError, match='Need `trace` or `size`'), 'empirical', EmpiricalGroup, {}), (not_raises(), 'empirical', EmpiricalGroup, {'size': 100}), ] ) def test_group_api_vfam(three_var_model, raises, vfam, type_, kw): with three_var_model, raises: g = Group([three_var_model.one], vfam, **kw) assert isinstance(g, type_) assert not hasattr(g, '_kwargs') if isinstance(g, NormalizingFlowGroup): assert isinstance(g.flow, pm.flows.AbstractFlow) assert g.flow.formula == vfam @pytest.mark.parametrize( 'raises, params, type_, kw, formula', [ (not_raises(), dict(mu=np.ones((10, 2), 'float32'), rho=np.ones((10, 2), 'float32')), MeanFieldGroup, {}, None), (not_raises(), dict(mu=np.ones((10, 2), 'float32'), L_tril=np.ones( FullRankGroup.get_param_spec_for(d=np.prod((10, 2)))['L_tril'], 'float32' )), FullRankGroup, {}, None), (not_raises(), {0: dict(loc=np.ones((10, 2), 'float32'))}, NormalizingFlowGroup, {}, 'loc'), (not_raises(), {0: dict(rho=np.ones((10, 2), 'float32'))}, NormalizingFlowGroup, {}, 'scale'), (not_raises(), {0: dict(v=np.ones((10, 2), 'float32'),)}, NormalizingFlowGroup, {}, 'hh'), (not_raises(), {0: dict(u=np.ones((10, 2), 'float32'), w=np.ones((10, 2), 'float32'), b=1.)}, NormalizingFlowGroup, {}, 'planar'), (not_raises(), {0: dict(z_ref=np.ones((10, 2), 'float32'), a=1., b=1.)}, NormalizingFlowGroup, {}, 'radial'), (not_raises(), {0: dict(rho=np.ones((10, 2), 'float32')), 1: dict(loc=np.ones((10, 2), 'float32'))}, NormalizingFlowGroup, {}, 'scale-loc'), (not_raises(), dict(histogram=np.ones((20, 10, 2), 'float32')), EmpiricalGroup, {}, None), ] ) def test_group_api_params(three_var_model, raises, params, type_, kw, formula): with three_var_model, raises: g = Group([three_var_model.one], params=params, **kw) assert isinstance(g, type_) if isinstance(g, NormalizingFlowGroup): assert g.flow.formula == formula if g.has_logq: # should work as well logq = g.logq logq = 
g.set_size_and_deterministic(logq, 1, 0) logq.eval() @pytest.mark.parametrize( 'gcls, approx, kw', [ (MeanFieldGroup, MeanField, {}), (FullRankGroup, FullRank, {}), (EmpiricalGroup, Empirical, {'size': 100}), (NormalizingFlowGroup, NormalizingFlow, {'flow': 'loc'}), (NormalizingFlowGroup, NormalizingFlow, {'flow': 'scale-loc-scale'}), (NormalizingFlowGroup, NormalizingFlow, {}) ] ) def test_single_group_shortcuts(three_var_model, approx, kw, gcls): with three_var_model: a = approx(**kw) assert isinstance(a, Approximation) assert len(a.groups) == 1 assert isinstance(a.groups[0], gcls) if isinstance(a, NormalizingFlow): assert a.flow.formula == kw.get('flow', NormalizingFlowGroup.default_flow) def test_elbo(): mu0 = 1.5 sigma = 1.0 y_obs = np.array([1.6, 1.4]) post_mu = np.array([1.88], dtype=theano.config.floatX) post_sd = np.array([1], dtype=theano.config.floatX) # Create a model for test with pm.Model() as model: mu = pm.Normal('mu', mu=mu0, sd=sigma) pm.Normal('y', mu=mu, sd=1, observed=y_obs) # Create variational gradient tensor mean_field = MeanField(model=model) with pm.theanof.change_flags(compute_test_value='off'): elbo = -pm.operators.KL(mean_field)()(10000) mean_field.shared_params['mu'].set_value(post_mu) mean_field.shared_params['rho'].set_value(np.log(np.exp(post_sd) - 1)) f = theano.function([], elbo) elbo_mc = f() # Exact value elbo_true = (-0.5 * ( 3 + 3 * post_mu ** 2 - 2 * (y_obs[0] + y_obs[1] + mu0) * post_mu + y_obs[0] ** 2 + y_obs[1] ** 2 + mu0 ** 2 + 3 * np.log(2 * np.pi)) + 0.5 * (np.log(2 * np.pi) + 1)) np.testing.assert_allclose(elbo_mc, elbo_true, rtol=0, atol=1e-1) @pytest.fixture( 'module', params=[True, False], ids=['mini', 'full'] ) def use_minibatch(request): return request.param @pytest.fixture('module') def simple_model_data(use_minibatch): n = 1000 sd0 = 2. mu0 = 4. sd = 3. mu = -5. 
data = sd * np.random.randn(n) + mu d = n / sd ** 2 + 1 / sd0 ** 2 mu_post = (n * np.mean(data) / sd ** 2 + mu0 / sd0 ** 2) / d if use_minibatch: data = pm.Minibatch(data) return dict( n=n, data=data, mu_post=mu_post, d=d, mu0=mu0, sd0=sd0, sd=sd, ) @pytest.fixture(scope='module') def simple_model(simple_model_data): with pm.Model() as model: mu_ = pm.Normal( 'mu', mu=simple_model_data['mu0'], sd=simple_model_data['sd0'], testval=0) pm.Normal('x', mu=mu_, sd=simple_model_data['sd'], observed=simple_model_data['data'], total_size=simple_model_data['n']) return model @pytest.fixture('module', params=[ dict(cls=NFVI, init=dict(flow='scale-loc')), dict(cls=ADVI, init=dict()), dict(cls=FullRankADVI, init=dict()), dict(cls=SVGD, init=dict(n_particles=500, jitter=1)), dict(cls=ASVGD, init=dict(temperature=1.)), ], ids=[ 'NFVI=scale-loc', 'ADVI', 'FullRankADVI', 'SVGD', 'ASVGD' ]) def inference_spec(request): cls = request.param['cls'] init = request.param['init'] def init_(**kw): k = init.copy() k.update(kw) return cls(**k) init_.cls = cls return init_ @pytest.fixture('function') def inference(inference_spec, simple_model): with simple_model: return inference_spec() @pytest.fixture('function') def fit_kwargs(inference, use_minibatch): _select = { (ADVI, 'full'): dict( obj_optimizer=pm.adagrad_window(learning_rate=0.02, n_win=50), n=5000 ), (ADVI, 'mini'): dict( obj_optimizer=pm.adagrad_window(learning_rate=0.01, n_win=50), n=12000 ), (NFVI, 'full'): dict( obj_optimizer=pm.adagrad_window(learning_rate=0.01, n_win=50), n=12000 ), (NFVI, 'mini'): dict( obj_optimizer=pm.adagrad_window(learning_rate=0.01, n_win=50), n=12000 ), (FullRankADVI, 'full'): dict( obj_optimizer=pm.adagrad_window(learning_rate=0.007, n_win=50), n=6000 ), (FullRankADVI, 'mini'): dict( obj_optimizer=pm.adagrad_window(learning_rate=0.007, n_win=50), n=12000 ), (SVGD, 'full'): dict( obj_optimizer=pm.adagrad_window(learning_rate=0.07, n_win=7), n=300 ), (SVGD, 'mini'): dict( 
obj_optimizer=pm.adagrad_window(learning_rate=0.07, n_win=7), n=300 ), (ASVGD, 'full'): dict( obj_optimizer=pm.adagrad_window(learning_rate=0.07, n_win=10), n=500, obj_n_mc=300 ), (ASVGD, 'mini'): dict( obj_optimizer=pm.adagrad_window(learning_rate=0.07, n_win=10), n=500, obj_n_mc=300 ) } if use_minibatch: key = 'mini' else: key = 'full' return _select[(type(inference), key)] @pytest.mark.run('first') def test_fit_oo(inference, fit_kwargs, simple_model_data): trace = inference.fit(**fit_kwargs).sample(10000) mu_post = simple_model_data['mu_post'] d = simple_model_data['d'] np.testing.assert_allclose(np.mean(trace['mu']), mu_post, rtol=0.05) np.testing.assert_allclose(np.std(trace['mu']), np.sqrt(1. / d), rtol=0.1) def test_profile(inference): try: inference.run_profiling(n=100).summary() except ZeroDivisionError: # weird error in SVGD, ASVGD pass @pytest.fixture('module') def another_simple_model(): _model = models.simple_model()[1] with _model: pm.Potential('pot', tt.ones((10, 10))) return _model @pytest.fixture(params=[ dict(name='advi', kw=dict(start={})), dict(name='fullrank_advi', kw=dict(start={})), dict(name='svgd', kw=dict(start={}))], ids=lambda d: d['name'] ) def fit_method_with_object(request, another_simple_model): _select = dict( advi=ADVI, fullrank_advi=FullRankADVI, svgd=SVGD ) with another_simple_model: return _select[request.param['name']]( **request.param['kw']) @pytest.mark.parametrize( ['method', 'kwargs', 'error'], [ ('undefined', dict(), KeyError), (1, dict(), TypeError), ('advi', dict(total_grad_norm_constraint=10), None), ('fullrank_advi', dict(), None), ('svgd', dict(total_grad_norm_constraint=10), None), ('svgd', dict(start={}), None), # start argument is not allowed for ASVGD ('asvgd', dict(start={}, total_grad_norm_constraint=10), TypeError), ('asvgd', dict(total_grad_norm_constraint=10), None), ('nfvi', dict(start={}), None), ('nfvi=scale-loc', dict(start={}), None), ('nfvi=bad-formula', dict(start={}), KeyError), ], ) def 
test_fit_fn_text(method, kwargs, error, another_simple_model): with another_simple_model: if error is not None: with pytest.raises(error): fit(10, method=method, **kwargs) else: fit(10, method=method, **kwargs) @pytest.fixture('module') def aevb_model(): with pm.Model() as model: pm.HalfNormal('x', shape=(2,), total_size=5) pm.Normal('y', shape=(2,)) x = model.x y = model.y mu = theano.shared(x.init_value) rho = theano.shared(np.zeros_like(x.init_value)) return { 'model': model, 'y': y, 'x': x, 'replace': dict(mu=mu, rho=rho) } def test_aevb(inference_spec, aevb_model): # add to inference that supports aevb x = aevb_model['x'] y = aevb_model['y'] model = aevb_model['model'] replace = aevb_model['replace'] with model: try: inference = inference_spec(local_rv={x: {'mu': replace['mu']*5, 'rho': replace['rho']}}) approx = inference.fit(3, obj_n_mc=2, more_obj_params=list(replace.values())) approx.sample(10) approx.sample_node( y, more_replacements={x: np.asarray([1, 1], dtype=x.dtype)} ).eval() except pm.opvi.AEVBInferenceError: pytest.skip('Does not support AEVB') def test_rowwise_approx(three_var_model, parametric_grouped_approxes): # add to inference that supports aevb cls, kw = parametric_grouped_approxes with three_var_model: try: approx = Approximation([cls([three_var_model.one], rowwise=True, **kw), Group(None, vfam='mf')]) inference = pm.KLqp(approx) approx = inference.fit(3, obj_n_mc=2) approx.sample(10) approx.sample_node( three_var_model.one ).eval() except pm.opvi.BatchedGroupError: pytest.skip('Does not support rowwise grouping') @pytest.fixture('module') def binomial_model(): n_samples = 100 xs = np.random.binomial(n=1, p=0.2, size=n_samples) with pm.Model() as model: p = pm.Beta('p', alpha=1, beta=1) pm.Binomial('xs', n=1, p=p, observed=xs) return model @pytest.fixture('module') def binomial_model_inference(binomial_model, inference_spec): with binomial_model: return inference_spec() @pytest.mark.run(after='test_sample_replacements') def 
test_replacements(binomial_model_inference): d = tt.bscalar() d.tag.test_value = 1 approx = binomial_model_inference.approx p = approx.model.p p_t = p ** 3 p_s = approx.sample_node(p_t) if theano.config.compute_test_value != 'off': assert p_s.tag.test_value.shape == p_t.tag.test_value.shape sampled = [p_s.eval() for _ in range(100)] assert any(map( operator.ne, sampled[1:], sampled[:-1]) ) # stochastic p_d = approx.sample_node(p_t, deterministic=True) sampled = [p_d.eval() for _ in range(100)] assert all(map( operator.eq, sampled[1:], sampled[:-1]) ) # deterministic p_r = approx.sample_node(p_t, deterministic=d) sampled = [p_r.eval({d: 1}) for _ in range(100)] assert all(map( operator.eq, sampled[1:], sampled[:-1]) ) # deterministic sampled = [p_r.eval({d: 0}) for _ in range(100)] assert any(map( operator.ne, sampled[1:], sampled[:-1]) ) # stochastic def test_sample_replacements(binomial_model_inference): i = tt.iscalar() i.tag.test_value = 1 approx = binomial_model_inference.approx p = approx.model.p p_t = p ** 3 p_s = approx.sample_node(p_t, size=100) if theano.config.compute_test_value != 'off': assert p_s.tag.test_value.shape == (100, ) + p_t.tag.test_value.shape sampled = p_s.eval() assert any(map( operator.ne, sampled[1:], sampled[:-1]) ) # stochastic assert sampled.shape[0] == 100 p_d = approx.sample_node(p_t, size=i) sampled = p_d.eval({i: 100}) assert any(map( operator.ne, sampled[1:], sampled[:-1]) ) # deterministic assert sampled.shape[0] == 100 sampled = p_d.eval({i: 101}) assert sampled.shape[0] == 101 def test_empirical_from_trace(another_simple_model): with another_simple_model: step = pm.Metropolis() trace = pm.sample(100, step=step) emp = Empirical(trace) assert emp.histogram.shape[0].eval() == 100 trace = pm.sample(100, step=step, njobs=4) emp = Empirical(trace) assert emp.histogram.shape[0].eval() == 400 @pytest.fixture( params=[ dict(cls=flows.PlanarFlow, init=dict(jitter=.1)), dict(cls=flows.RadialFlow, init=dict(jitter=.1)), 
dict(cls=flows.ScaleFlow, init=dict(jitter=.1)), dict(cls=flows.LocFlow, init=dict(jitter=.1)), dict(cls=flows.HouseholderFlow, init=dict(jitter=.1)), ], ids=lambda d: d['cls'].__name__ ) def flow_spec(request): cls = request.param['cls'] init = request.param['init'] def init_(**kw): k = init.copy() k.update(kw) return cls(**k) init_.cls = cls return init_ def test_flow_det(flow_spec): z0 = tt.arange(0, 20).astype('float32') flow = flow_spec(dim=20, z0=z0.dimshuffle('x', 0)) with change_flags(compute_test_value='off'): z1 = flow.forward.flatten() J = tt.jacobian(z1, z0) logJdet = tt.log(tt.abs_(tt.nlinalg.det(J))) det = flow.logdet[0] np.testing.assert_allclose(logJdet.eval(), det.eval(), atol=0.0001) def test_flow_det_local(flow_spec): z0 = tt.arange(0, 12).astype('float32') spec = flow_spec.cls.get_param_spec_for(d=12) params = dict() for k, shp in spec.items(): params[k] = np.random.randn(1, *shp).astype('float32') flow = flow_spec(dim=12, z0=z0.reshape((1, 1, 12)), **params) assert flow.batched with change_flags(compute_test_value='off'): z1 = flow.forward.flatten() J = tt.jacobian(z1, z0) logJdet = tt.log(tt.abs_(tt.nlinalg.det(J))) det = flow.logdet[0] np.testing.assert_allclose(logJdet.eval(), det.eval(), atol=0.0001) def test_flows_collect_chain(): initial = tt.ones((3, 2)) flow1 = flows.PlanarFlow(dim=2, z0=initial) flow2 = flows.PlanarFlow(dim=2, z0=flow1) assert len(flow2.params) == 3 assert len(flow2.all_params) == 6 np.testing.assert_allclose(flow1.logdet.eval() + flow2.logdet.eval(), flow2.sum_logdets.eval()) @pytest.mark.parametrize( 'formula,length,order', [ ('planar', 1, [flows.PlanarFlow]), ('planar*2', 2, [flows.PlanarFlow] * 2), ('planar-planar', 2, [flows.PlanarFlow] * 2), ('planar-planar*2', 3, [flows.PlanarFlow] * 3), ('hh-planar*2', 3, [flows.HouseholderFlow]+[flows.PlanarFlow] * 2) ] ) def test_flow_formula(formula, length, order): spec = flows.Formula(formula) flows_list = spec.flows assert len(flows_list) == length if order is not None: 
assert flows_list == order spec(dim=2, jitter=1)(tt.ones((3, 2))).eval() # should work
apache-2.0
jiguanglizipao/ucore_os_lab
related_info/lab5/process-cpuio-homework.py
49
9708
#! /usr/bin/env python import sys from optparse import OptionParser import random # process switch behavior SCHED_SWITCH_ON_IO = 'SWITCH_ON_IO' # io finished behavior IO_RUN_LATER = 'IO_RUN_LATER' # process states STATE_RUNNING = 'RUNNING' STATE_READY = 'READY' STATE_DONE = 'DONE' STATE_WAIT = 'WAITING' # members of process structure PROC_CODE = 'code_' PROC_PC = 'pc_' PROC_ID = 'pid_' PROC_STATE = 'proc_state_' # things a process can do DO_COMPUTE = 'cpu' DO_YIELD = 'yld' DO_IO = 'io' class scheduler: def __init__(self, process_switch_behavior, io_done_behavior, io_length): # keep set of instructions for each of the processes self.proc_info = {} self.process_switch_behavior = process_switch_behavior self.io_done_behavior = io_done_behavior self.io_length = io_length return def new_process(self): proc_id = len(self.proc_info) self.proc_info[proc_id] = {} self.proc_info[proc_id][PROC_PC] = 0 self.proc_info[proc_id][PROC_ID] = proc_id self.proc_info[proc_id][PROC_CODE] = [] self.proc_info[proc_id][PROC_STATE] = STATE_READY return proc_id def load(self, program_description): proc_id = self.new_process() tmp = program_description.split(':') if len(tmp) != 3: print 'Bad description (%s): Must be number <x:y:z>' print ' where X is the number of instructions' print ' and Y is the percent change that an instruction is YIELD' print ' and Z is the percent change that an instruction is IO' exit(1) num_instructions, chance_yield, chance_io = int(tmp[0]), float(tmp[1])/100.0, float(tmp[2])/100.0 assert(chance_yield+chance_io<1) #print "proc %d, num_instr %d, change_cpu %f" % (proc_id,num_instructions, chance_cpu) for i in range(num_instructions): randnum=random.random(); if randnum < (1.0-chance_yield-chance_io): self.proc_info[proc_id][PROC_CODE].append(DO_COMPUTE) elif randnum >= (1.0-chance_yield-chance_io) and randnum < (1.0-chance_io): self.proc_info[proc_id][PROC_CODE].append(DO_YIELD) else: self.proc_info[proc_id][PROC_CODE].append(DO_IO) #print "proc %d, instr idx %d, 
instr cxt %s" % (proc_id, i, self.proc_info[proc_id][PROC_CODE][i]) return #change to READY STATE, the current proc's state should be expected #if pid==-1, then pid=self.curr_proc def move_to_ready(self, expected, pid=-1): #YOUR CODE return #change to RUNNING STATE, the current proc's state should be expected def move_to_running(self, expected): #YOUR CODE return #change to DONE STATE, the current proc's state should be expected def move_to_done(self, expected): #YOUR CODE return #choose next proc using FIFO/FCFS scheduling, If pid==-1, then pid=self.curr_proc def next_proc(self, pid=-1): #YOUR CODE return def get_num_processes(self): return len(self.proc_info) def get_num_instructions(self, pid): return len(self.proc_info[pid][PROC_CODE]) def get_instruction(self, pid, index): return self.proc_info[pid][PROC_CODE][index] def get_num_active(self): num_active = 0 for pid in range(len(self.proc_info)): if self.proc_info[pid][PROC_STATE] != STATE_DONE: num_active += 1 return num_active def get_num_runnable(self): num_active = 0 for pid in range(len(self.proc_info)): if self.proc_info[pid][PROC_STATE] == STATE_READY or \ self.proc_info[pid][PROC_STATE] == STATE_RUNNING: num_active += 1 return num_active def get_ios_in_flight(self, current_time): num_in_flight = 0 for pid in range(len(self.proc_info)): for t in self.io_finish_times[pid]: if t > current_time: num_in_flight += 1 return num_in_flight def space(self, num_columns): for i in range(num_columns): print '%10s' % ' ', def check_if_done(self): if len(self.proc_info[self.curr_proc][PROC_CODE]) == 0: if self.proc_info[self.curr_proc][PROC_STATE] == STATE_RUNNING: self.move_to_done(STATE_RUNNING) self.next_proc() return def run(self): clock_tick = 0 if len(self.proc_info) == 0: return # track outstanding IOs, per process self.io_finish_times = {} for pid in range(len(self.proc_info)): self.io_finish_times[pid] = [] # make first one active self.curr_proc = 0 self.move_to_running(STATE_READY) # OUTPUT: heade`[rs for 
each column print '%s' % 'Time', for pid in range(len(self.proc_info)): print '%10s' % ('PID:%2d' % (pid)), print '%10s' % 'CPU', print '%10s' % 'IOs', print '' # init statistics io_busy = 0 cpu_busy = 0 while self.get_num_active() > 0: clock_tick += 1 # check for io finish io_done = False for pid in range(len(self.proc_info)): if clock_tick in self.io_finish_times[pid]: # if IO finished, the should do something for related process #YOUR CODE pass #YOU should delete this # if current proc is RUNNING and has an instruction, execute it instruction_to_execute = '' if self.proc_info[self.curr_proc][PROC_STATE] == STATE_RUNNING and \ len(self.proc_info[self.curr_proc][PROC_CODE]) > 0: #pop a instruction from proc_info[self.curr_proc][PROC_CODE]to instruction_to_execute #YOUR CODE pass #YOU should delete this # OUTPUT: print what everyone is up to if io_done: print '%3d*' % clock_tick, else: print '%3d ' % clock_tick, for pid in range(len(self.proc_info)): if pid == self.curr_proc and instruction_to_execute != '': print '%10s' % ('RUN:'+instruction_to_execute), else: print '%10s' % (self.proc_info[pid][PROC_STATE]), if instruction_to_execute == '': print '%10s' % ' ', else: print '%10s' % 1, num_outstanding = self.get_ios_in_flight(clock_tick) if num_outstanding > 0: print '%10s' % str(num_outstanding), io_busy += 1 else: print '%10s' % ' ', print '' # if this is an YIELD instruction, switch to ready state # and add an io completion in the future if instruction_to_execute == DO_YIELD: #YOUR CODE pass #YOU should delete this # if this is an IO instruction, switch to waiting state # and add an io completion in the future elif instruction_to_execute == DO_IO: #YOUR CODE pass #YOU should delete this # ENDCASE: check if currently running thing is out of instructions self.check_if_done() return (cpu_busy, io_busy, clock_tick) # # PARSE ARGUMENTS # parser = OptionParser() parser.add_option('-s', '--seed', default=0, help='the random seed', action='store', type='int', 
dest='seed') parser.add_option('-l', '--processlist', default='', help='a comma-separated list of processes to run, in the form X1:Y1:Z1,X2:Y2:Z2,... where X is the number of instructions that process should run, and Y/Z the chances (from 0 to 100) issue an YIELD/IO', action='store', type='string', dest='process_list') parser.add_option('-L', '--iolength', default=3, help='how long an IO takes', action='store', type='int', dest='io_length') parser.add_option('-p', '--printstats', help='print statistics at end; only useful with -c flag (otherwise stats are not printed)', action='store_true', default=False, dest='print_stats') (options, args) = parser.parse_args() random.seed(options.seed) process_switch_behavior = SCHED_SWITCH_ON_IO io_done_behavior = IO_RUN_LATER io_length=options.io_length s = scheduler(process_switch_behavior, io_done_behavior, io_length) # example process description (10:100,10:100) for p in options.process_list.split(','): s.load(p) print 'Produce a trace of what would happen when you run these processes:' for pid in range(s.get_num_processes()): print 'Process %d' % pid for inst in range(s.get_num_instructions(pid)): print ' %s' % s.get_instruction(pid, inst) print '' print 'Important behaviors:' print ' System will switch when', if process_switch_behavior == SCHED_SWITCH_ON_IO: print 'the current process is FINISHED or ISSUES AN YIELD or IO' else: print 'error in sched switch on iobehavior' exit (-1) print ' After IOs, the process issuing the IO will', if io_done_behavior == IO_RUN_LATER: print 'run LATER (when it is its turn)' else: print 'error in IO done behavior' exit (-1) print '' (cpu_busy, io_busy, clock_tick) = s.run() print '' print 'Stats: Total Time %d' % clock_tick print 'Stats: CPU Busy %d (%.2f%%)' % (cpu_busy, 100.0 * float(cpu_busy)/clock_tick) print 'Stats: IO Busy %d (%.2f%%)' % (io_busy, 100.0 * float(io_busy)/clock_tick) print ''
gpl-2.0
clef/python-social-auth
social/tests/backends/test_linkedin.py
92
1050
import json from social.p3 import urlencode from social.tests.backends.oauth import OAuth1Test, OAuth2Test class BaseLinkedinTest(object): user_data_url = 'https://api.linkedin.com/v1/people/~:' \ '(first-name,id,last-name)' expected_username = 'FooBar' access_token_body = json.dumps({ 'access_token': 'foobar', 'token_type': 'bearer' }) user_data_body = json.dumps({ 'lastName': 'Bar', 'id': '1010101010', 'firstName': 'Foo' }) def test_login(self): self.do_login() def test_partial_pipeline(self): self.do_partial_pipeline() class LinkedinOAuth1Test(BaseLinkedinTest, OAuth1Test): backend_path = 'social.backends.linkedin.LinkedinOAuth' request_token_body = urlencode({ 'oauth_token_secret': 'foobar-secret', 'oauth_token': 'foobar', 'oauth_callback_confirmed': 'true' }) class LinkedinOAuth2Test(BaseLinkedinTest, OAuth2Test): backend_path = 'social.backends.linkedin.LinkedinOAuth2'
bsd-3-clause
ZheJiuShiMing/tango_with_django_project
tango_with_django_project/populate_script.py
2
2160
import os os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tango_with_django_project.settings') import django django.setup() from rango.models import Category, Page def populate(): python_cat = add_cat('Python',128,64) add_page(cat=python_cat, title="Official Python Tutorial", url="http://docs.python.org/2/tutorial/", views=10) add_page(cat=python_cat, title="How to Think like a Computer Scientist", url="http://www.greenteapress.com/thinkpython/", views=11) add_page(cat=python_cat, title="Learn Python in 10 Minutes", url="http://www.korokithakis.net/tutorials/python/", views=12) django_cat = add_cat("Django",64,32) add_page(cat=django_cat, title="Official Django Tutorial", url="https://docs.djangoproject.com/en/1.5/intro/tutorial01/", views=13) add_page(cat=django_cat, title="Django Rocks", url="http://www.djangorocks.com/", views=14) add_page(cat=django_cat, title="How to Tango with Django", url="http://www.tangowithdjango.com/", views=15) frame_cat = add_cat("Other Frameworks",32,16) add_page(cat=frame_cat, title="Bottle", url="http://bottlepy.org/docs/dev/", views=16) add_page(cat=frame_cat, title="Flask", url="http://flask.pocoo.org", views=17) # Print out what we have added to the user. for c in Category.objects.all(): for p in Page.objects.filter(category=c): print "- {0} - {1}".format(str(c), str(p)) def add_page(cat, title, url, views=0): p = Page.objects.get_or_create(category=cat, title=title)[0] p.url=url p.views=views p.save() return p def add_cat(name,views,likes): c = Category.objects.get_or_create(name=name)[0] c.views = views; c.likes = likes; c.save() return c # Start execution here! if __name__ == '__main__': print "Starting Rango population script..." populate()
gpl-2.0
dsalazarr/pfc_ii
pfc/pfc/users/admin.py
1
5528
from django import forms from django.contrib import admin from django.contrib.auth.admin import UserAdmin as AuthUserAdmin from django.contrib.auth.forms import UserChangeForm, UserCreationForm from django.utils.translation import ugettext_lazy as _ from slugify import slugify from pfc.applications.admin import CompanyLicenseInline, UserApplicationInline from pfc.applications.models import Permission from pfc.users.models import UserRule, CompanyRule, Company from .models import User class UserRuleInline(admin.TabularInline): model = User.rules.through extra = 1 class MyUserChangeForm(UserChangeForm): class Meta(UserChangeForm.Meta): model = User class MyUserCreationForm(UserCreationForm): error_message = UserCreationForm.error_messages.update({ 'duplicate_username': 'This username has already been taken.' }) class Meta(UserCreationForm.Meta): model = User fields = ('username', 'company') def clean_username(self): username = self.cleaned_data["username"] try: User.objects.get(username=username) except User.DoesNotExist: return username raise forms.ValidationError(self.error_messages['duplicate_username']) def clean_company(self): return self.cleaned_data['company'] @admin.register(User) class MyUserAdmin(AuthUserAdmin): form = MyUserChangeForm add_form = MyUserCreationForm fieldsets = ( ('User Profile', {'fields': ('name',)}), ('Application Permissions', {'fields': ('permissions',)}), ) + AuthUserAdmin.fieldsets add_fieldsets = ( (None, { 'classes': ('wide',), 'fields': ('username', 'company', 'password1', 'password2'), }), ) list_display = ('username', 'name',) search_fields = ['name'] inlines = [UserApplicationInline, UserRuleInline] def get_readonly_fields(self, request, obj=None): fields = super(MyUserAdmin, self).get_readonly_fields(request, obj) if not request.user.is_from_main_company: return fields + ('company',) return fields def get_queryset(self, request): queryset = super(MyUserAdmin, self).get_queryset(request) return 
queryset.filter(company=request.user.company) def save_model(self, request, obj, form, change): if not change: if not request.user.is_from_main_company: obj.company = request.user.company obj.is_staff = True return super(MyUserAdmin, self).save_model(request, obj, form, change) def formfield_for_manytomany(self, db_field, request, **kwargs): if db_field.name == "permissions": kwargs["queryset"] = Permission.objects.filter( application__companyapplicationlicense__userapplicationlicense__user=request.user, application__companyapplicationlicense__active=True, ) return super(MyUserAdmin, self).formfield_for_manytomany(db_field, request, **kwargs) def formfield_for_foreignkey(self, db_field, request, **kwargs): if db_field.name == "company": if request.user.is_from_main_company: kwargs["required"] = True return super(MyUserAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs) def get_fieldsets(self, request, obj=None): if not obj: fieldsets = self.add_fieldsets fields = ('username', 'password1', 'password2') if request.user.is_from_main_company: fieldsets[0][1]['fields'] = fields + ('company',) else: fieldsets[0][1]['fields'] = fields return super(MyUserAdmin, self).get_fieldsets(request, obj) class UserRuleCreationForm(forms.ModelForm): operator = forms.CharField( label=_("Operator"), widget=forms.Select(choices=UserRule.OPERATOR_CHOICES) ) class Meta: model = UserRule fields = ('name', 'operator') @admin.register(UserRule) class MyUserRule(admin.ModelAdmin): form = UserRuleCreationForm add_form = UserRuleCreationForm list_display = ('name', 'operator') search_fields = ('name',) class CompanyRuleCreationForm(forms.ModelForm): operator = forms.CharField( label=_("Operator"), widget=forms.Select(choices=UserRule.OPERATOR_CHOICES) ) class Meta: model = CompanyRule fields = ('name', 'operator', 'argument') @admin.register(CompanyRule) class MyCompanyRule(admin.ModelAdmin): form = CompanyRuleCreationForm add_form = CompanyRuleCreationForm list_display = 
('name', 'operator', 'argument') search_fields = ('name',) class CompanyCreationForm(forms.ModelForm): class Meta: model = Company fields = ('name',) def save(self, commit=True): return super(CompanyCreationForm, self).save(commit=commit) @admin.register(Company) class MyCompanyForm(admin.ModelAdmin): form = CompanyCreationForm add_form = CompanyCreationForm list_display = ('name', 'slug') search_fields = ('name', 'slug') inlines = (CompanyLicenseInline,) def save_model(self, request, obj, form, change): obj.slug = slugify(obj.name).lower() return super(MyCompanyForm, self).save_model(request, obj, form, change) def save_related(self, request, form, formsets, change): return super(MyCompanyForm, self).save_related(request, form, formsets, change)
gpl-3.0
home-assistant/home-assistant
homeassistant/components/lcn/schemas.py
7
5448
"""Schema definitions for LCN configuration and websockets api.""" import voluptuous as vol from homeassistant.components.climate import DEFAULT_MAX_TEMP, DEFAULT_MIN_TEMP from homeassistant.const import ( CONF_ADDRESS, CONF_BINARY_SENSORS, CONF_COVERS, CONF_HOST, CONF_LIGHTS, CONF_NAME, CONF_PASSWORD, CONF_PORT, CONF_SCENE, CONF_SENSORS, CONF_SOURCE, CONF_SWITCHES, CONF_UNIT_OF_MEASUREMENT, CONF_USERNAME, ) import homeassistant.helpers.config_validation as cv from .const import ( BINSENSOR_PORTS, CONF_CLIMATES, CONF_CONNECTIONS, CONF_DIM_MODE, CONF_DIMMABLE, CONF_LOCKABLE, CONF_MAX_TEMP, CONF_MIN_TEMP, CONF_MOTOR, CONF_OUTPUT, CONF_OUTPUTS, CONF_REGISTER, CONF_REVERSE_TIME, CONF_SCENES, CONF_SETPOINT, CONF_SK_NUM_TRIES, CONF_TRANSITION, DIM_MODES, DOMAIN, KEYS, LED_PORTS, LOGICOP_PORTS, MOTOR_PORTS, MOTOR_REVERSE_TIME, OUTPUT_PORTS, RELAY_PORTS, S0_INPUTS, SETPOINTS, TEMP_CELSIUS, TEMP_FAHRENHEIT, THRESHOLDS, VAR_UNITS, VARIABLES, ) from .helpers import has_unique_host_names, is_address # # Domain data # DOMAIN_DATA_BINARY_SENSOR = { vol.Required(CONF_SOURCE): vol.All( vol.Upper, vol.In(SETPOINTS + KEYS + BINSENSOR_PORTS) ), } DOMAIN_DATA_CLIMATE = { vol.Required(CONF_SOURCE): vol.All(vol.Upper, vol.In(VARIABLES)), vol.Required(CONF_SETPOINT): vol.All(vol.Upper, vol.In(VARIABLES + SETPOINTS)), vol.Optional(CONF_MAX_TEMP, default=DEFAULT_MAX_TEMP): vol.Coerce(float), vol.Optional(CONF_MIN_TEMP, default=DEFAULT_MIN_TEMP): vol.Coerce(float), vol.Optional(CONF_LOCKABLE, default=False): vol.Coerce(bool), vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=TEMP_CELSIUS): vol.In( TEMP_CELSIUS, TEMP_FAHRENHEIT ), } DOMAIN_DATA_COVER = { vol.Required(CONF_MOTOR): vol.All(vol.Upper, vol.In(MOTOR_PORTS)), vol.Optional(CONF_REVERSE_TIME, default="rt1200"): vol.All( vol.Upper, vol.In(MOTOR_REVERSE_TIME) ), } DOMAIN_DATA_LIGHT = { vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS + RELAY_PORTS)), vol.Optional(CONF_DIMMABLE, default=False): vol.Coerce(bool), 
vol.Optional(CONF_TRANSITION, default=0): vol.All( vol.Coerce(float), vol.Range(min=0.0, max=486.0), lambda value: value * 1000 ), } DOMAIN_DATA_SCENE = { vol.Required(CONF_REGISTER): vol.All(vol.Coerce(int), vol.Range(0, 9)), vol.Required(CONF_SCENE): vol.All(vol.Coerce(int), vol.Range(0, 9)), vol.Optional(CONF_OUTPUTS, default=[]): vol.All( cv.ensure_list, [vol.All(vol.Upper, vol.In(OUTPUT_PORTS + RELAY_PORTS))] ), vol.Optional(CONF_TRANSITION, default=None): vol.Any( vol.All( vol.Coerce(int), vol.Range(min=0.0, max=486.0), lambda value: value * 1000, ), None, ), } DOMAIN_DATA_SENSOR = { vol.Required(CONF_SOURCE): vol.All( vol.Upper, vol.In( VARIABLES + SETPOINTS + THRESHOLDS + S0_INPUTS + LED_PORTS + LOGICOP_PORTS ), ), vol.Optional(CONF_UNIT_OF_MEASUREMENT, default="native"): vol.All( vol.Upper, vol.In(VAR_UNITS) ), } DOMAIN_DATA_SWITCH = { vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS + RELAY_PORTS)), } # # Configuration # DOMAIN_DATA_BASE = { vol.Required(CONF_NAME): cv.string, vol.Required(CONF_ADDRESS): is_address, } BINARY_SENSORS_SCHEMA = vol.Schema({**DOMAIN_DATA_BASE, **DOMAIN_DATA_BINARY_SENSOR}) CLIMATES_SCHEMA = vol.Schema({**DOMAIN_DATA_BASE, **DOMAIN_DATA_CLIMATE}) COVERS_SCHEMA = vol.Schema({**DOMAIN_DATA_BASE, **DOMAIN_DATA_COVER}) LIGHTS_SCHEMA = vol.Schema({**DOMAIN_DATA_BASE, **DOMAIN_DATA_LIGHT}) SCENES_SCHEMA = vol.Schema({**DOMAIN_DATA_BASE, **DOMAIN_DATA_SCENE}) SENSORS_SCHEMA = vol.Schema({**DOMAIN_DATA_BASE, **DOMAIN_DATA_SENSOR}) SWITCHES_SCHEMA = vol.Schema({**DOMAIN_DATA_BASE, **DOMAIN_DATA_SWITCH}) CONNECTION_SCHEMA = vol.Schema( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_PORT): cv.port, vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_SK_NUM_TRIES, default=0): cv.positive_int, vol.Optional(CONF_DIM_MODE, default="steps50"): vol.All( vol.Upper, vol.In(DIM_MODES) ), vol.Optional(CONF_NAME): cv.string, } ) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( 
{ vol.Required(CONF_CONNECTIONS): vol.All( cv.ensure_list, has_unique_host_names, [CONNECTION_SCHEMA] ), vol.Optional(CONF_BINARY_SENSORS): vol.All( cv.ensure_list, [BINARY_SENSORS_SCHEMA] ), vol.Optional(CONF_CLIMATES): vol.All(cv.ensure_list, [CLIMATES_SCHEMA]), vol.Optional(CONF_COVERS): vol.All(cv.ensure_list, [COVERS_SCHEMA]), vol.Optional(CONF_LIGHTS): vol.All(cv.ensure_list, [LIGHTS_SCHEMA]), vol.Optional(CONF_SCENES): vol.All(cv.ensure_list, [SCENES_SCHEMA]), vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSORS_SCHEMA]), vol.Optional(CONF_SWITCHES): vol.All(cv.ensure_list, [SWITCHES_SCHEMA]), } ) }, extra=vol.ALLOW_EXTRA, )
apache-2.0