repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
webmull/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py | 117 | 16083 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.thirdparty.mock import Mock
# Needed to support Windows port tests
from webkitpy.port.win import WinPort
def make_mock_crash_report_darwin(process_name, pid):
return """Process: {process_name} [{pid}]
Path: /Volumes/Data/slave/snowleopard-intel-release-tests/build/WebKitBuild/Release/{process_name}
Identifier: {process_name}
Version: ??? (???)
Code Type: X86-64 (Native)
Parent Process: Python [2578]
Date/Time: 2011-12-07 13:27:34.816 -0800
OS Version: Mac OS X 10.6.8 (10K549)
Report Version: 6
Interval Since Last Report: 1660 sec
Crashes Since Last Report: 1
Per-App Crashes Since Last Report: 1
Anonymous UUID: 507D4EEB-9D70-4E2E-B322-2D2F0ABFEDC0
Exception Type: EXC_BREAKPOINT (SIGTRAP)
Exception Codes: 0x0000000000000002, 0x0000000000000000
Crashed Thread: 0
Dyld Error Message:
Library not loaded: /Volumes/Data/WebKit-BuildSlave/snowleopard-intel-release/build/WebKitBuild/Release/WebCore.framework/Versions/A/WebCore
Referenced from: /Volumes/Data/slave/snowleopard-intel-release/build/WebKitBuild/Release/WebKit.framework/Versions/A/WebKit
Reason: image not found
Binary Images:
0x7fff5fc00000 - 0x7fff5fc3be0f dyld 132.1 (???) <29DECB19-0193-2575-D838-CF743F0400B2> /usr/lib/dyld
System Profile:
Model: Xserve3,1, BootROM XS31.0081.B04, 8 processors, Quad-Core Intel Xeon, 2.26 GHz, 6 GB, SMC 1.43f4
Graphics: NVIDIA GeForce GT 120, NVIDIA GeForce GT 120, PCIe, 256 MB
Memory Module: global_name
Network Service: Ethernet 2, Ethernet, en1
PCI Card: NVIDIA GeForce GT 120, sppci_displaycontroller, MXM-Slot
Serial ATA Device: OPTIARC DVD RW AD-5670S
""".format(process_name=process_name, pid=pid)
def make_mock_crash_report_win(process_name, pid):
return """Opened log file 'C:\Projects\WebKit\OpenSource\WebKitBuild\Release\bin32\layout-test-results\CrashLog_1d58_2013-06-03_12-21-20-110.txt'
0:000> .srcpath "C:\Projects\WebKit\OpenSource"
Source search path is: C:\Projects\WebKit\OpenSource
0:000> !analyze -vv
*******************************************************************************
* *
* Exception Analysis *
* *
*******************************************************************************
*** ERROR: Symbol file could not be found. Defaulted to export symbols for C:\Projects\WebKit\OpenSource\WebKitBuild\Release\bin32\libdispatch.dll -
*** ERROR: Symbol file could not be found. Defaulted to export symbols for C:\Windows\SYSTEM32\atiumdag.dll -
FAULTING_IP:
JavaScriptCore!JSC::JSActivation::getOwnPropertySlot+0 [c:\projects\webkit\opensource\source\javascriptcore\runtime\jsactivation.cpp @ 146]
01e3d070 55 push ebp
EXCEPTION_RECORD: 00092cc8 -- (.exr 0x92cc8)
.exr 0x92cc8
ExceptionAddress: 01e3d070 (JavaScriptCore!JSC::JSActivation::getOwnPropertySlot)
ExceptionCode: c00000fd (Stack overflow)
ExceptionFlags: 00000000
NumberParameters: 2
Parameter[0]: 00000001
Parameter[1]: 00092ffc
FAULTING_THREAD: 00000e68
PROCESS_NAME: {process_name}
ERROR_CODE: (NTSTATUS) 0xc0000005 - The instruction at 0x%08lx referenced memory at 0x%08lx. The memory could not be %s.
EXCEPTION_CODE: (NTSTATUS) 0xc0000005 - The instruction at 0x%08lx referenced memory at 0x%08lx. The memory could not be %s.
EXCEPTION_CODE_STR: c0000005
EXCEPTION_PARAMETER1: 00000000
EXCEPTION_PARAMETER2: 00090000
READ_ADDRESS: 00090000
FOLLOWUP_IP:
JavaScriptCore!JSC::JSActivation::getOwnPropertySlot+0 [c:\projects\webkit\opensource\source\javascriptcore\runtime\jsactivation.cpp @ 146]
01e3d070 55 push ebp
WATSON_BKT_PROCSTAMP: 51a8f979
WATSON_BKT_MODULE: MSVCR100.dll
WATSON_BKT_MODVER: 10.0.40219.325
WATSON_BKT_MODSTAMP: 4df2be1e
WATSON_BKT_MODOFFSET: 160d7
MODULE_VER_PRODUCT: Microsoft(R) Visual Studio(R) 2010
BUILD_VERSION_STRING: 6.2.9200.16384 (win8_rtm.120725-1247)
NTGLOBALFLAG: 0
APPLICATION_VERIFIER_FLAGS: 0
APP: {process_name}
ANALYSIS_SESSION_HOST: FULGBR-PC
ANALYSIS_SESSION_TIME: 06-03-2013 12:21:20.0111
CONTEXT: 00092d18 -- (.cxr 0x92d18)
.cxr 0x92d18
eax=01e3d070 ebx=000930bc ecx=7fe03ed0 edx=0751e168 esi=07a7ff98 edi=0791ff78
eip=01e3d070 esp=00093000 ebp=0009306c iopl=0 nv up ei ng nz ac po cy
cs=0023 ss=002b ds=002b es=002b fs=0053 gs=002b efl=00210293
JavaScriptCore!JSC::JSActivation::getOwnPropertySlot:
01e3d070 55 push ebp
.cxr
Resetting default scope
RECURRING_STACK: From frames 0x14 to 0x1d
THREAD_ATTRIBUTES:
[ GLOBAL ]
Global PID: [{pid}]
Global Thread_Count: [19]
Global PageSize: [4096]
Global ModList_SHA1_Hash: [aacef4e7e83b9bddc9cd0cc094dac88d531ea4a3]
Global CommandLine: [C:\Projects\WebKit\OpenSource\WebKitBuild\Release\bin32\{process_name} -]
Global Desktop_Name: [Winsta0\Default]
Global ProcessName: [{process_name}]
Global Debugger_CPU_Architecture: [X86]
Global CPU_ProcessorCount: [24]
Global CPU_MHZ: [1596]
Global CPU_Architecture: [X86]
Global CPU_Family: [6]
Global CPU_Model: [12]
Global CPU_Stepping: [2]
Global CPU_VendorString: [GenuineIntel]
Global LoadedModule_Count: [82]
Global ProcessBeingDebugged
Global GFlags: [0]
Global Application_Verifer_Flags: [0]
Global FinalExh: [2012093943]
Global SystemUpTime: [3 days 23:52:56.000]
Global SystemUpTime: [345176]
Global ProcessUpTime: [0 days 0:00:00.000]
Global ProcessUpTime: [0]
Global CurrentTimeDate: [Mon Jun 3 12:21:20.000 2013 (UTC - 7:00)]
Global CurrentTimeDate: [1370287280]
Global ProductType: [1]
Global SuiteMask: [272]
Global ApplicationName: [{process_name}]
Global ASLR_Enabled
Global SafeSEH_Enabled
FAULT_INSTR_CODE: 83ec8b55
FAULTING_SOURCE_LINE: c:\projects\webkit\opensource\source\javascriptcore\runtime\jsactivation.cpp
FAULTING_SOURCE_FILE: c:\projects\webkit\opensource\source\javascriptcore\runtime\jsactivation.cpp
FAULTING_SOURCE_LINE_NUMBER: 146
SYMBOL_STACK_INDEX: 0
SYMBOL_NAME: javascriptcore!JSC::JSActivation::getOwnPropertySlot+92ffc
FOLLOWUP_NAME: MachineOwner
MODULE_NAME: JavaScriptCore
IMAGE_NAME: JavaScriptCore.dll
DEBUG_FLR_IMAGE_TIMESTAMP: 51ace473
STACK_COMMAND: .cxr 00092D18 ; kb ; dps 93000 ; kb
FAILURE_BUCKET_ID: STACK_OVERFLOW_c0000005_JavaScriptCore.dll!JSC::JSActivation::getOwnPropertySlot
BUCKET_ID: APPLICATION_FAULT_STACK_OVERFLOW_INVALID_POINTER_READ_javascriptcore!JSC::JSActivation::getOwnPropertySlot+92ffc
ANALYSIS_SESSION_ELAPSED_TIME: 18df
Followup: MachineOwner
---------
0:000> ~*kpn
. 0 Id: 18e0.e68 Suspend: 1 Teb: 7ffdd000 Unfrozen
# ChildEBP RetAddr
00 00092a08 7261ece1 MSVCR100!_alloca_probe+0x27
01 00092a4c 7261a5d0 MSVCR100!_write+0x95
02 00092a6c 7261ef6b MSVCR100!_flush+0x3b
03 00092a7c 7261ef1c MSVCR100!_fflush_nolock+0x1c
04 00092ab4 1000f814 MSVCR100!fflush+0x30
05 00092ac8 77c0084e DumpRenderTree_10000000!exceptionFilter(struct _EXCEPTION_POINTERS * __formal = 0x852ac807)+0x24 [c:\projects\webkit\opensource\tools\dumprendertree\win\dumprendertree.cpp @ 1281]
06 00092b60 77e8bf2c KERNELBASE!UnhandledExceptionFilter+0x164
07 00092b68 77e530b4 ntdll!__RtlUserThreadStart+0x57
08 00092b7c 77e15246 ntdll!_EH4_CallFilterFunc+0x12
09 00092ba4 77e151b1 ntdll!_except_handler4_common+0x8e
0a 00092bc4 77e52e71 ntdll!_except_handler4+0x20
0b 00092be8 77e52e43 ntdll!ExecuteHandler2+0x26
0c 00092cb0 77e52cbb ntdll!ExecuteHandler+0x24
0d 00092cb0 01e3d070 ntdll!KiUserExceptionDispatcher+0xf
0e 00092ffc 01e67d25 JavaScriptCore!JSC::JSActivation::getOwnPropertySlot(class JSC::JSCell * cell = 0x07a7ff98, class JSC::ExecState * exec = 0x0751e168, class JSC::PropertyName propertyName = class JSC::PropertyName, class JSC::PropertySlot * slot = 0x000930bc) [c:\projects\webkit\opensource\source\javascriptcore\runtime\jsactivation.cpp @ 146]
0f 0009306c 01e68837 JavaScriptCore!JSC::JSScope::resolveContainingScopeInternal<1,2>(class JSC::ExecState * callFrame = 0x0751e168, class JSC::Identifier * identifier = 0x7fe0ebc0, class JSC::PropertySlot * slot = 0x7fe03ed0, class WTF::Vector<JSC::ResolveOperation,0,WTF::CrashOnOverflow> * operations = 0x7fda16c0, struct JSC::PutToBaseOperation * putToBaseOperation = 0x00000000, bool __formal = false)+0x205 [c:\projects\webkit\opensource\source\javascriptcore\runtime\jsscope.cpp @ 247]
10 00093090 01e65860 JavaScriptCore!JSC::JSScope::resolveContainingScope<1>(class JSC::ExecState * callFrame = 0x0751e168, class JSC::Identifier * identifier = 0x7fe0ebc0, class JSC::PropertySlot * slot = 0x000930bc, class WTF::Vector<JSC::ResolveOperation,0,WTF::CrashOnOverflow> * operations = 0x7fda16c0, struct JSC::PutToBaseOperation * putToBaseOperation = 0x00000000, bool isStrict = false)+0x27 [c:\projects\webkit\opensource\source\javascriptcore\runtime\jsscope.cpp @ 427]
11 00093104 01dceeff JavaScriptCore!JSC::JSScope::resolve(class JSC::ExecState * callFrame = 0x0751e168, class JSC::Identifier * identifier = 0x7fe0ebc0, class WTF::Vector<JSC::ResolveOperation,0,WTF::CrashOnOverflow> * operations = 0x7fda16c0)+0xc0 [c:\projects\webkit\opensource\source\javascriptcore\runtime\jsscope.cpp @ 447]
0:000> q
quit:
""".format(process_name=process_name, pid=pid)
class CrashLogsTest(unittest.TestCase):
def test_find_log_darwin(self):
if not SystemHost().platform.is_mac():
return
older_mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 28528)
mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 28530)
newer_mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 28529)
other_process_mock_crash_report = make_mock_crash_report_darwin('FooProcess', 28527)
misformatted_mock_crash_report = 'Junk that should not appear in a crash report' + make_mock_crash_report_darwin('DumpRenderTree', 28526)[200:]
files = {}
files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150718_quadzen.crash'] = older_mock_crash_report
files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash'] = mock_crash_report
files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150720_quadzen.crash'] = newer_mock_crash_report
files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150721_quadzen.crash'] = None
files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150722_quadzen.crash'] = other_process_mock_crash_report
files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150723_quadzen.crash'] = misformatted_mock_crash_report
filesystem = MockFileSystem(files)
crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))
log = crash_logs.find_newest_log("DumpRenderTree")
self.assertMultiLineEqual(log, newer_mock_crash_report)
log = crash_logs.find_newest_log("DumpRenderTree", 28529)
self.assertMultiLineEqual(log, newer_mock_crash_report)
log = crash_logs.find_newest_log("DumpRenderTree", 28530)
self.assertMultiLineEqual(log, mock_crash_report)
log = crash_logs.find_newest_log("DumpRenderTree", 28531)
self.assertIsNone(log)
log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0)
self.assertIsNone(log)
def bad_read(path):
raise IOError('IOError: No such file or directory')
def bad_mtime(path):
raise OSError('OSError: No such file or directory')
filesystem.read_text_file = bad_read
log = crash_logs.find_newest_log("DumpRenderTree", 28531, include_errors=True)
self.assertIn('IOError: No such file or directory', log)
filesystem = MockFileSystem(files)
crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))
filesystem.mtime = bad_mtime
log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0, include_errors=True)
self.assertIn('OSError: No such file or directory', log)
def test_find_log_win(self):
if not SystemHost().platform.is_win():
return
older_mock_crash_report = make_mock_crash_report_win('DumpRenderTree', 28528)
mock_crash_report = make_mock_crash_report_win('DumpRenderTree', 28530)
newer_mock_crash_report = make_mock_crash_report_win('DumpRenderTree', 28529)
other_process_mock_crash_report = make_mock_crash_report_win('FooProcess', 28527)
misformatted_mock_crash_report = 'Junk that should not appear in a crash report' + make_mock_crash_report_win('DumpRenderTree', 28526)[200:]
files = {}
files['~/CrashLog_1d58_2013-06-03_12-21-20-110.txt'] = older_mock_crash_report
files['~/CrashLog_abcd_2013-06-03_12-22-19-129.txt'] = mock_crash_report
files['~/CrashLog_2eff_2013-06-03_12-23-20-150.txt'] = newer_mock_crash_report
files['~/CrashLog_31a0_2013-06-03_12-24-22-119.txt'] = None
files['~/CrashLog_01a3_2013-06-03_12-25-23-120.txt'] = other_process_mock_crash_report
files['~/CrashLog_aadd_2013-06-03_12-26-24-121.txt'] = misformatted_mock_crash_report
filesystem = MockFileSystem(files)
mock_host = MockSystemHost(os_name='win', filesystem=filesystem)
crash_logs = CrashLogs(mock_host, "~")
log = crash_logs.find_newest_log("DumpRenderTree", 28529)
self.assertMultiLineEqual(log, newer_mock_crash_report)
log = crash_logs.find_newest_log("DumpRenderTree", 28530)
self.assertMultiLineEqual(log, mock_crash_report)
log = crash_logs.find_newest_log("DumpRenderTree", 28531)
self.assertIsNone(log)
log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0)
self.assertIsNone(log)
def bad_read(path):
raise IOError('IOError: No such file or directory')
filesystem.read_text_file = bad_read
filesystem.read_binary_file = bad_read
log = crash_logs.find_newest_log("DumpRenderTree", 28531, include_errors=True)
self.assertIn('IOError: No such file or directory', log)
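if __name__ == '__main__':
    # Standalone sketch of the newest-log lookup these tests exercise:
    # take the most recently modified file matching a process-name glob,
    # optionally requiring an mtime newer than a cutoff. Illustrative
    # only; the real logic lives in
    # webkitpy.common.system.crashlogs.CrashLogs.
    import glob
    import os
    def find_newest_log_sketch(pattern, newer_than=None):
        candidates = glob.glob(pattern)
        if newer_than is not None:
            candidates = [p for p in candidates
                          if os.path.getmtime(p) > newer_than]
        if not candidates:
            return None
        with open(max(candidates, key=os.path.getmtime)) as f:
            return f.read()
    print(find_newest_log_sketch('/tmp/DumpRenderTree_*.crash'))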
| bsd-3-clause |
esthermm/odoo-addons | stock_information_mrp/models/stock_information.py | 4 | 5085 | # -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api, _
import openerp.addons.decimal_precision as dp
class StockInformation(models.Model):
_inherit = 'stock.information'
@api.multi
def _compute_week(self):
pro_obj = self.env['mrp.production']
super(StockInformation, self)._compute_week()
for line in self:
moves = line.incoming_pending_moves.filtered(
lambda x: not x.purchase_line_id and not x.production_id)
line.incoming_pending_amount_moves = sum(
moves.mapped('product_uom_qty'))
moves = line.incoming_pending_moves.filtered(
lambda x: x.purchase_line_id)
line.incoming_pending_amount_purchases = sum(
moves.mapped('product_uom_qty'))
moves = line.incoming_pending_moves.filtered(
lambda x: x.production_id)
line.incoming_pending_amount_productions = sum(
moves.mapped('product_uom_qty'))
productions = self.env['mrp.production']
productions |= moves.mapped('production_id')
line.incoming_pending_productions = [(6, 0, productions.ids)]
if line.first_week:
draft_prods = pro_obj._find_productions_from_stock_information(
line.company, line.last_day_week, line.product,
line.location, state=['draft'])
else:
draft_prods = pro_obj._find_productions_from_stock_information(
line.company, line.last_day_week, line.product,
line.location, state=['draft'],
from_date=line.first_day_week)
line.draft_productions_amount = sum(
draft_prods.mapped('product_qty'))
line.draft_productions = [(6, 0, draft_prods.ids)]
@api.multi
@api.depends('product', 'product.seller_ids')
def _compute_product_info(self):
super(StockInformation, self)._compute_product_info()
route_id = self.env.ref('mrp.route_warehouse0_manufacture').id
for line in self:
line.product_to_produce = False
if (line.product and route_id in line.product.route_ids.ids):
line.product_to_produce = True
product_to_produce = fields.Boolean(
'To produce', compute='_compute_product_info', store=True)
incoming_pending_amount_moves = fields.Float(
'Incoming pending amount moves', compute='_compute_week',
digits=dp.get_precision('Product Unit of Measure'),
help='Incoming moves')
incoming_pending_amount_purchases = fields.Float(
'Incoming pending amount purchases', compute='_compute_week',
digits=dp.get_precision('Product Unit of Measure'),
help='Incoming purchases')
incoming_pending_amount_productions = fields.Float(
'Incoming pending amount productions', compute='_compute_week',
digits=dp.get_precision('Product Unit of Measure'),
help='Incoming productions')
incoming_pending_productions = fields.Many2many(
comodel_name='mrp.production',
relation='rel_stock_info_mrp_production',
column1='stock_info_id', column2='production_id',
string='MRP Productions', compute='_compute_week')
draft_productions_amount = fields.Float(
'Draft productions amount (INFO)', compute='_compute_week',
digits=dp.get_precision('Product Unit of Measure'),
help='Draft productions amount')
draft_productions = fields.Many2many(
comodel_name='mrp.production', string='Draft productions',
relation='rel_stock_info_production', compute='_compute_week',
column1='stock_info_id', column2='sale_id')
@api.model
def create(self, vals):
information = super(StockInformation, self).create(vals)
if not information.route:
manufac_route = self.env.ref('mrp.route_warehouse0_manufacture')
if (information.product.route_ids and manufac_route.id in
information.product.route_ids.ids):
information.write({'route': manufac_route.id})
return information
@api.multi
def show_incoming_pending_productions(self):
self.ensure_one()
return {'name': _('MRP productions'),
'view_type': 'form',
"view_mode": 'tree,form',
'res_model': 'mrp.production',
'type': 'ir.actions.act_window',
'domain': [('id', 'in', self.incoming_pending_productions.ids)]
}
@api.multi
def show_draft_productions(self):
self.ensure_one()
return {'name': _('MRP draft productions'),
'view_type': 'form',
"view_mode": 'tree,form',
'res_model': 'mrp.production',
'type': 'ir.actions.act_window',
'domain': [('id', 'in', self.draft_productions.ids)]}
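# Minimal sketch of the move-bucketing logic used in _compute_week, with
# plain Python objects standing in for Odoo recordsets. All names below
# are illustrative only (the module itself needs an Odoo runtime).
if __name__ == '__main__':
    class FakeMove(object):
        def __init__(self, qty, purchase=False, production=False):
            self.product_uom_qty = qty
            self.purchase_line_id = purchase
            self.production_id = production
    moves = [FakeMove(5.0), FakeMove(3.0, purchase=True),
             FakeMove(2.0, production=True)]
    pending_moves = sum(m.product_uom_qty for m in moves
                        if not m.purchase_line_id and not m.production_id)
    pending_purchases = sum(
        m.product_uom_qty for m in moves if m.purchase_line_id)
    pending_productions = sum(
        m.product_uom_qty for m in moves if m.production_id)
    print("%s %s %s" % (pending_moves, pending_purchases, pending_productions))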
| agpl-3.0 |
raymondgom/pmip6ns3.13new | src/visualizer/visualizer/ipython_view.py | 89 | 10362 | """
Backend to the console plugin.
@author: Eitan Isaacson
@organization: IBM Corporation
@copyright: Copyright (c) 2007 IBM Corporation
@license: BSD
All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
"""
# this file is a modified version of source code from the Accerciser project
# http://live.gnome.org/accerciser
import gtk
import re
import sys
import os
import pango
from StringIO import StringIO
import IPython
ansi_colors = {'0;30': 'Black',
'0;31': 'Red',
'0;32': 'Green',
'0;33': 'Brown',
'0;34': 'Blue',
'0;35': 'Purple',
'0;36': 'Cyan',
'0;37': 'LightGray',
'1;30': 'DarkGray',
'1;31': 'DarkRed',
'1;32': 'SeaGreen',
'1;33': 'Yellow',
'1;34': 'LightBlue',
'1;35': 'MediumPurple',
'1;36': 'LightCyan',
'1;37': 'White'}
class IterableIPShell:
def __init__(self,argv=None,user_ns=None,user_global_ns=None,
cin=None, cout=None,cerr=None, input_func=None):
if input_func:
IPython.iplib.raw_input_original = input_func
if cin:
IPython.Shell.Term.cin = cin
if cout:
IPython.Shell.Term.cout = cout
if cerr:
IPython.Shell.Term.cerr = cerr
if argv is None:
argv=[]
# This is to get rid of the blockage that occurs during
# IPython.Shell.InteractiveShell.user_setup()
IPython.iplib.raw_input = lambda x: None
self.term = IPython.genutils.IOTerm(cin=cin, cout=cout, cerr=cerr)
os.environ['TERM'] = 'dumb'
excepthook = sys.excepthook
self.IP = IPython.Shell.make_IPython(argv,user_ns=user_ns,
user_global_ns=user_global_ns,
embedded=True,
shell_class=IPython.Shell.InteractiveShell)
self.IP.system = lambda cmd: self.shell(self.IP.var_expand(cmd),
header='IPython system call: ',
verbose=self.IP.rc.system_verbose)
sys.excepthook = excepthook
self.iter_more = 0
self.history_level = 0
self.complete_sep = re.compile('[\s\{\}\[\]\(\)]')
def execute(self):
self.history_level = 0
orig_stdout = sys.stdout
sys.stdout = IPython.Shell.Term.cout
try:
line = self.IP.raw_input(None, self.iter_more)
if self.IP.autoindent:
self.IP.readline_startup_hook(None)
except KeyboardInterrupt:
self.IP.write('\nKeyboardInterrupt\n')
self.IP.resetbuffer()
# keep cache in sync with the prompt counter:
self.IP.outputcache.prompt_count -= 1
if self.IP.autoindent:
self.IP.indent_current_nsp = 0
self.iter_more = 0
except:
self.IP.showtraceback()
else:
self.iter_more = self.IP.push(line)
if (self.IP.SyntaxTB.last_syntax_error and
self.IP.rc.autoedit_syntax):
self.IP.edit_syntax_error()
if self.iter_more:
self.prompt = str(self.IP.outputcache.prompt2).strip()
if self.IP.autoindent:
self.IP.readline_startup_hook(self.IP.pre_readline)
else:
self.prompt = str(self.IP.outputcache.prompt1).strip()
sys.stdout = orig_stdout
def historyBack(self):
self.history_level -= 1
return self._getHistory()
def historyForward(self):
self.history_level += 1
return self._getHistory()
def _getHistory(self):
try:
rv = self.IP.user_ns['In'][self.history_level].strip('\n')
except IndexError:
self.history_level = 0
rv = ''
return rv
def updateNamespace(self, ns_dict):
self.IP.user_ns.update(ns_dict)
def complete(self, line):
split_line = self.complete_sep.split(line)
possibilities = self.IP.complete(split_line[-1])
if possibilities:
common_prefix = reduce(self._commonPrefix, possibilities)
completed = line[:-len(split_line[-1])]+common_prefix
else:
completed = line
return completed, possibilities
def _commonPrefix(self, str1, str2):
for i in range(len(str1)):
if not str2.startswith(str1[:i+1]):
return str1[:i]
return str1
def shell(self, cmd,verbose=0,debug=0,header=''):
stat = 0
if verbose or debug: print header+cmd
# flush stdout so we don't mangle python's buffering
if not debug:
input, output = os.popen4(cmd)
print output.read()
output.close()
input.close()
class ConsoleView(gtk.TextView):
def __init__(self):
gtk.TextView.__init__(self)
self.modify_font(pango.FontDescription('Mono'))
self.set_cursor_visible(True)
self.text_buffer = self.get_buffer()
self.mark = self.text_buffer.create_mark('scroll_mark',
self.text_buffer.get_end_iter(),
False)
for code in ansi_colors:
self.text_buffer.create_tag(code,
foreground=ansi_colors[code],
weight=700)
self.text_buffer.create_tag('0')
self.text_buffer.create_tag('notouch', editable=False)
self.color_pat = re.compile('\x01?\x1b\[(.*?)m\x02?')
self.line_start = \
self.text_buffer.create_mark('line_start',
self.text_buffer.get_end_iter(), True
)
self.connect('key-press-event', self._onKeypress)
self.last_cursor_pos = 0
def write(self, text, editable=False):
segments = self.color_pat.split(text)
segment = segments.pop(0)
start_mark = self.text_buffer.create_mark(None,
self.text_buffer.get_end_iter(),
True)
self.text_buffer.insert(self.text_buffer.get_end_iter(), segment)
if segments:
ansi_tags = self.color_pat.findall(text)
for tag in ansi_tags:
i = segments.index(tag)
self.text_buffer.insert_with_tags_by_name(self.text_buffer.get_end_iter(),
segments[i+1], tag)
segments.pop(i)
if not editable:
self.text_buffer.apply_tag_by_name('notouch',
self.text_buffer.get_iter_at_mark(start_mark),
self.text_buffer.get_end_iter())
self.text_buffer.delete_mark(start_mark)
self.scroll_mark_onscreen(self.mark)
def showPrompt(self, prompt):
self.write(prompt)
self.text_buffer.move_mark(self.line_start,self.text_buffer.get_end_iter())
def changeLine(self, text):
iter = self.text_buffer.get_iter_at_mark(self.line_start)
iter.forward_to_line_end()
self.text_buffer.delete(self.text_buffer.get_iter_at_mark(self.line_start), iter)
self.write(text, True)
def getCurrentLine(self):
rv = self.text_buffer.get_slice(self.text_buffer.get_iter_at_mark(self.line_start),
self.text_buffer.get_end_iter(), False)
return rv
def showReturned(self, text):
iter = self.text_buffer.get_iter_at_mark(self.line_start)
iter.forward_to_line_end()
self.text_buffer.apply_tag_by_name('notouch',
self.text_buffer.get_iter_at_mark(self.line_start),
iter)
self.write('\n'+text)
if text:
self.write('\n')
self.showPrompt(self.prompt)
self.text_buffer.move_mark(self.line_start,self.text_buffer.get_end_iter())
self.text_buffer.place_cursor(self.text_buffer.get_end_iter())
def _onKeypress(self, obj, event):
if not event.string:
return
insert_mark = self.text_buffer.get_insert()
insert_iter = self.text_buffer.get_iter_at_mark(insert_mark)
selection_mark = self.text_buffer.get_selection_bound()
selection_iter = self.text_buffer.get_iter_at_mark(selection_mark)
start_iter = self.text_buffer.get_iter_at_mark(self.line_start)
if start_iter.compare(insert_iter) <= 0 and \
start_iter.compare(selection_iter) <= 0:
return
elif start_iter.compare(insert_iter) > 0 and \
start_iter.compare(selection_iter) > 0:
self.text_buffer.place_cursor(start_iter)
elif insert_iter.compare(selection_iter) < 0:
self.text_buffer.move_mark(insert_mark, start_iter)
elif insert_iter.compare(selection_iter) > 0:
self.text_buffer.move_mark(selection_mark, start_iter)
class IPythonView(ConsoleView, IterableIPShell):
def __init__(self):
ConsoleView.__init__(self)
self.cout = StringIO()
IterableIPShell.__init__(self, cout=self.cout,cerr=self.cout,
input_func=self.raw_input)
self.connect('key_press_event', self.keyPress)
self.execute()
self.cout.truncate(0)
self.showPrompt(self.prompt)
self.interrupt = False
def raw_input(self, prompt=''):
if self.interrupt:
self.interrupt = False
raise KeyboardInterrupt
return self.getCurrentLine()
def keyPress(self, widget, event):
if event.state & gtk.gdk.CONTROL_MASK and event.keyval == 99:
self.interrupt = True
self._processLine()
return True
elif event.keyval == gtk.keysyms.Return:
self._processLine()
return True
elif event.keyval == gtk.keysyms.Up:
self.changeLine(self.historyBack())
return True
elif event.keyval == gtk.keysyms.Down:
self.changeLine(self.historyForward())
return True
elif event.keyval == gtk.keysyms.Tab:
if not self.getCurrentLine().strip():
return False
completed, possibilities = self.complete(self.getCurrentLine())
if len(possibilities) > 1:
slice = self.getCurrentLine()
self.write('\n')
for symbol in possibilities:
self.write(symbol+'\n')
self.showPrompt(self.prompt)
self.changeLine(completed or slice)
return True
def _processLine(self):
self.history_pos = 0
self.execute()
rv = self.cout.getvalue()
if rv: rv = rv.strip('\n')
self.showReturned(rv)
self.cout.truncate(0)
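if __name__ == '__main__':
    # Minimal embedding sketch: host the console widget in a scrolled GTK
    # window. Assumes a working PyGTK/IPython environment; the window
    # setup here is illustrative only.
    window = gtk.Window()
    window.set_default_size(640, 320)
    window.connect('delete-event', lambda widget, event: gtk.main_quit())
    scrolled = gtk.ScrolledWindow()
    scrolled.add(IPythonView())
    window.add(scrolled)
    window.show_all()
    gtk.main()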
| gpl-2.0 |
saneyuki/servo | python/servo/bootstrap_commands.py | 1 | 22544 | # Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
from __future__ import absolute_import, print_function, unicode_literals
import base64
import json
import os
import os.path as path
import platform
import re
import subprocess
import sys
import traceback
import six.moves.urllib as urllib
import glob
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
import servo.bootstrap as bootstrap
from servo.command_base import CommandBase, cd, check_call
from servo.util import delete, download_bytes, download_file, extract, check_hash
@CommandProvider
class MachCommands(CommandBase):
@Command('bootstrap',
description='Install required packages for building.',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
                     help='Bootstrap without confirmation')
def bootstrap(self, force=False):
# This entry point isn't actually invoked, ./mach bootstrap is directly
# called by mach (see mach_bootstrap.bootstrap_command_only) so that
# it can install dependencies without needing mach's dependencies
return bootstrap.bootstrap(self.context, force=force)
@Command('bootstrap-salt',
description='Install and set up the salt environment.',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
                     help='Bootstrap without confirmation')
def bootstrap_salt(self, force=False):
return bootstrap.bootstrap(self.context, force=force, specific="salt")
@Command('bootstrap-gstreamer',
description='Set up a local copy of the gstreamer libraries (linux only).',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
                     help='Bootstrap without confirmation')
def bootstrap_gstreamer(self, force=False):
return bootstrap.bootstrap(self.context, force=force, specific="gstreamer")
@Command('bootstrap-android',
description='Install the Android SDK and NDK.',
category='bootstrap')
@CommandArgument('--build',
action='store_true',
help='Install Android-specific dependencies for building')
@CommandArgument('--emulator-x86',
action='store_true',
help='Install Android x86 emulator and system image')
@CommandArgument('--accept-all-licences',
action='store_true',
help='For non-interactive use')
def bootstrap_android(self, build=False, emulator_x86=False, accept_all_licences=False):
if not (build or emulator_x86):
print("Must specify `--build` or `--emulator-x86` or both.")
ndk = "android-ndk-r15c-{system}-{arch}"
tools = "sdk-tools-{system}-4333796"
emulator_platform = "android-28"
emulator_image = "system-images;%s;google_apis;x86" % emulator_platform
known_sha1 = {
# https://dl.google.com/android/repository/repository2-1.xml
"sdk-tools-darwin-4333796.zip": "ed85ea7b59bc3483ce0af4c198523ba044e083ad",
"sdk-tools-linux-4333796.zip": "8c7c28554a32318461802c1291d76fccfafde054",
"sdk-tools-windows-4333796.zip": "aa298b5346ee0d63940d13609fe6bec621384510",
# https://developer.android.com/ndk/downloads/older_releases
"android-ndk-r15c-windows-x86.zip": "f2e47121feb73ec34ced5e947cbf1adc6b56246e",
"android-ndk-r15c-windows-x86_64.zip": "970bb2496de0eada74674bb1b06d79165f725696",
"android-ndk-r15c-darwin-x86_64.zip": "ea4b5d76475db84745aa8828000d009625fc1f98",
"android-ndk-r15c-linux-x86_64.zip": "0bf02d4e8b85fd770fd7b9b2cdec57f9441f27a2",
}
toolchains = path.join(self.context.topdir, "android-toolchains")
if not path.isdir(toolchains):
os.makedirs(toolchains)
def download(target_dir, name, flatten=False):
final = path.join(toolchains, target_dir)
if path.isdir(final):
return
base_url = "https://dl.google.com/android/repository/"
filename = name + ".zip"
url = base_url + filename
archive = path.join(toolchains, filename)
if not path.isfile(archive):
download_file(filename, url, archive)
check_hash(archive, known_sha1[filename], "sha1")
print("Extracting " + filename)
remove = True # Set to False to avoid repeated downloads while debugging this script
if flatten:
extracted = final + "_"
extract(archive, extracted, remove=remove)
contents = os.listdir(extracted)
assert len(contents) == 1
os.rename(path.join(extracted, contents[0]), final)
os.rmdir(extracted)
else:
extract(archive, final, remove=remove)
system = platform.system().lower()
machine = platform.machine().lower()
arch = {"i386": "x86"}.get(machine, machine)
if build:
download("ndk", ndk.format(system=system, arch=arch), flatten=True)
download("sdk", tools.format(system=system))
components = []
if emulator_x86:
components += [
"platform-tools",
"emulator",
"platforms;" + emulator_platform,
emulator_image,
]
if build:
components += [
"platform-tools",
"platforms;android-18",
]
sdkmanager = [path.join(toolchains, "sdk", "tools", "bin", "sdkmanager")] + components
if accept_all_licences:
yes = subprocess.Popen(["yes"], stdout=subprocess.PIPE)
process = subprocess.Popen(
sdkmanager, stdin=yes.stdout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
)
# Reduce progress bar spam by removing duplicate lines.
# Printing the same line again with \r is a no-op in a real terminal,
# but each line is shown individually in Taskcluster's log viewer.
previous_line = None
line = b""
while 1:
# Read one byte at a time because in Python:
# * readline() blocks until "\n", which doesn't come before the prompt
# * read() blocks until EOF, which doesn't come before the prompt
# * read(n) keeps reading until it gets n bytes or EOF,
# but we don't know reliably how many bytes to read until the prompt
byte = process.stdout.read(1)
if len(byte) == 0:
print(line)
break
line += byte
if byte == b'\n' or byte == b'\r':
if line != previous_line:
print(line.decode("utf-8", "replace"), end="")
sys.stdout.flush()
previous_line = line
line = b""
exit_code = process.wait()
yes.terminate()
if exit_code:
return exit_code
else:
subprocess.check_call(sdkmanager)
if emulator_x86:
avd_path = path.join(toolchains, "avd", "servo-x86")
process = subprocess.Popen(stdin=subprocess.PIPE, stdout=subprocess.PIPE, args=[
path.join(toolchains, "sdk", "tools", "bin", "avdmanager"),
"create", "avd",
"--path", avd_path,
"--name", "servo-x86",
"--package", emulator_image,
"--force",
])
output = b""
while 1:
# Read one byte at a time, see comment above.
byte = process.stdout.read(1)
if len(byte) == 0:
break
output += byte
# There seems to be no way to disable this prompt:
if output.endswith(b"Do you wish to create a custom hardware profile? [no]"):
                    process.stdin.write(b"no\n")
assert process.wait() == 0
with open(path.join(avd_path, "config.ini"), "a") as f:
f.write("disk.dataPartition.size=2G\n")
@Command('update-hsts-preload',
description='Download the HSTS preload list',
category='bootstrap')
def bootstrap_hsts_preload(self, force=False):
preload_filename = "hsts_preload.json"
preload_path = path.join(self.context.topdir, "resources")
chromium_hsts_url = "https://chromium.googlesource.com/chromium/src" + \
"/net/+/master/http/transport_security_state_static.json?format=TEXT"
try:
content_base64 = download_bytes("Chromium HSTS preload list", chromium_hsts_url)
except urllib.error.URLError:
print("Unable to download chromium HSTS preload list; are you connected to the internet?")
sys.exit(1)
content_decoded = base64.b64decode(content_base64)
# The chromium "json" has single line comments in it which, of course,
# are non-standard/non-valid json. Simply strip them out before parsing
content_json = re.sub(r'(^|\s+)//.*$', '', content_decoded, flags=re.MULTILINE)
try:
pins_and_static_preloads = json.loads(content_json)
entries = {
"entries": [
{
"host": e["name"],
"include_subdomains": e.get("include_subdomains", False)
}
for e in pins_and_static_preloads["entries"]
]
}
with open(path.join(preload_path, preload_filename), 'w') as fd:
json.dump(entries, fd, indent=4)
        except ValueError:
print("Unable to parse chromium HSTS preload list, has the format changed?")
sys.exit(1)
@Command('update-pub-domains',
description='Download the public domains list and update resources/public_domains.txt',
category='bootstrap')
def bootstrap_pub_suffix(self, force=False):
list_url = "https://publicsuffix.org/list/public_suffix_list.dat"
dst_filename = path.join(self.context.topdir, "resources", "public_domains.txt")
not_implemented_case = re.compile(r'^[^*]+\*')
try:
content = download_bytes("Public suffix list", list_url)
except urllib.error.URLError:
print("Unable to download the public suffix list; are you connected to the internet?")
sys.exit(1)
lines = [l.strip() for l in content.decode("utf8").split("\n")]
suffixes = [l for l in lines if not l.startswith("//") and not l == ""]
with open(dst_filename, "wb") as fo:
for suffix in suffixes:
if not_implemented_case.match(suffix):
print("Warning: the new list contains a case that servo can't handle: %s" % suffix)
fo.write(suffix.encode("idna") + "\n")
@Command('clean-nightlies',
description='Clean unused nightly builds of Rust and Cargo',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Actually remove stuff')
@CommandArgument('--keep',
default='1',
help='Keep up to this many most recent nightlies')
def clean_nightlies(self, force=False, keep=None):
default_toolchain = self.default_toolchain()
print("Current Rust version for Servo: {}".format(default_toolchain))
old_toolchains = []
keep = int(keep)
stdout = subprocess.check_output(['git', 'log', '--format=%H', 'rust-toolchain'])
for i, commit_hash in enumerate(stdout.split(), 1):
if i > keep:
toolchain = subprocess.check_output(
['git', 'show', '%s:rust-toolchain' % commit_hash])
old_toolchains.append(toolchain.strip())
removing_anything = False
stdout = subprocess.check_output(['rustup', 'toolchain', 'list'])
for toolchain_with_host in stdout.split():
for old in old_toolchains:
if toolchain_with_host.startswith(old):
removing_anything = True
if force:
print("Removing {}".format(toolchain_with_host))
check_call(["rustup", "uninstall", toolchain_with_host])
else:
print("Would remove {}".format(toolchain_with_host))
if not removing_anything:
print("Nothing to remove.")
elif not force:
print("Nothing done. "
"Run `./mach clean-nightlies -f` to actually remove.")
@Command('clean-cargo-cache',
description='Clean unused Cargo packages',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Actually remove stuff')
@CommandArgument('--show-size', '-s',
action='store_true',
help='Show packages size')
@CommandArgument('--keep',
default='1',
help='Keep up to this many most recent dependencies')
def clean_cargo_cache(self, force=False, show_size=False, keep=None):
def get_size(path):
if os.path.isfile(path):
return os.path.getsize(path) / (1024 * 1024.0)
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size / (1024 * 1024.0)
removing_anything = False
packages = {
'crates': {},
'git': {},
}
import toml
if os.environ.get("CARGO_HOME", ""):
cargo_dir = os.environ.get("CARGO_HOME")
else:
home_dir = os.path.expanduser("~")
cargo_dir = path.join(home_dir, ".cargo")
if not os.path.isdir(cargo_dir):
return
cargo_file = open(path.join(self.context.topdir, "Cargo.lock"))
content = toml.load(cargo_file)
for package in content.get("package", []):
source = package.get("source", "")
version = package["version"]
if source == u"registry+https://github.com/rust-lang/crates.io-index":
crate_name = "{}-{}".format(package["name"], version)
if not packages["crates"].get(crate_name, False):
packages["crates"][package["name"]] = {
"current": [],
"exist": [],
}
packages["crates"][package["name"]]["current"].append(crate_name)
elif source.startswith("git+"):
name = source.split("#")[0].split("/")[-1].replace(".git", "")
branch = ""
crate_name = "{}-{}".format(package["name"], source.split("#")[1])
crate_branch = name.split("?")
if len(crate_branch) > 1:
branch = crate_branch[1].replace("branch=", "")
name = crate_branch[0]
if not packages["git"].get(name, False):
packages["git"][name] = {
"current": [],
"exist": [],
}
packages["git"][name]["current"].append(source.split("#")[1][:7])
if branch:
packages["git"][name]["current"].append(branch)
crates_dir = path.join(cargo_dir, "registry")
crates_cache_dir = ""
crates_src_dir = ""
if os.path.isdir(path.join(crates_dir, "cache")):
for p in os.listdir(path.join(crates_dir, "cache")):
crates_cache_dir = path.join(crates_dir, "cache", p)
crates_src_dir = path.join(crates_dir, "src", p)
git_dir = path.join(cargo_dir, "git")
git_db_dir = path.join(git_dir, "db")
git_checkout_dir = path.join(git_dir, "checkouts")
if os.path.isdir(git_db_dir):
            git_db_list = [f for f in os.listdir(git_db_dir) if not f.startswith('.')]
else:
git_db_list = []
if os.path.isdir(git_checkout_dir):
git_checkout_list = os.listdir(git_checkout_dir)
else:
git_checkout_list = []
for d in list(set(git_db_list + git_checkout_list)):
crate_name = d.replace("-{}".format(d.split("-")[-1]), "")
if not packages["git"].get(crate_name, False):
packages["git"][crate_name] = {
"current": [],
"exist": [],
}
if os.path.isdir(path.join(git_checkout_dir, d)):
with cd(path.join(git_checkout_dir, d)):
git_crate_hash = glob.glob('*')
if not git_crate_hash or not os.path.isdir(path.join(git_db_dir, d)):
packages["git"][crate_name]["exist"].append(("del", d, ""))
continue
for d2 in git_crate_hash:
dep_path = path.join(git_checkout_dir, d, d2)
if os.path.isdir(dep_path):
packages["git"][crate_name]["exist"].append((path.getmtime(dep_path), d, d2))
elif os.path.isdir(path.join(git_db_dir, d)):
packages["git"][crate_name]["exist"].append(("del", d, ""))
if crates_src_dir:
for d in os.listdir(crates_src_dir):
crate_name = re.sub(r"\-\d+(\.\d+){1,3}.+", "", d)
if not packages["crates"].get(crate_name, False):
packages["crates"][crate_name] = {
"current": [],
"exist": [],
}
packages["crates"][crate_name]["exist"].append(d)
total_size = 0
for packages_type in ["git", "crates"]:
sorted_packages = sorted(packages[packages_type])
for crate_name in sorted_packages:
crate_count = 0
existed_crates = packages[packages_type][crate_name]["exist"]
for exist in sorted(existed_crates, reverse=True):
current_crate = packages[packages_type][crate_name]["current"]
size = 0
exist_name = path.join(exist[1], exist[2]) if packages_type == "git" else exist
exist_item = exist[2] if packages_type == "git" else exist
if exist_item not in current_crate:
crate_count += 1
if int(crate_count) >= int(keep) or not current_crate or \
exist[0] == "del" or exist[2] == "master":
removing_anything = True
crate_paths = []
if packages_type == "git":
exist_checkout_path = path.join(git_checkout_dir, exist[1])
exist_db_path = path.join(git_db_dir, exist[1])
exist_path = path.join(git_checkout_dir, exist_name)
if exist[0] == "del":
if os.path.isdir(exist_checkout_path):
crate_paths.append(exist_checkout_path)
if os.path.isdir(exist_db_path):
crate_paths.append(exist_db_path)
crate_count += -1
else:
crate_paths.append(exist_path)
exist_checkout_list = glob.glob(path.join(exist_checkout_path, '*'))
if len(exist_checkout_list) <= 1:
crate_paths.append(exist_checkout_path)
if os.path.isdir(exist_db_path):
crate_paths.append(exist_db_path)
else:
crate_paths.append(path.join(crates_cache_dir, "{}.crate".format(exist)))
crate_paths.append(path.join(crates_src_dir, exist))
size = sum(get_size(p) for p in crate_paths) if show_size else 0
total_size += size
print_msg = (exist_name, " ({}MB)".format(round(size, 2)) if show_size else "", cargo_dir)
if force:
print("Removing `{}`{} package from {}".format(*print_msg))
for crate_path in crate_paths:
if os.path.exists(crate_path):
try:
delete(crate_path)
                                        except Exception:
print(traceback.format_exc())
print("Delete %s failed!" % crate_path)
else:
print("Would remove `{}`{} package from {}".format(*print_msg))
if removing_anything and show_size:
print("\nTotal size of {} MB".format(round(total_size, 2)))
if not removing_anything:
print("Nothing to remove.")
elif not force:
print("\nNothing done. "
"Run `./mach clean-cargo-cache -f` to actually remove.")
| mpl-2.0 |
followloda/PornGuys | FlaskServer/venv/Lib/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py | 356 | 1555 | from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName, Attrs
from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
def to_genshi(walker):
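    """Convert an html5lib tree-walker token stream into Genshi stream events."""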
text = []
for token in walker:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
text.append(token["data"])
elif text:
yield TEXT, "".join(text), (None, -1, -1)
text = []
if type in ("StartTag", "EmptyTag"):
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
for attr, value in token["data"].items()])
yield (START, (QName(name), attrs), (None, -1, -1))
if type == "EmptyTag":
type = "EndTag"
if type == "EndTag":
if token["namespace"]:
name = "{%s}%s" % (token["namespace"], token["name"])
else:
name = token["name"]
yield END, QName(name), (None, -1, -1)
elif type == "Comment":
yield COMMENT, token["data"], (None, -1, -1)
elif type == "Doctype":
yield DOCTYPE, (token["name"], token["publicId"],
token["systemId"]), (None, -1, -1)
else:
pass # FIXME: What to do?
if text:
yield TEXT, "".join(text), (None, -1, -1)
| gpl-3.0 |
jesseengel/magenta | magenta/models/piano_genie/util.py | 2 | 2633 | # Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for Piano Genie."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def demidify(pitches):
"""Transforms MIDI pitches [21,108] to [0, 88)."""
assertions = [
tf.assert_greater_equal(pitches, 21),
tf.assert_less_equal(pitches, 108)
]
with tf.control_dependencies(assertions):
return pitches - 21
def remidify(pitches):
"""Transforms [0, 88) to MIDI pitches [21, 108]."""
assertions = [
tf.assert_greater_equal(pitches, 0),
tf.assert_less_equal(pitches, 87)
]
with tf.control_dependencies(assertions):
return pitches + 21
def discrete_to_piano_roll(categorical, dim, dilation=1, colorize=True):
"""Visualizes discrete sequences as a colorful piano roll."""
# Create piano roll
if categorical.dtype == tf.int32:
piano_roll = tf.one_hot(categorical, dim)
elif categorical.dtype == tf.float32:
assert int(categorical.get_shape()[-1]) == dim
piano_roll = categorical
else:
raise NotImplementedError()
piano_roll = tf.stack([piano_roll] * 3, axis=3)
# Colorize
if colorize:
# Create color palette
hues = np.linspace(0., 1., num=dim, endpoint=False)
colors_hsv = np.ones([dim, 3], dtype=np.float32)
colors_hsv[:, 0] = hues
colors_hsv[:, 1] = 0.85
colors_hsv[:, 2] = 0.85
colors_rgb = tf.image.hsv_to_rgb(colors_hsv) * 255.
colors_rgb = tf.reshape(colors_rgb, [1, 1, dim, 3])
piano_roll = tf.multiply(piano_roll, colors_rgb)
else:
piano_roll *= 255.
# Rotate and flip for visual ease
piano_roll = tf.image.rot90(piano_roll)
# Increase vertical dilation for visual ease
if dilation > 1:
old_height = tf.shape(piano_roll)[1]
old_width = tf.shape(piano_roll)[2]
piano_roll = tf.image.resize_nearest_neighbor(
piano_roll, [old_height * dilation, old_width])
# Cast to tf.uint8
piano_roll = tf.cast(tf.clip_by_value(piano_roll, 0., 255.), tf.uint8)
return piano_roll
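if __name__ == '__main__':
    # Sketch: render a short discrete sequence as a piano roll. Assumes
    # TF 1.x graph mode, matching the tf.* calls above; the sequence
    # values are arbitrary.
    sequence = tf.constant([[0, 3, 7, 3, 0]], dtype=tf.int32)
    roll = discrete_to_piano_roll(sequence, dim=8, dilation=4)
    with tf.Session() as sess:
        print(sess.run(roll).shape)  # (1, 32, 5, 3)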
| apache-2.0 |
gusmaogabriels/GPy | GPy/kern/_src/todo/ODE_1.py | 19 | 6388 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from kernpart import Kernpart
import numpy as np
class ODE_1(Kernpart):
"""
    kernel resulting from a first-order ODE with an OU driving GP
:param input_dim: the number of input dimension, has to be equal to one
:type input_dim: int
:param varianceU: variance of the driving GP
:type varianceU: float
:param lengthscaleU: lengthscale of the driving GP (sqrt(3)/lengthscaleU)
:type lengthscaleU: float
:param varianceY: 'variance' of the transfer function
:type varianceY: float
:param lengthscaleY: 'lengthscale' of the transfer function (1/lengthscaleY)
:type lengthscaleY: float
:rtype: kernel object
"""
def __init__(self, input_dim=1, varianceU=1., varianceY=1., lengthscaleU=None, lengthscaleY=None):
assert input_dim==1, "Only defined for input_dim = 1"
self.input_dim = input_dim
self.num_params = 4
self.name = 'ODE_1'
if lengthscaleU is not None:
lengthscaleU = np.asarray(lengthscaleU)
assert lengthscaleU.size == 1, "lengthscaleU should be one dimensional"
else:
lengthscaleU = np.ones(1)
if lengthscaleY is not None:
lengthscaleY = np.asarray(lengthscaleY)
assert lengthscaleY.size == 1, "lengthscaleY should be one dimensional"
else:
lengthscaleY = np.ones(1)
#lengthscaleY = 0.5
self._set_params(np.hstack((varianceU, varianceY, lengthscaleU,lengthscaleY)))
def _get_params(self):
"""return the value of the parameters."""
return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))
def _set_params(self, x):
"""set the value of the parameters."""
assert x.size == self.num_params
self.varianceU = x[0]
self.varianceY = x[1]
self.lengthscaleU = x[2]
self.lengthscaleY = x[3]
def _get_param_names(self):
"""return parameter names."""
return ['varianceU','varianceY', 'lengthscaleU', 'lengthscaleY']
def K(self, X, X2, target):
"""Compute the covariance matrix between X and X2."""
if X2 is None: X2 = X
# i1 = X[:,1]
# i2 = X2[:,1]
# X = X[:,0].reshape(-1,1)
# X2 = X2[:,0].reshape(-1,1)
dist = np.abs(X - X2.T)
ly=1/self.lengthscaleY
lu=np.sqrt(3)/self.lengthscaleU
#ly=self.lengthscaleY
#lu=self.lengthscaleU
k1 = np.exp(-ly*dist)*(2*lu+ly)/(lu+ly)**2
k2 = (np.exp(-lu*dist)*(ly-2*lu+lu*ly*dist-lu**2*dist) + np.exp(-ly*dist)*(2*lu-ly) ) / (ly-lu)**2
k3 = np.exp(-lu*dist) * ( (1+lu*dist)/(lu+ly) + (lu)/(lu+ly)**2 )
np.add(self.varianceU*self.varianceY*(k1+k2+k3), target, target)
def Kdiag(self, X, target):
"""Compute the diagonal of the covariance matrix associated to X."""
ly=1/self.lengthscaleY
lu=np.sqrt(3)/self.lengthscaleU
#ly=self.lengthscaleY
#lu=self.lengthscaleU
k1 = (2*lu+ly)/(lu+ly)**2
k2 = (ly-2*lu + 2*lu-ly ) / (ly-lu)**2
k3 = 1/(lu+ly) + (lu)/(lu+ly)**2
np.add(self.varianceU*self.varianceY*(k1+k2+k3), target, target)
def _param_grad_helper(self, dL_dK, X, X2, target):
"""derivative of the covariance matrix with respect to the parameters."""
if X2 is None: X2 = X
dist = np.abs(X - X2.T)
ly=1/self.lengthscaleY
lu=np.sqrt(3)/self.lengthscaleU
#ly=self.lengthscaleY
#lu=self.lengthscaleU
dk1theta1 = np.exp(-ly*dist)*2*(-lu)/(lu+ly)**3
#c=np.sqrt(3)
#t1=c/lu
#t2=1/ly
#dk1theta1=np.exp(-dist*ly)*t2*( (2*c*t2+2*t1)/(c*t2+t1)**2 -2*(2*c*t2*t1+t1**2)/(c*t2+t1)**3 )
dk2theta1 = 1*(
np.exp(-lu*dist)*dist*(-ly+2*lu-lu*ly*dist+dist*lu**2)*(ly-lu)**(-2) + np.exp(-lu*dist)*(-2+ly*dist-2*dist*lu)*(ly-lu)**(-2)
+np.exp(-dist*lu)*(ly-2*lu+ly*lu*dist-dist*lu**2)*2*(ly-lu)**(-3)
+np.exp(-dist*ly)*2*(ly-lu)**(-2)
+np.exp(-dist*ly)*2*(2*lu-ly)*(ly-lu)**(-3)
)
dk3theta1 = np.exp(-dist*lu)*(lu+ly)**(-2)*((2*lu+ly+dist*lu**2+lu*ly*dist)*(-dist-2/(lu+ly))+2+2*lu*dist+ly*dist)
dktheta1 = self.varianceU*self.varianceY*(dk1theta1+dk2theta1+dk3theta1)
dk1theta2 = np.exp(-ly*dist) * ((lu+ly)**(-2)) * ( (-dist)*(2*lu+ly) + 1 + (-2)*(2*lu+ly)/(lu+ly) )
dk2theta2 = 1*(
np.exp(-dist*lu)*(ly-lu)**(-2) * ( 1+lu*dist+(-2)*(ly-2*lu+lu*ly*dist-dist*lu**2)*(ly-lu)**(-1) )
+np.exp(-dist*ly)*(ly-lu)**(-2) * ( (-dist)*(2*lu-ly) -1+(2*lu-ly)*(-2)*(ly-lu)**(-1) )
)
dk3theta2 = np.exp(-dist*lu) * (-3*lu-ly-dist*lu**2-lu*ly*dist)/(lu+ly)**3
dktheta2 = self.varianceU*self.varianceY*(dk1theta2 + dk2theta2 +dk3theta2)
k1 = np.exp(-ly*dist)*(2*lu+ly)/(lu+ly)**2
k2 = (np.exp(-lu*dist)*(ly-2*lu+lu*ly*dist-lu**2*dist) + np.exp(-ly*dist)*(2*lu-ly) ) / (ly-lu)**2
k3 = np.exp(-lu*dist) * ( (1+lu*dist)/(lu+ly) + (lu)/(lu+ly)**2 )
dkdvar = k1+k2+k3
target[0] += np.sum(self.varianceY*dkdvar * dL_dK)
target[1] += np.sum(self.varianceU*dkdvar * dL_dK)
target[2] += np.sum(dktheta1*(-np.sqrt(3)*self.lengthscaleU**(-2)) * dL_dK)
target[3] += np.sum(dktheta2*(-self.lengthscaleY**(-2)) * dL_dK)
# def dKdiag_dtheta(self, dL_dKdiag, X, target):
# """derivative of the diagonal of the covariance matrix with respect to the parameters."""
# # NB: derivative of diagonal elements wrt lengthscale is 0
# target[0] += np.sum(dL_dKdiag)
# def dK_dX(self, dL_dK, X, X2, target):
# """derivative of the covariance matrix with respect to X."""
# if X2 is None: X2 = X
# dist = np.sqrt(np.sum(np.square((X[:, None, :] - X2[None, :, :]) / self.lengthscale), -1))[:, :, None]
# ddist_dX = (X[:, None, :] - X2[None, :, :]) / self.lengthscale ** 2 / np.where(dist != 0., dist, np.inf)
# dK_dX = -np.transpose(self.variance * np.exp(-dist) * ddist_dX, (1, 0, 2))
# target += np.sum(dK_dX * dL_dK.T[:, :, None], 0)
# def dKdiag_dX(self, dL_dKdiag, X, target):
# pass
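if __name__ == '__main__':
    # Usage sketch: evaluate the kernel on a small 1-D grid. Kernpart.K
    # accumulates into a caller-supplied `target` array, so a zero matrix
    # is passed in. Parameter values here are arbitrary.
    X = np.linspace(0., 5., 4)[:, None]
    kern = ODE_1(input_dim=1, varianceU=1., varianceY=1.,
                 lengthscaleU=1., lengthscaleY=1.)
    K = np.zeros((4, 4))
    kern.K(X, None, K)
    print(K)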
| bsd-3-clause |
fancasy/final | lib/flask/wrappers.py | 4 | 6893 | # -*- coding: utf-8 -*-
"""
flask.wrappers
~~~~~~~~~~~~~~
Implements the WSGI wrappers (request and response).
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase
from werkzeug.exceptions import BadRequest
from .debughelpers import attach_enctype_error_multidict
from . import json
from .globals import _request_ctx_stack
_missing = object()
def _get_data(req, cache):
getter = getattr(req, 'get_data', None)
if getter is not None:
return getter(cache=cache)
return req.data
class Request(RequestBase):
"""The request object used by default in Flask. Remembers the
matched endpoint and view arguments.
It is what ends up as :class:`~flask.request`. If you want to replace
the request object used you can subclass this and set
:attr:`~flask.Flask.request_class` to your subclass.
The request object is a :class:`~werkzeug.wrappers.Request` subclass and
provides all of the attributes Werkzeug defines plus a few Flask
specific ones.
"""
#: the internal URL rule that matched the request. This can be
#: useful to inspect which methods are allowed for the URL from
#: a before/after handler (``request.url_rule.methods``) etc.
#:
#: .. versionadded:: 0.6
url_rule = None
#: a dict of view arguments that matched the request. If an exception
#: happened when matching, this will be `None`.
view_args = None
#: if matching the URL failed, this is the exception that will be
#: raised / was raised as part of the request handling. This is
#: usually a :exc:`~werkzeug.exceptions.NotFound` exception or
#: something similar.
routing_exception = None
# switched by the request context until 1.0 to opt in deprecated
# module functionality
_is_old_module = False
@property
def max_content_length(self):
"""Read-only view of the `MAX_CONTENT_LENGTH` config key."""
ctx = _request_ctx_stack.top
if ctx is not None:
return ctx.app.config['MAX_CONTENT_LENGTH']
@property
def endpoint(self):
"""The endpoint that matched the request. This in combination with
:attr:`view_args` can be used to reconstruct the same or a
modified URL. If an exception happened when matching, this will
be `None`.
"""
if self.url_rule is not None:
return self.url_rule.endpoint
@property
def module(self):
"""The name of the current module if the request was dispatched
to an actual module. This is deprecated functionality, use blueprints
instead.
"""
from warnings import warn
warn(DeprecationWarning('modules were deprecated in favor of '
'blueprints. Use request.blueprint '
'instead.'), stacklevel=2)
if self._is_old_module:
return self.blueprint
@property
def blueprint(self):
"""The name of the current blueprint"""
if self.url_rule and '.' in self.url_rule.endpoint:
return self.url_rule.endpoint.rsplit('.', 1)[0]
@property
def json(self):
"""If the mimetype is `application/json` this will contain the
parsed JSON data. Otherwise this will be `None`.
The :meth:`get_json` method should be used instead.
"""
# XXX: deprecate property
return self.get_json()
def get_json(self, force=False, silent=False, cache=True):
"""Parses the incoming JSON request data and returns it. If
parsing fails the :meth:`on_json_loading_failed` method on the
request object will be invoked. By default this function will
only load the json data if the mimetype is ``application/json``
        but this can be overridden by the `force` parameter.
        :param force: if set to `True` the mimetype is ignored.
        :param silent: if set to `True` this method will fail silently
                       and return `None`.
:param cache: if set to `True` the parsed JSON data is remembered
on the request.
"""
rv = getattr(self, '_cached_json', _missing)
if rv is not _missing:
return rv
if self.mimetype != 'application/json' and not force:
return None
# We accept a request charset against the specification as
# certain clients have been using this in the past. This
# fits our general approach of being nice in what we accept
# and strict in what we send out.
request_charset = self.mimetype_params.get('charset')
try:
data = _get_data(self, cache)
if request_charset is not None:
rv = json.loads(data, encoding=request_charset)
else:
rv = json.loads(data)
except ValueError as e:
if silent:
rv = None
else:
rv = self.on_json_loading_failed(e)
if cache:
self._cached_json = rv
return rv
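# Illustrative usage (not part of Flask itself), inside a request
# context; silent=True turns malformed bodies into None instead of
# raising BadRequest:
#
# payload = request.get_json(silent=True)
# if payload is None:
# payload = {}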
def on_json_loading_failed(self, e):
"""Called if decoding of the JSON data failed. The return value of
this method is used by :meth:`get_json` when an error occurred. The
default implementation just raises a :class:`BadRequest` exception.
.. versionchanged:: 0.10
Removed buggy previous behavior of generating a random JSON
response. If you want that behavior back you can trivially
add it by subclassing.
.. versionadded:: 0.8
"""
raise BadRequest()
def _load_form_data(self):
RequestBase._load_form_data(self)
# in debug mode we're replacing the files multidict with an ad-hoc
# subclass that raises a different error for key errors.
ctx = _request_ctx_stack.top
if ctx is not None and ctx.app.debug and \
self.mimetype != 'multipart/form-data' and not self.files:
attach_enctype_error_multidict(self)
class Response(ResponseBase):
"""The response object that is used by default in Flask. Works like the
response object from Werkzeug but is set to have an HTML mimetype by
default. Quite often you don't have to create this object yourself because
:meth:`~flask.Flask.make_response` will take care of that for you.
If you want to replace the response object used you can subclass this and
set :attr:`~flask.Flask.response_class` to your subclass.
"""
default_mimetype = 'text/html'
| apache-2.0 |
Just-D/chromium-1 | tools/site_compare/drivers/win32/keyboard.py | 173 | 6934 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""SiteCompare module for simulating keyboard input.
This module contains functions that can be used to simulate a user
pressing keys on a keyboard. Support is provided for formatted strings
including special characters to represent modifier keys like CTRL and ALT
"""
import sys # for sys.exit when run as a script
import time # for sleep
import win32api # for keybd_event and VkKeyCode
import win32con # Windows constants
# TODO(jhaas): Ask the readability guys if this would be acceptable:
#
# from win32con import VK_SHIFT, VK_CONTROL, VK_MENU, VK_LWIN, KEYEVENTF_KEYUP
#
# This is a violation of the style guide but having win32con. everywhere
# is just plain ugly, and win32con is a huge import for just a handful of
# constants
def PressKey(down, key):
"""Presses or unpresses a key.
Uses keybd_event to simulate either depressing or releasing
a key
Args:
down: Whether the key is to be pressed or released
key: Virtual key code of key to press or release
"""
# keybd_event injects key events at a very low level (it's the
# Windows API keyboard device drivers call) so this is a very
# reliable way of simulating user input
win32api.keybd_event(key, 0, (not down) * win32con.KEYEVENTF_KEYUP)
def TypeKey(key, keystroke_time=0):
"""Simulate a keypress of a virtual key.
Args:
key: which key to press
keystroke_time: length of time (in seconds) to "hold down" the key
Note that zero works just fine
Returns:
None
"""
# This just wraps a pair of PressKey calls with an intervening delay
PressKey(True, key)
time.sleep(keystroke_time)
PressKey(False, key)
def TypeString(string_to_type,
use_modifiers=False,
keystroke_time=0,
time_between_keystrokes=0):
"""Simulate typing a string on the keyboard.
Args:
string_to_type: the string to print
use_modifiers: specifies whether the following modifier characters
should be active:
{abc}: type characters with ALT held down
[abc]: type characters with CTRL held down
\ escapes {}[] and treats these values as literal
standard escape sequences are valid even if use_modifiers is false
\p is "pause" for one second, useful when driving menus
\1-\9 is F-key, \0 is F10
TODO(jhaas): support for explicit control of SHIFT, support for
nonprintable keys (F-keys, ESC, arrow keys, etc),
support for explicit control of left vs. right ALT or SHIFT,
support for Windows key
keystroke_time: length of time (in seconds) to "hold down" the key
time_between_keystrokes: length of time (seconds) to pause between keys
Returns:
None
"""
shift_held = win32api.GetAsyncKeyState(win32con.VK_SHIFT ) < 0
ctrl_held = win32api.GetAsyncKeyState(win32con.VK_CONTROL) < 0
alt_held = win32api.GetAsyncKeyState(win32con.VK_MENU ) < 0
next_escaped = False
escape_chars = {
'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v'}
for char in string_to_type:
vk = None
handled = False
# Check to see if this is the start or end of a modified block (that is,
# {abc} for ALT-modified keys or [abc] for CTRL-modified keys
if use_modifiers and not next_escaped:
handled = True
if char == "{" and not alt_held:
alt_held = True
PressKey(True, win32con.VK_MENU)
elif char == "}" and alt_held:
alt_held = False
PressKey(False, win32con.VK_MENU)
elif char == "[" and not ctrl_held:
ctrl_held = True
PressKey(True, win32con.VK_CONTROL)
elif char == "]" and ctrl_held:
ctrl_held = False
PressKey(False, win32con.VK_CONTROL)
else:
handled = False
# If this is an explicitly-escaped character, replace it with the
# appropriate code
if next_escaped and char in escape_chars: char = escape_chars[char]
# If this is \p, pause for one second.
if next_escaped and char == 'p':
time.sleep(1)
next_escaped = False
handled = True
# If this is \(d), press F key
if next_escaped and char.isdigit():
fkey = int(char)
if not fkey: fkey = 10
next_escaped = False
vk = win32con.VK_F1 + fkey - 1
# If this is the backslash, the next character is escaped
if not next_escaped and char == "\\":
next_escaped = True
handled = True
# If we make it here, it's not a special character, or it's an
# escaped special character which should be treated as a literal
if not handled:
next_escaped = False
if not vk: vk = win32api.VkKeyScan(char)
# VkKeyScan() returns the scan code in the low byte. The upper
# byte specifies modifiers necessary to produce the given character
# from the given scan code. The only one we're concerned with at the
# moment is Shift. Determine the shift state and compare it to the
# current state... if it differs, press or release the shift key.
new_shift_held = bool(vk & (1<<8))
if new_shift_held != shift_held:
PressKey(new_shift_held, win32con.VK_SHIFT)
shift_held = new_shift_held
# Type the key with the specified length, then wait the specified delay
TypeKey(vk & 0xFF, keystroke_time)
time.sleep(time_between_keystrokes)
# Release the modifier keys, if held
if shift_held: PressKey(False, win32con.VK_SHIFT)
if ctrl_held: PressKey(False, win32con.VK_CONTROL)
if alt_held: PressKey(False, win32con.VK_MENU)
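# Illustrative calls (assuming some window has keyboard focus), showing
# the modifier and escape syntax documented above:
#
# TypeString("[f]", use_modifiers=True) # CTRL+F
# TypeString("{f}", use_modifiers=True) # ALT+F
# TypeString("\\1", use_modifiers=True) # F1
# TypeString("\\p", use_modifiers=True) # pause for one second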
def main():
# We're being invoked rather than imported. Let's do some tests
# Press Win-R to bring up the Run dialog
PressKey(True, win32con.VK_LWIN)
TypeKey(ord('R'))
PressKey(False, win32con.VK_LWIN)
# Wait a sec to make sure it comes up
time.sleep(1)
# Invoke WordPad through the Run dialog
TypeString("wordpad\n")
# Wait another sec, then start typing
time.sleep(1)
TypeString("This is a test of SiteCompare's Keyboard.py module.\n\n")
TypeString("There should be a blank line above and below this one.\n\n")
TypeString("This line has control characters to make "
"[b]boldface text[b] and [i]italic text[i] and normal text.\n\n",
use_modifiers=True)
TypeString(r"This line should be typed with a visible delay between "
"characters. When it ends, there should be a 3-second pause, "
"then the menu will select File/Exit, then another 3-second "
"pause, then No to exit without saving. Ready?\p\p\p{f}x\p\p\pn",
use_modifiers=True,
keystroke_time=0.05,
time_between_keystrokes=0.05)
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
cabanm/project-euler | Problem 23/problem23.py | 1 | 1277 | # Find the sum of all the positive integers which
# cannot be written as the sum of two abundant numbers.
#
# Facts:
# All integers greater than 28123 can be
# written as the sum of two abundant numbers.
# Abundant number = a number n whose proper divisors sum to more than n.
#
# Find all abundant numbers up to and including 28123
# Add all combinations of these and store if not greater than 28123
# Add all integers <= 28123 not in the list to get the required sum
from myMath import *
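# Note: myMath is an external helper (not shown here); it presumably
# shadows the builtin int with a wrapper whose properDivisors() method
# returns the proper divisors of the number. A minimal stand-in sketch:
#
# def proper_divisors(n):
# return [d for d in range(1, n) if n % d == 0]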
abundants = list()
for n in range(1, 28123 + 1):
if sum(int(n).properDivisors()) > n:
abundants.append(n)
print('stage 1 complete --', 'number of abundants = ', len(abundants))
sums = list()
for i, n in enumerate(abundants):
for m in abundants[i:]:
if n+m <= 28123:
sums.append(n+m)
sums = sorted(set(sums))
print('stage 2 complete --', 'number of sums of abundants = ', len(sums))
sumsIndeces = [0]*(28123 + 1)
for i, n in enumerate(sums):
sumsIndeces.pop(n)
sumsIndeces.insert(n, 1) # places a one at every index that is a sum of two abundants
if i%1000 == 0:
print(i)
print('stage 3 complete')
total = 0
for n in range(len(sumsIndeces)):
if sumsIndeces[n] == 0:
total += n
print('sum = ', total)
| gpl-2.0 |
SteveHNH/ansible | lib/ansible/modules/network/aci/aci_epg_to_domain.py | 18 | 7721 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_epg_to_domain
short_description: Bind EPGs to Domains on Cisco ACI fabrics (fv:RsDomAtt)
description:
- Bind EPGs to Physical and Virtual Domains on Cisco ACI fabrics.
- More information from the internal APIC class
I(fv:RsDomAtt) at U(https://developer.cisco.com/media/mim-ref/MO-fvRsDomAtt.html).
author:
- Swetha Chunduri (@schunduri)
- Dag Wieers (@dagwieers)
- Jacob Mcgill (@jmcgill298)
version_added: '2.4'
requirements:
- ACI Fabric 1.0(3f)+
notes:
- The C(tenant), C(ap), C(epg), and C(domain) used must exist before using this module in your playbook.
The M(aci_tenant), M(aci_ap), M(aci_epg), and M(aci_domain) modules can be used for this.
options:
allow_useg:
description:
- Allows micro-segmentation.
- The APIC defaults new EPG to Domain bindings to use C(encap).
choices: [ encap, useg ]
default: encap
ap:
description:
- Name of an existing application network profile, that will contain the EPGs.
aliases: [ app_profile, app_profile_name ]
deploy_immediacy:
description:
- Determines when the policy is pushed to hardware Policy CAM.
- The APIC defaults new EPG to Domain bindings to C(lazy).
choices: [ immediate, lazy ]
default: lazy
domain:
description:
- Name of the physical or virtual domain being associated with the EPG.
aliases: [ domain_name, domain_profile ]
domain_type:
description:
- Determines if the Domain is physical (phys) or virtual (vmm).
choices: [ phys, vmm ]
aliases: [ type ]
encap:
description:
- The VLAN encapsulation for the EPG when binding a VMM Domain with static encap_mode.
- This acts as the secondary encap when using useg.
choices: [ range from 1 to 4096 ]
encap_mode:
description:
- The encapsulation method to be used.
- The APIC defaults new EPG to Domain bindings to C(auto).
choices: [ auto, vlan, vxlan ]
default: auto
epg:
description:
- Name of the end point group.
aliases: [ epg_name ]
netflow:
description:
- Determines if netflow should be enabled.
- The APIC defaults new EPG to Domain bindings to C(disabled).
choices: [ disabled, enabled ]
default: disabled
primary_encap:
description:
- Determines the primary VLAN ID when using useg.
choices: [ range from 1 to 4096 ]
resolution_immediacy:
description:
- Determines when the policies should be resolved and available.
- The APIC defaults new EPG to Domain bindings to C(lazy).
choices: [ immediate, lazy, pre-provision ]
default: lazy
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- Name of an existing tenant.
aliases: [ tenant_name ]
vm_provider:
description:
- The VM platform for VMM Domains.
choices: [ microsoft, openstack, vmware ]
extends_documentation_fragment: aci
'''
EXAMPLES = r''' # '''
RETURN = r''' # '''
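# The EXAMPLES string above is left empty in this version of the module.
# An illustrative task (hypothetical names; connection arguments from the
# aci documentation fragment omitted) might look like:
#
# - aci_epg_to_domain:
# tenant: my_tenant
# ap: my_app_profile
# epg: my_epg
# domain: my_vmm_domain
# domain_type: vmm
# vm_provider: vmware
# state: present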
from ansible.module_utils.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
VM_PROVIDER_MAPPING = dict(microsoft="uni/vmmp-Microsoft/dom-", openstack="uni/vmmp-OpenStack/dom-", vmware="uni/vmmp-VMware/dom-")
def main():
argument_spec = aci_argument_spec
argument_spec.update(
allow_useg=dict(type='str', choices=['encap', 'useg']),
ap=dict(type='str', aliases=['app_profile', 'app_profile_name']),
deploy_immediacy=dict(type='str', choices=['immediate', 'lazy']),
domain=dict(type='str', aliases=['domain_name', 'domain_profile']),
domain_type=dict(type='str', choices=['phys', 'vmm'], aliases=['type']),
encap=dict(type='int'),
encap_mode=dict(type='str', choices=['auto', 'vlan', 'vxlan']),
epg=dict(type='str', aliases=['name', 'epg_name']),
netflow=dict(type='str', choices=['disabled', 'enabled']),
primary_encap=dict(type='int'),
resolution_immediacy=dict(type='str', choices=['immediate', 'lazy', 'pre-provision']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
tenant=dict(type='str', aliases=['tenant_name']),
vm_provider=dict(type='str', choices=['microsoft', 'openstack', 'vmware']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['domain_type', 'vmm', ['vm_provider']],
['state', 'absent', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],
['state', 'present', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],
],
)
allow_useg = module.params['allow_useg']
deploy_immediacy = module.params['deploy_immediacy']
domain = module.params['domain']
domain_type = module.params['domain_type']
vm_provider = module.params['vm_provider']
encap = module.params['encap']
if encap is not None:
if encap in range(1, 4097):
encap = 'vlan-{}'.format(encap)
else:
module.fail_json(msg='Valid VLAN assignments are from 1 to 4096')
encap_mode = module.params['encap_mode']
netflow = module.params['netflow']
primary_encap = module.params['primary_encap']
if primary_encap is not None:
if primary_encap in range(1, 4097):
primary_encap = 'vlan-{}'.format(primary_encap)
else:
module.fail_json(msg='Valid VLAN assignments are from 1 to 4096')
resolution_immediacy = module.params['resolution_immediacy']
state = module.params['state']
if domain_type == 'phys' and vm_provider is not None:
module.fail_json(msg="Domain type 'phys' cannot have a 'vm_provider'")
# Compile the full domain and add it to module.params for URL building
if domain_type == 'vmm':
module.params["epg_domain"] = VM_PROVIDER_MAPPING[vm_provider] + domain
elif domain_type is not None:
module.params["epg_domain"] = 'uni/phys-' + domain
aci = ACIModule(module)
aci.construct_url(root_class="tenant", subclass_1="ap", subclass_2="epg", subclass_3="epg_domain")
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class='fvRsDomAtt',
class_config=dict(
classPref=allow_useg,
encap=encap,
encapMode=encap_mode,
instrImedcy=deploy_immediacy,
netflowPref=netflow,
primaryEncap=primary_encap,
resImedcy=resolution_immediacy,
),
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class='fvRsDomAtt')
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
# Pop the epg_domain key that was added for URL building
module.params.pop("epg_domain")
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
| gpl-3.0 |
MihaiMoldovanu/ansible | lib/ansible/modules/network/aos/aos_blueprint_param.py | 19 | 12925 | #!/usr/bin/python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_blueprint_param
author: jeremy@apstra.com (@jeremyschulman)
version_added: "2.3"
short_description: Manage AOS blueprint parameter values
description:
- The Apstra AOS Blueprint Parameter module lets you manage your Blueprint Parameters easily.
You can create, access, define, and delete Blueprint Parameters. The list of
supported Parameters differs per Blueprint. The option I(get_param_list)
can help you access the list of supported Parameters for your blueprint.
This module is idempotent and supports I(check) mode. It uses the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
blueprint:
description:
- Blueprint Name or Id as defined in AOS.
required: True
name:
description:
- Name of blueprint parameter, as defined by AOS design template. You can
use the option I(get_param_list) to get the complete list of supported
parameters for your blueprint.
value:
description:
- Blueprint parameter value. This value may be transformed by using the
I(param_map) field; used when the blueprint parameter requires
an AOS unique ID value.
get_param_list:
description:
- Get the complete list of supported parameters for this blueprint and the
description of those parameters.
state:
description:
- Indicate what is the expected state of the Blueprint Parameter (present or not).
default: present
choices: ['present', 'absent']
param_map:
description:
- Defines the aos-pyez collection that is used to map the user-defined
item name into the AOS unique ID value. For example, if the caller
provides an IP address pool I(param_value) called "Server-IpAddrs", then
the aos-pyez collection is 'IpPools'. Some I(param_map) entries are already
defined by default, like I(logical_device_maps).
'''
EXAMPLES = '''
- name: Add Logical Device Maps information in a Blueprint
aos_blueprint_param:
session: "{{ aos_session }}"
blueprint: "my-blueprint-l2"
name: "logical_device_maps"
value:
spine_1: CumulusVX-Spine-Switch
spine_2: CumulusVX-Spine-Switch
leaf_1: CumulusVX-Leaf-Switch
leaf_2: CumulusVX-Leaf-Switch
leaf_3: CumulusVX-Leaf-Switch
state: present
- name: Access Logical Device Maps information from a Blueprint
aos_blueprint_param:
session: "{{ aos_session }}"
blueprint: "my-blueprint-l2"
name: "logical_device_maps"
state: present
- name: Reset Logical Device Maps information in a Blueprint
aos_blueprint_param:
session: "{{ aos_session }}"
blueprint: "my-blueprint-l2"
name: "logical_device_maps"
state: absent
- name: Get list of all supported Params for a blueprint
aos_blueprint_param:
session: "{{ aos_session }}"
blueprint: "my-blueprint-l2"
get_param_list: yes
register: params_list
- debug: var=params_list
- name: Add Resource Pools information in a Blueprint, by providing a param_map
aos_blueprint_param:
session: "{{ aos_session }}"
blueprint: "my-blueprint-l2"
name: "resource_pools"
value:
leaf_loopback_ips: ['Switches-IpAddrs']
spine_loopback_ips: ['Switches-IpAddrs']
spine_leaf_link_ips: ['Switches-IpAddrs']
spine_asns: ['Private-ASN-pool']
leaf_asns: ['Private-ASN-pool']
virtual_network_svi_subnets: ['Servers-IpAddrs']
param_map:
leaf_loopback_ips: IpPools
spine_loopback_ips: IpPools
spine_leaf_link_ips: IpPools
spine_asns: AsnPools
leaf_asns: AsnPools
virtual_network_svi_subnets: IpPools
state: present
'''
RETURN = '''
blueprint:
description: Name of the Blueprint
returned: always
type: str
sample: Server-IpAddrs
name:
description: Name of the Blueprint Parameter
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Value of the Blueprint Parameter as returned by the AOS Server
returned: always
type: dict
sample: {'...'}
params_list:
description: Value of the Blueprint Parameter as returned by the AOS Server
returned: when I(get_param_list) is defined.
type: dict
sample: {'...'}
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aos import get_aos_session, find_collection_item, check_aos_version
from ansible.module_utils.pycompat24 import get_exception
try:
import yaml
HAS_YAML = True
except ImportError:
HAS_YAML = False
try:
from apstra.aosom.collection_mapper import CollectionMapper, MultiCollectionMapper
HAS_AOS_PYEZ_MAPPER = True
except ImportError:
HAS_AOS_PYEZ_MAPPER = False
param_map_list = dict(
logical_device_maps='LogicalDeviceMaps',
resource_pools=dict(
spine_asns="AsnPools",
leaf_asns="AsnPools",
virtual_network_svi_subnets="IpPools",
spine_loopback_ips="IpPools",
leaf_loopback_ips="IpPools",
spine_leaf_link_ips="IpPools"
)
)
def get_collection_from_param_map(module, aos):
param_map = None
# Check if param_map is provided
if module.params['param_map'] is not None:
param_map_json = module.params['param_map']
if not HAS_YAML:
module.fail_json(msg="Python library Yaml is mandatory to use 'param_map'")
try:
param_map = yaml.safe_load(param_map_json)
except:
module.fail_json(msg="Unable to parse param_map information")
else:
# search in the param_map_list to find the right one
for key, value in param_map_list.items():
if module.params['name'] == key:
param_map = value
# If param_map is defined, search for a Collection that matches
if param_map:
if isinstance(param_map, dict):
return MultiCollectionMapper(aos, param_map)
else:
return CollectionMapper(getattr(aos, param_map))
return None
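# Illustrative: for name 'resource_pools' with no explicit param_map, the
# defaults above yield a MultiCollectionMapper, so a value such as
# {'leaf_asns': ['Private-ASN-pool']} is later resolved to AOS unique IDs
# via from_label() in blueprint_param() below.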
def blueprint_param_present(module, aos, blueprint, param, param_value):
margs = module.params
# If param_value is not defined, just return the object
if not param_value:
module.exit_json(changed=False,
blueprint=blueprint.name,
name=param.name,
value=param.value)
# Check if current value is the same or not
elif param.value != param_value:
if not module.check_mode:
try:
param.value = param_value
except:
exc = get_exception()
module.fail_json(msg='unable to write to param %s: %r' %
(margs['name'], exc))
module.exit_json(changed=True,
blueprint=blueprint.name,
name=param.name,
value=param.value)
# If value are already the same, nothing needs to be changed
else:
module.exit_json(changed=False,
blueprint=blueprint.name,
name=param.name,
value=param.value)
def blueprint_param_absent(module, aos, blueprint, param, param_value):
margs = module.params
# Check if current value is the same or not
if param.value != dict():
if not module.check_mode:
try:
param.value = {}
except:
exc = get_exception()
module.fail_json(msg='Unable to write to param %s: %r' % (margs['name'], exc))
module.exit_json(changed=True,
blueprint=blueprint.name,
name=param.name,
value=param.value)
else:
module.exit_json(changed=False,
blueprint=blueprint.name,
name=param.name,
value=param.value)
def blueprint_param(module):
margs = module.params
# --------------------------------------------------------------------
# Get AOS session object based on Session Info
# --------------------------------------------------------------------
try:
aos = get_aos_session(module, margs['session'])
except:
module.fail_json(msg="Unable to login to the AOS server")
# --------------------------------------------------------------------
# Get the blueprint Object based on either name or ID
# --------------------------------------------------------------------
try:
blueprint = find_collection_item(aos.Blueprints,
item_name=margs['blueprint'],
item_id=margs['blueprint'])
except:
module.fail_json(msg="Unable to find the Blueprint based on name or ID, something went wrong")
if blueprint.exists is False:
module.fail_json(msg='Blueprint %s does not exist.\n'
'Known blueprints are [%s]' %
(margs['blueprint'], ','.join(aos.Blueprints.names)))
# --------------------------------------------------------------------
# If get_param_list is defined, build the list of supported parameters
# and extract info for each
# --------------------------------------------------------------------
if margs['get_param_list']:
params_list = {}
for param in blueprint.params.names:
params_list[param] = blueprint.params[param].info
module.exit_json(changed=False,
blueprint=blueprint.name,
params_list=params_list)
# --------------------------------------------------------------------
# Check Param name, return an error if not supported by this blueprint
# --------------------------------------------------------------------
if margs['name'] in blueprint.params.names:
param = blueprint.params[margs['name']]
else:
module.fail_json(msg='unable to access param %s' % margs['name'])
# --------------------------------------------------------------------
# Check if param_value needs to be converted to an object
# based on param_map
# --------------------------------------------------------------------
param_value = margs['value']
param_collection = get_collection_from_param_map(module, aos)
# If a collection is find and param_value is defined,
# convert param_value into an object
if param_collection and param_value:
param_value = param_collection.from_label(param_value)
# --------------------------------------------------------------------
# Proceed based on State value
# --------------------------------------------------------------------
if margs['state'] == 'absent':
blueprint_param_absent(module, aos, blueprint, param, param_value)
elif margs['state'] == 'present':
blueprint_param_present(module, aos, blueprint, param, param_value)
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
blueprint=dict(required=True),
get_param_list=dict(required=False, type="bool"),
name=dict(required=False),
value=dict(required=False, type="dict"),
param_map=dict(required=False),
state=dict( choices=['present', 'absent'], default='present')
),
supports_check_mode=True
)
# Check if aos-pyez is present and match the minimum version
check_aos_version(module, '0.6.0')
# aos-pyez availability has been verify already by "check_aos_version"
# but this module requires few more object
if not HAS_AOS_PYEZ_MAPPER:
module.fail_json(msg='unable to load the Mapper library from aos-pyez')
blueprint_param(module)
if __name__ == '__main__':
main()
| gpl-3.0 |
mapbox/atom-shell | script/create-dist.py | 1 | 7365 | #!/usr/bin/env python
import argparse
import os
import re
import shutil
import subprocess
import sys
import tarfile
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, NODE_VERSION, \
TARGET_PLATFORM, DIST_ARCH
from lib.util import scoped_cwd, rm_rf, get_atom_shell_version, make_zip, \
safe_mkdir, execute
ATOM_SHELL_VERSION = get_atom_shell_version()
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'Release')
NODE_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'node')
DIST_HEADERS_NAME = 'node-{0}'.format(NODE_VERSION)
DIST_HEADERS_DIR = os.path.join(DIST_DIR, DIST_HEADERS_NAME)
SYMBOL_NAME = {
'darwin': 'libchromiumcontent.dylib.dSYM',
'linux': 'libchromiumcontent.so.dbg',
'win32': 'chromiumcontent.dll.pdb',
}[TARGET_PLATFORM]
TARGET_BINARIES = {
'darwin': [
],
'win32': [
'atom.exe',
'chromiumcontent.dll',
'content_shell.pak',
'd3dcompiler_43.dll',
'ffmpegsumo.dll',
'icudtl.dat',
'libEGL.dll',
'libGLESv2.dll',
'msvcp120.dll',
'msvcr120.dll',
'ui_resources_200_percent.pak',
'vccorlib120.dll',
'webkit_resources_200_percent.pak',
'xinput1_3.dll',
],
'linux': [
'atom',
'content_shell.pak',
'icudtl.dat',
'libchromiumcontent.so',
'libffmpegsumo.so',
],
}
TARGET_DIRECTORIES = {
'darwin': [
'Atom.app',
],
'win32': [
'resources',
'locales',
],
'linux': [
'resources',
'locales',
],
}
SYSTEM_LIBRARIES = [
'libudev.so',
'libgcrypt.so',
'libnotify.so',
]
HEADERS_SUFFIX = [
'.h',
'.gypi',
]
HEADERS_DIRS = [
'src',
'deps/http_parser',
'deps/zlib',
'deps/uv',
'deps/npm',
'deps/mdb_v8',
]
HEADERS_FILES = [
'common.gypi',
'config.gypi',
]
def main():
rm_rf(DIST_DIR)
os.makedirs(DIST_DIR)
args = parse_args()
force_build()
download_libchromiumcontent_symbols(args.url)
create_symbols()
copy_binaries()
copy_headers()
copy_license()
if TARGET_PLATFORM == 'linux':
copy_system_libraries()
create_version()
create_dist_zip()
create_symbols_zip()
create_header_tarball()
def parse_args():
parser = argparse.ArgumentParser(description='Create distributions')
parser.add_argument('-u', '--url',
help='The base URL from which to download '
'libchromiumcontent (i.e., the URL you passed to '
'libchromiumcontent\'s script/upload script',
default=BASE_URL,
required=False)
return parser.parse_args()
def force_build():
build = os.path.join(SOURCE_ROOT, 'script', 'build.py')
execute([sys.executable, build, '-c', 'Release'])
def copy_binaries():
for binary in TARGET_BINARIES[TARGET_PLATFORM]:
shutil.copy2(os.path.join(OUT_DIR, binary), DIST_DIR)
for directory in TARGET_DIRECTORIES[TARGET_PLATFORM]:
shutil.copytree(os.path.join(OUT_DIR, directory),
os.path.join(DIST_DIR, directory),
symlinks=True)
def copy_headers():
os.mkdir(DIST_HEADERS_DIR)
# Copy standard node headers from the node repository.
for include_path in HEADERS_DIRS:
abs_path = os.path.join(NODE_DIR, include_path)
for dirpath, _, filenames in os.walk(abs_path):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension not in HEADERS_SUFFIX:
continue
copy_source_file(os.path.join(dirpath, filename))
for other_file in HEADERS_FILES:
copy_source_file(source = os.path.join(NODE_DIR, other_file))
# Copy V8 headers from chromium's repository.
src = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor', 'download',
'libchromiumcontent', 'src')
for dirpath, _, filenames in os.walk(os.path.join(src, 'v8')):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension not in HEADERS_SUFFIX:
continue
copy_source_file(source=os.path.join(dirpath, filename),
start=src,
destination=os.path.join(DIST_HEADERS_DIR, 'deps'))
def copy_license():
shutil.copy2(os.path.join(SOURCE_ROOT, 'LICENSE'), DIST_DIR)
def copy_system_libraries():
ldd = execute(['ldd', os.path.join(OUT_DIR, 'atom')])
lib_re = re.compile(r'\t(.*) => (.+) \(.*\)$')
for line in ldd.splitlines():
m = lib_re.match(line)
if not m:
continue
for i, library in enumerate(SYSTEM_LIBRARIES):
real_library = m.group(1)
if real_library.startswith(library):
shutil.copyfile(m.group(2), os.path.join(DIST_DIR, real_library))
SYSTEM_LIBRARIES[i] = real_library
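# Illustrative: a typical ldd output line matched by lib_re above is
# "\tlibudev.so.0 => /lib/x86_64-linux-gnu/libudev.so.0 (0x00007f...)";
# group(1) is the library name used as the destination filename and
# group(2) is the resolved source path that gets copied into DIST_DIR.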
def create_version():
version_path = os.path.join(SOURCE_ROOT, 'dist', 'version')
with open(version_path, 'w') as version_file:
version_file.write(ATOM_SHELL_VERSION)
def download_libchromiumcontent_symbols(url):
brightray_dir = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor')
target_dir = os.path.join(brightray_dir, 'download', 'libchromiumcontent')
symbols_path = os.path.join(target_dir, 'Release', SYMBOL_NAME)
if os.path.exists(symbols_path):
return
download = os.path.join(brightray_dir, 'libchromiumcontent', 'script',
'download')
subprocess.check_call([sys.executable, download, '-f', '-s', '-c',
LIBCHROMIUMCONTENT_COMMIT, url, target_dir])
def create_symbols():
build = os.path.join(SOURCE_ROOT, 'script', 'build.py')
subprocess.check_output([sys.executable, build, '-c', 'Release',
'-t', 'atom_dump_symbols'])
directory = 'Atom-Shell.breakpad.syms'
shutil.copytree(os.path.join(OUT_DIR, directory),
os.path.join(DIST_DIR, directory),
symlinks=True)
def create_dist_zip():
dist_name = 'atom-shell-{0}-{1}-{2}.zip'.format(ATOM_SHELL_VERSION,
TARGET_PLATFORM, DIST_ARCH)
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
with scoped_cwd(DIST_DIR):
files = TARGET_BINARIES[TARGET_PLATFORM] + ['LICENSE', 'version']
if TARGET_PLATFORM == 'linux':
files += SYSTEM_LIBRARIES
dirs = TARGET_DIRECTORIES[TARGET_PLATFORM]
make_zip(zip_file, files, dirs)
def create_symbols_zip():
dist_name = 'atom-shell-{0}-{1}-{2}-symbols.zip'.format(ATOM_SHELL_VERSION,
TARGET_PLATFORM,
DIST_ARCH)
zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name)
with scoped_cwd(DIST_DIR):
files = ['LICENSE', 'version']
dirs = ['Atom-Shell.breakpad.syms']
make_zip(zip_file, files, dirs)
def create_header_tarball():
with scoped_cwd(DIST_DIR):
tarball = tarfile.open(name=DIST_HEADERS_DIR + '.tar.gz', mode='w:gz')
tarball.add(DIST_HEADERS_NAME)
tarball.close()
def copy_source_file(source, start=NODE_DIR, destination=DIST_HEADERS_DIR):
relative = os.path.relpath(source, start=start)
final_destination = os.path.join(destination, relative)
safe_mkdir(os.path.dirname(final_destination))
shutil.copy2(source, final_destination)
if __name__ == '__main__':
sys.exit(main())
| mit |
llhe/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops_test.py | 6 | 203166 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
NAN = float('nan')
metrics = metrics_lib
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(
values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_sparse_value(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
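# Worked example (illustrative): labels [[0, 1, 0], [1, 0, 1]] yield
# indices [[0, 0], [1, 0], [1, 1]], values [1, 0, 2] (the class ids of
# the 1s), and dense shape [2, 3].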
def _binary_2d_label_to_sparse(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_2d_label_to_sparse_value(labels))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_3d_label_to_sparse_value(labels))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_local_variables(test_case, expected):
test_case.assertEqual(
set(expected), set(v.name for v in variables.local_variables()))
class StreamingMeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean(array_ops.ones([4, 3]))
_assert_local_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
class StreamingMeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_tensor(array_ops.ones([4, 3]))
_assert_local_variables(self, ('mean/total_tensor:0',
'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], sess.run(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class StreamingAccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_local_variables(self, ('my_accuracy/count:0',
'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels, weights)
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=2)
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
def testMultipleUpdates(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
1) # shape 3, 1
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
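# Worked numbers (illustrative): only the first prediction matches its
# label, so the weighted accuracy is 100 / (100 + 1 + 1) ~= 0.98.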
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights_placeholder)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testMultipleUpdatesWithWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
class StreamingTruePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives((0, 1, 0), (0, 1, 1))
_assert_local_variables(self, ('true_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tp, tp_update_op = metrics.streaming_true_positives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(1, tp_update_op.eval())
self.assertEqual(1, tp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels, weights=37.0)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(37.0, tp_update_op.eval())
self.assertEqual(37.0, tp.eval())


class StreamingFalseNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives((0, 1, 0),
(0, 1, 1))
_assert_local_variables(self, ('false_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fn, fn_update_op = metrics.streaming_false_negatives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(2, fn_update_op.eval())
self.assertEqual(2, fn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels, weights=((3.0,), (5.0,), (7.0,)))
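      # The false negatives are at (0, 1) and (1, 0); with per-row weights
      # 3.0 and 5.0, the weighted count is 3.0 + 5.0 = 8.0.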
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(8.0, fn_update_op.eval())
self.assertEqual(8.0, fn.eval())


class StreamingFalsePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives((0, 1, 0),
(0, 1, 1))
_assert_local_variables(self, ('false_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fp, fp_update_op = metrics.streaming_false_positives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(4, fp_update_op.eval())
self.assertEqual(4, fp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
fp, fp_update_op = metrics.streaming_false_positives(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)))
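      # The false positives are at (0, 0), (1, 1), (1, 2) and (1, 3), so
      # the weighted count is 1.0 + 11.0 + 13.0 + 17.0 = 42.0.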
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(42.0, fp_update_op.eval())
self.assertEqual(42.0, fp.eval())


class StreamingTrueNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives((0, 1, 0),
(0, 1, 1))
_assert_local_variables(self, ('true_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tn, tn_update_op = metrics.streaming_true_negatives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(5, tn_update_op.eval())
self.assertEqual(5, tn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))
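      # The (1, 4) weight row broadcasts across all three rows. The true
      # negatives are (0, 3) with weight 5.0 and all of row 2 with weights
      # 0.0 + 2.0 + 3.0 + 5.0 = 10.0, for a total of 15.0.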
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(15.0, tn_update_op.eval())
self.assertEqual(15.0, tn.eval())


class StreamingTruePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_local_variables(self, ('true_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp.eval())
self.assertAllEqual((3, 1, 0), tp_update_op.eval())
self.assertAllEqual((3, 1, 0), tp.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, weights=37.0, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())


class StreamingFalseNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives_at_thresholds(
        (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_local_variables(self, ('false_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn.eval())
self.assertAllEqual((0, 2, 3), fn_update_op.eval())
self.assertAllEqual((0, 2, 3), fn.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions,
labels,
weights=((3.0,), (5.0,), (7.0,)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())


class StreamingFalsePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_local_variables(self, ('false_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp.eval())
self.assertAllEqual((7, 4, 2), fp_update_op.eval())
self.assertAllEqual((7, 4, 2), fp.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())


class StreamingTrueNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_local_variables(self, ('true_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn.eval())
self.assertAllEqual((2, 5, 7), tn_update_op.eval())
self.assertAllEqual((2, 5, 7), tn.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions,
labels,
weights=((0.0, 2.0, 3.0, 5.0),),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())


class StreamingPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())


class StreamingRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('recall/false_negatives/count:0',
'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
      weighted_tp = 2.0 + 5.0
      weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
      weighted_tp = 3.0 + 1.0
      weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())


class StreamingCurvePointsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metric_ops.streaming_curve_points(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(
self,
('curve_points/true_positives:0', 'curve_points/false_negatives:0',
'curve_points/false_positives:0', 'curve_points/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
points, _ = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [points])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def _testValueTensorIsIdempotent(self, curve):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(high=2, size=(10, 3)), dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels, predictions=predictions, curve=curve)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
initial_points = points.eval()
sess.run(update_op)
self.assertAllClose(initial_points, points.eval())
def testValueTensorIsIdempotentROC(self):
self._testValueTensorIsIdempotent(curve='ROC')
def testValueTensorIsIdempotentPR(self):
self._testValueTensorIsIdempotent(curve='PR')
def _testCase(self, labels, predictions, curve, expected_points):
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels=labels_tensor,
predictions=predictions_tensor,
num_thresholds=3,
curve=curve)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAllClose(expected_points, points.eval())
def testEdgeCasesROC(self):
self._testCase([[1]], [[1]], 'ROC', [[0, 1], [0, 1], [0, 0]])
self._testCase([[0]], [[0]], 'ROC', [[1, 1], [0, 1], [0, 1]])
self._testCase([[0]], [[1]], 'ROC', [[1, 1], [1, 1], [0, 1]])
self._testCase([[1]], [[0]], 'ROC', [[0, 1], [0, 0], [0, 0]])
def testManyValuesROC(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'ROC',
[[1.0, 1.0], [0.0, 0.75], [0.0, 0.0]])
def testEdgeCasesPR(self):
self._testCase([[1]], [[1]], 'PR', [[1, 1], [1, 1], [0, 1]])
self._testCase([[0]], [[0]], 'PR', [[1, 0], [1, 1], [1, 1]])
self._testCase([[0]], [[1]], 'PR', [[1, 0], [1, 0], [1, 1]])
self._testCase([[1]], [[0]], 'PR', [[1, 1], [0, 1], [0, 1]])
def testManyValuesPR(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'PR',
[[1.0, 4.0 / 6.0], [0.75, 1.0], [0.0, 1.0]])


class StreamingAUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_auc(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_auc(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.test_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def np_auc(self, predictions, labels, weights):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
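    # The scores are weakly informative: negatives center at 0.4 and
    # positives at 0.6 with sigma-0.2 Gaussian noise, clipped to [0, 1], so
    # the true AUC lies strictly between 0.5 and 1.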
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
for weights in (None, np.ones(num_samples), np.random.exponential(
scale=1.0, size=num_samples)):
expected_auc = self.np_auc(predictions, labels, weights)
with self.test_session() as sess:
enqueue_ops = [[] for i in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (_enqueue_as_batches(weights, enqueue_ops) if
weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.streaming_auc(
tf_predictions,
tf_labels,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
        # Since this is only approximate, we can't expect a six-digit match,
        # although with a higher number of samples/thresholds the accuracy
        # should improve.
self.assertAlmostEqual(expected_auc, auc.eval(), 2)


class StreamingSpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_local_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())


class StreamingSensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_local_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, specificity.eval())
def testSomeCorrectLowSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, weights=weights, specificity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.675, sess.run(update_op))
self.assertAlmostEqual(0.675, specificity.eval())


# TODO(nsilberman): Break this up into two sets of tests.
class StreamingPrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_local_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates, then verify idempotency.
sess.run([prec_op, rec_op])
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
sess.run([prec_op, rec_op])
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.streaming_precision_at_thresholds(tf_predictions,
tf_labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(tf_predictions,
tf_labels,
thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run([prec_op, rec_op])
      # Since this is only approximate, we can't expect a six-digit match,
      # although with a higher number of samples/thresholds the accuracy
      # should improve.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)


# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.
# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.
# Until then, this test validates that both ops yield the same results.
class StreamingRecallAtKTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
self._batch_size = 4
self._num_classes = 3
self._np_predictions = np.matrix(('0.1 0.2 0.7;'
'0.6 0.2 0.2;'
'0.0 0.9 0.1;'
'0.2 0.0 0.8'))
self._np_labels = [0, 0, 0, 0]
def testVars(self):
metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1)
_assert_local_variables(self, ('recall_at_1/count:0',
'recall_at_1/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testSingleUpdateKIs1(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=1)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.25, sess.run(update_op))
self.assertEqual(0.25, recall.eval())
self.assertEqual(0.25, sess.run(sp_update_op))
self.assertEqual(0.25, sp_recall.eval())
def testSingleUpdateKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=2)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, recall.eval())
self.assertEqual(0.5, sess.run(sp_update_op))
self.assertEqual(0.5, sp_recall.eval())
def testSingleUpdateKIs3(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=3)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=3)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
def testSingleUpdateSomeMissingKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
weights = constant_op.constant(
[0, 1, 0, 1], shape=(self._batch_size,), dtype=dtypes_lib.float32)
recall, update_op = metrics.streaming_recall_at_k(
predictions, labels, k=2, weights=weights)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions,
array_ops.reshape(labels, (self._batch_size, 1)),
k=2,
weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())


class StreamingSparsePrecisionTest(test.TestCase):
def _test_streaming_sparse_precision_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
labels=labels,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_k(self,
predictions,
labels,
k,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_k(
predictions, labels, k, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_top_k(
top_k_predictions, labels, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def test_top_k_rank_invalid(self):
with self.test_session():
# top_k_predictions has rank < 2.
top_k_predictions = [9, 4, 6, 2, 0]
sp_labels = sparse_tensor.SparseTensorValue(
indices=np.array([[0,], [1,], [2,]], np.int64),
values=np.array([2, 7, 8], np.int64),
dense_shape=np.array([10,], np.int64))
with self.assertRaises(ValueError):
precision, _ = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int64),
labels=sp_labels)
variables.variables_initializer(variables.local_variables()).run()
precision.eval()
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
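    # Sanity check for the values above: in top-k order (5, 3, 6, 0), hits
    # against labels_ex1 occur at positions 2 and 4, and
    # AP@k = sum(precision@i at each hit position i <= k) / k.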
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
predictions_top_k_ex2 = (1, 3, 0, 6, 5)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=avg_precision_ex2[i])
    # With both examples, we expect both precision and average precision to be
    # the average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [(ex1 + ex2) / 2
for ex1, ex2 in zip(precision_ex1, precision_ex2)]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])
self._test_streaming_sparse_precision_at_top_k(
predictions_top_k, labels, expected=streaming_precision[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
self._test_streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels, expected=streaming_average_precision[i])
    # With weighted examples, we expect streaming average precision to be the
    # weighted average of the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k], predictions_top_k_ex2[:k]),
labels,
expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
def test_average_precision_at_top_k_static_shape_check(self):
predictions_top_k = array_ops.placeholder(shape=(2, None),
dtype=dtypes_lib.int64)
labels = np.array(((1,), (2,)), dtype=np.int64)
# Fails due to non-static predictions_idx shape.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
predictions_top_k = (2, 1)
# Fails since rank of predictions_idx is less than one.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
predictions_top_k = ((2,), (1,))
# Valid static shape.
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
for class_id in (-1, 0, 1, 2, 4):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2, class_id=3)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=7.0 / 20)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
expected_precision = 0.5
with self.test_session():
_, precision = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_precision, precision.eval())
class StreamingSparseRecallTest(test.TestCase):
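  # The tests below exercise these semantics: recall@k is the fraction of
  # true labels that appear in the top-k predictions; a class with no labels
  # is NaN, and out-of-range labels still count in the denominator.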
def _test_streaming_sparse_recall_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_sparse_recall_at_top_k(self,
labels,
top_k_predictions,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metric_ops.sparse_recall_at_top_k(
labels=labels,
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in (sparse_labels, dense_labels):
for class_id in (-1, 0, 1, 4):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_one_label_at_k1_no_predictions(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 0 predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2)
def test_one_label_at_k1_weighted(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=3, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.6 / 0.9, weights=(0.6, 0.3))
def test_three_labels_at_k5_nan(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 8: 1 label, no predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=8)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=8)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=3.0 / 6)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 6)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=2.0 / 2,
class_id=2)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=2.0 / 2,
class_id=2)
    # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=1.0 / 1,
class_id=5)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=1.0 / 1,
class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=0.0 / 1,
class_id=7)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=0.0 / 1,
class_id=7)
# All classes: 8 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=3.0 / 8)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
    dense_labels = np.array(
        [[[2, 7, 8], [1, 2, 5]],
         [[1, 2, 5], [2, 7, 8]]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
    dense_labels = np.array(
        [[[2, 7, 8], [1, 2, 5]],
         [[1, 2, 5], [2, 7, 8]]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 labels, all correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=4.0 / 4, class_id=2)
# Class 5: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=5)
    # Class 7: 2 labels, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, class_id=7)
# All classes: 12 labels, 7 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=7.0 / 12)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=7.0 / 12)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4],
[0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 1, 0],
[0, 0, 0, 1]]
expected_recall = 0.5
with self.test_session():
_, recall = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_recall, recall.eval())
class StreamingMeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_absolute_error/count:0',
'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_absolute_error(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
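    # Only the weighted elements contribute: |4 - 3| = 1 and |8 - 3| = 5, so
    # the weighted mean absolute error is (1 + 5) / 2 = 3.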
error, update_op = metrics.streaming_mean_absolute_error(predictions,
labels, weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())
class StreamingMeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_relative_error/count:0',
'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.streaming_mean_relative_error(predictions,
labels, normalizer)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=array_ops.zeros_like(labels))
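    # Where the normalizer is 0, the relative-error term is taken to be 0
    # (rather than inf or NaN), so the streaming mean is 0.0.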
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())
class StreamingMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_squared_error/count:0',
'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
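    # Weighted squared errors: (4 - 3)^2 = 1 and (8 - 3)^2 = 25, so the
    # weighted mean squared error is (1 + 25) / 2 = 13.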
error, update_op = metrics.streaming_mean_squared_error(predictions, labels,
weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
error, update_op = metrics.streaming_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
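      # Squared errors: batch 1 contributes 81 + 25 + 16 = 122, batch 2
      # contributes 36 + 1 + 49 = 86, for a cumulative mean of 208 / 6.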
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMetricsComputedConcurrently(self):
with self.test_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
      # Create the queue that populates another set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
      mse0, update_op0 = metrics.streaming_mean_squared_error(
          predictions0, labels0, name='mse0')
      mse1, update_op1 = metrics.streaming_mean_squared_error(
          predictions1, labels1, name='mse1')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
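      # mse0 accumulates (122 + 86) / 6 = 208 / 6; mse1 accumulates
      # (45 + 34) / 6 = 79 / 6.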
mse0, mse1 = sess.run([mse0, mse1])
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
mae, ma_update_op = metrics.streaming_mean_absolute_error(predictions,
labels)
mse, ms_update_op = metrics.streaming_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class StreamingRootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('root_mean_squared_error/count:0',
'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
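      # Same weighted squared errors as the MSE test, (1 + 25) / 2 = 13, so
      # the expected RMSE is sqrt(13).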
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels,
weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
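# Expands each sample by its (integer) weight so that plain np.cov on the
# expanded arrays reproduces the weighted statistics streaming_covariance is
# expected to compute in the tests below.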
def _reweight(predictions, labels, weights):
return (np.concatenate([[p] * int(w) for p, w in zip(predictions, weights)]),
np.concatenate([[l] * int(w) for l, w in zip(labels, weights)]))
class StreamingCovarianceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
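  # streaming_covariance keeps a running count, running means, and a
  # co-moment accumulator (see the local variables asserted below), which
  # suggests an online, Welford-style update.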
def testVars(self):
metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_local_variables(self, (
'covariance/comoment:0',
'covariance/count:0',
'covariance/mean_label:0',
'covariance/mean_prediction:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
cov, _ = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [cov])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
cov, update_op = metrics.streaming_covariance(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_cov = cov.eval()
for _ in range(10):
self.assertEqual(initial_cov, cov.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)
self.assertAlmostEqual(expected_cov, cov.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, update_op.eval())
self.assertAlmostEqual(expected_cov, cov.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 7], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 3, 1], shape=(1, 4), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(
predictions, labels, weights=weights)
p, l = _reweight([2, 4, 6, 8], [1, 3, 2, 7], [0, 1, 3, 1])
expected_cov = np.cov(p, l)[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op))
self.assertAlmostEqual(expected_cov, cov.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = 0.
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertAlmostEqual(
prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
prev_expected_cov = expected_cov
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = 0.
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertAlmostEqual(
prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
p, l = _reweight(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)], weights[:stride * (i + 1)])
expected_cov = np.cov(p, l)[0, 1]
self.assertAlmostEqual(
expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
prev_expected_cov = expected_cov
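# Reference check for the expectations above: np.cov computes the sample
# covariance (ddof=1). For instance np.cov([2, 4, 6], [1, 3, 2])[0, 1] is
# ((-2)*(-1) + 0*1 + 2*0) / (3 - 1) = 1.0, the value that
# testSingleUpdateNonIdentical compares the streaming result against.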
class StreamingPearsonRTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_local_variables(self, (
'pearson_r/covariance/comoment:0',
'pearson_r/covariance/count:0',
'pearson_r/covariance/mean_label:0',
'pearson_r/covariance/mean_prediction:0',
'pearson_r/variance_labels/count:0',
'pearson_r/variance_labels/comoment:0',
'pearson_r/variance_labels/mean_label:0',
'pearson_r/variance_labels/mean_prediction:0',
'pearson_r/variance_predictions/comoment:0',
'pearson_r/variance_predictions/count:0',
'pearson_r/variance_predictions/mean_label:0',
'pearson_r/variance_predictions/mean_prediction:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
pearson_r, _ = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_r = pearson_r.eval()
for _ in range(10):
self.assertEqual(initial_r, pearson_r.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op), 5)
self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, update_op.eval())
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = np.array([2, 4, 6, 8])
labels = np.array([1, 3, 2, 7])
weights = np.array([0, 1, 3, 1])
predictions_t = constant_op.constant(
predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels_t = constant_op.constant(
labels, shape=(1, 4), dtype=dtypes_lib.float32)
weights_t = constant_op.constant(
weights, shape=(1, 4), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
p, l = _reweight(predictions, labels, weights)
cmat = np.cov(p, l)
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op))
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = 0.
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertAlmostEqual(
prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
expected_r = np.corrcoef(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = 0.
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertAlmostEqual(
prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
p, l = _reweight(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)], weights[:stride * (i + 1)])
cmat = np.cov(p, l)
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
self.assertAlmostEqual(
expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
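# Reference check for the expectations above: Pearson r is
# cov(x, y) / sqrt(var(x) * var(y)). With cmat = np.cov([2, 4, 6], [1, 3, 2])
# = [[4., 1.], [1., 1.]], r = 1 / sqrt(4 * 1) = 0.5, which is also what
# np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1] returns.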
class StreamingMeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_local_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
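# Reference check: for unit vectors u and v the cosine distance is
# 1 - np.dot(u, v). In testSingleUpdateWithErrorAndWeights2 the two weighted
# rows give 1 - (-1) = 2 and 1 - 0 = 1, so the weighted mean is
# (2 + 1) / 2 = 1.5, matching the assertions above.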
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_percentage_less(values=array_ops.ones((10,)), threshold=2)
_assert_local_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, name='low')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
def testSomePresentOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, weights=weights, name='low')
sess.run(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class StreamingMeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_local_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(predictions, labels, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(
predictions, labels, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=2)
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes=num_classes)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_miou = miou.eval()
for _ in range(10):
self.assertEqual(initial_miou, miou.eval())
def testMultipleUpdates(self):
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
for _ in range(6):
sess.run(update_op)
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testMultipleUpdatesWithMissingClass(self):
# Test the case where there are no predictions and labels for

# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
# There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0, 0.])
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
weights = array_ops.concat(
[
constant_op.constant(
0, shape=[1]), constant_op.constant(
1, shape=[8]), constant_op.constant(
0, shape=[1])
],
0)
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
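# Reference check: for a confusion matrix cm with labels as rows and
# predictions as columns, per-class IoU is diag / (row_sum + col_sum - diag).
# For cm = [[3, 0], [2, 5]] that is 3 / (3 + 5 - 3) = 3/5 and
# 5 / (7 + 5 - 5) = 5/7, matching testUpdateOpEvalIsAccumulatedConfusionMatrix.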
class StreamingConcatTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_concat(values=array_ops.ones((10,)))
_assert_local_variables(self, (
'streaming_concat/array:0',
'streaming_concat/size:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
value, _ = metrics.streaming_concat(
values=array_ops.ones((10,)), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [value])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_concat(
values=array_ops.ones((10,)), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testNextArraySize(self):
next_array_size = metric_ops._next_array_size # pylint: disable=protected-access
with self.test_session():
self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)
self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)
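# The assertions above are consistent with _next_array_size returning the
# smallest power of growth_factor that is >= the requested size (an amortized
# doubling scheme); the exact formula is an internal detail of metric_ops.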
def testStreamingConcat(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())
sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
self.assertAllEqual(np.arange(10), concatenated.eval())
def testStreamingConcatStringValues(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.string, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertItemsEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: ['a', 'b', 'c']})
self.assertItemsEqual([b'a', b'b', b'c'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['d', 'e']})
self.assertItemsEqual([b'a', b'b', b'c', b'd', b'e'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['f', 'g', 'h', 'i', 'j']})
self.assertItemsEqual(
[b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j'],
concatenated.eval())
def testStreamingConcatMaxSize(self):
with self.test_session() as sess:
values = math_ops.range(3)
concatenated, update_op = metrics.streaming_concat(values, max_size=5)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
def testStreamingConcat2D(self):
with self.test_session() as sess:
values = array_ops.reshape(math_ops.range(3), (3, 1))
concatenated, update_op = metrics.streaming_concat(values, axis=-1)
sess.run(variables.local_variables_initializer())
for _ in range(10):
sess.run([update_op])
self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10], concatenated.eval())
def testStreamingConcatErrors(self):
with self.assertRaises(ValueError):
metrics.streaming_concat(array_ops.placeholder(dtypes_lib.float32))
values = array_ops.zeros((2, 3))
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=-3, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=2, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(
array_ops.placeholder(dtypes_lib.float32, [None, None]))
def testStreamingConcatReset(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run(variables.local_variables_initializer())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([3, 4], concatenated.eval())
class AggregateMetricsTest(test.TestCase):
def testAggregateNoMetricsRaisesValueError(self):
with self.assertRaises(ValueError):
metrics.aggregate_metrics()
def testAggregateSingleMetricReturnsOneItemLists(self):
values = array_ops.ones((10, 4))
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean(values))
self.assertEqual(len(value_tensors), 1)
self.assertEqual(len(update_ops), 1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, update_ops[0].eval())
self.assertEqual(1, value_tensors[0].eval())
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean_absolute_error(predictions, labels),
metrics.streaming_mean_squared_error(predictions, labels))
self.assertEqual(len(value_tensors), 2)
self.assertEqual(len(update_ops), 2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, update_ops[0].eval())
self.assertEqual(4, update_ops[1].eval())
self.assertEqual(2, value_tensors[0].eval())
self.assertEqual(4, value_tensors[1].eval())
class AggregateMetricMapTest(test.TestCase):
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
names_to_values, names_to_updates = metrics.aggregate_metric_map({
'm1': metrics.streaming_mean_absolute_error(predictions, labels),
'm2': metrics.streaming_mean_squared_error(predictions, labels),
})
self.assertEqual(2, len(names_to_values))
self.assertEqual(2, len(names_to_updates))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, names_to_updates['m1'].eval())
self.assertEqual(4, names_to_updates['m2'].eval())
self.assertEqual(2, names_to_values['m1'].eval())
self.assertEqual(4, names_to_values['m2'].eval())
if __name__ == '__main__':
test.main()
| apache-2.0 |
Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/google/protobuf/text_encoding.py | 152 | 4617 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Encoding related utilities."""
import re
import six
# Lookup table for utf8
_cescape_utf8_to_str = [chr(i) for i in range(0, 256)]
_cescape_utf8_to_str[9] = r'\t' # optional escape
_cescape_utf8_to_str[10] = r'\n' # optional escape
_cescape_utf8_to_str[13] = r'\r' # optional escape
_cescape_utf8_to_str[39] = r"\'" # optional escape
_cescape_utf8_to_str[34] = r'\"' # necessary escape
_cescape_utf8_to_str[92] = r'\\' # necessary escape
# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32)
_cescape_byte_to_str = ([r'\%03o' % i for i in range(0, 32)] +
[chr(i) for i in range(32, 127)] +
[r'\%03o' % i for i in range(127, 256)])
_cescape_byte_to_str[9] = r'\t' # optional escape
_cescape_byte_to_str[10] = r'\n' # optional escape
_cescape_byte_to_str[13] = r'\r' # optional escape
_cescape_byte_to_str[39] = r"\'" # optional escape
_cescape_byte_to_str[34] = r'\"' # necessary escape
_cescape_byte_to_str[92] = r'\\' # necessary escape
def CEscape(text, as_utf8):
"""Escape a bytes string for use in an ascii protocol buffer.
text.encode('string_escape') does not seem to satisfy our needs as it
encodes unprintable characters using two-digit hex escapes whereas our
C++ unescaping function allows hex escapes to be any length. So,
"\0011".encode('string_escape') ends up being "\\x011", which will be
decoded in C++ as a single-character string with char code 0x11.
Args:
text: A byte string to be escaped
as_utf8: Specifies if result should be returned in UTF-8 encoding
Returns:
Escaped string
"""
# PY3 hack: make Ord work for str and bytes:
# //platforms/networking/data uses unicode here, hence basestring.
Ord = ord if isinstance(text, six.string_types) else lambda x: x
if as_utf8:
return ''.join(_cescape_utf8_to_str[Ord(c)] for c in text)
return ''.join(_cescape_byte_to_str[Ord(c)] for c in text)
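# Illustrative example (not part of the original module): quotes and
# backslashes are always escaped, other bytes only as required, e.g.
# CEscape(b'\x00hi"', as_utf8=False) returns r'\000hi\"'.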
_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])')
_cescape_highbit_to_str = ([chr(i) for i in range(0, 127)] +
[r'\%03o' % i for i in range(127, 256)])
def CUnescape(text):
"""Unescape a text string with C-style escape sequences to UTF-8 bytes."""
def ReplaceHex(m):
# Only replace the match if the number of leading backslashes is odd, i.e.
# the slash itself is not escaped.
if len(m.group(1)) & 1:
return m.group(1) + 'x0' + m.group(2)
return m.group(0)
# This is required because the 'string_escape' encoding doesn't
# allow single-digit hex escapes (like '\xf').
result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
if str is bytes: # PY2
return result.decode('string_escape')
result = ''.join(_cescape_highbit_to_str[ord(c)] for c in result)
return (result.encode('ascii') # Make it bytes to allow decode.
.decode('unicode_escape')
# Make it bytes again to return the proper type.
.encode('raw_unicode_escape'))
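# Round-trip sketch (an assumption, not asserted by this module): because
# CEscape only emits octal and symbolic escapes, CUnescape(CEscape(s, False))
# should recover the original byte string; the hex fix-up above exists for
# C++-produced text such as '\x7', which Python's codec alone would reject.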
| gpl-2.0 |
requests/requests-ntlm | tests/functional/test_functional.py | 1 | 2038 | import requests
import requests_ntlm
"""
This test is meant to run with Appveyor but until the integration is solved
it can only be run locally. The script setup_iis.ps1 can set up an IIS server
with the 4 scenarios tested below if you wish to run a sanity check
"""
username = '.\\User'
password = 'Password01'
http_with_cbt = 'http://127.0.0.1:81/contents.txt'
http_without_cbt = 'http://127.0.0.1:82/contents.txt'
https_with_cbt = 'https://127.0.0.1:441/contents.txt'
https_without_cbt = 'https://127.0.0.1:442/contents.txt'
expected = 'contents'
class Test_Functional():
def test_ntlm_http_with_cbt(self):
actual = send_request(http_with_cbt, username, password)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == expected
def test_ntlm_http_without_cbt(self):
actual = send_request(http_without_cbt, username, password)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == expected
def test_ntlm_https_with_cbt(self):
actual = send_request(https_with_cbt, username, password)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == expected
def test_ntlm_https_without_cbt(self):
actual = send_request(https_without_cbt, username, password)
actual_content = actual.content.decode('utf-8')
actual_code = actual.status_code
assert actual_code == 200
assert actual_content == expected
def send_request(url, username, password):
"""
Sends a request to the url with the credentials specified. Returns the final response
"""
session = requests.Session()
session.verify = False
session.auth = requests_ntlm.HttpNtlmAuth(username, password)
response = session.get(url)
return response
| isc |
vks/servo | tests/wpt/css-tests/tools/html5lib/html5lib/filters/optionaltags.py | 1727 | 10500 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
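# For tokens [a, b, c], slider yields (None, a, b), (a, b, c), (b, c, None):
# a sliding window that pairs each token with its neighbours.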
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
if tagname == 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
# is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
# not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an <code>optgroup</code>
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
| mpl-2.0 |
tectronics/mythbox | resources/test/mythboxtest/test_orphans.py | 5 | 4262 | #
# MythBox for XBMC - http://mythbox.googlecode.com
# Copyright (C) 2011 analogue@yahoo.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import logging
import os
import tempfile
import unittest2 as unittest
from mockito import Mock
from mythbox.bus import EventBus
from mythbox.mythtv.enums import Upcoming
from mythbox.mythtv.conn import Connection, EventConnection, createChainId, ServerException, encodeLongLong, decodeLongLong
from mythbox.mythtv.db import MythDatabase
from mythbox.mythtv.protocol import ProtocolException
from mythbox.platform import getPlatform
from mythbox.settings import MythSettings
from mythbox.util import OnDemandConfig
log = logging.getLogger('mythbox.unittest')
class DeleteOrphansTest(unittest.TestCase):
def setUp(self):
self.platform = getPlatform()
self.translator = Mock()
self.domainCache = Mock()
self.settings = MythSettings(self.platform, self.translator)
self.settings.put('streaming_enabled', 'False')
privateConfig = OnDemandConfig()
self.settings.put('mysql_host', privateConfig.get('mysql_host'))
self.settings.put('mysql_port', privateConfig.get('mysql_port'))
self.settings.setMySqlDatabase(privateConfig.get('mysql_database'))
self.settings.setMySqlUser(privateConfig.get('mysql_user'))
self.settings.put('mysql_password', privateConfig.get('mysql_password'))
self.settings.put('paths_recordedprefix', privateConfig.get('paths_recordedprefix'))
self.db = MythDatabase(self.settings, self.translator, self.domainCache)
self.bus = EventBus()
self.conn = Connection(self.settings, self.translator, self.platform, self.bus, self.db)
def tearDown(self):
self.conn.close()
def test_getAllRecordings(self):
recordings = self.conn.getAllRecordings()
log.debug('Num Recordings = %s' % len(recordings))
for i,r in enumerate(recordings):
print i,r.getBareFilename()
dirs = ['/usr2/mythtv','/usr2/mythtv2', '/usr2/mythtv3']
mpgs = []
for d in dirs:
files = os.listdir(d)
for f in files:
if f.endswith('.mpg'):
mpgs.append(f)
print f
print 'Recs total = ', len(recordings)
print 'Files total = ', len(mpgs)
print 'Extras = ', len(mpgs) - len(recordings)
todelete = mpgs[:]
for r in recordings:
if r.getBareFilename() in mpgs:
todelete.remove(r.getBareFilename())
print 'Todelete = ', len(todelete)
bucket = []
import datetime
for f in todelete:
for d in dirs:
path = os.path.join(d,f)
if os.path.exists(path):
bucket.append(path)
print path, os.path.getsize(path)
print 'Bucket = ', len(bucket)
bucket.sort()
total = 0
for f in bucket:
s = os.path.getsize(f)
total += s
print total/1000000000
import shutil
for src in bucket[:25]:
dest = '/usr2/mythtv/backup/' + os.path.basename(src)
print src,' -> ', dest
#shutil.move(src, dest)
if __name__ == '__main__':
import logging.config
logging.config.fileConfig('mythbox_log.ini')
unittest.main()
| gpl-2.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pywin32-219/com/win32comext/axdebug/codecontainer.py | 18 | 8885 | """A utility class for a code container.
A code container is a class which holds source code for a debugger. It knows how
to color the text, and also how to translate lines into offsets, and back.
"""
import sys
from win32com.axdebug import axdebug
import tokenize
from util import RaiseNotImpl, _wrap
from win32com.server.exception import Exception
import win32api, winerror
import contexts
_keywords = {} # set of Python keywords
for name in """
and assert break class continue def del elif else except exec
finally for from global if import in is lambda not
or pass print raise return try while
""".split():
_keywords[name] = 1
class SourceCodeContainer:
def __init__(self, text, fileName = "<Remove Me!>", sourceContext = 0, startLineNumber = 0, site = None, debugDocument = None):
self.sourceContext = sourceContext # The source context added by a smart host.
self.text = text
if text:
self._buildlines()
self.nextLineNo = 0
self.fileName = fileName
self.codeContexts = {}
self.site = site
self.startLineNumber = startLineNumber
self.debugDocument = None
def _Close(self):
self.text = self.lines = self.lineOffsets = None
self.codeContexts = None
self.debugDocument = None
self.site = None
self.sourceContext = None
def GetText(self):
return self.text
def GetName(self, dnt):
assert 0, "You must subclass this"
def GetFileName(self):
return self.fileName
def GetPositionOfLine(self, cLineNumber):
self.GetText() # Prime us.
try:
return self.lineOffsets[cLineNumber]
except IndexError:
raise Exception(scode=winerror.S_FALSE)
def GetLineOfPosition(self, charPos):
self.GetText() # Prime us.
lastOffset = 0
lineNo = 0
for lineOffset in self.lineOffsets[1:]:
if lineOffset > charPos:
break
lastOffset = lineOffset
lineNo = lineNo + 1
else: # for not broken.
# print "Cant find", charPos, "in", self.lineOffsets
raise Exception(scode=winerror.S_FALSE)
# print "GLOP ret=",lineNo, (charPos-lastOffset)
return lineNo, (charPos-lastOffset)
def GetNextLine(self):
if self.nextLineNo>=len(self.lines):
self.nextLineNo = 0 # auto-reset.
return ""
rc = self.lines[self.nextLineNo]
self.nextLineNo = self.nextLineNo + 1
return rc
def GetLine(self, num):
self.GetText() # Prime us.
return self.lines[num]
def GetNumChars(self):
return len(self.GetText())
def GetNumLines(self):
self.GetText() # Prime us.
return len(self.lines)
def _buildline(self, pos):
i = self.text.find('\n', pos)
if i < 0:
newpos = len(self.text)
else:
newpos = i+1
r = self.text[pos:newpos]
return r, newpos
def _buildlines(self):
self.lines = []
self.lineOffsets = [0]
line, pos = self._buildline(0)
while line:
self.lines.append(line)
self.lineOffsets.append(pos)
line, pos = self._buildline(pos)
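# Illustrative note: after _buildlines, lineOffsets[i] is the character
# offset at which line i starts, so for text 'ab\ncd\n' lineOffsets is
# [0, 3, 6] and GetLineOfPosition(4) returns (1, 1), i.e. line 1, column 1.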
def _ProcessToken(self, type, token, spos, epos, line):
srow, scol = spos
erow, ecol = epos
self.GetText() # Prime us.
linenum = srow - 1 # Lines zero based for us too.
realCharPos = self.lineOffsets[linenum] + scol
numskipped = realCharPos - self.lastPos
if numskipped==0:
pass
elif numskipped==1:
self.attrs.append(axdebug.SOURCETEXT_ATTR_COMMENT)
else:
self.attrs.append((axdebug.SOURCETEXT_ATTR_COMMENT, numskipped))
kwSize = len(token)
self.lastPos = realCharPos + kwSize
attr = 0
if type==tokenize.NAME:
if token in _keywords:
attr = axdebug.SOURCETEXT_ATTR_KEYWORD
elif type==tokenize.STRING:
attr = axdebug.SOURCETEXT_ATTR_STRING
elif type==tokenize.NUMBER:
attr = axdebug.SOURCETEXT_ATTR_NUMBER
elif type==tokenize.OP:
attr = axdebug.SOURCETEXT_ATTR_OPERATOR
elif type==tokenize.COMMENT:
attr = axdebug.SOURCETEXT_ATTR_COMMENT
# else attr remains zero...
if kwSize==0:
pass
elif kwSize==1:
self.attrs.append(attr)
else:
self.attrs.append((attr, kwSize))
def GetSyntaxColorAttributes(self):
self.lastPos = 0
self.attrs = []
try:
tokenize.tokenize(self.GetNextLine, self._ProcessToken)
except tokenize.TokenError:
pass # Ignore - will cause all subsequent text to be commented.
numAtEnd = len(self.GetText()) - self.lastPos
if numAtEnd:
self.attrs.append((axdebug.SOURCETEXT_ATTR_COMMENT, numAtEnd))
return self.attrs
# We also provide and manage DebugDocumentContext objects
def _MakeDebugCodeContext(self, lineNo, charPos, len):
return _wrap(contexts.DebugCodeContext(lineNo, charPos, len, self, self.site), axdebug.IID_IDebugCodeContext)
# Make a context at the given position. It should take up the entire context.
def _MakeContextAtPosition(self, charPos):
lineNo, offset = self.GetLineOfPosition(charPos)
try:
endPos = self.GetPositionOfLine(lineNo+1)
except:
endPos = charPos
codecontext = self._MakeDebugCodeContext(lineNo, charPos, endPos-charPos)
return codecontext
# Returns a DebugCodeContext. debugDocument can be None for smart hosts.
def GetCodeContextAtPosition(self, charPos):
# trace("GetContextOfPos", charPos, maxChars)
# Convert to line number.
lineNo, offset = self.GetLineOfPosition(charPos)
charPos = self.GetPositionOfLine(lineNo)
try:
cc = self.codeContexts[charPos]
# trace(" GetContextOfPos using existing")
except KeyError:
cc = self._MakeContextAtPosition(charPos)
self.codeContexts[charPos] = cc
return cc
class SourceModuleContainer(SourceCodeContainer):
def __init__(self, module):
self.module = module
if hasattr(module, '__file__'):
fname = self.module.__file__
# Check for .pyc or .pyo or even .pys!
if fname[-1] in ['O','o','C','c', 'S', 's']: fname = fname[:-1]
try:
fname = win32api.GetFullPathName(fname)
except win32api.error:
pass
else:
if module.__name__=='__main__' and len(sys.argv)>0:
fname = sys.argv[0]
else:
fname = "<Unknown!>"
SourceCodeContainer.__init__(self, None, fname)
def GetText(self):
if self.text is None:
fname = self.GetFileName()
if fname:
try:
self.text = open(fname, "r").read()
except IOError, details:
self.text = "# Exception opening file\n# %s" % (repr(details))
else:
self.text = "# No file available for module '%s'" % (self.module)
self._buildlines()
return self.text
def GetName(self, dnt):
name = self.module.__name__
try:
fname = win32api.GetFullPathName(self.module.__file__)
except win32api.error:
fname = self.module.__file__
except AttributeError:
fname = name
if dnt==axdebug.DOCUMENTNAMETYPE_APPNODE:
return name.split(".")[-1]
elif dnt==axdebug.DOCUMENTNAMETYPE_TITLE:
return fname
elif dnt==axdebug.DOCUMENTNAMETYPE_FILE_TAIL:
return os.path.split(fname)[1]
elif dnt==axdebug.DOCUMENTNAMETYPE_URL:
return "file:%s" % fname
else:
raise Exception(scode=winerror.E_UNEXPECTED)
if __name__=='__main__':
import sys
sys.path.append(".")
import ttest
sc = SourceModuleContainer(ttest)
# sc = SourceCodeContainer(open(sys.argv[1], "rb").read(), sys.argv[1])
attrs = sc.GetSyntaxColorAttributes()
attrlen = 0
for attr in attrs:
if type(attr)==type(()):
attrlen = attrlen + attr[1]
else:
attrlen = attrlen + 1
text = sc.GetText()
if attrlen!=len(text):
print "Lengths dont match!!! (%d/%d)" % (attrlen, len(text))
# print "Attributes:"
# print attrs
print "GetLineOfPos=", sc.GetLineOfPosition(0)
print "GetLineOfPos=", sc.GetLineOfPosition(4)
print "GetLineOfPos=", sc.GetLineOfPosition(10)
| mit |
DavidGuben/rcbplayspokemon | app/pywin32-220/com/win32comext/propsys/pscon.py | 17 | 48201 | # hand generated from propsys.h
## PROPENUMTYPE, used with IPropertyEnumType
PET_DISCRETEVALUE = 0
PET_RANGEDVALUE = 1
PET_DEFAULTVALUE = 2
PET_ENDRANGE = 3
## PROPDESC_TYPE_FLAGS
PDTF_DEFAULT = 0
PDTF_MULTIPLEVALUES = 0x1
PDTF_ISINNATE = 0x2
PDTF_ISGROUP = 0x4
PDTF_CANGROUPBY = 0x8
PDTF_CANSTACKBY = 0x10
PDTF_ISTREEPROPERTY = 0x20
PDTF_INCLUDEINFULLTEXTQUERY = 0x40
PDTF_ISVIEWABLE = 0x80
PDTF_ISQUERYABLE = 0x100
PDTF_ISSYSTEMPROPERTY = 0x80000000
PDTF_MASK_ALL = 0x800001ff
## PROPDESC_VIEW_FLAGS
PDVF_DEFAULT = 0
PDVF_CENTERALIGN = 0x1
PDVF_RIGHTALIGN = 0x2
PDVF_BEGINNEWGROUP = 0x4
PDVF_FILLAREA = 0x8
PDVF_SORTDESCENDING = 0x10
PDVF_SHOWONLYIFPRESENT = 0x20
PDVF_SHOWBYDEFAULT = 0x40
PDVF_SHOWINPRIMARYLIST = 0x80
PDVF_SHOWINSECONDARYLIST = 0x100
PDVF_HIDELABEL = 0x200
PDVF_HIDDEN = 0x800
PDVF_CANWRAP = 0x1000
PDVF_MASK_ALL = 0x1bff
## PROPDESC_DISPLAYTYPE
PDDT_STRING = 0
PDDT_NUMBER = 1
PDDT_BOOLEAN = 2
PDDT_DATETIME = 3
PDDT_ENUMERATED = 4
## PROPDESC_GROUPING_RANGE
PDGR_DISCRETE = 0
PDGR_ALPHANUMERIC = 1
PDGR_SIZE = 2
PDGR_DYNAMIC = 3
PDGR_DATE = 4
PDGR_PERCENT = 5
PDGR_ENUMERATED = 6
## PROPDESC_FORMAT_FLAGS
PDFF_DEFAULT = 0
PDFF_PREFIXNAME = 0x1
PDFF_FILENAME = 0x2
PDFF_ALWAYSKB = 0x4
PDFF_RESERVED_RIGHTTOLEFT = 0x8
PDFF_SHORTTIME = 0x10
PDFF_LONGTIME = 0x20
PDFF_HIDETIME = 0x40
PDFF_SHORTDATE = 0x80
PDFF_LONGDATE = 0x100
PDFF_HIDEDATE = 0x200
PDFF_RELATIVEDATE = 0x400
PDFF_USEEDITINVITATION = 0x800
PDFF_READONLY = 0x1000
PDFF_NOAUTOREADINGORDER = 0x2000
## PROPDESC_SORTDESCRIPTION
PDSD_GENERAL = 0
PDSD_A_Z = 1
PDSD_LOWEST_HIGHEST = 2
PDSD_SMALLEST_BIGGEST = 3
PDSD_OLDEST_NEWEST = 4
## PROPDESC_RELATIVEDESCRIPTION_TYPE
PDRDT_GENERAL = 0
PDRDT_DATE = 1
PDRDT_SIZE = 2
PDRDT_COUNT = 3
PDRDT_REVISION = 4
PDRDT_LENGTH = 5
PDRDT_DURATION = 6
PDRDT_SPEED = 7
PDRDT_RATE = 8
PDRDT_RATING = 9
PDRDT_PRIORITY = 10
## PROPDESC_AGGREGATION_TYPE
PDAT_DEFAULT = 0
PDAT_FIRST = 1
PDAT_SUM = 2
PDAT_AVERAGE = 3
PDAT_DATERANGE = 4
PDAT_UNION = 5
PDAT_MAX = 6
PDAT_MIN = 7
## PROPDESC_CONDITION_TYPE
PDCOT_NONE = 0
PDCOT_STRING = 1
PDCOT_SIZE = 2
PDCOT_DATETIME = 3
PDCOT_BOOLEAN = 4
PDCOT_NUMBER = 5
## PROPDESC_SEARCHINFO_FLAGS
PDSIF_DEFAULT = 0
PDSIF_ININVERTEDINDEX = 0x1
PDSIF_ISCOLUMN = 0x2
PDSIF_ISCOLUMNSPARSE = 0x4
## PROPDESC_COLUMNINDEX_TYPE
PDCIT_NONE = 0
PDCIT_ONDISK = 1
PDCIT_INMEMORY = 2
## PROPDESC_ENUMFILTER, used with IPropertySystem::EnumeratePropertyDescriptions
PDEF_ALL = 0
PDEF_SYSTEM = 1
PDEF_NONSYSTEM = 2
PDEF_VIEWABLE = 3
PDEF_QUERYABLE = 4
PDEF_INFULLTEXTQUERY = 5
PDEF_COLUMN = 6
## PSC_STATE, used with IPropertyStoreCache
PSC_NORMAL = 0
PSC_NOTINSOURCE = 1
PSC_DIRTY = 2
## CONDITION_OPERATION
COP_IMPLICIT = 0
COP_EQUAL = 1
COP_NOTEQUAL = 2
COP_LESSTHAN = 3
COP_GREATERTHAN = 4
COP_LESSTHANOREQUAL = 5
COP_GREATERTHANOREQUAL = 6
COP_VALUE_STARTSWITH = 7
COP_VALUE_ENDSWITH = 8
COP_VALUE_CONTAINS = 9
COP_VALUE_NOTCONTAINS = 10
COP_DOSWILDCARDS = 11
COP_WORD_EQUAL = 12
COP_WORD_STARTSWITH = 13
COP_APPLICATION_SPECIFIC = 14
## PERSIST_SPROPSTORE_FLAGS, used with IPersistSerializedPropStorage
FPSPS_READONLY = 1
PKEY_PIDSTR_MAX = 10 # 10 decimal digits covers any 32-bit property ID (DWORD)
#define GUIDSTRING_MAX (1 + 8 + 1 + 4 + 1 + 4 + 1 + 4 + 1 + 12 + 1 + 1) // "{12345678-1234-1234-1234-123456789012}"
GUIDSTRING_MAX = (1 + 8 + 1 + 4 + 1 + 4 + 1 + 4 + 1 + 12 + 1 + 1) # 38 chars of "{...}" plus the terminating NUL = 39
#define PKEYSTR_MAX (GUIDSTRING_MAX + 1 + PKEY_PIDSTR_MAX)
PKEYSTR_MAX = GUIDSTRING_MAX + 1 + PKEY_PIDSTR_MAX
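# For example (illustrative): the canonical string form of a property key is
# "{fmtid} pid", e.g. "{F29F85E0-4FF9-1068-AB91-08002B27B3D9} 2" for
# PKEY_Title defined below - counting the terminating NUL, any such string
# fits in PKEYSTR_MAX characters.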
## Property keys from propkey.h
from pywintypes import IID
PKEY_Audio_ChannelCount = (IID('{64440490-4C8B-11D1-8B70-080036B11A03}'), 7)
PKEY_Audio_Compression = (IID('{64440490-4C8B-11D1-8B70-080036B11A03}'), 10)
PKEY_Audio_EncodingBitrate = (IID('{64440490-4C8B-11D1-8B70-080036B11A03}'), 4)
PKEY_Audio_Format = (IID('{64440490-4C8B-11D1-8B70-080036B11A03}'), 2)
PKEY_Audio_IsVariableBitRate = (IID('{E6822FEE-8C17-4D62-823C-8E9CFCBD1D5C}'), 100)
PKEY_Audio_PeakValue = (IID('{2579E5D0-1116-4084-BD9A-9B4F7CB4DF5E}'), 100)
PKEY_Audio_SampleRate = (IID('{64440490-4C8B-11D1-8B70-080036B11A03}'), 5)
PKEY_Audio_SampleSize = (IID('{64440490-4C8B-11D1-8B70-080036B11A03}'), 6)
PKEY_Audio_StreamName = (IID('{64440490-4C8B-11D1-8B70-080036B11A03}'), 9)
PKEY_Audio_StreamNumber = (IID('{64440490-4C8B-11D1-8B70-080036B11A03}'), 8)
PKEY_Calendar_Duration = (IID('{293CA35A-09AA-4DD2-B180-1FE245728A52}'), 100)
PKEY_Calendar_IsOnline = (IID('{BFEE9149-E3E2-49A7-A862-C05988145CEC}'), 100)
PKEY_Calendar_IsRecurring = (IID('{315B9C8D-80A9-4EF9-AE16-8E746DA51D70}'), 100)
PKEY_Calendar_Location = (IID('{F6272D18-CECC-40B1-B26A-3911717AA7BD}'), 100)
PKEY_Calendar_OptionalAttendeeAddresses = (IID('{D55BAE5A-3892-417A-A649-C6AC5AAAEAB3}'), 100)
PKEY_Calendar_OptionalAttendeeNames = (IID('{09429607-582D-437F-84C3-DE93A2B24C3C}'), 100)
PKEY_Calendar_OrganizerAddress = (IID('{744C8242-4DF5-456C-AB9E-014EFB9021E3}'), 100)
PKEY_Calendar_OrganizerName = (IID('{AAA660F9-9865-458E-B484-01BC7FE3973E}'), 100)
PKEY_Calendar_ReminderTime = (IID('{72FC5BA4-24F9-4011-9F3F-ADD27AFAD818}'), 100)
PKEY_Calendar_RequiredAttendeeAddresses = (IID('{0BA7D6C3-568D-4159-AB91-781A91FB71E5}'), 100)
PKEY_Calendar_RequiredAttendeeNames = (IID('{B33AF30B-F552-4584-936C-CB93E5CDA29F}'), 100)
PKEY_Calendar_Resources = (IID('{00F58A38-C54B-4C40-8696-97235980EAE1}'), 100)
PKEY_Calendar_ShowTimeAs = (IID('{5BF396D4-5EB2-466F-BDE9-2FB3F2361D6E}'), 100)
PKEY_Calendar_ShowTimeAsText = (IID('{53DA57CF-62C0-45C4-81DE-7610BCEFD7F5}'), 100)
PKEY_Communication_AccountName = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 9)
PKEY_Communication_Suffix = (IID('{807B653A-9E91-43EF-8F97-11CE04EE20C5}'), 100)
PKEY_Communication_TaskStatus = (IID('{BE1A72C6-9A1D-46B7-AFE7-AFAF8CEF4999}'), 100)
PKEY_Communication_TaskStatusText = (IID('{A6744477-C237-475B-A075-54F34498292A}'), 100)
PKEY_Computer_DecoratedFreeSpace = (IID('{9B174B35-40FF-11D2-A27E-00C04FC30871}'), 7)
PKEY_Contact_Anniversary = (IID('{9AD5BADB-CEA7-4470-A03D-B84E51B9949E}'), 100)
PKEY_Contact_AssistantName = (IID('{CD102C9C-5540-4A88-A6F6-64E4981C8CD1}'), 100)
PKEY_Contact_AssistantTelephone = (IID('{9A93244D-A7AD-4FF8-9B99-45EE4CC09AF6}'), 100)
PKEY_Contact_Birthday = (IID('{176DC63C-2688-4E89-8143-A347800F25E9}'), 47)
PKEY_Contact_BusinessAddress = (IID('{730FB6DD-CF7C-426B-A03F-BD166CC9EE24}'), 100)
PKEY_Contact_BusinessAddressCity = (IID('{402B5934-EC5A-48C3-93E6-85E86A2D934E}'), 100)
PKEY_Contact_BusinessAddressCountry = (IID('{B0B87314-FCF6-4FEB-8DFF-A50DA6AF561C}'), 100)
PKEY_Contact_BusinessAddressPostalCode = (IID('{E1D4A09E-D758-4CD1-B6EC-34A8B5A73F80}'), 100)
PKEY_Contact_BusinessAddressPostOfficeBox = (IID('{BC4E71CE-17F9-48D5-BEE9-021DF0EA5409}'), 100)
PKEY_Contact_BusinessAddressState = (IID('{446F787F-10C4-41CB-A6C4-4D0343551597}'), 100)
PKEY_Contact_BusinessAddressStreet = (IID('{DDD1460F-C0BF-4553-8CE4-10433C908FB0}'), 100)
PKEY_Contact_BusinessFaxNumber = (IID('{91EFF6F3-2E27-42CA-933E-7C999FBE310B}'), 100)
PKEY_Contact_BusinessHomePage = (IID('{56310920-2491-4919-99CE-EADB06FAFDB2}'), 100)
PKEY_Contact_BusinessTelephone = (IID('{6A15E5A0-0A1E-4CD7-BB8C-D2F1B0C929BC}'), 100)
PKEY_Contact_CallbackTelephone = (IID('{BF53D1C3-49E0-4F7F-8567-5A821D8AC542}'), 100)
PKEY_Contact_CarTelephone = (IID('{8FDC6DEA-B929-412B-BA90-397A257465FE}'), 100)
PKEY_Contact_Children = (IID('{D4729704-8EF1-43EF-9024-2BD381187FD5}'), 100)
PKEY_Contact_CompanyMainTelephone = (IID('{8589E481-6040-473D-B171-7FA89C2708ED}'), 100)
PKEY_Contact_Department = (IID('{FC9F7306-FF8F-4D49-9FB6-3FFE5C0951EC}'), 100)
PKEY_Contact_EmailAddress = (IID('{F8FA7FA3-D12B-4785-8A4E-691A94F7A3E7}'), 100)
PKEY_Contact_EmailAddress2 = (IID('{38965063-EDC8-4268-8491-B7723172CF29}'), 100)
PKEY_Contact_EmailAddress3 = (IID('{644D37B4-E1B3-4BAD-B099-7E7C04966ACA}'), 100)
PKEY_Contact_EmailAddresses = (IID('{84D8F337-981D-44B3-9615-C7596DBA17E3}'), 100)
PKEY_Contact_EmailName = (IID('{CC6F4F24-6083-4BD4-8754-674D0DE87AB8}'), 100)
PKEY_Contact_FileAsName = (IID('{F1A24AA7-9CA7-40F6-89EC-97DEF9FFE8DB}'), 100)
PKEY_Contact_FirstName = (IID('{14977844-6B49-4AAD-A714-A4513BF60460}'), 100)
PKEY_Contact_FullName = (IID('{635E9051-50A5-4BA2-B9DB-4ED056C77296}'), 100)
PKEY_Contact_Gender = (IID('{3C8CEE58-D4F0-4CF9-B756-4E5D24447BCD}'), 100)
PKEY_Contact_Hobbies = (IID('{5DC2253F-5E11-4ADF-9CFE-910DD01E3E70}'), 100)
PKEY_Contact_HomeAddress = (IID('{98F98354-617A-46B8-8560-5B1B64BF1F89}'), 100)
PKEY_Contact_HomeAddressCity = (IID('{176DC63C-2688-4E89-8143-A347800F25E9}'), 65)
PKEY_Contact_HomeAddressCountry = (IID('{08A65AA1-F4C9-43DD-9DDF-A33D8E7EAD85}'), 100)
PKEY_Contact_HomeAddressPostalCode = (IID('{8AFCC170-8A46-4B53-9EEE-90BAE7151E62}'), 100)
PKEY_Contact_HomeAddressPostOfficeBox = (IID('{7B9F6399-0A3F-4B12-89BD-4ADC51C918AF}'), 100)
PKEY_Contact_HomeAddressState = (IID('{C89A23D0-7D6D-4EB8-87D4-776A82D493E5}'), 100)
PKEY_Contact_HomeAddressStreet = (IID('{0ADEF160-DB3F-4308-9A21-06237B16FA2A}'), 100)
PKEY_Contact_HomeFaxNumber = (IID('{660E04D6-81AB-4977-A09F-82313113AB26}'), 100)
PKEY_Contact_HomeTelephone = (IID('{176DC63C-2688-4E89-8143-A347800F25E9}'), 20)
PKEY_Contact_IMAddress = (IID('{D68DBD8A-3374-4B81-9972-3EC30682DB3D}'), 100)
PKEY_Contact_Initials = (IID('{F3D8F40D-50CB-44A2-9718-40CB9119495D}'), 100)
PKEY_Contact_JA_CompanyNamePhonetic = (IID('{897B3694-FE9E-43E6-8066-260F590C0100}'), 2)
PKEY_Contact_JA_FirstNamePhonetic = (IID('{897B3694-FE9E-43E6-8066-260F590C0100}'), 3)
PKEY_Contact_JA_LastNamePhonetic = (IID('{897B3694-FE9E-43E6-8066-260F590C0100}'), 4)
PKEY_Contact_JobTitle = (IID('{176DC63C-2688-4E89-8143-A347800F25E9}'), 6)
PKEY_Contact_Label = (IID('{97B0AD89-DF49-49CC-834E-660974FD755B}'), 100)
PKEY_Contact_LastName = (IID('{8F367200-C270-457C-B1D4-E07C5BCD90C7}'), 100)
PKEY_Contact_MailingAddress = (IID('{C0AC206A-827E-4650-95AE-77E2BB74FCC9}'), 100)
PKEY_Contact_MiddleName = (IID('{176DC63C-2688-4E89-8143-A347800F25E9}'), 71)
PKEY_Contact_MobileTelephone = (IID('{176DC63C-2688-4E89-8143-A347800F25E9}'), 35)
PKEY_Contact_NickName = (IID('{176DC63C-2688-4E89-8143-A347800F25E9}'), 74)
PKEY_Contact_OfficeLocation = (IID('{176DC63C-2688-4E89-8143-A347800F25E9}'), 7)
PKEY_Contact_OtherAddress = (IID('{508161FA-313B-43D5-83A1-C1ACCF68622C}'), 100)
PKEY_Contact_OtherAddressCity = (IID('{6E682923-7F7B-4F0C-A337-CFCA296687BF}'), 100)
PKEY_Contact_OtherAddressCountry = (IID('{8F167568-0AAE-4322-8ED9-6055B7B0E398}'), 100)
PKEY_Contact_OtherAddressPostalCode = (IID('{95C656C1-2ABF-4148-9ED3-9EC602E3B7CD}'), 100)
PKEY_Contact_OtherAddressPostOfficeBox = (IID('{8B26EA41-058F-43F6-AECC-4035681CE977}'), 100)
PKEY_Contact_OtherAddressState = (IID('{71B377D6-E570-425F-A170-809FAE73E54E}'), 100)
PKEY_Contact_OtherAddressStreet = (IID('{FF962609-B7D6-4999-862D-95180D529AEA}'), 100)
PKEY_Contact_PagerTelephone = (IID('{D6304E01-F8F5-4F45-8B15-D024A6296789}'), 100)
PKEY_Contact_PersonalTitle = (IID('{176DC63C-2688-4E89-8143-A347800F25E9}'), 69)
PKEY_Contact_PrimaryAddressCity = (IID('{C8EA94F0-A9E3-4969-A94B-9C62A95324E0}'), 100)
PKEY_Contact_PrimaryAddressCountry = (IID('{E53D799D-0F3F-466E-B2FF-74634A3CB7A4}'), 100)
PKEY_Contact_PrimaryAddressPostalCode = (IID('{18BBD425-ECFD-46EF-B612-7B4A6034EDA0}'), 100)
PKEY_Contact_PrimaryAddressPostOfficeBox = (IID('{DE5EF3C7-46E1-484E-9999-62C5308394C1}'), 100)
PKEY_Contact_PrimaryAddressState = (IID('{F1176DFE-7138-4640-8B4C-AE375DC70A6D}'), 100)
PKEY_Contact_PrimaryAddressStreet = (IID('{63C25B20-96BE-488F-8788-C09C407AD812}'), 100)
PKEY_Contact_PrimaryEmailAddress = (IID('{176DC63C-2688-4E89-8143-A347800F25E9}'), 48)
PKEY_Contact_PrimaryTelephone = (IID('{176DC63C-2688-4E89-8143-A347800F25E9}'), 25)
PKEY_Contact_Profession = (IID('{7268AF55-1CE4-4F6E-A41F-B6E4EF10E4A9}'), 100)
PKEY_Contact_SpouseName = (IID('{9D2408B6-3167-422B-82B0-F583B7A7CFE3}'), 100)
PKEY_Contact_Suffix = (IID('{176DC63C-2688-4E89-8143-A347800F25E9}'), 73)
PKEY_Contact_TelexNumber = (IID('{C554493C-C1F7-40C1-A76C-EF8C0614003E}'), 100)
PKEY_Contact_TTYTDDTelephone = (IID('{AAF16BAC-2B55-45E6-9F6D-415EB94910DF}'), 100)
PKEY_Contact_WebPage = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 18)
PKEY_AcquisitionID = (IID('{65A98875-3C80-40AB-ABBC-EFDAF77DBEE2}'), 100)
PKEY_ApplicationName = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 18)
PKEY_Author = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 4)
PKEY_Capacity = (IID('{9B174B35-40FF-11D2-A27E-00C04FC30871}'), 3)
PKEY_Category = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 2)
PKEY_Comment = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 6)
PKEY_Company = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 15)
PKEY_ComputerName = (IID('{28636AA6-953D-11D2-B5D6-00C04FD918D0}'), 5)
PKEY_ContainedItems = (IID('{28636AA6-953D-11D2-B5D6-00C04FD918D0}'), 29)
PKEY_ContentStatus = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 27)
PKEY_ContentType = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 26)
PKEY_Copyright = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 11)
PKEY_DateAccessed = (IID('{B725F130-47EF-101A-A5F1-02608C9EEBAC}'), 16)
PKEY_DateAcquired = (IID('{2CBAA8F5-D81F-47CA-B17A-F8D822300131}'), 100)
PKEY_DateArchived = (IID('{43F8D7B7-A444-4F87-9383-52271C9B915C}'), 100)
PKEY_DateCompleted = (IID('{72FAB781-ACDA-43E5-B155-B2434F85E678}'), 100)
PKEY_DateCreated = (IID('{B725F130-47EF-101A-A5F1-02608C9EEBAC}'), 15)
PKEY_DateImported = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 18258)
PKEY_DateModified = (IID('{B725F130-47EF-101A-A5F1-02608C9EEBAC}'), 14)
PKEY_DueDate = (IID('{3F8472B5-E0AF-4DB2-8071-C53FE76AE7CE}'), 100)
PKEY_EndDate = (IID('{C75FAA05-96FD-49E7-9CB4-9F601082D553}'), 100)
PKEY_FileAllocationSize = (IID('{B725F130-47EF-101A-A5F1-02608C9EEBAC}'), 18)
PKEY_FileAttributes = (IID('{B725F130-47EF-101A-A5F1-02608C9EEBAC}'), 13)
PKEY_FileCount = (IID('{28636AA6-953D-11D2-B5D6-00C04FD918D0}'), 12)
PKEY_FileDescription = (IID('{0CEF7D53-FA64-11D1-A203-0000F81FEDEE}'), 3)
PKEY_FileExtension = (IID('{E4F10A3C-49E6-405D-8288-A23BD4EEAA6C}'), 100)
PKEY_FileFRN = (IID('{B725F130-47EF-101A-A5F1-02608C9EEBAC}'), 21)
PKEY_FileName = (IID('{41CF5AE0-F75A-4806-BD87-59C7D9248EB9}'), 100)
PKEY_FileOwner = (IID('{9B174B34-40FF-11D2-A27E-00C04FC30871}'), 4)
PKEY_FileVersion = (IID('{0CEF7D53-FA64-11D1-A203-0000F81FEDEE}'), 4)
PKEY_FindData = (IID('{28636AA6-953D-11D2-B5D6-00C04FD918D0}'), 0)
PKEY_FlagColor = (IID('{67DF94DE-0CA7-4D6F-B792-053A3E4F03CF}'), 100)
PKEY_FlagColorText = (IID('{45EAE747-8E2A-40AE-8CBF-CA52ABA6152A}'), 100)
PKEY_FlagStatus = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 12)
PKEY_FlagStatusText = (IID('{DC54FD2E-189D-4871-AA01-08C2F57A4ABC}'), 100)
PKEY_FreeSpace = (IID('{9B174B35-40FF-11D2-A27E-00C04FC30871}'), 2)
PKEY_Identity = (IID('{A26F4AFC-7346-4299-BE47-EB1AE613139F}'), 100)
PKEY_Importance = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 11)
PKEY_ImportanceText = (IID('{A3B29791-7713-4E1D-BB40-17DB85F01831}'), 100)
PKEY_IsAttachment = (IID('{F23F425C-71A1-4FA8-922F-678EA4A60408}'), 100)
PKEY_IsDeleted = (IID('{5CDA5FC8-33EE-4FF3-9094-AE7BD8868C4D}'), 100)
PKEY_IsFlagged = (IID('{5DA84765-E3FF-4278-86B0-A27967FBDD03}'), 100)
PKEY_IsFlaggedComplete = (IID('{A6F360D2-55F9-48DE-B909-620E090A647C}'), 100)
PKEY_IsIncomplete = (IID('{346C8BD1-2E6A-4C45-89A4-61B78E8E700F}'), 100)
PKEY_IsRead = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 10)
PKEY_IsSendToTarget = (IID('{28636AA6-953D-11D2-B5D6-00C04FD918D0}'), 33)
PKEY_IsShared = (IID('{EF884C5B-2BFE-41BB-AAE5-76EEDF4F9902}'), 100)
PKEY_ItemAuthors = (IID('{D0A04F0A-462A-48A4-BB2F-3706E88DBD7D}'), 100)
PKEY_ItemDate = (IID('{F7DB74B4-4287-4103-AFBA-F1B13DCD75CF}'), 100)
PKEY_ItemFolderNameDisplay = (IID('{B725F130-47EF-101A-A5F1-02608C9EEBAC}'), 2)
PKEY_ItemFolderPathDisplay = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 6)
PKEY_ItemFolderPathDisplayNarrow = (IID('{DABD30ED-0043-4789-A7F8-D013A4736622}'), 100)
PKEY_ItemName = (IID('{6B8DA074-3B5C-43BC-886F-0A2CDCE00B6F}'), 100)
PKEY_ItemNameDisplay = (IID('{B725F130-47EF-101A-A5F1-02608C9EEBAC}'), 10)
PKEY_ItemNamePrefix = (IID('{D7313FF1-A77A-401C-8C99-3DBDD68ADD36}'), 100)
PKEY_ItemParticipants = (IID('{D4D0AA16-9948-41A4-AA85-D97FF9646993}'), 100)
PKEY_ItemPathDisplay = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 7)
PKEY_ItemPathDisplayNarrow = (IID('{28636AA6-953D-11D2-B5D6-00C04FD918D0}'), 8)
PKEY_ItemType = (IID('{28636AA6-953D-11D2-B5D6-00C04FD918D0}'), 11)
PKEY_ItemTypeText = (IID('{B725F130-47EF-101A-A5F1-02608C9EEBAC}'), 4)
PKEY_ItemUrl = (IID('{49691C90-7E17-101A-A91C-08002B2ECDA9}'), 9)
PKEY_Keywords = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 5)
PKEY_Kind = (IID('{1E3EE840-BC2B-476C-8237-2ACD1A839B22}'), 3)
PKEY_KindText = (IID('{F04BEF95-C585-4197-A2B7-DF46FDC9EE6D}'), 100)
PKEY_Language = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 28)
PKEY_MileageInformation = (IID('{FDF84370-031A-4ADD-9E91-0D775F1C6605}'), 100)
PKEY_MIMEType = (IID('{0B63E350-9CCC-11D0-BCDB-00805FCCCE04}'), 5)
PKEY_Null = (IID('{00000000-0000-0000-0000-000000000000}'), 0)
PKEY_OfflineAvailability = (IID('{A94688B6-7D9F-4570-A648-E3DFC0AB2B3F}'), 100)
PKEY_OfflineStatus = (IID('{6D24888F-4718-4BDA-AFED-EA0FB4386CD8}'), 100)
PKEY_OriginalFileName = (IID('{0CEF7D53-FA64-11D1-A203-0000F81FEDEE}'), 6)
PKEY_ParentalRating = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 21)
PKEY_ParentalRatingReason = (IID('{10984E0A-F9F2-4321-B7EF-BAF195AF4319}'), 100)
PKEY_ParentalRatingsOrganization = (IID('{A7FE0840-1344-46F0-8D37-52ED712A4BF9}'), 100)
PKEY_ParsingBindContext = (IID('{DFB9A04D-362F-4CA3-B30B-0254B17B5B84}'), 100)
PKEY_ParsingName = (IID('{28636AA6-953D-11D2-B5D6-00C04FD918D0}'), 24)
PKEY_ParsingPath = (IID('{28636AA6-953D-11D2-B5D6-00C04FD918D0}'), 30)
PKEY_PerceivedType = (IID('{28636AA6-953D-11D2-B5D6-00C04FD918D0}'), 9)
PKEY_PercentFull = (IID('{9B174B35-40FF-11D2-A27E-00C04FC30871}'), 5)
PKEY_Priority = (IID('{9C1FCF74-2D97-41BA-B4AE-CB2E3661A6E4}'), 5)
PKEY_PriorityText = (IID('{D98BE98B-B86B-4095-BF52-9D23B2E0A752}'), 100)
PKEY_Project = (IID('{39A7F922-477C-48DE-8BC8-B28441E342E3}'), 100)
PKEY_ProviderItemID = (IID('{F21D9941-81F0-471A-ADEE-4E74B49217ED}'), 100)
PKEY_Rating = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 9)
PKEY_RatingText = (IID('{90197CA7-FD8F-4E8C-9DA3-B57E1E609295}'), 100)
PKEY_Sensitivity = (IID('{F8D3F6AC-4874-42CB-BE59-AB454B30716A}'), 100)
PKEY_SensitivityText = (IID('{D0C7F054-3F72-4725-8527-129A577CB269}'), 100)
PKEY_SFGAOFlags = (IID('{28636AA6-953D-11D2-B5D6-00C04FD918D0}'), 25)
PKEY_SharedWith = (IID('{EF884C5B-2BFE-41BB-AAE5-76EEDF4F9902}'), 200)
PKEY_ShareUserRating = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 12)
PKEY_Shell_OmitFromView = (IID('{DE35258C-C695-4CBC-B982-38B0AD24CED0}'), 2)
PKEY_SimpleRating = (IID('{A09F084E-AD41-489F-8076-AA5BE3082BCA}'), 100)
PKEY_Size = (IID('{B725F130-47EF-101A-A5F1-02608C9EEBAC}'), 12)
PKEY_SoftwareUsed = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 305)
PKEY_SourceItem = (IID('{668CDFA5-7A1B-4323-AE4B-E527393A1D81}'), 100)
PKEY_StartDate = (IID('{48FD6EC8-8A12-4CDF-A03E-4EC5A511EDDE}'), 100)
PKEY_Status = (IID('{000214A1-0000-0000-C000-000000000046}'), 9)
PKEY_Subject = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 3)
PKEY_Thumbnail = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 17)
PKEY_ThumbnailCacheId = (IID('{446D16B1-8DAD-4870-A748-402EA43D788C}'), 100)
PKEY_ThumbnailStream = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 27)
PKEY_Title = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 2)
PKEY_TotalFileSize = (IID('{28636AA6-953D-11D2-B5D6-00C04FD918D0}'), 14)
PKEY_Trademarks = (IID('{0CEF7D53-FA64-11D1-A203-0000F81FEDEE}'), 9)
PKEY_Document_ByteCount = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 4)
PKEY_Document_CharacterCount = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 16)
PKEY_Document_ClientID = (IID('{276D7BB0-5B34-4FB0-AA4B-158ED12A1809}'), 100)
PKEY_Document_Contributor = (IID('{F334115E-DA1B-4509-9B3D-119504DC7ABB}'), 100)
PKEY_Document_DateCreated = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 12)
PKEY_Document_DatePrinted = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 11)
PKEY_Document_DateSaved = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 13)
PKEY_Document_Division = (IID('{1E005EE6-BF27-428B-B01C-79676ACD2870}'), 100)
PKEY_Document_DocumentID = (IID('{E08805C8-E395-40DF-80D2-54F0D6C43154}'), 100)
PKEY_Document_HiddenSlideCount = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 9)
PKEY_Document_LastAuthor = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 8)
PKEY_Document_LineCount = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 5)
PKEY_Document_Manager = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 14)
PKEY_Document_MultimediaClipCount = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 10)
PKEY_Document_NoteCount = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 8)
PKEY_Document_PageCount = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 14)
PKEY_Document_ParagraphCount = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 6)
PKEY_Document_PresentationFormat = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 3)
PKEY_Document_RevisionNumber = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 9)
PKEY_Document_Security = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 19)
PKEY_Document_SlideCount = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 7)
PKEY_Document_Template = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 7)
PKEY_Document_TotalEditingTime = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 10)
PKEY_Document_Version = (IID('{D5CDD502-2E9C-101B-9397-08002B2CF9AE}'), 29)
PKEY_Document_WordCount = (IID('{F29F85E0-4FF9-1068-AB91-08002B27B3D9}'), 15)
PKEY_DRM_DatePlayExpires = (IID('{AEAC19E4-89AE-4508-B9B7-BB867ABEE2ED}'), 6)
PKEY_DRM_DatePlayStarts = (IID('{AEAC19E4-89AE-4508-B9B7-BB867ABEE2ED}'), 5)
PKEY_DRM_Description = (IID('{AEAC19E4-89AE-4508-B9B7-BB867ABEE2ED}'), 3)
PKEY_DRM_IsProtected = (IID('{AEAC19E4-89AE-4508-B9B7-BB867ABEE2ED}'), 2)
PKEY_DRM_PlayCount = (IID('{AEAC19E4-89AE-4508-B9B7-BB867ABEE2ED}'), 4)
PKEY_GPS_Altitude = (IID('{827EDB4F-5B73-44A7-891D-FDFFABEA35CA}'), 100)
PKEY_GPS_AltitudeDenominator = (IID('{78342DCB-E358-4145-AE9A-6BFE4E0F9F51}'), 100)
PKEY_GPS_AltitudeNumerator = (IID('{2DAD1EB7-816D-40D3-9EC3-C9773BE2AADE}'), 100)
PKEY_GPS_AltitudeRef = (IID('{46AC629D-75EA-4515-867F-6DC4321C5844}'), 100)
PKEY_GPS_AreaInformation = (IID('{972E333E-AC7E-49F1-8ADF-A70D07A9BCAB}'), 100)
PKEY_GPS_Date = (IID('{3602C812-0F3B-45F0-85AD-603468D69423}'), 100)
PKEY_GPS_DestBearing = (IID('{C66D4B3C-E888-47CC-B99F-9DCA3EE34DEA}'), 100)
PKEY_GPS_DestBearingDenominator = (IID('{7ABCF4F8-7C3F-4988-AC91-8D2C2E97ECA5}'), 100)
PKEY_GPS_DestBearingNumerator = (IID('{BA3B1DA9-86EE-4B5D-A2A4-A271A429F0CF}'), 100)
PKEY_GPS_DestBearingRef = (IID('{9AB84393-2A0F-4B75-BB22-7279786977CB}'), 100)
PKEY_GPS_DestDistance = (IID('{A93EAE04-6804-4F24-AC81-09B266452118}'), 100)
PKEY_GPS_DestDistanceDenominator = (IID('{9BC2C99B-AC71-4127-9D1C-2596D0D7DCB7}'), 100)
PKEY_GPS_DestDistanceNumerator = (IID('{2BDA47DA-08C6-4FE1-80BC-A72FC517C5D0}'), 100)
PKEY_GPS_DestDistanceRef = (IID('{ED4DF2D3-8695-450B-856F-F5C1C53ACB66}'), 100)
PKEY_GPS_DestLatitude = (IID('{9D1D7CC5-5C39-451C-86B3-928E2D18CC47}'), 100)
PKEY_GPS_DestLatitudeDenominator = (IID('{3A372292-7FCA-49A7-99D5-E47BB2D4E7AB}'), 100)
PKEY_GPS_DestLatitudeNumerator = (IID('{ECF4B6F6-D5A6-433C-BB92-4076650FC890}'), 100)
PKEY_GPS_DestLatitudeRef = (IID('{CEA820B9-CE61-4885-A128-005D9087C192}'), 100)
PKEY_GPS_DestLongitude = (IID('{47A96261-CB4C-4807-8AD3-40B9D9DBC6BC}'), 100)
PKEY_GPS_DestLongitudeDenominator = (IID('{425D69E5-48AD-4900-8D80-6EB6B8D0AC86}'), 100)
PKEY_GPS_DestLongitudeNumerator = (IID('{A3250282-FB6D-48D5-9A89-DBCACE75CCCF}'), 100)
PKEY_GPS_DestLongitudeRef = (IID('{182C1EA6-7C1C-4083-AB4B-AC6C9F4ED128}'), 100)
PKEY_GPS_Differential = (IID('{AAF4EE25-BD3B-4DD7-BFC4-47F77BB00F6D}'), 100)
PKEY_GPS_DOP = (IID('{0CF8FB02-1837-42F1-A697-A7017AA289B9}'), 100)
PKEY_GPS_DOPDenominator = (IID('{A0BE94C5-50BA-487B-BD35-0654BE8881ED}'), 100)
PKEY_GPS_DOPNumerator = (IID('{47166B16-364F-4AA0-9F31-E2AB3DF449C3}'), 100)
PKEY_GPS_ImgDirection = (IID('{16473C91-D017-4ED9-BA4D-B6BAA55DBCF8}'), 100)
PKEY_GPS_ImgDirectionDenominator = (IID('{10B24595-41A2-4E20-93C2-5761C1395F32}'), 100)
PKEY_GPS_ImgDirectionNumerator = (IID('{DC5877C7-225F-45F7-BAC7-E81334B6130A}'), 100)
PKEY_GPS_ImgDirectionRef = (IID('{A4AAA5B7-1AD0-445F-811A-0F8F6E67F6B5}'), 100)
PKEY_GPS_Latitude = (IID('{8727CFFF-4868-4EC6-AD5B-81B98521D1AB}'), 100)
PKEY_GPS_LatitudeDenominator = (IID('{16E634EE-2BFF-497B-BD8A-4341AD39EEB9}'), 100)
PKEY_GPS_LatitudeNumerator = (IID('{7DDAAAD1-CCC8-41AE-B750-B2CB8031AEA2}'), 100)
PKEY_GPS_LatitudeRef = (IID('{029C0252-5B86-46C7-ACA0-2769FFC8E3D4}'), 100)
PKEY_GPS_Longitude = (IID('{C4C4DBB2-B593-466B-BBDA-D03D27D5E43A}'), 100)
PKEY_GPS_LongitudeDenominator = (IID('{BE6E176C-4534-4D2C-ACE5-31DEDAC1606B}'), 100)
PKEY_GPS_LongitudeNumerator = (IID('{02B0F689-A914-4E45-821D-1DDA452ED2C4}'), 100)
PKEY_GPS_LongitudeRef = (IID('{33DCF22B-28D5-464C-8035-1EE9EFD25278}'), 100)
PKEY_GPS_MapDatum = (IID('{2CA2DAE6-EDDC-407D-BEF1-773942ABFA95}'), 100)
PKEY_GPS_MeasureMode = (IID('{A015ED5D-AAEA-4D58-8A86-3C586920EA0B}'), 100)
PKEY_GPS_ProcessingMethod = (IID('{59D49E61-840F-4AA9-A939-E2099B7F6399}'), 100)
PKEY_GPS_Satellites = (IID('{467EE575-1F25-4557-AD4E-B8B58B0D9C15}'), 100)
PKEY_GPS_Speed = (IID('{DA5D0862-6E76-4E1B-BABD-70021BD25494}'), 100)
PKEY_GPS_SpeedDenominator = (IID('{7D122D5A-AE5E-4335-8841-D71E7CE72F53}'), 100)
PKEY_GPS_SpeedNumerator = (IID('{ACC9CE3D-C213-4942-8B48-6D0820F21C6D}'), 100)
PKEY_GPS_SpeedRef = (IID('{ECF7F4C9-544F-4D6D-9D98-8AD79ADAF453}'), 100)
PKEY_GPS_Status = (IID('{125491F4-818F-46B2-91B5-D537753617B2}'), 100)
PKEY_GPS_Track = (IID('{76C09943-7C33-49E3-9E7E-CDBA872CFADA}'), 100)
PKEY_GPS_TrackDenominator = (IID('{C8D1920C-01F6-40C0-AC86-2F3A4AD00770}'), 100)
PKEY_GPS_TrackNumerator = (IID('{702926F4-44A6-43E1-AE71-45627116893B}'), 100)
PKEY_GPS_TrackRef = (IID('{35DBE6FE-44C3-4400-AAAE-D2C799C407E8}'), 100)
PKEY_GPS_VersionID = (IID('{22704DA4-C6B2-4A99-8E56-F16DF8C92599}'), 100)
PKEY_Image_BitDepth = (IID('{6444048F-4C8B-11D1-8B70-080036B11A03}'), 7)
PKEY_Image_ColorSpace = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 40961)
PKEY_Image_CompressedBitsPerPixel = (IID('{364B6FA9-37AB-482A-BE2B-AE02F60D4318}'), 100)
PKEY_Image_CompressedBitsPerPixelDenominator = (IID('{1F8844E1-24AD-4508-9DFD-5326A415CE02}'), 100)
PKEY_Image_CompressedBitsPerPixelNumerator = (IID('{D21A7148-D32C-4624-8900-277210F79C0F}'), 100)
PKEY_Image_Compression = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 259)
PKEY_Image_CompressionText = (IID('{3F08E66F-2F44-4BB9-A682-AC35D2562322}'), 100)
PKEY_Image_Dimensions = (IID('{6444048F-4C8B-11D1-8B70-080036B11A03}'), 13)
PKEY_Image_HorizontalResolution = (IID('{6444048F-4C8B-11D1-8B70-080036B11A03}'), 5)
PKEY_Image_HorizontalSize = (IID('{6444048F-4C8B-11D1-8B70-080036B11A03}'), 3)
PKEY_Image_ImageID = (IID('{10DABE05-32AA-4C29-BF1A-63E2D220587F}'), 100)
PKEY_Image_ResolutionUnit = (IID('{19B51FA6-1F92-4A5C-AB48-7DF0ABD67444}'), 100)
PKEY_Image_VerticalResolution = (IID('{6444048F-4C8B-11D1-8B70-080036B11A03}'), 6)
PKEY_Image_VerticalSize = (IID('{6444048F-4C8B-11D1-8B70-080036B11A03}'), 4)
PKEY_Journal_Contacts = (IID('{DEA7C82C-1D89-4A66-9427-A4E3DEBABCB1}'), 100)
PKEY_Journal_EntryType = (IID('{95BEB1FC-326D-4644-B396-CD3ED90E6DDF}'), 100)
PKEY_Link_Comment = (IID('{B9B4B3FC-2B51-4A42-B5D8-324146AFCF25}'), 5)
PKEY_Link_DateVisited = (IID('{5CBF2787-48CF-4208-B90E-EE5E5D420294}'), 23)
PKEY_Link_Description = (IID('{5CBF2787-48CF-4208-B90E-EE5E5D420294}'), 21)
PKEY_Link_Status = (IID('{B9B4B3FC-2B51-4A42-B5D8-324146AFCF25}'), 3)
PKEY_Link_TargetExtension = (IID('{7A7D76F4-B630-4BD7-95FF-37CC51A975C9}'), 2)
PKEY_Link_TargetParsingPath = (IID('{B9B4B3FC-2B51-4A42-B5D8-324146AFCF25}'), 2)
PKEY_Link_TargetSFGAOFlags = (IID('{B9B4B3FC-2B51-4A42-B5D8-324146AFCF25}'), 8)
PKEY_Media_AuthorUrl = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 32)
PKEY_Media_AverageLevel = (IID('{09EDD5B6-B301-43C5-9990-D00302EFFD46}'), 100)
PKEY_Media_ClassPrimaryID = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 13)
PKEY_Media_ClassSecondaryID = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 14)
PKEY_Media_CollectionGroupID = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 24)
PKEY_Media_CollectionID = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 25)
PKEY_Media_ContentDistributor = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 18)
PKEY_Media_ContentID = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 26)
PKEY_Media_CreatorApplication = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 27)
PKEY_Media_CreatorApplicationVersion = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 28)
PKEY_Media_DateEncoded = (IID('{2E4B640D-5019-46D8-8881-55414CC5CAA0}'), 100)
PKEY_Media_DateReleased = (IID('{DE41CC29-6971-4290-B472-F59F2E2F31E2}'), 100)
PKEY_Media_Duration = (IID('{64440490-4C8B-11D1-8B70-080036B11A03}'), 3)
PKEY_Media_DVDID = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 15)
PKEY_Media_EncodedBy = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 36)
PKEY_Media_EncodingSettings = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 37)
PKEY_Media_FrameCount = (IID('{6444048F-4C8B-11D1-8B70-080036B11A03}'), 12)
PKEY_Media_MCDI = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 16)
PKEY_Media_MetadataContentProvider = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 17)
PKEY_Media_Producer = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 22)
PKEY_Media_PromotionUrl = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 33)
PKEY_Media_ProtectionType = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 38)
PKEY_Media_ProviderRating = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 39)
PKEY_Media_ProviderStyle = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 40)
PKEY_Media_Publisher = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 30)
PKEY_Media_SubscriptionContentId = (IID('{9AEBAE7A-9644-487D-A92C-657585ED751A}'), 100)
PKEY_Media_SubTitle = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 38)
PKEY_Media_UniqueFileIdentifier = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 35)
PKEY_Media_UserNoAutoInfo = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 41)
PKEY_Media_UserWebUrl = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 34)
PKEY_Media_Writer = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 23)
PKEY_Media_Year = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 5)
PKEY_Message_AttachmentContents = (IID('{3143BF7C-80A8-4854-8880-E2E40189BDD0}'), 100)
PKEY_Message_AttachmentNames = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 21)
PKEY_Message_BccAddress = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 2)
PKEY_Message_BccName = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 3)
PKEY_Message_CcAddress = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 4)
PKEY_Message_CcName = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 5)
PKEY_Message_ConversationID = (IID('{DC8F80BD-AF1E-4289-85B6-3DFC1B493992}'), 100)
PKEY_Message_ConversationIndex = (IID('{DC8F80BD-AF1E-4289-85B6-3DFC1B493992}'), 101)
PKEY_Message_DateReceived = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 20)
PKEY_Message_DateSent = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 19)
PKEY_Message_FromAddress = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 13)
PKEY_Message_FromName = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 14)
PKEY_Message_HasAttachments = (IID('{9C1FCF74-2D97-41BA-B4AE-CB2E3661A6E4}'), 8)
PKEY_Message_IsFwdOrReply = (IID('{9A9BC088-4F6D-469E-9919-E705412040F9}'), 100)
PKEY_Message_MessageClass = (IID('{CD9ED458-08CE-418F-A70E-F912C7BB9C5C}'), 103)
PKEY_Message_SenderAddress = (IID('{0BE1C8E7-1981-4676-AE14-FDD78F05A6E7}'), 100)
PKEY_Message_SenderName = (IID('{0DA41CFA-D224-4A18-AE2F-596158DB4B3A}'), 100)
PKEY_Message_Store = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 15)
PKEY_Message_ToAddress = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 16)
PKEY_Message_ToDoTitle = (IID('{BCCC8A3C-8CEF-42E5-9B1C-C69079398BC7}'), 100)
PKEY_Message_ToName = (IID('{E3E0584C-B788-4A5A-BB20-7F5A44C9ACDD}'), 17)
PKEY_Music_AlbumArtist = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 13)
PKEY_Music_AlbumTitle = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 4)
PKEY_Music_Artist = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 2)
PKEY_Music_BeatsPerMinute = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 35)
PKEY_Music_Composer = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 19)
PKEY_Music_Conductor = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 36)
PKEY_Music_ContentGroupDescription = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 33)
PKEY_Music_Genre = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 11)
PKEY_Music_InitialKey = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 34)
PKEY_Music_Lyrics = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 12)
PKEY_Music_Mood = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 39)
PKEY_Music_PartOfSet = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 37)
PKEY_Music_Period = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 31)
PKEY_Music_SynchronizedLyrics = (IID('{6B223B6A-162E-4AA9-B39F-05D678FC6D77}'), 100)
PKEY_Music_TrackNumber = (IID('{56A3372E-CE9C-11D2-9F0E-006097C686F6}'), 7)
PKEY_Note_Color = (IID('{4776CAFA-BCE4-4CB1-A23E-265E76D8EB11}'), 100)
PKEY_Note_ColorText = (IID('{46B4E8DE-CDB2-440D-885C-1658EB65B914}'), 100)
PKEY_Photo_Aperture = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 37378)
PKEY_Photo_ApertureDenominator = (IID('{E1A9A38B-6685-46BD-875E-570DC7AD7320}'), 100)
PKEY_Photo_ApertureNumerator = (IID('{0337ECEC-39FB-4581-A0BD-4C4CC51E9914}'), 100)
PKEY_Photo_Brightness = (IID('{1A701BF6-478C-4361-83AB-3701BB053C58}'), 100)
PKEY_Photo_BrightnessDenominator = (IID('{6EBE6946-2321-440A-90F0-C043EFD32476}'), 100)
PKEY_Photo_BrightnessNumerator = (IID('{9E7D118F-B314-45A0-8CFB-D654B917C9E9}'), 100)
PKEY_Photo_CameraManufacturer = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 271)
PKEY_Photo_CameraModel = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 272)
PKEY_Photo_CameraSerialNumber = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 273)
PKEY_Photo_Contrast = (IID('{2A785BA9-8D23-4DED-82E6-60A350C86A10}'), 100)
PKEY_Photo_ContrastText = (IID('{59DDE9F2-5253-40EA-9A8B-479E96C6249A}'), 100)
PKEY_Photo_DateTaken = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 36867)
PKEY_Photo_DigitalZoom = (IID('{F85BF840-A925-4BC2-B0C4-8E36B598679E}'), 100)
PKEY_Photo_DigitalZoomDenominator = (IID('{745BAF0E-E5C1-4CFB-8A1B-D031A0A52393}'), 100)
PKEY_Photo_DigitalZoomNumerator = (IID('{16CBB924-6500-473B-A5BE-F1599BCBE413}'), 100)
PKEY_Photo_Event = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 18248)
PKEY_Photo_EXIFVersion = (IID('{D35F743A-EB2E-47F2-A286-844132CB1427}'), 100)
PKEY_Photo_ExposureBias = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 37380)
PKEY_Photo_ExposureBiasDenominator = (IID('{AB205E50-04B7-461C-A18C-2F233836E627}'), 100)
PKEY_Photo_ExposureBiasNumerator = (IID('{738BF284-1D87-420B-92CF-5834BF6EF9ED}'), 100)
PKEY_Photo_ExposureIndex = (IID('{967B5AF8-995A-46ED-9E11-35B3C5B9782D}'), 100)
PKEY_Photo_ExposureIndexDenominator = (IID('{93112F89-C28B-492F-8A9D-4BE2062CEE8A}'), 100)
PKEY_Photo_ExposureIndexNumerator = (IID('{CDEDCF30-8919-44DF-8F4C-4EB2FFDB8D89}'), 100)
PKEY_Photo_ExposureProgram = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 34850)
PKEY_Photo_ExposureProgramText = (IID('{FEC690B7-5F30-4646-AE47-4CAAFBA884A3}'), 100)
PKEY_Photo_ExposureTime = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 33434)
PKEY_Photo_ExposureTimeDenominator = (IID('{55E98597-AD16-42E0-B624-21599A199838}'), 100)
PKEY_Photo_ExposureTimeNumerator = (IID('{257E44E2-9031-4323-AC38-85C552871B2E}'), 100)
PKEY_Photo_Flash = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 37385)
PKEY_Photo_FlashEnergy = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 41483)
PKEY_Photo_FlashEnergyDenominator = (IID('{D7B61C70-6323-49CD-A5FC-C84277162C97}'), 100)
PKEY_Photo_FlashEnergyNumerator = (IID('{FCAD3D3D-0858-400F-AAA3-2F66CCE2A6BC}'), 100)
PKEY_Photo_FlashManufacturer = (IID('{AABAF6C9-E0C5-4719-8585-57B103E584FE}'), 100)
PKEY_Photo_FlashModel = (IID('{FE83BB35-4D1A-42E2-916B-06F3E1AF719E}'), 100)
PKEY_Photo_FlashText = (IID('{6B8B68F6-200B-47EA-8D25-D8050F57339F}'), 100)
PKEY_Photo_FNumber = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 33437)
PKEY_Photo_FNumberDenominator = (IID('{E92A2496-223B-4463-A4E3-30EABBA79D80}'), 100)
PKEY_Photo_FNumberNumerator = (IID('{1B97738A-FDFC-462F-9D93-1957E08BE90C}'), 100)
PKEY_Photo_FocalLength = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 37386)
PKEY_Photo_FocalLengthDenominator = (IID('{305BC615-DCA1-44A5-9FD4-10C0BA79412E}'), 100)
PKEY_Photo_FocalLengthInFilm = (IID('{A0E74609-B84D-4F49-B860-462BD9971F98}'), 100)
PKEY_Photo_FocalLengthNumerator = (IID('{776B6B3B-1E3D-4B0C-9A0E-8FBAF2A8492A}'), 100)
PKEY_Photo_FocalPlaneXResolution = (IID('{CFC08D97-C6F7-4484-89DD-EBEF4356FE76}'), 100)
PKEY_Photo_FocalPlaneXResolutionDenominator = (IID('{0933F3F5-4786-4F46-A8E8-D64DD37FA521}'), 100)
PKEY_Photo_FocalPlaneXResolutionNumerator = (IID('{DCCB10AF-B4E2-4B88-95F9-031B4D5AB490}'), 100)
PKEY_Photo_FocalPlaneYResolution = (IID('{4FFFE4D0-914F-4AC4-8D6F-C9C61DE169B1}'), 100)
PKEY_Photo_FocalPlaneYResolutionDenominator = (IID('{1D6179A6-A876-4031-B013-3347B2B64DC8}'), 100)
PKEY_Photo_FocalPlaneYResolutionNumerator = (IID('{A2E541C5-4440-4BA8-867E-75CFC06828CD}'), 100)
PKEY_Photo_GainControl = (IID('{FA304789-00C7-4D80-904A-1E4DCC7265AA}'), 100)
PKEY_Photo_GainControlDenominator = (IID('{42864DFD-9DA4-4F77-BDED-4AAD7B256735}'), 100)
PKEY_Photo_GainControlNumerator = (IID('{8E8ECF7C-B7B8-4EB8-A63F-0EE715C96F9E}'), 100)
PKEY_Photo_GainControlText = (IID('{C06238B2-0BF9-4279-A723-25856715CB9D}'), 100)
PKEY_Photo_ISOSpeed = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 34855)
PKEY_Photo_LensManufacturer = (IID('{E6DDCAF7-29C5-4F0A-9A68-D19412EC7090}'), 100)
PKEY_Photo_LensModel = (IID('{E1277516-2B5F-4869-89B1-2E585BD38B7A}'), 100)
PKEY_Photo_LightSource = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 37384)
PKEY_Photo_MakerNote = (IID('{FA303353-B659-4052-85E9-BCAC79549B84}'), 100)
PKEY_Photo_MakerNoteOffset = (IID('{813F4124-34E6-4D17-AB3E-6B1F3C2247A1}'), 100)
PKEY_Photo_MaxAperture = (IID('{08F6D7C2-E3F2-44FC-AF1E-5AA5C81A2D3E}'), 100)
PKEY_Photo_MaxApertureDenominator = (IID('{C77724D4-601F-46C5-9B89-C53F93BCEB77}'), 100)
PKEY_Photo_MaxApertureNumerator = (IID('{C107E191-A459-44C5-9AE6-B952AD4B906D}'), 100)
PKEY_Photo_MeteringMode = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 37383)
PKEY_Photo_MeteringModeText = (IID('{F628FD8C-7BA8-465A-A65B-C5AA79263A9E}'), 100)
PKEY_Photo_Orientation = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 274)
PKEY_Photo_OrientationText = (IID('{A9EA193C-C511-498A-A06B-58E2776DCC28}'), 100)
PKEY_Photo_PhotometricInterpretation = (IID('{341796F1-1DF9-4B1C-A564-91BDEFA43877}'), 100)
PKEY_Photo_PhotometricInterpretationText = (IID('{821437D6-9EAB-4765-A589-3B1CBBD22A61}'), 100)
PKEY_Photo_ProgramMode = (IID('{6D217F6D-3F6A-4825-B470-5F03CA2FBE9B}'), 100)
PKEY_Photo_ProgramModeText = (IID('{7FE3AA27-2648-42F3-89B0-454E5CB150C3}'), 100)
PKEY_Photo_RelatedSoundFile = (IID('{318A6B45-087F-4DC2-B8CC-05359551FC9E}'), 100)
PKEY_Photo_Saturation = (IID('{49237325-A95A-4F67-B211-816B2D45D2E0}'), 100)
PKEY_Photo_SaturationText = (IID('{61478C08-B600-4A84-BBE4-E99C45F0A072}'), 100)
PKEY_Photo_Sharpness = (IID('{FC6976DB-8349-4970-AE97-B3C5316A08F0}'), 100)
PKEY_Photo_SharpnessText = (IID('{51EC3F47-DD50-421D-8769-334F50424B1E}'), 100)
PKEY_Photo_ShutterSpeed = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 37377)
PKEY_Photo_ShutterSpeedDenominator = (IID('{E13D8975-81C7-4948-AE3F-37CAE11E8FF7}'), 100)
PKEY_Photo_ShutterSpeedNumerator = (IID('{16EA4042-D6F4-4BCA-8349-7C78D30FB333}'), 100)
PKEY_Photo_SubjectDistance = (IID('{14B81DA1-0135-4D31-96D9-6CBFC9671A99}'), 37382)
PKEY_Photo_SubjectDistanceDenominator = (IID('{0C840A88-B043-466D-9766-D4B26DA3FA77}'), 100)
PKEY_Photo_SubjectDistanceNumerator = (IID('{8AF4961C-F526-43E5-AA81-DB768219178D}'), 100)
PKEY_Photo_TranscodedForSync = (IID('{9A8EBB75-6458-4E82-BACB-35C0095B03BB}'), 100)
PKEY_Photo_WhiteBalance = (IID('{EE3D3D8A-5381-4CFA-B13B-AAF66B5F4EC9}'), 100)
PKEY_Photo_WhiteBalanceText = (IID('{6336B95E-C7A7-426D-86FD-7AE3D39C84B4}'), 100)
PKEY_PropGroup_Advanced = (IID('{900A403B-097B-4B95-8AE2-071FDAEEB118}'), 100)
PKEY_PropGroup_Audio = (IID('{2804D469-788F-48AA-8570-71B9C187E138}'), 100)
PKEY_PropGroup_Calendar = (IID('{9973D2B5-BFD8-438A-BA94-5349B293181A}'), 100)
PKEY_PropGroup_Camera = (IID('{DE00DE32-547E-4981-AD4B-542F2E9007D8}'), 100)
PKEY_PropGroup_Contact = (IID('{DF975FD3-250A-4004-858F-34E29A3E37AA}'), 100)
PKEY_PropGroup_Content = (IID('{D0DAB0BA-368A-4050-A882-6C010FD19A4F}'), 100)
PKEY_PropGroup_Description = (IID('{8969B275-9475-4E00-A887-FF93B8B41E44}'), 100)
PKEY_PropGroup_FileSystem = (IID('{E3A7D2C1-80FC-4B40-8F34-30EA111BDC2E}'), 100)
PKEY_PropGroup_General = (IID('{CC301630-B192-4C22-B372-9F4C6D338E07}'), 100)
PKEY_PropGroup_GPS = (IID('{F3713ADA-90E3-4E11-AAE5-FDC17685B9BE}'), 100)
PKEY_PropGroup_Image = (IID('{E3690A87-0FA8-4A2A-9A9F-FCE8827055AC}'), 100)
PKEY_PropGroup_Media = (IID('{61872CF7-6B5E-4B4B-AC2D-59DA84459248}'), 100)
PKEY_PropGroup_MediaAdvanced = (IID('{8859A284-DE7E-4642-99BA-D431D044B1EC}'), 100)
PKEY_PropGroup_Message = (IID('{7FD7259D-16B4-4135-9F97-7C96ECD2FA9E}'), 100)
PKEY_PropGroup_Music = (IID('{68DD6094-7216-40F1-A029-43FE7127043F}'), 100)
PKEY_PropGroup_Origin = (IID('{2598D2FB-5569-4367-95DF-5CD3A177E1A5}'), 100)
PKEY_PropGroup_PhotoAdvanced = (IID('{0CB2BF5A-9EE7-4A86-8222-F01E07FDADAF}'), 100)
PKEY_PropGroup_RecordedTV = (IID('{E7B33238-6584-4170-A5C0-AC25EFD9DA56}'), 100)
PKEY_PropGroup_Video = (IID('{BEBE0920-7671-4C54-A3EB-49FDDFC191EE}'), 100)
PKEY_PropList_ConflictPrompt = (IID('{C9944A21-A406-48FE-8225-AEC7E24C211B}'), 11)
PKEY_PropList_ExtendedTileInfo = (IID('{C9944A21-A406-48FE-8225-AEC7E24C211B}'), 9)
PKEY_PropList_FileOperationPrompt = (IID('{C9944A21-A406-48FE-8225-AEC7E24C211B}'), 10)
PKEY_PropList_FullDetails = (IID('{C9944A21-A406-48FE-8225-AEC7E24C211B}'), 2)
PKEY_PropList_InfoTip = (IID('{C9944A21-A406-48FE-8225-AEC7E24C211B}'), 4)
PKEY_PropList_NonPersonal = (IID('{49D1091F-082E-493F-B23F-D2308AA9668C}'), 100)
PKEY_PropList_PreviewDetails = (IID('{C9944A21-A406-48FE-8225-AEC7E24C211B}'), 8)
PKEY_PropList_PreviewTitle = (IID('{C9944A21-A406-48FE-8225-AEC7E24C211B}'), 6)
PKEY_PropList_QuickTip = (IID('{C9944A21-A406-48FE-8225-AEC7E24C211B}'), 5)
PKEY_PropList_TileInfo = (IID('{C9944A21-A406-48FE-8225-AEC7E24C211B}'), 3)
PKEY_PropList_XPDetailsPanel = (IID('{F2275480-F782-4291-BD94-F13693513AEC}'), 0)
PKEY_RecordedTV_ChannelNumber = (IID('{6D748DE2-8D38-4CC3-AC60-F009B057C557}'), 7)
PKEY_RecordedTV_Credits = (IID('{6D748DE2-8D38-4CC3-AC60-F009B057C557}'), 4)
PKEY_RecordedTV_DateContentExpires = (IID('{6D748DE2-8D38-4CC3-AC60-F009B057C557}'), 15)
PKEY_RecordedTV_EpisodeName = (IID('{6D748DE2-8D38-4CC3-AC60-F009B057C557}'), 2)
PKEY_RecordedTV_IsATSCContent = (IID('{6D748DE2-8D38-4CC3-AC60-F009B057C557}'), 16)
PKEY_RecordedTV_IsClosedCaptioningAvailable = (IID('{6D748DE2-8D38-4CC3-AC60-F009B057C557}'), 12)
PKEY_RecordedTV_IsDTVContent = (IID('{6D748DE2-8D38-4CC3-AC60-F009B057C557}'), 17)
PKEY_RecordedTV_IsHDContent = (IID('{6D748DE2-8D38-4CC3-AC60-F009B057C557}'), 18)
PKEY_RecordedTV_IsRepeatBroadcast = (IID('{6D748DE2-8D38-4CC3-AC60-F009B057C557}'), 13)
PKEY_RecordedTV_IsSAP = (IID('{6D748DE2-8D38-4CC3-AC60-F009B057C557}'), 14)
PKEY_RecordedTV_NetworkAffiliation = (IID('{2C53C813-FB63-4E22-A1AB-0B331CA1E273}'), 100)
PKEY_RecordedTV_OriginalBroadcastDate = (IID('{4684FE97-8765-4842-9C13-F006447B178C}'), 100)
PKEY_RecordedTV_ProgramDescription = (IID('{6D748DE2-8D38-4CC3-AC60-F009B057C557}'), 3)
PKEY_RecordedTV_RecordingTime = (IID('{A5477F61-7A82-4ECA-9DDE-98B69B2479B3}'), 100)
PKEY_RecordedTV_StationCallSign = (IID('{6D748DE2-8D38-4CC3-AC60-F009B057C557}'), 5)
PKEY_RecordedTV_StationName = (IID('{1B5439E7-EBA1-4AF8-BDD7-7AF1D4549493}'), 100)
PKEY_Search_AutoSummary = (IID('{560C36C0-503A-11CF-BAA1-00004C752A9A}'), 2)
PKEY_Search_ContainerHash = (IID('{BCEEE283-35DF-4D53-826A-F36A3EEFC6BE}'), 100)
PKEY_Search_Contents = (IID('{B725F130-47EF-101A-A5F1-02608C9EEBAC}'), 19)
PKEY_Search_EntryID = (IID('{49691C90-7E17-101A-A91C-08002B2ECDA9}'), 5)
PKEY_Search_GatherTime = (IID('{0B63E350-9CCC-11D0-BCDB-00805FCCCE04}'), 8)
PKEY_Search_IsClosedDirectory = (IID('{0B63E343-9CCC-11D0-BCDB-00805FCCCE04}'), 23)
PKEY_Search_IsFullyContained = (IID('{0B63E343-9CCC-11D0-BCDB-00805FCCCE04}'), 24)
PKEY_Search_QueryFocusedSummary = (IID('{560C36C0-503A-11CF-BAA1-00004C752A9A}'), 3)
PKEY_Search_Rank = (IID('{49691C90-7E17-101A-A91C-08002B2ECDA9}'), 3)
PKEY_Search_Store = (IID('{A06992B3-8CAF-4ED7-A547-B259E32AC9FC}'), 100)
PKEY_Search_UrlToIndex = (IID('{0B63E343-9CCC-11D0-BCDB-00805FCCCE04}'), 2)
PKEY_Search_UrlToIndexWithModificationTime = (IID('{0B63E343-9CCC-11D0-BCDB-00805FCCCE04}'), 12)
PKEY_DescriptionID = (IID('{28636AA6-953D-11D2-B5D6-00C04FD918D0}'), 2)
PKEY_Link_TargetSFGAOFlagsStrings = (IID('{D6942081-D53B-443D-AD47-5E059D9CD27A}'), 3)
PKEY_Link_TargetUrl = (IID('{5CBF2787-48CF-4208-B90E-EE5E5D420294}'), 2)
PKEY_Shell_SFGAOFlagsStrings = (IID('{D6942081-D53B-443D-AD47-5E059D9CD27A}'), 2)
PKEY_Software_DateLastUsed = (IID('{841E4F90-FF59-4D16-8947-E81BBFFAB36D}'), 16)
PKEY_Software_ProductName = (IID('{0CEF7D53-FA64-11D1-A203-0000F81FEDEE}'), 7)
PKEY_Sync_Comments = (IID('{7BD5533E-AF15-44DB-B8C8-BD6624E1D032}'), 13)
PKEY_Sync_ConflictDescription = (IID('{CE50C159-2FB8-41FD-BE68-D3E042E274BC}'), 4)
PKEY_Sync_ConflictFirstLocation = (IID('{CE50C159-2FB8-41FD-BE68-D3E042E274BC}'), 6)
PKEY_Sync_ConflictSecondLocation = (IID('{CE50C159-2FB8-41FD-BE68-D3E042E274BC}'), 7)
PKEY_Sync_HandlerCollectionID = (IID('{7BD5533E-AF15-44DB-B8C8-BD6624E1D032}'), 2)
PKEY_Sync_HandlerID = (IID('{7BD5533E-AF15-44DB-B8C8-BD6624E1D032}'), 3)
PKEY_Sync_HandlerName = (IID('{CE50C159-2FB8-41FD-BE68-D3E042E274BC}'), 2)
PKEY_Sync_HandlerType = (IID('{7BD5533E-AF15-44DB-B8C8-BD6624E1D032}'), 8)
PKEY_Sync_HandlerTypeLabel = (IID('{7BD5533E-AF15-44DB-B8C8-BD6624E1D032}'), 9)
PKEY_Sync_ItemID = (IID('{7BD5533E-AF15-44DB-B8C8-BD6624E1D032}'), 6)
PKEY_Sync_ItemName = (IID('{CE50C159-2FB8-41FD-BE68-D3E042E274BC}'), 3)
PKEY_Task_BillingInformation = (IID('{D37D52C6-261C-4303-82B3-08B926AC6F12}'), 100)
PKEY_Task_CompletionStatus = (IID('{084D8A0A-E6D5-40DE-BF1F-C8820E7C877C}'), 100)
PKEY_Task_Owner = (IID('{08C7CC5F-60F2-4494-AD75-55E3E0B5ADD0}'), 100)
PKEY_Video_Compression = (IID('{64440491-4C8B-11D1-8B70-080036B11A03}'), 10)
PKEY_Video_Director = (IID('{64440492-4C8B-11D1-8B70-080036B11A03}'), 20)
PKEY_Video_EncodingBitrate = (IID('{64440491-4C8B-11D1-8B70-080036B11A03}'), 8)
PKEY_Video_FourCC = (IID('{64440491-4C8B-11D1-8B70-080036B11A03}'), 44)
PKEY_Video_FrameHeight = (IID('{64440491-4C8B-11D1-8B70-080036B11A03}'), 4)
PKEY_Video_FrameRate = (IID('{64440491-4C8B-11D1-8B70-080036B11A03}'), 6)
PKEY_Video_FrameWidth = (IID('{64440491-4C8B-11D1-8B70-080036B11A03}'), 3)
PKEY_Video_HorizontalAspectRatio = (IID('{64440491-4C8B-11D1-8B70-080036B11A03}'), 42)
PKEY_Video_SampleSize = (IID('{64440491-4C8B-11D1-8B70-080036B11A03}'), 9)
PKEY_Video_StreamName = (IID('{64440491-4C8B-11D1-8B70-080036B11A03}'), 2)
PKEY_Video_StreamNumber = (IID('{64440491-4C8B-11D1-8B70-080036B11A03}'), 11)
PKEY_Video_TotalBitrate = (IID('{64440491-4C8B-11D1-8B70-080036B11A03}'), 43)
PKEY_Video_VerticalAspectRatio = (IID('{64440491-4C8B-11D1-8B70-080036B11A03}'), 45)
PKEY_Volume_FileSystem = (IID('{9B174B35-40FF-11D2-A27E-00C04FC30871}'), 4)
PKEY_Volume_IsMappedDrive = (IID('{149C0B69-2C2D-48FC-808F-D318D78C4636}'), 2)
PKEY_Volume_IsRoot = (IID('{9B174B35-40FF-11D2-A27E-00C04FC30871}'), 10)
PKEY_AppUserModel_RelaunchCommand = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'), 2)
PKEY_AppUserModel_RelaunchIconResource = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'), 3)
PKEY_AppUserModel_RelaunchDisplayNameResource = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'), 4)
PKEY_AppUserModel_ID = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'), 5)
PKEY_AppUserModel_IsDestListSeparator = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'), 6)
PKEY_AppUserModel_ExcludeFromShowInNewInstall = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'), 8)
PKEY_AppUserModel_PreventPinning = (IID('{9F4C2855-9F79-4B39-A8D0-E1D42DE1D5F3}'), 9)
# PKA_FLAGS, used with IPropertyChange
PKA_SET = 0
PKA_APPEND = 1
PKA_DELETE = 2
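# Usage sketch (illustrative; not part of the original header translation):
# reading a shell property with one of the PKEY_* tuples above via pywin32's
# propsys module. The document path is a placeholder - point it at a real file.
if __name__ == '__main__':
    from win32comext.propsys import propsys
    store = propsys.SHGetPropertyStoreFromParsingName(r'C:\some\file.docx')
    # GetValue returns a PROPVARIANT; its GetValue() unwraps the Python
    # value (here, the document's Title string).
    print(store.GetValue(PKEY_Title).GetValue())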
| mit |
ivanhorvath/openshift-tools | openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_utils/src/class/yedit.py | 7 | 24645 | # flake8: noqa
# pylint: skip-file
class YeditException(Exception):
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object):
''' Class to modify yaml files '''
    # '{}' in both patterns is filled in with the inactive separators via .format()
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z{}/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
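    # For example, with the default '.' separator, parse_key('a.b[0].c')
    # yields (index, key) pairs along the path:
    #   [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]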
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def remove_entry(data, key, index=None, value=None, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
if value is not None:
data.pop(value)
elif index is not None:
raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
else:
data.clear()
return True
elif key == '' and isinstance(data, list):
ind = None
if value is not None:
try:
ind = data.index(value)
except ValueError:
return False
elif index is not None:
ind = index
else:
del data[:]
if ind is not None:
data.pop(ind)
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add (or update) an item in a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            item = 'd'
            result: d = {'a': {'b': 'd'}}
        '''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
yfd.write(contents)
fcntl.flock(yfd, fcntl.LOCK_UN)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
if self.content_type == 'yaml':
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
elif self.content_type == 'json':
Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
else:
            raise YeditException('Unsupported content_type: {}. '.format(self.content_type) +
                                 'Please specify a content_type of yaml or json.')
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path, index=None, value=None):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
                    # use .get() so a missing key counts as a mismatch
                    # instead of raising KeyError
                    if entry.get(key) != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
        ''' update the entry at path; merges dicts and replaces or appends list items '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
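    # Illustrative update() semantics (a sketch, assuming a loaded document;
    # the paths and values below are invented):
    #
    #   yedit.yaml_dict = {'spec': {'containers': [{'name': 'a'}]}}
    #   yedit.update('spec.containers', {'name': 'b'}, curr_value={'name': 'a'})
    #     -> replaces the matching list item and returns (True, <doc>)
    #   yedit.update('spec.containers', {'name': 'b'})
    #     -> (False, <doc>) because the value is already present
    #   yedit.update('spec.containers', {'name': 'c'})
    #     -> appends the value because it is absent
    #   yedit.update('spec', {'replicas': 3})
    #     -> dict entry: merges the keys via entry.update()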
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
        # copy.deepcopy does not preserve ruamel.yaml format metadata, so
        # round-trip through ruamel.yaml when available and fall back to
        # deepcopy for plain PyYAML
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
            # copy.deepcopy does not preserve ruamel.yaml format metadata, so
            # round-trip through ruamel.yaml when available and fall back to
            # deepcopy for plain PyYAML
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
try:
# AUDIT:maybe-no-member makes sense due to different yaml libraries
# pylint: disable=maybe-no-member
                curr_value = yaml.load(invalue, Loader=yaml.RoundTripLoader)
except AttributeError:
curr_value = yaml.safe_load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
        # Special case: yaml loading the empty string '' yields None, so leave '' as-is
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
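    # Illustrative parse_value() behavior (a sketch based on the branches
    # above; non-str vtypes are converted via yaml.safe_load):
    #
    #   Yedit.parse_value('yes', 'bool')   -> True   (a YAML 1.1 boolean)
    #   Yedit.parse_value('on', 'str')     -> 'on'   (kept as a string)
    #   Yedit.parse_value('42')            -> 42     (yaml loads the int)
    #   Yedit.parse_value('maybe', 'bool') -> raises YeditException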
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
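    # An illustrative edits list as consumed by process_edits(); the keys
    # mirror what the loop above reads, but the paths and values are invented:
    #
    #   edits = [
    #       {'key': 'spec.replicas', 'value': 3, 'value_type': ''},
    #       {'key': 'spec.containers', 'value': {'name': 'b'},
    #        'action': 'update', 'curr_value': None,
    #        'curr_value_format': None, 'index': 0},
    #   ]
    #   Yedit.process_edits(edits, yamlfile)
    #     -> {'changed': True, 'results': [...]}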
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
content_type=params['content_type'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                           'file exists, that it has the correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'], params['index'], params['value'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
            # If we were passed a key and a value, encapsulate them in a
            # single-entry edits list and process them the same way
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
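
# A minimal usage sketch of the class above. This is illustrative only:
# the file path and keys are invented, and it assumes the constructor
# accepts the same keyword arguments run_ansible() passes it.
if __name__ == '__main__':
    demo = Yedit(filename='/tmp/yedit_demo.yaml', backup=False,
                 content_type='yaml', separator='.')
    demo.load()                    # no-op if the file does not exist yet
    demo.put('spec.replicas', 3)   # creates the nested path
    demo.write()                   # persists to /tmp/yedit_demo.yaml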
| apache-2.0 |
afh/cmakedash | cmakedash.py | 1 | 1690 | #!/usr/bin/env python
#
# cmakedash - a dash docset generator for CMake
import os
import re
import subprocess
from bs4 import BeautifulSoup, NavigableString, Tag
from docsetgenerator import DocsetGenerator
class CMakeDocsetGenerator (DocsetGenerator):
def __init__(self):
DocsetGenerator.__init__(self)
self.docsetName = 'CMake'
self.iconFilename = 'icon.tiff'
def helpFilename(self):
return os.path.join(self.documentsPath(), 'index.html')
def dashFeedVersion(self):
cmakeVersion = subprocess.check_output('cmake --version'.split()).split()
return cmakeVersion[2]
def generateHtml(self):
os.system("cmake --help-html > '%s'" % (self.helpFilename()))
def generateIndex(self):
page = open(self.helpFilename()).read()
soup = BeautifulSoup(page)
        any_href = re.compile('.*')  # renamed to avoid shadowing the builtin any()
        for tag in soup.find_all('a', {'href': any_href}):
name = tag.text.strip()
if len(name) > 0:
path = tag.attrs['href'].strip()
if path.startswith('#command'):
stype = 'Command'
elif path.startswith('#opt'):
stype = 'Option'
elif path.startswith('#variable'):
stype = 'Variable'
elif path.startswith('#module'):
stype = 'Module'
elif path.startswith('#prop_') or path.startswith('#property'):
stype = 'Property'
elif path.startswith('http'):
continue
else:
                    if self.verbose: print('Skipping %s' % path)
continue
path = 'index.html%s' % (path)
self.addIndexEntry(name, stype, path)
if __name__ == '__main__':
generator = CMakeDocsetGenerator()
args = generator.getargs()
generator.run(args)
| mit |
dgoedkoop/QGIS | python/plugins/MetaSearch/dialogs/manageconnectionsdialog.py | 36 | 6685 | # -*- coding: utf-8 -*-
###############################################################################
#
# CSW Client
# ---------------------------------------------------------
# QGIS Catalog Service client.
#
# Copyright (C) 2010 NextGIS (http://nextgis.org),
# Alexander Bruy (alexander.bruy@gmail.com),
# Maxim Dubinin (sim@gis-lab.info)
#
# Copyright (C) 2014 Tom Kralidis (tomkralidis@gmail.com)
#
# This source is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
###############################################################################
import xml.etree.ElementTree as etree
from qgis.core import QgsSettings
from qgis.PyQt.QtWidgets import QDialog, QDialogButtonBox, QFileDialog, QListWidgetItem, QMessageBox
from MetaSearch.util import (get_connections_from_file, get_ui_class,
prettify_xml)
BASE_CLASS = get_ui_class('manageconnectionsdialog.ui')
class ManageConnectionsDialog(QDialog, BASE_CLASS):
"""manage connections"""
def __init__(self, mode):
"""init dialog"""
QDialog.__init__(self)
self.setupUi(self)
self.settings = QgsSettings()
self.filename = None
self.mode = mode # 0 - save, 1 - load
self.btnBrowse.clicked.connect(self.select_file)
self.manage_gui()
def manage_gui(self):
"""manage interface"""
if self.mode == 1:
self.label.setText(self.tr('Load from file'))
self.buttonBox.button(QDialogButtonBox.Ok).setText(self.tr('Load'))
else:
self.label.setText(self.tr('Save to file'))
self.buttonBox.button(QDialogButtonBox.Ok).setText(self.tr('Save'))
self.populate()
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
def select_file(self):
"""select file ops"""
label = self.tr('eXtensible Markup Language (*.xml *.XML)')
if self.mode == 0:
slabel = self.tr('Save Connections')
            self.filename, selected_filter = QFileDialog.getSaveFileName(self, slabel,
                                                                         '.', label)
else:
slabel = self.tr('Load Connections')
self.filename, selected_filter = QFileDialog.getOpenFileName(self, slabel,
'.', label)
if not self.filename:
return
        # ensure the filename ends with .xml, adding the extension if the user omitted it
if not self.filename.lower().endswith('.xml'):
self.filename = '%s.xml' % self.filename
self.leFileName.setText(self.filename)
if self.mode == 1:
self.populate()
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(True)
def populate(self):
"""populate connections list from settings"""
if self.mode == 0:
self.settings.beginGroup('/MetaSearch/')
keys = self.settings.childGroups()
for key in keys:
item = QListWidgetItem(self.listConnections)
item.setText(key)
self.settings.endGroup()
else: # populate connections list from file
doc = get_connections_from_file(self, self.filename)
if doc is None:
self.filename = None
self.leFileName.clear()
self.listConnections.clear()
return
for csw in doc.findall('csw'):
item = QListWidgetItem(self.listConnections)
item.setText(csw.attrib.get('name'))
def save(self, connections):
"""save connections ops"""
doc = etree.Element('qgsCSWConnections')
doc.attrib['version'] = '1.0'
for conn in connections:
url = self.settings.value('/MetaSearch/%s/url' % conn)
if url is not None:
connection = etree.SubElement(doc, 'csw')
connection.attrib['name'] = conn
connection.attrib['url'] = url
# write to disk
with open(self.filename, 'w') as fileobj:
fileobj.write(prettify_xml(etree.tostring(doc)))
QMessageBox.information(self, self.tr('Save Connections'),
self.tr('Saved to {0}.').format(self.filename))
self.reject()
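    # The XML written by save() has the following shape (the connection
    # name and url are invented for illustration):
    #
    #   <qgsCSWConnections version="1.0">
    #     <csw name="My CSW" url="http://example.org/csw"/>
    #   </qgsCSWConnections>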
def load(self, items):
"""load connections"""
self.settings.beginGroup('/MetaSearch/')
keys = self.settings.childGroups()
self.settings.endGroup()
exml = etree.parse(self.filename).getroot()
for csw in exml.findall('csw'):
conn_name = csw.attrib.get('name')
# process only selected connections
if conn_name not in items:
continue
# check for duplicates
if conn_name in keys:
                label = self.tr('Connection {0} exists. Overwrite?').format(conn_name)
res = QMessageBox.warning(self, self.tr('Loading Connections'),
label,
QMessageBox.Yes | QMessageBox.No)
if res != QMessageBox.Yes:
continue
# no dups detected or overwrite is allowed
url = '/MetaSearch/%s/url' % conn_name
self.settings.setValue(url, csw.attrib.get('url'))
def accept(self):
"""accept connections"""
selection = self.listConnections.selectedItems()
if len(selection) == 0:
return
items = []
for sel in selection:
items.append(sel.text())
if self.mode == 0: # save
self.save(items)
else: # load
self.load(items)
self.filename = None
self.leFileName.clear()
self.listConnections.clear()
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
def reject(self):
"""back out of manage connections dialogue"""
QDialog.reject(self)
| gpl-2.0 |
hj3938/panda3d | direct/src/showbase/BulletinBoardWatcher.py | 14 | 2085 | """Undocumented Module"""
__all__ = ['BulletinBoardWatcher']
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.PythonUtil import Functor, makeList
from direct.showbase import DirectObject
class BulletinBoardWatcher(DirectObject.DirectObject):
""" This class allows you to wait for a set of posts to be made to (or
removed from) the bulletin board, and gives you a notification when all
posts have been made. Values of posts are not examined."""
notify = DirectNotifyGlobal.directNotify.newCategory('BulletinBoardWatcher')
def __init__(self, name, postNames, callback, removeNames=None):
self.notify.debug('__init__: %s, %s, %s' % (name, postNames, callback))
if removeNames is None:
removeNames = []
self.name = name
self.postNames = makeList(postNames)
self.removeNames = makeList(removeNames)
self.callback = callback
self.waitingOn = set()
for name in self.postNames:
if not bboard.has(name):
eventName = bboard.getEvent(name)
self.waitingOn.add(eventName)
self.acceptOnce(eventName, Functor(self._handleEvent, eventName))
for name in self.removeNames:
if bboard.has(name):
eventName = bboard.getRemoveEvent(name)
self.waitingOn.add(eventName)
self.acceptOnce(eventName, Functor(self._handleEvent, eventName))
self._checkDone()
def destroy(self):
self.ignoreAll()
if hasattr(self, 'callback'):
del self.callback
del self.waitingOn
def isDone(self):
return len(self.waitingOn) == 0
def _checkDone(self):
if self.isDone():
self.notify.debug('%s: done' % self.name)
self.callback()
self.destroy()
def _handleEvent(self, eventName):
self.notify.debug('%s: handlePost(%s)' % (self.name, eventName))
assert eventName in self.waitingOn
self.waitingOn.remove(eventName)
self._checkDone()
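
# Usage sketch (illustrative): assumes a running ShowBase so the global
# 'bboard' exists; the watcher name and post names are invented.
#
#   def onReady():
#       print('all expected posts arrived')
#
#   watcher = BulletinBoardWatcher(
#       'demoWatcher', ['avatarLoaded', 'zoneLoaded'], onReady)
#   bboard.post('avatarLoaded', 1)
#   bboard.post('zoneLoaded', 1)    # onReady fires here and the watcher cleans up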
| bsd-3-clause |
Dino0631/RedRain-Bot | lib/youtube_dl/extractor/lego.py | 42 | 6137 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
unescapeHTML,
parse_duration,
get_element_by_class,
)
class LEGOIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?lego\.com/(?P<locale>[^/]+)/(?:[^/]+/)*videos/(?:[^/]+/)*[^/?#]+-(?P<id>[0-9a-f]+)'
_TESTS = [{
'url': 'http://www.lego.com/en-us/videos/themes/club/blocumentary-kawaguchi-55492d823b1b4d5e985787fa8c2973b1',
'md5': 'f34468f176cfd76488767fc162c405fa',
'info_dict': {
'id': '55492d823b1b4d5e985787fa8c2973b1',
'ext': 'mp4',
'title': 'Blocumentary Great Creations: Akiyuki Kawaguchi',
'description': 'Blocumentary Great Creations: Akiyuki Kawaguchi',
},
}, {
# geo-restricted but the contentUrl contain a valid url
'url': 'http://www.lego.com/nl-nl/videos/themes/nexoknights/episode-20-kingdom-of-heroes-13bdc2299ab24d9685701a915b3d71e7##sp=399',
'md5': '4c3fec48a12e40c6e5995abc3d36cc2e',
'info_dict': {
'id': '13bdc2299ab24d9685701a915b3d71e7',
'ext': 'mp4',
'title': 'Aflevering 20 - Helden van het koninkrijk',
'description': 'md5:8ee499aac26d7fa8bcb0cedb7f9c3941',
},
}, {
# special characters in title
'url': 'http://www.lego.com/en-us/starwars/videos/lego-star-wars-force-surprise-9685ee9d12e84ff38e84b4e3d0db533d',
'info_dict': {
'id': '9685ee9d12e84ff38e84b4e3d0db533d',
'ext': 'mp4',
'title': 'Force Surprise – LEGO® Star Wars™ Microfighters',
'description': 'md5:9c673c96ce6f6271b88563fe9dc56de3',
},
'params': {
'skip_download': True,
},
}]
_BITRATES = [256, 512, 1024, 1536, 2560]
def _real_extract(self, url):
locale, video_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(url, video_id)
title = get_element_by_class('video-header', webpage).strip()
progressive_base = 'https://lc-mediaplayerns-live-s.legocdn.com/'
streaming_base = 'http://legoprod-f.akamaihd.net/'
content_url = self._html_search_meta('contentUrl', webpage)
path = self._search_regex(
r'(?:https?:)?//[^/]+/(?:[iz]/s/)?public/(.+)_[0-9,]+\.(?:mp4|webm)',
content_url, 'video path', default=None)
if not path:
player_url = self._proto_relative_url(self._search_regex(
r'<iframe[^>]+src="((?:https?)?//(?:www\.)?lego\.com/[^/]+/mediaplayer/video/[^"]+)',
webpage, 'player url', default=None))
if not player_url:
base_url = self._proto_relative_url(self._search_regex(
r'data-baseurl="([^"]+)"', webpage, 'base url',
default='http://www.lego.com/%s/mediaplayer/video/' % locale))
player_url = base_url + video_id
player_webpage = self._download_webpage(player_url, video_id)
video_data = self._parse_json(unescapeHTML(self._search_regex(
r"video='([^']+)'", player_webpage, 'video data')), video_id)
progressive_base = self._search_regex(
r'data-video-progressive-url="([^"]+)"',
player_webpage, 'progressive base', default='https://lc-mediaplayerns-live-s.legocdn.com/')
streaming_base = self._search_regex(
r'data-video-streaming-url="([^"]+)"',
player_webpage, 'streaming base', default='http://legoprod-f.akamaihd.net/')
item_id = video_data['ItemId']
net_storage_path = video_data.get('NetStoragePath') or '/'.join([item_id[:2], item_id[2:4]])
base_path = '_'.join([item_id, video_data['VideoId'], video_data['Locale'], compat_str(video_data['VideoVersion'])])
path = '/'.join([net_storage_path, base_path])
        streaming_path = ','.join(compat_str(bitrate) for bitrate in self._BITRATES)
formats = self._extract_akamai_formats(
'%si/s/public/%s_,%s,.mp4.csmil/master.m3u8' % (streaming_base, path, streaming_path), video_id)
m3u8_formats = list(filter(
lambda f: f.get('protocol') == 'm3u8_native' and f.get('vcodec') != 'none',
formats))
if len(m3u8_formats) == len(self._BITRATES):
self._sort_formats(m3u8_formats)
for bitrate, m3u8_format in zip(self._BITRATES, m3u8_formats):
progressive_base_url = '%spublic/%s_%d.' % (progressive_base, path, bitrate)
mp4_f = m3u8_format.copy()
mp4_f.update({
'url': progressive_base_url + 'mp4',
'format_id': m3u8_format['format_id'].replace('hls', 'mp4'),
'protocol': 'http',
})
web_f = {
'url': progressive_base_url + 'webm',
'format_id': m3u8_format['format_id'].replace('hls', 'webm'),
'width': m3u8_format['width'],
'height': m3u8_format['height'],
'tbr': m3u8_format.get('tbr'),
'ext': 'webm',
}
formats.extend([web_f, mp4_f])
else:
for bitrate in self._BITRATES:
                # use the full 'webm' extension so the progressive URLs match
                # the ones built in the m3u8 branch above
                for ext in ('webm', 'mp4'):
formats.append({
'format_id': '%s-%s' % (ext, bitrate),
'url': '%spublic/%s_%d.%s' % (progressive_base, path, bitrate, ext),
'tbr': bitrate,
'ext': ext,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': self._html_search_meta('description', webpage),
'thumbnail': self._html_search_meta('thumbnail', webpage),
'duration': parse_duration(self._html_search_meta('duration', webpage)),
'formats': formats,
}
| gpl-3.0 |
HiSPARC/station-software | user/python/Lib/site-packages/pip/_vendor/chardet/euckrfreq.py | 342 | 13546 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials including literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table
EUCKR_CHAR_TO_FREQ_ORDER = (
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
)
| gpl-3.0 |
slohse/ansible | lib/ansible/modules/network/f5/bigip_device_httpd.py | 9 | 22210 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_device_httpd
short_description: Manage HTTPD related settings on BIG-IP
description:
- Manages HTTPD related settings on the BIG-IP. These settings are interesting
to change when you want to set GUI timeouts and other TMUI related settings.
version_added: 2.5
options:
allow:
description:
- Specifies, if you have enabled HTTPD access, the IP address or address
range for other systems that can communicate with this system.
- To specify all addresses, use the value C(all).
      - A single IP address can be specified, such as 172.27.1.10.
      - IP ranges can be specified, such as 172.27.*.* or 172.27.0.0/255.255.0.0.
auth_name:
description:
- Sets the BIG-IP authentication realm name.
auth_pam_idle_timeout:
description:
- Sets the GUI timeout for automatic logout, in seconds.
auth_pam_validate_ip:
description:
- Sets the authPamValidateIp setting.
type: bool
auth_pam_dashboard_timeout:
description:
- Sets whether or not the BIG-IP dashboard will timeout.
type: bool
fast_cgi_timeout:
description:
- Sets the timeout of FastCGI.
hostname_lookup:
description:
- Sets whether or not to display the hostname, if possible.
type: bool
log_level:
description:
- Sets the minimum httpd log level.
choices: ['alert', 'crit', 'debug', 'emerg', 'error', 'info', 'notice', 'warn']
max_clients:
description:
- Sets the maximum number of clients that can connect to the GUI at once.
redirect_http_to_https:
description:
- Whether or not to redirect http requests to the GUI to https.
type: bool
ssl_port:
description:
- The HTTPS port to listen on.
ssl_cipher_suite:
description:
- Specifies the ciphers that the system uses.
- The values in the suite are separated by colons (:).
- Can be specified in either a string or list form. The list form is the
recommended way to provide the cipher suite. See examples for usage.
- Use the value C(default) to set the cipher suite to the system default.
This value is equivalent to specifying a list of C(ECDHE-RSA-AES128-GCM-SHA256,
ECDHE-RSA-AES256-GCM-SHA384,ECDHE-RSA-AES128-SHA,ECDHE-RSA-AES256-SHA,
ECDHE-RSA-AES128-SHA256,ECDHE-RSA-AES256-SHA384,ECDHE-ECDSA-AES128-GCM-SHA256,
ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-ECDSA-AES128-SHA,ECDHE-ECDSA-AES256-SHA,
ECDHE-ECDSA-AES128-SHA256,ECDHE-ECDSA-AES256-SHA384,AES128-GCM-SHA256,
AES256-GCM-SHA384,AES128-SHA,AES256-SHA,AES128-SHA256,AES256-SHA256,
ECDHE-RSA-DES-CBC3-SHA,ECDHE-ECDSA-DES-CBC3-SHA,DES-CBC3-SHA).
version_added: 2.6
ssl_protocols:
description:
- The list of SSL protocols to accept on the management console.
- A space-separated list of tokens in the format accepted by the Apache
mod_ssl SSLProtocol directive.
- Can be specified in either a string or list form. The list form is the
recommended way to provide the cipher suite. See examples for usage.
- Use the value C(default) to set the SSL protocols to the system default.
This value is equivalent to specifying a list of C(all,-SSLv2,-SSLv3).
version_added: 2.6
notes:
- Requires the requests Python package on the host. This is as easy as
C(pip install requests).
requirements:
- requests
extends_documentation_fragment: f5
author:
- Joe Reifel (@JoeReifel)
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Set the BIG-IP authentication realm name
bigip_device_httpd:
auth_name: BIG-IP
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Set the auth pam timeout to 3600 seconds
bigip_device_httpd:
auth_pam_idle_timeout: 1200
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Set the validate IP settings
bigip_device_httpd:
auth_pam_validate_ip: on
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Set SSL cipher suite by list
bigip_device_httpd:
password: secret
server: lb.mydomain.com
user: admin
ssl_cipher_suite:
- ECDHE-RSA-AES128-GCM-SHA256
- ECDHE-RSA-AES256-GCM-SHA384
- ECDHE-RSA-AES128-SHA
- AES256-SHA256
delegate_to: localhost
- name: Set SSL cipher suite by string
bigip_device_httpd:
password: secret
server: lb.mydomain.com
user: admin
ssl_cipher_suite: ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA:AES256-SHA256
delegate_to: localhost
- name: Set SSL protocols by list
bigip_device_httpd:
password: secret
server: lb.mydomain.com
user: admin
ssl_protocols:
- all
- -SSLv2
- -SSLv3
delegate_to: localhost
- name: Set SSL protocols by string
bigip_device_httpd:
password: secret
server: lb.mydomain.com
user: admin
ssl_cipher_suite: all -SSLv2 -SSLv3
delegate_to: localhost
'''
RETURN = r'''
auth_pam_idle_timeout:
description: The new number of seconds for GUI timeout.
returned: changed
type: string
sample: 1200
auth_name:
description: The new authentication realm name.
returned: changed
type: string
sample: 'foo'
auth_pam_validate_ip:
description: The new authPamValidateIp setting.
returned: changed
type: bool
sample: on
auth_pam_dashboard_timeout:
description: Whether or not the BIG-IP dashboard will timeout.
returned: changed
type: bool
sample: off
fast_cgi_timeout:
description: The new timeout of FastCGI.
returned: changed
type: int
sample: 500
hostname_lookup:
description: Whether or not to display the hostname, if possible.
returned: changed
type: bool
sample: on
log_level:
description: The new minimum httpd log level.
returned: changed
type: string
sample: crit
max_clients:
description: The new maximum number of clients that can connect to the GUI at once.
returned: changed
type: int
sample: 20
redirect_http_to_https:
description: Whether or not to redirect http requests to the GUI to https.
returned: changed
type: bool
sample: on
ssl_port:
description: The new HTTPS port to listen on.
returned: changed
type: int
sample: 10443
ssl_cipher_suite:
description: The new ciphers that the system uses.
returned: changed
type: string
sample: ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA
ssl_protocols:
description: The new list of SSL protocols to accept on the management console.
returned: changed
type: string
sample: all -SSLv2 -SSLv3
'''
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
class Parameters(AnsibleF5Parameters):
api_map = {
'authPamIdleTimeout': 'auth_pam_idle_timeout',
'authPamValidateIp': 'auth_pam_validate_ip',
'authName': 'auth_name',
'authPamDashboardTimeout': 'auth_pam_dashboard_timeout',
'fastcgiTimeout': 'fast_cgi_timeout',
'hostnameLookup': 'hostname_lookup',
'logLevel': 'log_level',
'maxClients': 'max_clients',
'redirectHttpToHttps': 'redirect_http_to_https',
'sslPort': 'ssl_port',
'sslCiphersuite': 'ssl_cipher_suite',
'sslProtocol': 'ssl_protocols'
}
api_attributes = [
'authPamIdleTimeout', 'authPamValidateIp', 'authName', 'authPamDashboardTimeout',
'fastcgiTimeout', 'hostnameLookup', 'logLevel', 'maxClients', 'sslPort',
'redirectHttpToHttps', 'allow', 'sslCiphersuite', 'sslProtocol'
]
returnables = [
'auth_pam_idle_timeout', 'auth_pam_validate_ip', 'auth_name',
'auth_pam_dashboard_timeout', 'fast_cgi_timeout', 'hostname_lookup',
'log_level', 'max_clients', 'redirect_http_to_https', 'ssl_port',
'allow', 'ssl_cipher_suite', 'ssl_protocols'
]
updatables = [
'auth_pam_idle_timeout', 'auth_pam_validate_ip', 'auth_name',
'auth_pam_dashboard_timeout', 'fast_cgi_timeout', 'hostname_lookup',
'log_level', 'max_clients', 'redirect_http_to_https', 'ssl_port',
'allow', 'ssl_cipher_suite', 'ssl_protocols'
]
_ciphers = "ECDHE-RSA-AES128-GCM-SHA256:" \
"ECDHE-RSA-AES256-GCM-SHA384:" \
"ECDHE-RSA-AES128-SHA:" \
"ECDHE-RSA-AES256-SHA:" \
"ECDHE-RSA-AES128-SHA256:" \
"ECDHE-RSA-AES256-SHA384:" \
"ECDHE-ECDSA-AES128-GCM-SHA256:" \
"ECDHE-ECDSA-AES256-GCM-SHA384:" \
"ECDHE-ECDSA-AES128-SHA:" \
"ECDHE-ECDSA-AES256-SHA:" \
"ECDHE-ECDSA-AES128-SHA256:" \
"ECDHE-ECDSA-AES256-SHA384:" \
"AES128-GCM-SHA256:" \
"AES256-GCM-SHA384:" \
"AES128-SHA:" \
"AES256-SHA:" \
"AES128-SHA256:" \
"AES256-SHA256:" \
"ECDHE-RSA-DES-CBC3-SHA:" \
"ECDHE-ECDSA-DES-CBC3-SHA:" \
"DES-CBC3-SHA"
_protocols = 'all -SSLv2 -SSLv3'
@property
def auth_pam_idle_timeout(self):
if self._values['auth_pam_idle_timeout'] is None:
return None
return int(self._values['auth_pam_idle_timeout'])
@property
def fast_cgi_timeout(self):
if self._values['fast_cgi_timeout'] is None:
return None
return int(self._values['fast_cgi_timeout'])
@property
def max_clients(self):
if self._values['max_clients'] is None:
return None
return int(self._values['max_clients'])
@property
def ssl_port(self):
if self._values['ssl_port'] is None:
return None
return int(self._values['ssl_port'])
class ModuleParameters(Parameters):
@property
def auth_pam_validate_ip(self):
if self._values['auth_pam_validate_ip'] is None:
return None
if self._values['auth_pam_validate_ip']:
return "on"
return "off"
@property
def auth_pam_dashboard_timeout(self):
if self._values['auth_pam_dashboard_timeout'] is None:
return None
if self._values['auth_pam_dashboard_timeout']:
return "on"
return "off"
@property
def hostname_lookup(self):
if self._values['hostname_lookup'] is None:
return None
if self._values['hostname_lookup']:
return "on"
return "off"
@property
def redirect_http_to_https(self):
if self._values['redirect_http_to_https'] is None:
return None
if self._values['redirect_http_to_https']:
return "enabled"
return "disabled"
@property
def allow(self):
if self._values['allow'] is None:
return None
if self._values['allow'][0] == 'all':
return 'all'
if self._values['allow'][0] == '':
return ''
allow = self._values['allow']
result = list(set([str(x) for x in allow]))
result = sorted(result)
return result
@property
def ssl_cipher_suite(self):
if self._values['ssl_cipher_suite'] is None:
return None
if isinstance(self._values['ssl_cipher_suite'], string_types):
ciphers = self._values['ssl_cipher_suite'].strip()
else:
ciphers = self._values['ssl_cipher_suite']
if not ciphers:
raise F5ModuleError(
"ssl_cipher_suite may not be set to 'none'"
)
if ciphers == 'default':
ciphers = ':'.join(sorted(Parameters._ciphers.split(':')))
elif isinstance(self._values['ssl_cipher_suite'], string_types):
ciphers = ':'.join(sorted(ciphers.split(':')))
else:
ciphers = ':'.join(sorted(ciphers))
return ciphers
@property
def ssl_protocols(self):
if self._values['ssl_protocols'] is None:
return None
if isinstance(self._values['ssl_protocols'], string_types):
protocols = self._values['ssl_protocols'].strip()
else:
protocols = self._values['ssl_protocols']
if not protocols:
raise F5ModuleError(
"ssl_protocols may not be set to 'none'"
)
if protocols == 'default':
protocols = ' '.join(sorted(Parameters._protocols.split(' ')))
elif isinstance(protocols, string_types):
protocols = ' '.join(sorted(protocols.split(' ')))
else:
protocols = ' '.join(sorted(protocols))
return protocols
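    # Normalization examples for the two properties above (illustrative
    # inputs; ordering is a plain string sort, so '-' sorts before letters):
    #
    #   ssl_protocols='default'         -> '-SSLv2 -SSLv3 all'
    #   ssl_protocols=['all', '-SSLv3'] -> '-SSLv3 all'
    #   ssl_cipher_suite='B:A'          -> 'A:B'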
class ApiParameters(Parameters):
@property
def allow(self):
if self._values['allow'] is None:
return ''
if self._values['allow'][0] == 'All':
return 'all'
allow = self._values['allow']
result = list(set([str(x) for x in allow]))
result = sorted(result)
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
@property
def ssl_cipher_suite(self):
default = ':'.join(sorted(Parameters._ciphers.split(':')))
if self._values['ssl_cipher_suite'] == default:
return 'default'
else:
return self._values['ssl_cipher_suite']
@property
def ssl_protocols(self):
default = ' '.join(sorted(Parameters._protocols.split(' ')))
if self._values['ssl_protocols'] == default:
return 'default'
else:
return self._values['ssl_protocols']
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def allow(self):
if self.want.allow is None:
return None
if self.want.allow == 'all' and self.have.allow == 'all':
return None
if self.want.allow == 'all':
return ['All']
if self.want.allow == '' and self.have.allow == '':
return None
if self.want.allow == '':
return []
if self.want.allow != self.have.allow:
return self.want.allow
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
result = dict()
changed = self.present()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
return self.update()
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/httpd".format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
except Exception as ex:
valid = [
'Remote end closed connection',
'Connection aborted',
]
# BIG-IP will kill your management connection when you change the HTTP
# redirect setting. So this catches that and handles it gracefully.
if 'redirectHttpToHttps' in params:
if any(i for i in valid if i in str(ex)):
# Wait for BIG-IP web server to settle after changing this
time.sleep(2)
return True
raise F5ModuleError(str(ex))
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/httpd".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
allow=dict(
type='list'
),
auth_name=dict(),
auth_pam_idle_timeout=dict(
type='int'
),
fast_cgi_timeout=dict(
type='int'
),
max_clients=dict(
type='int'
),
ssl_port=dict(
type='int'
),
auth_pam_validate_ip=dict(
type='bool'
),
auth_pam_dashboard_timeout=dict(
type='bool'
),
hostname_lookup=dict(
type='bool'
),
log_level=dict(
choices=[
'alert', 'crit', 'debug', 'emerg',
'error', 'info', 'notice', 'warn'
]
),
redirect_http_to_https=dict(
type='bool'
),
ssl_cipher_suite=dict(type='raw'),
ssl_protocols=dict(type='raw')
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
client = F5RestClient(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 |
AladdinSonni/youtube-dl | youtube_dl/extractor/appleconnect.py | 139 | 1848 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
str_to_int,
ExtractorError
)
class AppleConnectIE(InfoExtractor):
_VALID_URL = r'https?://itunes\.apple\.com/\w{0,2}/?post/idsa\.(?P<id>[\w-]+)'
_TEST = {
'url': 'https://itunes.apple.com/us/post/idsa.4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
'md5': '10d0f2799111df4cb1c924520ca78f98',
'info_dict': {
'id': '4ab17a39-2720-11e5-96c5-a5b38f6c42d3',
'ext': 'm4v',
'title': 'Energy',
'uploader': 'Drake',
'thumbnail': 'http://is5.mzstatic.com/image/thumb/Video5/v4/78/61/c5/7861c5fa-ad6d-294b-1464-cf7605b911d6/source/1920x1080sr.jpg',
'upload_date': '20150710',
'timestamp': 1436545535,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
try:
video_json = self._html_search_regex(
r'class="auc-video-data">(\{.*?\})', webpage, 'json')
except ExtractorError:
raise ExtractorError('This post doesn\'t contain a video', expected=True)
video_data = self._parse_json(video_json, video_id)
timestamp = str_to_int(self._html_search_regex(r'data-timestamp="(\d+)"', webpage, 'timestamp'))
like_count = str_to_int(self._html_search_regex(r'(\d+) Loves', webpage, 'like count'))
return {
'id': video_id,
'url': video_data['sslSrc'],
'title': video_data['title'],
'description': video_data['description'],
'uploader': video_data['artistName'],
'thumbnail': video_data['artworkUrl'],
'timestamp': timestamp,
'like_count': like_count,
}
| unlicense |
fidomason/kbengine | kbe/src/lib/python/Tools/pynche/Main.py | 116 | 6406 | """Pynche -- The PYthon Natural Color and Hue Editor.
Contact: %(AUTHNAME)s
Email: %(AUTHEMAIL)s
Version: %(__version__)s
Pynche is based largely on a similar color editor I wrote years ago for the
SunView window system. That editor was called ICE: the Interactive Color
Editor. I'd always wanted to port the editor to X but didn't feel like
hacking X and C code to do it. Fast forward many years, to where Python +
Tkinter provides such a nice programming environment, with enough power, that
I finally buckled down and implemented it. I changed the name because these
days, too many other systems have the acronym `ICE'.
This program currently requires Python 2.2 with Tkinter.
Usage: %(PROGRAM)s [-d file] [-i file] [-X] [-v] [-h] [initialcolor]
Where:
--database file
-d file
Alternate location of a color database file
--initfile file
-i file
Alternate location of the initialization file. This file contains a
persistent database of the current Pynche options and color. This
means that Pynche restores its option settings and current color when
it restarts, using this file (unless the -X option is used). The
default is ~/.pynche
--ignore
-X
Ignore the initialization file when starting up. Pynche will still
write the current option settings to this file when it quits.
--version
-v
print the version number and exit
--help
-h
print this message
initialcolor
initial color, as a color name or #RRGGBB format
"""
__version__ = '1.4.1'
import sys
import os
import getopt
import ColorDB
from PyncheWidget import PyncheWidget
from Switchboard import Switchboard
from StripViewer import StripViewer
from ChipViewer import ChipViewer
from TypeinViewer import TypeinViewer
PROGRAM = sys.argv[0]
AUTHNAME = 'Barry Warsaw'
AUTHEMAIL = 'barry@python.org'
# Default locations of rgb.txt or other textual color database
RGB_TXT = [
# Solaris OpenWindows
'/usr/openwin/lib/rgb.txt',
# Linux
'/usr/lib/X11/rgb.txt',
# The X11R6.4 rgb.txt file
os.path.join(sys.path[0], 'X/rgb.txt'),
# add more here
]
# Do this because PyncheWidget.py wants to get at the interpolated docstring
# too, for its Help menu.
def docstring():
return __doc__ % globals()
def usage(code, msg=''):
print(docstring())
if msg:
print(msg)
sys.exit(code)
def initial_color(s, colordb):
    # inner helper, tried on each candidate color string (name or #RRGGBB)
def scan_color(s, colordb=colordb):
try:
r, g, b = colordb.find_byname(s)
except ColorDB.BadColor:
try:
r, g, b = ColorDB.rrggbb_to_triplet(s)
except ColorDB.BadColor:
return None, None, None
return r, g, b
#
# First try the passed in color
r, g, b = scan_color(s)
if r is None:
# try the same color with '#' prepended, since some shells require
# this to be escaped, which is a pain
r, g, b = scan_color('#' + s)
if r is None:
print('Bad initial color, using gray50:', s)
r, g, b = scan_color('gray50')
if r is None:
usage(1, 'Cannot find an initial color to use')
# does not return
return r, g, b
def build(master=None, initialcolor=None, initfile=None, ignore=None,
dbfile=None):
# create all output widgets
s = Switchboard(not ignore and initfile)
# defer to the command line chosen color database, falling back to the one
# in the .pynche file.
if dbfile is None:
dbfile = s.optiondb().get('DBFILE')
# find a parseable color database
colordb = None
files = RGB_TXT[:]
if dbfile is None:
dbfile = files.pop()
while colordb is None:
try:
colordb = ColorDB.get_colordb(dbfile)
except (KeyError, IOError):
pass
if colordb is None:
if not files:
break
dbfile = files.pop(0)
if not colordb:
usage(1, 'No color database file found, see the -d option.')
s.set_colordb(colordb)
# create the application window decorations
app = PyncheWidget(__version__, s, master=master)
w = app.window()
# these built-in viewers live inside the main Pynche window
s.add_view(StripViewer(s, w))
s.add_view(ChipViewer(s, w))
s.add_view(TypeinViewer(s, w))
# get the initial color as components and set the color on all views. if
# there was no initial color given on the command line, use the one that's
# stored in the option database
if initialcolor is None:
optiondb = s.optiondb()
red = optiondb.get('RED')
green = optiondb.get('GREEN')
blue = optiondb.get('BLUE')
# but if there wasn't any stored in the database, use grey50
if red is None or blue is None or green is None:
red, green, blue = initial_color('grey50', colordb)
else:
red, green, blue = initial_color(initialcolor, colordb)
s.update_views(red, green, blue)
return app, s
def run(app, s):
try:
app.start()
except KeyboardInterrupt:
pass
def main():
try:
opts, args = getopt.getopt(
sys.argv[1:],
'hd:i:Xv',
['database=', 'initfile=', 'ignore', 'help', 'version'])
except getopt.error as msg:
usage(1, msg)
if len(args) == 0:
initialcolor = None
elif len(args) == 1:
initialcolor = args[0]
else:
usage(1)
ignore = False
dbfile = None
initfile = os.path.expanduser('~/.pynche')
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-v', '--version'):
print("""\
Pynche -- The PYthon Natural Color and Hue Editor.
Contact: %(AUTHNAME)s
Email: %(AUTHEMAIL)s
Version: %(__version__)s""" % globals())
sys.exit(0)
elif opt in ('-d', '--database'):
dbfile = arg
elif opt in ('-X', '--ignore'):
ignore = True
elif opt in ('-i', '--initfile'):
initfile = arg
app, sb = build(initialcolor=initialcolor,
initfile=initfile,
ignore=ignore,
dbfile=dbfile)
run(app, sb)
sb.save_views()
if __name__ == '__main__':
main()
| lgpl-3.0 |
johnson1228/pymatgen | pymatgen/cli/pmg_analyze.py | 9 | 4753 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import os
import re
import logging
import multiprocessing
from tabulate import tabulate
from pymatgen.io.vasp import Outcar
from pymatgen.apps.borg.hive import SimpleVaspToComputedEntryDrone, \
VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
"""
A master convenience script with many tools for vasp and structure analysis.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "4.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "Aug 13 2016"
SAVE_FILE = "vasp_data.gz"
def get_energies(rootdir, reanalyze, verbose, detailed, sort, fmt):
"""
Doc string.
"""
if verbose:
logformat = "%(relativeCreated)d msecs : %(message)s"
logging.basicConfig(level=logging.INFO, format=logformat)
if not detailed:
drone = SimpleVaspToComputedEntryDrone(inc_structure=True)
else:
drone = VaspToComputedEntryDrone(inc_structure=True,
data=["filename",
"initial_structure"])
ncpus = multiprocessing.cpu_count()
logging.info("Detected {} cpus".format(ncpus))
queen = BorgQueen(drone, number_of_drones=ncpus)
if os.path.exists(SAVE_FILE) and not reanalyze:
msg = "Using previously assimilated data from {}.".format(SAVE_FILE) \
+ " Use -r to force re-analysis."
queen.load_data(SAVE_FILE)
else:
if ncpus > 1:
queen.parallel_assimilate(rootdir)
else:
queen.serial_assimilate(rootdir)
msg = "Analysis results saved to {} for faster ".format(SAVE_FILE) + \
"subsequent loading."
queen.save_data(SAVE_FILE)
entries = queen.get_data()
if sort == "energy_per_atom":
entries = sorted(entries, key=lambda x: x.energy_per_atom)
elif sort == "filename":
entries = sorted(entries, key=lambda x: x.data["filename"])
all_data = []
for e in entries:
if not detailed:
delta_vol = "{:.2f}".format(e.data["delta_volume"] * 100)
else:
delta_vol = e.structure.volume / \
e.data["initial_structure"].volume - 1
delta_vol = "{:.2f}".format(delta_vol * 100)
all_data.append((e.data["filename"].replace("./", ""),
re.sub(r"\s+", "", e.composition.formula),
"{:.5f}".format(e.energy),
"{:.5f}".format(e.energy_per_atom),
delta_vol))
if len(all_data) > 0:
headers = ("Directory", "Formula", "Energy", "E/Atom", "% vol chg")
print(tabulate(all_data, headers=headers, tablefmt=fmt))
print("")
print(msg)
else:
print("No valid vasp run found.")
os.unlink(SAVE_FILE)
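# Illustrative call of the function above (directory and option values are
# assumptions): tabulate all runs under the current directory, sorted by
# energy per atom, using tabulate's "simple" format.
#
# get_energies(".", reanalyze=False, verbose=False, detailed=False,
#              sort="energy_per_atom", fmt="simple")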
def get_magnetizations(mydir, ion_list):
data = []
max_row = 0
for (parent, subdirs, files) in os.walk(mydir):
for f in files:
            if re.match(r"OUTCAR", f):
try:
row = []
fullpath = os.path.join(parent, f)
outcar = Outcar(fullpath)
mags = outcar.magnetization
mags = [m["tot"] for m in mags]
all_ions = list(range(len(mags)))
row.append(fullpath.lstrip("./"))
if ion_list:
all_ions = ion_list
for ion in all_ions:
row.append(str(mags[ion]))
data.append(row)
if len(all_ions) > max_row:
max_row = len(all_ions)
                except Exception:
                    # Skip OUTCAR files that cannot be parsed.
                    pass
for d in data:
if len(d) < max_row + 1:
d.extend([""] * (max_row + 1 - len(d)))
headers = ["Filename"]
for i in range(max_row):
headers.append(str(i))
print(tabulate(data, headers))
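# Illustrative call (argument values are assumptions): print the total
# magnetizations of ions 0-3 from every OUTCAR found under the current
# directory.
#
# get_magnetizations(".", [0, 1, 2, 3])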
def analyze(args):
default_energies = not (args.get_energies or args.ion_list)
if args.get_energies or default_energies:
for d in args.directories:
get_energies(d, args.reanalyze, args.verbose,
args.detailed, args.sort, args.format)
if args.ion_list:
if args.ion_list[0] == "All":
ion_list = None
else:
(start, end) = [int(i) for i in re.split(r"-", args.ion_list[0])]
ion_list = list(range(start, end + 1))
for d in args.directories:
get_magnetizations(d, ion_list)
| mit |
rapidhere/open-hackathon | open-hackathon-server/src/hackathon/expr/alauda_docker_expr_starter.py | 6 | 1452 | # -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
sys.path.append("..")
from docker_expr_starter import DockerExprStarter
from hackathon import RequiredFeature
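# RequiredFeature is the project's dependency-injection hook: the named
# component is resolved from the component registry at runtime rather than
# being imported directly.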
class AlaudaDockerStarter(DockerExprStarter):
docker = RequiredFeature("alauda_docker_proxy")
def _get_docker_proxy(self):
return self.docker
| mit |
hjanime/VisTrails | vistrails/db/versions/v0_9_3/domain/workflow.py | 1 | 7277 | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from auto_gen import DBWorkflow as _DBWorkflow
from auto_gen import DBAbstractionRef, DBModule, DBGroup
from id_scope import IdScope
import copy
class DBWorkflow(_DBWorkflow):
def __init__(self, *args, **kwargs):
_DBWorkflow.__init__(self, *args, **kwargs)
self.objects = {}
self.tmp_id = IdScope(1,
{DBAbstractionRef.vtType: DBModule.vtType,
DBGroup.vtType: DBModule.vtType})
def __copy__(self):
return DBWorkflow.do_copy(self)
def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
cp = _DBWorkflow.do_copy(self, new_ids, id_scope, id_remap)
cp.__class__ = DBWorkflow
# need to go through and reset the index to the copied objects
cp.build_index()
cp.tmp_id = copy.copy(self.tmp_id)
return cp
@staticmethod
def update_version(old_obj, trans_dict, new_obj=None):
if new_obj is None:
new_obj = DBWorkflow()
new_obj = _DBWorkflow.update_version(old_obj, trans_dict, new_obj)
new_obj.update_id_scope()
new_obj.build_index()
return new_obj
def update_id_scope(self):
pass
_vtTypeMap = {'abstractionRef': 'module', 'group': 'module'}
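    # 'abstractionRef' and 'group' objects are indexed under the 'module'
    # key, since the methods below treat them as modules when resolving
    # parent and child objects.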
def build_index(self):
g = self._vtTypeMap.get
self.objects = dict(((g(o.vtType, o.vtType), o._db_id), o)
for (o,_,_) in self.db_children())
def add_to_index(self, object):
obj_type = self._vtTypeMap.get(object.vtType, object.vtType)
self.objects[(obj_type, object.getPrimaryKey())] = object
def delete_from_index(self, object):
obj_type = self._vtTypeMap.get(object.vtType, object.vtType)
del self.objects[(obj_type, object.getPrimaryKey())]
    def capitalizeOne(self, s):
        # 's' instead of 'str' to avoid shadowing the builtin.
        return s[0].upper() + s[1:]
def db_print_objects(self):
for k,v in self.objects.iteritems():
print '%s: %s' % (k, v)
def db_has_object(self, type, id):
return (type, id) in self.objects
def db_get_object(self, type, id):
return self.objects[(type, id)]
def db_add_object(self, object, parent_obj_type=None,
parent_obj_id=None, parent_obj=None):
if parent_obj is None:
if parent_obj_type is None or parent_obj_id is None:
parent_obj = self
else:
if parent_obj_type == 'abstractionRef' or \
parent_obj_type == 'group':
parent_obj_type = 'module'
try:
parent_obj = self.objects[(parent_obj_type, parent_obj_id)]
except KeyError:
msg = "Cannot find object of type '%s' with id '%s'" % \
(parent_obj_type, parent_obj_id)
raise Exception(msg)
if object.vtType == 'abstractionRef' or object.vtType == 'group':
obj_type = 'module'
else:
obj_type = object.vtType
funname = 'db_add_' + obj_type
obj_copy = copy.copy(object)
getattr(parent_obj, funname)(obj_copy)
self.add_to_index(obj_copy)
def db_change_object(self, old_id, object, parent_obj_type=None,
parent_obj_id=None, parent_obj=None):
if parent_obj is None:
if parent_obj_type is None or parent_obj_id is None:
parent_obj = self
else:
if parent_obj_type == 'abstractionRef' or \
parent_obj_type == 'group':
parent_obj_type = 'module'
try:
parent_obj = self.objects[(parent_obj_type, parent_obj_id)]
except KeyError:
msg = "Cannot find object of type '%s' with id '%s'" % \
(parent_obj_type, parent_obj_id)
raise Exception(msg)
self.db_delete_object(old_id, object.vtType, None, None, parent_obj)
self.db_add_object(object, None, None, parent_obj)
def db_delete_object(self, obj_id, obj_type, parent_obj_type=None,
parent_obj_id=None, parent_obj=None):
if parent_obj is None:
if parent_obj_type is None or parent_obj_id is None:
parent_obj = self
else:
if parent_obj_type == 'abstractionRef' or \
parent_obj_type == 'group':
parent_obj_type = 'module'
try:
parent_obj = self.objects[(parent_obj_type, parent_obj_id)]
except KeyError:
msg = "Cannot find object of type '%s' with id '%s'" % \
(parent_obj_type, parent_obj_id)
raise Exception(msg)
if obj_type == 'abstractionRef' or obj_type == 'group':
obj_type = 'module'
funname = 'db_get_' + obj_type
if hasattr(parent_obj, funname):
object = getattr(parent_obj, funname)(obj_id)
else:
attr_name = 'db_' + obj_type
object = getattr(parent_obj, attr_name)
funname = 'db_delete_' + obj_type
getattr(parent_obj, funname)(object)
self.delete_from_index(object)
| bsd-3-clause |
axbaretto/beam | sdks/python/.tox/py27gcp/lib/python2.7/site-packages/pyasn1_modules/rfc1157.py | 127 | 3285 | #
# SNMPv1 message syntax
#
# ASN.1 source from:
# http://www.ietf.org/rfc/rfc1157.txt
#
# Sample captures from:
# http://wiki.wireshark.org/SampleCaptures/
#
from pyasn1.type import univ, namedtype, namedval, tag, constraint
from pyasn1_modules import rfc1155
class Version(univ.Integer):
namedValues = namedval.NamedValues(
('version-1', 0)
)
defaultValue = 0
class Community(univ.OctetString): pass
class RequestID(univ.Integer): pass
class ErrorStatus(univ.Integer):
namedValues = namedval.NamedValues(
('noError', 0),
('tooBig', 1),
('noSuchName', 2),
('badValue', 3),
('readOnly', 4),
('genErr', 5)
)
class ErrorIndex(univ.Integer): pass
class VarBind(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name', rfc1155.ObjectName()),
namedtype.NamedType('value', rfc1155.ObjectSyntax())
)
class VarBindList(univ.SequenceOf):
componentType = VarBind()
class _RequestBase(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('request-id', RequestID()),
namedtype.NamedType('error-status', ErrorStatus()),
namedtype.NamedType('error-index', ErrorIndex()),
namedtype.NamedType('variable-bindings', VarBindList())
)
class GetRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
)
class GetNextRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
)
class GetResponsePDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
)
class SetRequestPDU(_RequestBase):
tagSet = _RequestBase.tagSet.tagImplicitly(
tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
)
class TrapPDU(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('enterprise', univ.ObjectIdentifier()),
namedtype.NamedType('agent-addr', rfc1155.NetworkAddress()),
        namedtype.NamedType('generic-trap', univ.Integer().clone(
            namedValues=namedval.NamedValues(
                ('coldStart', 0), ('warmStart', 1), ('linkDown', 2),
                ('linkUp', 3), ('authenticationFailure', 4),
                ('egpNeighborLoss', 5), ('enterpriseSpecific', 6)))),
namedtype.NamedType('specific-trap', univ.Integer()),
namedtype.NamedType('time-stamp', rfc1155.TimeTicks()),
namedtype.NamedType('variable-bindings', VarBindList())
)
class Pdus(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('get-request', GetRequestPDU()),
namedtype.NamedType('get-next-request', GetNextRequestPDU()),
namedtype.NamedType('get-response', GetResponsePDU()),
namedtype.NamedType('set-request', SetRequestPDU()),
namedtype.NamedType('trap', TrapPDU())
)
class Message(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('community', Community()),
namedtype.NamedType('data', Pdus())
)
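# Illustrative sketch (not part of the original module): decoding a captured
# SNMPv1 datagram against the Message spec defined above. `raw_bytes` is an
# assumed variable holding the UDP payload.
#
# from pyasn1.codec.ber import decoder
#
# msg, _tail = decoder.decode(raw_bytes, asn1Spec=Message())
# print(msg.getComponentByName('community'))
# print(msg.getComponentByName('data').getName())  # e.g. 'get-request'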
| apache-2.0 |
Wynjones1/psemu | deps/googletest/googlemock/scripts/fuse_gmock_files.py | 242 | 8631 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gmock_files.py v0.1.0
Fuses Google Mock and Google Test source code into two .h files and a .cc file.
SYNOPSIS
fuse_gmock_files.py [GMOCK_ROOT_DIR] OUTPUT_DIR
Scans GMOCK_ROOT_DIR for Google Mock and Google Test source
code, assuming Google Test is in the GMOCK_ROOT_DIR/../googletest
directory, and generates three files:
OUTPUT_DIR/gtest/gtest.h, OUTPUT_DIR/gmock/gmock.h, and
OUTPUT_DIR/gmock-gtest-all.cc. Then you can build your tests
by adding OUTPUT_DIR to the include search path and linking
with OUTPUT_DIR/gmock-gtest-all.cc. These three files contain
everything you need to use Google Mock. Hence you can
"install" Google Mock by copying them to wherever you want.
GMOCK_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gmock_files.py fused_gmock
./fuse_gmock_files.py path/to/unpacked/gmock fused_gmock
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Mock or Google Test headers. Please
report any problems to googlemock@googlegroups.com. You can read
http://code.google.com/p/googlemock/wiki/CookBook for more
information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
# We assume that this file is in the scripts/ directory in the Google
# Mock root directory.
DEFAULT_GMOCK_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# We need to call into googletest/scripts/fuse_gtest_files.py.
sys.path.append(os.path.join(DEFAULT_GMOCK_ROOT_DIR, '../googletest/scripts'))
import fuse_gtest_files
gtest = fuse_gtest_files
# Regex for matching '#include "gmock/..."'.
INCLUDE_GMOCK_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gmock/.+)"')
# Where to find the source seed files.
GMOCK_H_SEED = 'include/gmock/gmock.h'
GMOCK_ALL_CC_SEED = 'src/gmock-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GMOCK_H_OUTPUT = 'gmock/gmock.h'
GMOCK_GTEST_ALL_CC_OUTPUT = 'gmock-gtest-all.cc'
def GetGTestRootDir(gmock_root):
"""Returns the root directory of Google Test."""
return os.path.join(gmock_root, '../googletest')
def ValidateGMockRootDir(gmock_root):
"""Makes sure gmock_root points to a valid gmock root directory.
The function aborts the program on failure.
"""
gtest.ValidateGTestRootDir(GetGTestRootDir(gmock_root))
gtest.VerifyFileExists(gmock_root, GMOCK_H_SEED)
gtest.VerifyFileExists(gmock_root, GMOCK_ALL_CC_SEED)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
gtest.VerifyOutputFile(output_dir, gtest.GTEST_H_OUTPUT)
gtest.VerifyOutputFile(output_dir, GMOCK_H_OUTPUT)
gtest.VerifyOutputFile(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT)
def FuseGMockH(gmock_root, output_dir):
"""Scans folder gmock_root to generate gmock/gmock.h in output_dir."""
output_file = file(os.path.join(output_dir, GMOCK_H_OUTPUT), 'w')
  processed_files = set()  # Holds all gmock headers we've processed.
def ProcessFile(gmock_header_path):
"""Processes the given gmock header file."""
# We don't process the same header twice.
if gmock_header_path in processed_files:
return
processed_files.add(gmock_header_path)
# Reads each line in the given gmock header.
for line in file(os.path.join(gmock_root, gmock_header_path), 'r'):
m = INCLUDE_GMOCK_FILE_REGEX.match(line)
if m:
# It's '#include "gmock/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/foo.h"'. We translate it to
# "gtest/gtest.h", regardless of what foo is, since all
# gtest headers are fused into gtest/gtest.h.
# There is no need to #include gtest.h twice.
if not gtest.GTEST_H_SEED in processed_files:
processed_files.add(gtest.GTEST_H_SEED)
output_file.write('#include "%s"\n' % (gtest.GTEST_H_OUTPUT,))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GMOCK_H_SEED)
output_file.close()
def FuseGMockAllCcToFile(gmock_root, output_file):
"""Scans folder gmock_root to fuse gmock-all.cc into output_file."""
  processed_files = set()
def ProcessFile(gmock_source_file):
"""Processes the given gmock source file."""
# We don't process the same #included file twice.
if gmock_source_file in processed_files:
return
processed_files.add(gmock_source_file)
# Reads each line in the given gmock source file.
for line in file(os.path.join(gmock_root, gmock_source_file), 'r'):
m = INCLUDE_GMOCK_FILE_REGEX.match(line)
if m:
# It's '#include "gmock/foo.h"'. We treat it as '#include
# "gmock/gmock.h"', as all other gmock headers are being fused
# into gmock.h and cannot be #included directly.
# There is no need to #include "gmock/gmock.h" more than once.
if not GMOCK_H_SEED in processed_files:
processed_files.add(GMOCK_H_SEED)
output_file.write('#include "%s"\n' % (GMOCK_H_OUTPUT,))
else:
m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."'.
# There is no need to #include gtest.h as it has been
# #included by gtest-all.cc.
pass
else:
m = gtest.INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GMOCK_ALL_CC_SEED)
def FuseGMockGTestAllCc(gmock_root, output_dir):
"""Scans folder gmock_root to generate gmock-gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT), 'w')
# First, fuse gtest-all.cc into gmock-gtest-all.cc.
gtest.FuseGTestAllCcToFile(GetGTestRootDir(gmock_root), output_file)
# Next, append fused gmock-all.cc to gmock-gtest-all.cc.
FuseGMockAllCcToFile(gmock_root, output_file)
output_file.close()
def FuseGMock(gmock_root, output_dir):
"""Fuses gtest.h, gmock.h, and gmock-gtest-all.h."""
ValidateGMockRootDir(gmock_root)
ValidateOutputDir(output_dir)
gtest.FuseGTestH(GetGTestRootDir(gmock_root), output_dir)
FuseGMockH(gmock_root, output_dir)
FuseGMockGTestAllCc(gmock_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gmock_files.py OUTPUT_DIR
FuseGMock(DEFAULT_GMOCK_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR
FuseGMock(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
| mit |
julen/django-allauth | allauth/socialaccount/fields.py | 69 | 2092 | # Courtesy of django-social-auth
import json
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import six
try:
from django.utils.encoding import smart_unicode as smart_text
except ImportError:
from django.utils.encoding import smart_text
class JSONField(six.with_metaclass(models.SubfieldBase,
models.TextField)):
"""Simple JSON field that stores python structures as JSON strings
on database.
"""
def to_python(self, value):
"""
Convert the input JSON value into python structures, raises
django.core.exceptions.ValidationError if the data can't be converted.
"""
if self.blank and not value:
return None
if isinstance(value, six.string_types):
try:
return json.loads(value)
except Exception as e:
raise ValidationError(str(e))
else:
return value
def validate(self, value, model_instance):
"""Check value is a valid JSON string, raise ValidationError on
error."""
if isinstance(value, six.string_types):
super(JSONField, self).validate(value, model_instance)
try:
json.loads(value)
except Exception as e:
raise ValidationError(str(e))
def get_prep_value(self, value):
"""Convert value to JSON string before save"""
try:
return json.dumps(value)
except Exception as e:
raise ValidationError(str(e))
def value_to_string(self, obj):
"""Return value from object converted to string properly"""
return smart_text(self.get_prep_value(self._get_val_from_obj(obj)))
def value_from_object(self, obj):
"""Return value dumped to string."""
return self.get_prep_value(self._get_val_from_obj(obj))
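# Illustrative usage (a sketch, not part of this module; the model is an
# assumption): persist dicts/lists as JSON text on a model field.
#
# from django.db import models
#
# class SocialAccount(models.Model):
#     extra_data = JSONField(default='{}')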
try:
    from south.modelsinspector import add_introspection_rules
    add_introspection_rules([], [r"^allauth\.socialaccount\.fields\.JSONField"])
except ImportError:
    # South is optional; skip its introspection rules when it isn't installed.
    pass
| mit |
wreckJ/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/gdal/geometries.py | 388 | 26357 | """
The OGRGeometry is a wrapper for using the OGR Geometry class
(see http://www.gdal.org/ogr/classOGRGeometry.html). OGRGeometry
may be instantiated when reading geometries from OGR Data Sources
(e.g. SHP files), or when given OGC WKT (a string).
While the 'full' API is not present yet, the API is "pythonic" unlike
the traditional and "next-generation" OGR Python bindings. One major
advantage OGR Geometries have over their GEOS counterparts is support
for spatial reference systems and their transformation.
Example:
>>> from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, SpatialReference
>>> wkt1, wkt2 = 'POINT(-90 30)', 'POLYGON((0 0, 5 0, 5 5, 0 5))'
>>> pnt = OGRGeometry(wkt1)
>>> print pnt
POINT (-90 30)
>>> mpnt = OGRGeometry(OGRGeomType('MultiPoint'), SpatialReference('WGS84'))
>>> mpnt.add(wkt1)
>>> mpnt.add(wkt1)
>>> print mpnt
MULTIPOINT (-90 30,-90 30)
>>> print mpnt.srs.name
WGS 84
>>> print mpnt.srs.proj
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> mpnt.transform_to(SpatialReference('NAD27'))
>>> print mpnt.srs.proj
+proj=longlat +ellps=clrk66 +datum=NAD27 +no_defs
>>> print mpnt
MULTIPOINT (-89.999930378602485 29.999797886557641,-89.999930378602485 29.999797886557641)
The OGRGeomType class is to make it easy to specify an OGR geometry type:
>>> from django.contrib.gis.gdal import OGRGeomType
>>> gt1 = OGRGeomType(3) # Using an integer for the type
>>> gt2 = OGRGeomType('Polygon') # Using a string
>>> gt3 = OGRGeomType('POLYGON') # It's case-insensitive
>>> print gt1 == 3, gt1 == 'Polygon' # Equivalence works w/non-OGRGeomType objects
True
"""
# Python library requisites.
import sys
from binascii import a2b_hex
from ctypes import byref, string_at, c_char_p, c_double, c_ubyte, c_void_p
# Getting GDAL prerequisites
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.envelope import Envelope, OGREnvelope
from django.contrib.gis.gdal.error import OGRException, OGRIndexError, SRSException
from django.contrib.gis.gdal.geomtype import OGRGeomType
from django.contrib.gis.gdal.libgdal import GEOJSON, GDAL_VERSION
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform
# Getting the ctypes prototype functions that interface w/the GDAL C library.
from django.contrib.gis.gdal.prototypes import geom as capi, srs as srs_api
# For recognizing geometry input.
from django.contrib.gis.geometry.regex import hex_regex, wkt_regex, json_regex
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_G_* routines are relevant here.
#### OGRGeometry Class ####
class OGRGeometry(GDALBase):
"Generally encapsulates an OGR geometry."
def __init__(self, geom_input, srs=None):
"Initializes Geometry on either WKT or an OGR pointer as input."
str_instance = isinstance(geom_input, basestring)
        # If HEX, unpack input to a binary buffer.
if str_instance and hex_regex.match(geom_input):
geom_input = buffer(a2b_hex(geom_input.upper()))
str_instance = False
# Constructing the geometry,
if str_instance:
# Checking if unicode
if isinstance(geom_input, unicode):
# Encoding to ASCII, WKT or HEX doesn't need any more.
geom_input = geom_input.encode('ascii')
wkt_m = wkt_regex.match(geom_input)
json_m = json_regex.match(geom_input)
if wkt_m:
if wkt_m.group('srid'):
# If there's EWKT, set the SRS w/value of the SRID.
srs = int(wkt_m.group('srid'))
if wkt_m.group('type').upper() == 'LINEARRING':
# OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.
# See http://trac.osgeo.org/gdal/ticket/1992.
g = capi.create_geom(OGRGeomType(wkt_m.group('type')).num)
capi.import_wkt(g, byref(c_char_p(wkt_m.group('wkt'))))
else:
g = capi.from_wkt(byref(c_char_p(wkt_m.group('wkt'))), None, byref(c_void_p()))
elif json_m:
if GEOJSON:
g = capi.from_json(geom_input)
else:
raise NotImplementedError('GeoJSON input only supported on GDAL 1.5+.')
else:
# Seeing if the input is a valid short-hand string
# (e.g., 'Point', 'POLYGON').
ogr_t = OGRGeomType(geom_input)
g = capi.create_geom(OGRGeomType(geom_input).num)
elif isinstance(geom_input, buffer):
# WKB was passed in
g = capi.from_wkb(str(geom_input), None, byref(c_void_p()), len(geom_input))
elif isinstance(geom_input, OGRGeomType):
# OGRGeomType was passed in, an empty geometry will be created.
g = capi.create_geom(geom_input.num)
elif isinstance(geom_input, self.ptr_type):
# OGR pointer (c_void_p) was the input.
g = geom_input
else:
raise OGRException('Invalid input type for OGR Geometry construction: %s' % type(geom_input))
# Now checking the Geometry pointer before finishing initialization
# by setting the pointer for the object.
if not g:
raise OGRException('Cannot create OGR Geometry from input: %s' % str(geom_input))
self.ptr = g
# Assigning the SpatialReference object to the geometry, if valid.
if bool(srs): self.srs = srs
# Setting the class depending upon the OGR Geometry Type
self.__class__ = GEO_CLASSES[self.geom_type.num]
def __del__(self):
"Deletes this Geometry."
if self._ptr: capi.destroy_geom(self._ptr)
# Pickle routines
def __getstate__(self):
srs = self.srs
if srs:
srs = srs.wkt
else:
srs = None
return str(self.wkb), srs
def __setstate__(self, state):
wkb, srs = state
ptr = capi.from_wkb(wkb, None, byref(c_void_p()), len(wkb))
if not ptr: raise OGRException('Invalid OGRGeometry loaded from pickled state.')
self.ptr = ptr
self.srs = srs
@classmethod
def from_bbox(cls, bbox):
"Constructs a Polygon from a bounding box (4-tuple)."
x0, y0, x1, y1 = bbox
return OGRGeometry( 'POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (
x0, y0, x0, y1, x1, y1, x1, y0, x0, y0) )
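    # Illustrative note: from_bbox((0, 0, 1, 1)) builds the closed ring
    # POLYGON((0 0, 0 1, 1 1, 1 0, 0 0)) from an (xmin, ymin, xmax, ymax)
    # tuple.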
### Geometry set-like operations ###
# g = g1 | g2
def __or__(self, other):
"Returns the union of the two geometries."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
def __eq__(self, other):
"Is this Geometry equal to the other?"
if isinstance(other, OGRGeometry):
return self.equals(other)
else:
return False
def __ne__(self, other):
"Tests for inequality."
return not (self == other)
def __str__(self):
"WKT is used for the string representation."
return self.wkt
#### Geometry Properties ####
@property
def dimension(self):
"Returns 0 for points, 1 for lines, and 2 for surfaces."
return capi.get_dims(self.ptr)
def _get_coord_dim(self):
"Returns the coordinate dimension of the Geometry."
if isinstance(self, GeometryCollection) and GDAL_VERSION < (1, 5, 2):
# On GDAL versions prior to 1.5.2, there exists a bug in which
# the coordinate dimension of geometry collections is always 2:
# http://trac.osgeo.org/gdal/ticket/2334
# Here we workaround by returning the coordinate dimension of the
# first geometry in the collection instead.
if len(self):
return capi.get_coord_dim(capi.get_geom_ref(self.ptr, 0))
return capi.get_coord_dim(self.ptr)
def _set_coord_dim(self, dim):
"Sets the coordinate dimension of this Geometry."
if not dim in (2, 3):
raise ValueError('Geometry dimension must be either 2 or 3')
capi.set_coord_dim(self.ptr, dim)
coord_dim = property(_get_coord_dim, _set_coord_dim)
@property
def geom_count(self):
"The number of elements in this Geometry."
return capi.get_geom_count(self.ptr)
@property
def point_count(self):
"Returns the number of Points in this Geometry."
return capi.get_point_count(self.ptr)
@property
def num_points(self):
"Alias for `point_count` (same name method in GEOS API.)"
return self.point_count
@property
def num_coords(self):
"Alais for `point_count`."
return self.point_count
@property
def geom_type(self):
"Returns the Type for this Geometry."
return OGRGeomType(capi.get_geom_type(self.ptr))
@property
def geom_name(self):
"Returns the Name of this Geometry."
return capi.get_geom_name(self.ptr)
@property
def area(self):
"Returns the area for a LinearRing, Polygon, or MultiPolygon; 0 otherwise."
return capi.get_area(self.ptr)
@property
def envelope(self):
"Returns the envelope for this Geometry."
# TODO: Fix Envelope() for Point geometries.
return Envelope(capi.get_envelope(self.ptr, byref(OGREnvelope())))
@property
def extent(self):
"Returns the envelope as a 4-tuple, instead of as an Envelope object."
return self.envelope.tuple
#### SpatialReference-related Properties ####
# The SRS property
def _get_srs(self):
"Returns the Spatial Reference for this Geometry."
try:
srs_ptr = capi.get_geom_srs(self.ptr)
return SpatialReference(srs_api.clone_srs(srs_ptr))
except SRSException:
return None
def _set_srs(self, srs):
"Sets the SpatialReference for this geometry."
# Do not have to clone the `SpatialReference` object pointer because
        # when it is assigned to this `OGRGeometry` its internal OGR
# reference count is incremented, and will likewise be released
# (decremented) when this geometry's destructor is called.
if isinstance(srs, SpatialReference):
srs_ptr = srs.ptr
elif isinstance(srs, (int, long, basestring)):
sr = SpatialReference(srs)
srs_ptr = sr.ptr
else:
raise TypeError('Cannot assign spatial reference with object of type: %s' % type(srs))
capi.assign_srs(self.ptr, srs_ptr)
srs = property(_get_srs, _set_srs)
# The SRID property
def _get_srid(self):
srs = self.srs
if srs: return srs.srid
return None
def _set_srid(self, srid):
if isinstance(srid, (int, long)):
self.srs = srid
else:
raise TypeError('SRID must be set with an integer.')
srid = property(_get_srid, _set_srid)
#### Output Methods ####
@property
def geos(self):
"Returns a GEOSGeometry object from this OGRGeometry."
from django.contrib.gis.geos import GEOSGeometry
return GEOSGeometry(self.wkb, self.srid)
@property
def gml(self):
"Returns the GML representation of the Geometry."
return capi.to_gml(self.ptr)
@property
def hex(self):
"Returns the hexadecimal representation of the WKB (a string)."
return str(self.wkb).encode('hex').upper()
#return b2a_hex(self.wkb).upper()
@property
def json(self):
"""
Returns the GeoJSON representation of this Geometry (requires
GDAL 1.5+).
"""
if GEOJSON:
return capi.to_json(self.ptr)
else:
raise NotImplementedError('GeoJSON output only supported on GDAL 1.5+.')
geojson = json
@property
def kml(self):
"Returns the KML representation of the Geometry."
if GEOJSON:
return capi.to_kml(self.ptr, None)
else:
raise NotImplementedError('KML output only supported on GDAL 1.5+.')
@property
def wkb_size(self):
"Returns the size of the WKB buffer."
return capi.get_wkbsize(self.ptr)
@property
def wkb(self):
"Returns the WKB representation of the Geometry."
if sys.byteorder == 'little':
byteorder = 1 # wkbNDR (from ogr_core.h)
else:
byteorder = 0 # wkbXDR
sz = self.wkb_size
# Creating the unsigned character buffer, and passing it in by reference.
buf = (c_ubyte * sz)()
wkb = capi.to_wkb(self.ptr, byteorder, byref(buf))
# Returning a buffer of the string at the pointer.
return buffer(string_at(buf, sz))
@property
def wkt(self):
"Returns the WKT representation of the Geometry."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def ewkt(self):
"Returns the EWKT representation of the Geometry."
srs = self.srs
if srs and srs.srid:
return 'SRID=%s;%s' % (srs.srid, self.wkt)
else:
return self.wkt
#### Geometry Methods ####
def clone(self):
"Clones this OGR Geometry."
return OGRGeometry(capi.clone_geom(self.ptr), self.srs)
def close_rings(self):
"""
If there are any rings within this geometry that have not been
closed, this routine will do so by adding the starting point at the
end.
"""
# Closing the open rings.
capi.geom_close_rings(self.ptr)
def transform(self, coord_trans, clone=False):
"""
Transforms this geometry to a different spatial reference system.
May take a CoordTransform object, a SpatialReference object, string
WKT or PROJ.4, and/or an integer SRID. By default nothing is returned
and the geometry is transformed in-place. However, if the `clone`
keyword is set, then a transformed clone of this geometry will be
returned.
"""
if clone:
klone = self.clone()
klone.transform(coord_trans)
return klone
# Have to get the coordinate dimension of the original geometry
# so it can be used to reset the transformed geometry's dimension
# afterwards. This is done because of GDAL bug (in versions prior
# to 1.7) that turns geometries 3D after transformation, see:
# http://trac.osgeo.org/gdal/changeset/17792
if GDAL_VERSION < (1, 7):
orig_dim = self.coord_dim
# Depending on the input type, use the appropriate OGR routine
# to perform the transformation.
if isinstance(coord_trans, CoordTransform):
capi.geom_transform(self.ptr, coord_trans.ptr)
elif isinstance(coord_trans, SpatialReference):
capi.geom_transform_to(self.ptr, coord_trans.ptr)
elif isinstance(coord_trans, (int, long, basestring)):
sr = SpatialReference(coord_trans)
capi.geom_transform_to(self.ptr, sr.ptr)
else:
raise TypeError('Transform only accepts CoordTransform, '
'SpatialReference, string, and integer objects.')
# Setting with original dimension, see comment above.
if GDAL_VERSION < (1, 7):
if isinstance(self, GeometryCollection):
# With geometry collections have to set dimension on
# each internal geometry reference, as the collection
# dimension isn't affected.
for i in xrange(len(self)):
internal_ptr = capi.get_geom_ref(self.ptr, i)
if orig_dim != capi.get_coord_dim(internal_ptr):
capi.set_coord_dim(internal_ptr, orig_dim)
else:
if self.coord_dim != orig_dim:
self.coord_dim = orig_dim
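    # Illustrative call (the SRID value is an assumption): reproject a
    # clone, leaving this geometry untouched:
    #   transformed = geom.transform(4326, clone=True)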
def transform_to(self, srs):
"For backwards-compatibility."
self.transform(srs)
#### Topology Methods ####
def _topology(self, func, other):
"""A generalized function for topology operations, takes a GDAL function and
the other geometry to perform the operation on."""
if not isinstance(other, OGRGeometry):
raise TypeError('Must use another OGRGeometry object for topology operations!')
# Returning the output of the given function with the other geometry's
# pointer.
return func(self.ptr, other.ptr)
def intersects(self, other):
"Returns True if this geometry intersects with the other."
return self._topology(capi.ogr_intersects, other)
def equals(self, other):
"Returns True if this geometry is equivalent to the other."
return self._topology(capi.ogr_equals, other)
def disjoint(self, other):
"Returns True if this geometry and the other are spatially disjoint."
return self._topology(capi.ogr_disjoint, other)
def touches(self, other):
"Returns True if this geometry touches the other."
return self._topology(capi.ogr_touches, other)
def crosses(self, other):
"Returns True if this geometry crosses the other."
return self._topology(capi.ogr_crosses, other)
def within(self, other):
"Returns True if this geometry is within the other."
return self._topology(capi.ogr_within, other)
def contains(self, other):
"Returns True if this geometry contains the other."
return self._topology(capi.ogr_contains, other)
def overlaps(self, other):
"Returns True if this geometry overlaps the other."
return self._topology(capi.ogr_overlaps, other)
#### Geometry-generation Methods ####
def _geomgen(self, gen_func, other=None):
"A helper routine for the OGR routines that generate geometries."
if isinstance(other, OGRGeometry):
return OGRGeometry(gen_func(self.ptr, other.ptr), self.srs)
else:
return OGRGeometry(gen_func(self.ptr), self.srs)
@property
def boundary(self):
"Returns the boundary of this geometry."
return self._geomgen(capi.get_boundary)
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points in
this Geometry.
"""
return self._geomgen(capi.geom_convex_hull)
def difference(self, other):
"""
Returns a new geometry consisting of the region which is the difference
of this geometry and the other.
"""
return self._geomgen(capi.geom_diff, other)
def intersection(self, other):
"""
Returns a new geometry consisting of the region of intersection of this
geometry and the other.
"""
return self._geomgen(capi.geom_intersection, other)
def sym_difference(self, other):
"""
Returns a new geometry which is the symmetric difference of this
geometry and the other.
"""
return self._geomgen(capi.geom_sym_diff, other)
def union(self, other):
"""
Returns a new geometry consisting of the region which is the union of
this geometry and the other.
"""
return self._geomgen(capi.geom_union, other)
# The subclasses for OGR Geometry.
class Point(OGRGeometry):
@property
def x(self):
"Returns the X coordinate for this Point."
return capi.getx(self.ptr, 0)
@property
def y(self):
"Returns the Y coordinate for this Point."
return capi.gety(self.ptr, 0)
@property
def z(self):
"Returns the Z coordinate for this Point."
if self.coord_dim == 3:
return capi.getz(self.ptr, 0)
@property
def tuple(self):
"Returns the tuple of this point."
if self.coord_dim == 2:
return (self.x, self.y)
elif self.coord_dim == 3:
return (self.x, self.y, self.z)
coords = tuple
class LineString(OGRGeometry):
def __getitem__(self, index):
"Returns the Point at the given index."
if index >= 0 and index < self.point_count:
x, y, z = c_double(), c_double(), c_double()
capi.get_point(self.ptr, index, byref(x), byref(y), byref(z))
dim = self.coord_dim
if dim == 1:
return (x.value,)
elif dim == 2:
return (x.value, y.value)
elif dim == 3:
return (x.value, y.value, z.value)
else:
raise OGRIndexError('index out of range: %s' % str(index))
def __iter__(self):
"Iterates over each point in the LineString."
for i in xrange(self.point_count):
yield self[i]
def __len__(self):
"The length returns the number of points in the LineString."
return self.point_count
@property
def tuple(self):
"Returns the tuple representation of this LineString."
return tuple([self[i] for i in xrange(len(self))])
coords = tuple
def _listarr(self, func):
"""
Internal routine that returns a sequence (list) corresponding with
the given function.
"""
return [func(self.ptr, i) for i in xrange(len(self))]
@property
def x(self):
"Returns the X coordinates in a list."
return self._listarr(capi.getx)
@property
def y(self):
"Returns the Y coordinates in a list."
return self._listarr(capi.gety)
@property
def z(self):
"Returns the Z coordinates in a list."
if self.coord_dim == 3:
return self._listarr(capi.getz)
# LinearRings are used in Polygons.
class LinearRing(LineString): pass
class Polygon(OGRGeometry):
def __len__(self):
"The number of interior rings in this Polygon."
return self.geom_count
def __iter__(self):
"Iterates through each ring in the Polygon."
for i in xrange(self.geom_count):
yield self[i]
def __getitem__(self, index):
"Gets the ring at the specified index."
if index < 0 or index >= self.geom_count:
raise OGRIndexError('index out of range: %s' % index)
else:
return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)
# Polygon Properties
@property
def shell(self):
"Returns the shell of this Polygon."
return self[0] # First ring is the shell
exterior_ring = shell
@property
def tuple(self):
"Returns a tuple of LinearRing coordinate tuples."
return tuple([self[i].tuple for i in xrange(self.geom_count)])
coords = tuple
@property
def point_count(self):
"The number of Points in this Polygon."
# Summing up the number of points in each ring of the Polygon.
return sum([self[i].point_count for i in xrange(self.geom_count)])
@property
def centroid(self):
"Returns the centroid (a Point) of this Polygon."
# The centroid is a Point, create a geometry for this.
p = OGRGeometry(OGRGeomType('Point'))
capi.get_centroid(self.ptr, p.ptr)
return p
# Geometry Collection base class.
class GeometryCollection(OGRGeometry):
"The Geometry Collection class."
def __getitem__(self, index):
"Gets the Geometry at the specified index."
if index < 0 or index >= self.geom_count:
raise OGRIndexError('index out of range: %s' % index)
else:
return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)
def __iter__(self):
"Iterates over each Geometry."
for i in xrange(self.geom_count):
yield self[i]
def __len__(self):
"The number of geometries in this Geometry Collection."
return self.geom_count
def add(self, geom):
"Add the geometry to this Geometry Collection."
if isinstance(geom, OGRGeometry):
if isinstance(geom, self.__class__):
for g in geom: capi.add_geom(self.ptr, g.ptr)
else:
capi.add_geom(self.ptr, geom.ptr)
elif isinstance(geom, basestring):
tmp = OGRGeometry(geom)
capi.add_geom(self.ptr, tmp.ptr)
else:
raise OGRException('Must add an OGRGeometry.')
@property
def point_count(self):
"The number of Points in this Geometry Collection."
# Summing up the number of points in each geometry in this collection
return sum([self[i].point_count for i in xrange(self.geom_count)])
@property
def tuple(self):
"Returns a tuple representation of this Geometry Collection."
return tuple([self[i].tuple for i in xrange(self.geom_count)])
coords = tuple
# Multiple Geometry types.
class MultiPoint(GeometryCollection): pass
class MultiLineString(GeometryCollection): pass
class MultiPolygon(GeometryCollection): pass
# Class mapping dictionary (using the OGRwkbGeometryType as the key)
GEO_CLASSES = {1 : Point,
2 : LineString,
3 : Polygon,
4 : MultiPoint,
5 : MultiLineString,
6 : MultiPolygon,
7 : GeometryCollection,
101: LinearRing,
1 + OGRGeomType.wkb25bit : Point,
2 + OGRGeomType.wkb25bit : LineString,
3 + OGRGeomType.wkb25bit : Polygon,
4 + OGRGeomType.wkb25bit : MultiPoint,
5 + OGRGeomType.wkb25bit : MultiLineString,
6 + OGRGeomType.wkb25bit : MultiPolygon,
7 + OGRGeomType.wkb25bit : GeometryCollection,
}
| apache-2.0 |
cysnake4713/account-financial-tools | account_balance_line/account_move_line.py | 31 | 1570 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Vincent Renaville (Camptocamp)
# Copyright 2010-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class account_move_line(orm.Model):
_inherit = "account.move.line"
def _line_balance(self, cr, uid, ids, field, arg, context=None):
res = {}
move_lines = self.read(cr, uid, ids,
['debit', 'credit'],
context=context)
for line in move_lines:
res[line['id']] = line['debit'] - line['credit']
return res
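    # The computed balance is persisted (store=True below) so that list
    # views can sort and group on it without recomputing debit - credit
    # per line.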
_columns = {
'line_balance': fields.function(
_line_balance, method=True,
string='Balance',
store=True),
}
| agpl-3.0 |
semanticize/semanticizest | semanticizest/parse_wikidump/__init__.py | 1 | 11223 | """Parsing utilities for Wikipedia database dumps."""
from __future__ import print_function
from os.path import basename
from bz2 import BZ2File
from collections import Counter, namedtuple
import gzip
from HTMLParser import HTMLParser
from itertools import chain
import logging
import re
import xml.etree.ElementTree as etree # don't use LXML, it's slower (!)
import six
from semanticizest._util import ngrams
from semanticizest._version import __version__
_logger = logging.getLogger(__name__)
Page = namedtuple("Page", ['page_id', 'title', 'content', 'redirect'])
def _get_namespace(tag):
try:
namespace = re.match(r"^{(.*?)}", tag).group(1)
except AttributeError:
namespace = ''
if not namespace.startswith("http://www.mediawiki.org/xml/export-"):
raise ValueError("namespace %r not recognized as MediaWiki dump"
% namespace)
return namespace
if six.PY3:
def _tounicode(s):
return s
else:
def _tounicode(s):
# Convert ASCII strings coming from xml.etree.
if isinstance(s, str):
s = s.decode('ascii')
return s
def extract_pages(f):
"""Extract pages from Wikimedia database dump.
Parameters
----------
f : file-like or str
Handle on Wikimedia article dump. May be any type supported by
etree.iterparse.
Returns
-------
    pages : iterable over `Page`s
        namedtuples containing the fields (page_id, title, content,
        redirect_target). In Python 2.x, may produce either
        str or unicode strings.
"""
elems = etree.iterparse(f, events=["end"])
    # We can't rely on a fixed namespace for database dumps, since the
    # namespace changes every time a small modification is made to the
    # format. So, determine it from the first element we find, which will
    # be part of the metadata, and construct the element paths from it.
_, elem = next(elems)
namespace = _get_namespace(elem.tag)
ns_mapping = {"ns": namespace}
ns_path = "./{%(ns)s}ns" % ns_mapping
page_tag = "{%(ns)s}page" % ns_mapping
text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping
id_path = "./{%(ns)s}id" % ns_mapping
title_path = "./{%(ns)s}title" % ns_mapping
redir_path = "./{%(ns)s}redirect" % ns_mapping
for _, elem in elems:
if elem.tag == page_tag:
if elem.find(ns_path).text != '0':
continue
text = elem.find(text_path).text
if text is None:
# Empty article; these occur in Wikinews dumps.
continue
redir = elem.find(redir_path)
redir = (_tounicode(redir.attrib['title'])
if redir is not None else None)
text = _tounicode(text)
title = _tounicode(elem.find(title_path).text)
yield Page(int(elem.find(id_path).text), title, text, redir)
# Prune the element tree, as per
# http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
# We do this only for <page>s, since we need to inspect the
# ./revision/text element. That shouldn't matter since the pages
# comprise the bulk of the file.
elem.clear()
def _clean_link(l):
"""Clean links (anchor and titles)."""
l = l.strip()
l = re.sub(r'\s+', ' ', l)
return l
def extract_links(article):
"""Extract all (or most) links from article text (wiki syntax).
Returns an iterable over (target, anchor) pairs.
"""
links = re.findall(r"(\w*) \[\[ ([^]]+) \]\] (\w*)", article,
re.UNICODE | re.VERBOSE)
r = []
for before, l, after in links:
if '|' in l:
target, anchor = l.split('|', 1)
else:
target, anchor = l, l
# If the anchor contains a colon, assume it's a file or category link.
if ':' in target:
continue
# Some links contain newlines...
target = _clean_link(target)
anchor = _clean_link(anchor)
# Remove section links and normalize to the format used in <redirect>
# elements: uppercase first character, spaces instead of underscores.
target = target.split('#', 1)[0].replace('_', ' ')
if not target:
continue # section link
if not target[0].isupper():
target = target[0].upper() + target[1:]
anchor = before + anchor + after
r.append((target, anchor))
return r
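# Doctest-style illustration of extract_links (the wikitext is an
# assumption):
#
#   >>> extract_links(u"a [[target page|link]]s here")
#   [(u'Target page', u'links')]
#
# The trailing "s" is folded into the anchor, and the target's first letter
# is uppercased to match <redirect> conventions.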
_UNWANTED = re.compile(r"""
(:?
\{\{ .*? \}\}
| \{\| .*? \|\}
| ^[|!] .* $ # table content
| <math> .*? </math>
| <ref .*? > .*? </ref>
| <br\s*/>
| </?su[bp]\s*>
| \[\[ [^][:]* : (\[\[.*?\]\]|.)*? \]\] # media, categories
| =+ .*? =+ # headers
| ''+
| ^\* # list bullets
)
""", re.DOTALL | re.MULTILINE | re.UNICODE | re.VERBOSE)
_unescape_entities = HTMLParser().unescape
def clean_text(page):
"""Return the clean-ish running text parts of a page."""
return re.sub(_UNWANTED, "", _unescape_entities(page))
_LINK_SYNTAX = re.compile(r"""
(?:
\[\[
(?: [^]|]* \|)? # "target|" in [[target|anchor]]
|
\]\]
)
""", re.DOTALL | re.MULTILINE | re.VERBOSE)
def remove_links(page):
"""Remove links from clean_text output."""
page = re.sub(r'\]\]\[\[', ' ', page) # hack hack hack, see test
return re.sub(_LINK_SYNTAX, '', page)
def page_statistics(page, N, sentence_splitter=None, tokenizer=None):
"""Gather statistics from a single WP page.
The sentence_splitter should be a callable that splits text into sentences.
It defaults to an unspecified heuristic.
See ``parse_dump`` for the parameters.
Returns
-------
stats : (dict, dict)
The first dict maps (target, anchor) pairs to counts.
The second maps n-grams (up to N) to counts.
"""
if N is not None and not isinstance(N, int):
raise TypeError("expected integer or None for N, got %r" % N)
clean = clean_text(page)
link_counts = Counter(extract_links(clean))
if N:
no_links = remove_links(clean)
if sentence_splitter is None:
            # Pass flags by keyword: re.split's third positional argument
            # is maxsplit, not flags.
            sentences = re.split(r'(?:\n{2,}|\.\s+)', no_links,
                                 flags=re.MULTILINE | re.UNICODE)
else:
            sentences = [sentence
                         for paragraph in re.split('\n+', no_links)
                         for sentence in sentence_splitter(paragraph)]
if tokenizer is None:
tokenizer = re.compile(r'\w+', re.UNICODE).findall
all_ngrams = chain.from_iterable(ngrams(tokenizer(sentence), N)
for sentence in sentences)
ngram_counts = Counter(all_ngrams)
else:
ngram_counts = None
return link_counts, ngram_counts
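# Hedged usage sketch (page text hypothetical; ngrams() is defined earlier
# in this module and appears to yield n-grams up to length N):
#   links, grams = page_statistics(u"See [[Amsterdam]] for details.", N=2)
#   links should be Counter({(u'Amsterdam', u'Amsterdam'): 1}); grams counts
#   unigrams and bigrams over the link-free running text.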
def _open(f):
if isinstance(f, six.string_types):
if f.endswith('.gz'):
return gzip.open(f)
elif f.endswith('.bz2'):
return BZ2File(f)
return open(f)
return f
def parse_dump(dump, db, N=7, sentence_splitter=None, tokenizer=None):
"""Parse Wikipedia database dump, return n-gram and link statistics.
Parameters
----------
dump : {file-like, str}
Path to or handle on a Wikipedia page dump, e.g.
'chowiki-20140919-pages-articles.xml.bz2'.
db : SQLite connection
Connection to database that will be used to store statistics.
N : integer
Maximum n-gram length. Set this to a false value to disable
n-gram counting; this disables some of the fancier statistics,
but baseline entity linking will still work.
sentence_splitter : callable, optional
Sentence splitter. Called on output of paragraph splitter
(strings).
tokenizer : callable, optional
Tokenizer. Called on output of sentence splitter (strings).
Must return iterable over strings.
"""
f = _open(dump)
redirects = {}
c = db.cursor()
# Store the semanticizer version for later reference
c.execute('''insert into parameters values ('version', ?);''',
(__version__,))
# Store the dump file name
c.execute('''insert into parameters values ('dump', ?);''',
(basename(dump),))
# Store the maximum ngram length, so we can use it later on
c.execute('''insert into parameters values ('N', ?);''', (str(N),))
# Temporary index to speed up insertion
c.execute('''create unique index target_anchor
on linkstats(ngram_id, target)''')
_logger.info("Processing articles")
for i, page in enumerate(extract_pages(f), 1):
if i % 10000 == 0:
_logger.info("%d articles done", i)
if page.redirect is not None:
redirects[page.title] = page.redirect
continue
link, ngram = page_statistics(page.content, N=N, tokenizer=tokenizer,
sentence_splitter=sentence_splitter)
# We don't count the n-grams within the links, but we need them
# in the table, so add them with zero count.
tokens = chain(six.iteritems(ngram or {}),
((anchor, 0) for _, anchor in six.iterkeys(link)))
tokens = list(tokens)
c.executemany('''insert or ignore into ngrams (ngram) values (?)''',
((g,) for g, _ in tokens))
c.executemany('''update ngrams set tf = tf + ?, df = df + 1
where ngram = ?''',
((count, token) for token, count in tokens))
c.executemany('''insert or ignore into linkstats values
((select id from ngrams where ngram = ?), ?, 0)''',
((anchor, target)
for target, anchor in six.iterkeys(link)))
c.executemany('''update linkstats set count = count + ?
where ngram_id = (select rowid from ngrams
where ngram = ?)''',
((count, anchor)
for (_, anchor), count in six.iteritems(link)))
db.commit()
_logger.info("Processing %d redirects", len(redirects))
for redir, target in redirects.items():
        # Fetch the rows up front: calling execute() on cursor c inside the
        # loop would reset it and silently drop the remaining result rows.
        rows = c.execute('''select ngram_id, count from linkstats
                            where target = ?''', [redir]).fetchall()
        for anchor, count in rows:
# TODO: combine the next two execute statements
c.execute('''insert or ignore into linkstats values (?, ?, 0)''',
[anchor, target])
c.execute('''update linkstats
set count = count + ?
where target = ? and ngram_id = ?''',
(count, target, anchor))
c.executemany('delete from linkstats where target = ?',
([redir] for redir in redirects))
_logger.info("Finalizing database")
c.executescript('''drop index target_anchor; vacuum;''')
_logger.info("Dump parsing done: processed %d articles", i)
db.commit()
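# Minimal driver sketch (assumes the sqlite schema with the parameters,
# ngrams and linkstats tables referenced above; the file name is
# hypothetical):
#   import sqlite3
#   db = sqlite3.connect('stats.db')
#   parse_dump('enwiki-pages-articles.xml.bz2', db, N=7)
#   db.close()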
| apache-2.0 |
pshchelo/heat | heat/tests/test_watch.py | 2 | 38660 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mox
from oslo_utils import timeutils
from heat.common import exception
from heat.engine import stack
from heat.engine import template
from heat.engine import watchrule
from heat.objects import watch_rule
from heat.tests import common
from heat.tests import utils
class WatchData(object):
def __init__(self, data, created_at):
self.created_at = created_at
self.data = {'test_metric': {'Value': data,
'Unit': 'Count'}}
class DummyAction(object):
signal = "DummyAction"
class WatchRuleTest(common.HeatTestCase):
stack_id = None
def setUpDatabase(self):
if self.stack_id is not None:
return
# Create a dummy stack in the DB as WatchRule instances
# must be associated with a stack
ctx = utils.dummy_context()
ctx.auth_token = 'abcd1234'
empty_tmpl = {'HeatTemplateFormatVersion': '2012-12-12'}
tmpl = template.Template(empty_tmpl)
stack_name = 'dummystack'
dummy_stack = stack.Stack(ctx, stack_name, tmpl)
dummy_stack.state_set(dummy_stack.CREATE, dummy_stack.COMPLETE,
'Testing')
dummy_stack.store()
self.stack_id = dummy_stack.id
def setUp(self):
super(WatchRuleTest, self).setUp()
self.setUpDatabase()
self.username = 'watchrule_test_user'
self.ctx = utils.dummy_context()
self.ctx.auth_token = 'abcd1234'
self.m.ReplayAll()
def _action_set_stubs(self, now, action_expected=True):
# Setup stubs for the action tests
timeutils.set_time_override(now)
self.addCleanup(timeutils.clear_time_override)
if action_expected:
dummy_action = DummyAction()
self.m.StubOutWithMock(stack.Stack, 'resource_by_refid')
stack.Stack.resource_by_refid(
mox.IgnoreArg()).MultipleTimes().AndReturn(dummy_action)
self.m.ReplayAll()
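        # The stub resolves every alarm action refid to DummyAction, whose
        # .signal attribute ("DummyAction") is what the assertions below
        # expect in the evaluate() output; with action_expected=False only
        # the time override is installed.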
def test_minimum(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'Period': '300',
'Statistic': 'Minimum',
'ComparisonOperator': 'LessThanOrEqualToThreshold',
'Threshold': '50'}
now = timeutils.utcnow()
last = now - datetime.timedelta(seconds=320)
data = [WatchData(77, now - datetime.timedelta(seconds=100))]
data.append(WatchData(53, now - datetime.timedelta(seconds=150)))
# all > 50 -> NORMAL
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=data,
stack_id=self.stack_id,
last_evaluated=last)
new_state = self.wr.get_alarm_state()
self.assertEqual('NORMAL', new_state)
data.append(WatchData(25, now - datetime.timedelta(seconds=250)))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=data,
stack_id=self.stack_id,
last_evaluated=last)
new_state = self.wr.get_alarm_state()
self.assertEqual('ALARM', new_state)
def test_maximum(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'Period': '300',
'Statistic': 'Maximum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '30'}
now = timeutils.utcnow()
last = now - datetime.timedelta(seconds=320)
data = [WatchData(7, now - datetime.timedelta(seconds=100))]
data.append(WatchData(23, now - datetime.timedelta(seconds=150)))
# all < 30 -> NORMAL
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=data,
stack_id=self.stack_id,
last_evaluated=last)
self.wr.now = now
new_state = self.wr.get_alarm_state()
self.assertEqual('NORMAL', new_state)
data.append(WatchData(35, now - datetime.timedelta(seconds=150)))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=data,
stack_id=self.stack_id,
last_evaluated=last)
self.wr.now = now
new_state = self.wr.get_alarm_state()
self.assertEqual('ALARM', new_state)
def test_samplecount(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'Period': '300',
'Statistic': 'SampleCount',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '3'}
now = timeutils.utcnow()
last = now - datetime.timedelta(seconds=320)
data = [WatchData(1, now - datetime.timedelta(seconds=100))]
data.append(WatchData(1, now - datetime.timedelta(seconds=150)))
# only 2 samples -> NORMAL
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=data,
stack_id=self.stack_id,
last_evaluated=last)
self.wr.now = now
new_state = self.wr.get_alarm_state()
self.assertEqual('NORMAL', new_state)
# only 3 samples -> ALARM
data.append(WatchData(1, now - datetime.timedelta(seconds=200)))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=data,
stack_id=self.stack_id,
last_evaluated=last)
self.wr.now = now
new_state = self.wr.get_alarm_state()
self.assertEqual('ALARM', new_state)
# only 3 samples (one old) -> NORMAL
data.pop(0)
data.append(WatchData(1, now - datetime.timedelta(seconds=400)))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=data,
stack_id=self.stack_id,
last_evaluated=last)
self.wr.now = now
new_state = self.wr.get_alarm_state()
self.assertEqual('NORMAL', new_state)
def test_sum(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'Period': '300',
'Statistic': 'Sum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '100'}
now = timeutils.utcnow()
last = now - datetime.timedelta(seconds=320)
data = [WatchData(17, now - datetime.timedelta(seconds=100))]
data.append(WatchData(23, now - datetime.timedelta(seconds=150)))
        # sum == 40 < 100 -> NORMAL
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=data,
stack_id=self.stack_id,
last_evaluated=last)
self.wr.now = now
new_state = self.wr.get_alarm_state()
self.assertEqual('NORMAL', new_state)
# sum > 100 -> ALARM
data.append(WatchData(85, now - datetime.timedelta(seconds=150)))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=data,
stack_id=self.stack_id,
last_evaluated=last)
self.wr.now = now
new_state = self.wr.get_alarm_state()
self.assertEqual('ALARM', new_state)
def test_ave(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'Period': '300',
'Statistic': 'Average',
'ComparisonOperator': 'GreaterThanThreshold',
'Threshold': '100'}
now = timeutils.utcnow()
last = now - datetime.timedelta(seconds=320)
data = [WatchData(117, now - datetime.timedelta(seconds=100))]
data.append(WatchData(23, now - datetime.timedelta(seconds=150)))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=data,
stack_id=self.stack_id,
last_evaluated=last)
self.wr.now = now
new_state = self.wr.get_alarm_state()
self.assertEqual('NORMAL', new_state)
data.append(WatchData(195, now - datetime.timedelta(seconds=250)))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=data,
stack_id=self.stack_id,
last_evaluated=last)
self.wr.now = now
new_state = self.wr.get_alarm_state()
self.assertEqual('ALARM', new_state)
def test_load(self):
# Insert two dummy watch rules into the DB
rule = {u'EvaluationPeriods': u'1',
u'AlarmActions': [u'WebServerRestartPolicy'],
u'AlarmDescription': u'Restart the WikiDatabase',
u'Namespace': u'system/linux',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'MetricName': u'ServiceFailure'}
self.wr = []
self.wr.append(watchrule.WatchRule(context=self.ctx,
watch_name='HttpFailureAlarm',
rule=rule,
watch_data=[],
stack_id=self.stack_id,
state='NORMAL'))
self.wr[0].store()
self.wr.append(watchrule.WatchRule(context=self.ctx,
watch_name='AnotherWatch',
rule=rule,
watch_data=[],
stack_id=self.stack_id,
state='NORMAL'))
self.wr[1].store()
# Then use WatchRule.load() to retrieve each by name
# and check that the object properties match the data above
for wn in ('HttpFailureAlarm', 'AnotherWatch'):
wr = watchrule.WatchRule.load(self.ctx, wn)
self.assertIsInstance(wr, watchrule.WatchRule)
self.assertEqual(wn, wr.name)
self.assertEqual('NORMAL', wr.state)
self.assertEqual(rule, wr.rule)
self.assertEqual(datetime.timedelta(seconds=int(rule['Period'])),
wr.timeperiod)
def test_store(self):
rule = {u'EvaluationPeriods': u'1',
u'AlarmActions': [u'WebServerRestartPolicy'],
u'AlarmDescription': u'Restart the WikiDatabase',
u'Namespace': u'system/linux',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'MetricName': u'ServiceFailure'}
self.wr = watchrule.WatchRule(context=self.ctx, watch_name='storetest',
stack_id=self.stack_id, rule=rule)
self.wr.store()
dbwr = watch_rule.WatchRule.get_by_name(self.ctx, 'storetest')
self.assertIsNotNone(dbwr)
self.assertEqual('storetest', dbwr.name)
self.assertEqual(watchrule.WatchRule.NODATA, dbwr.state)
self.assertEqual(rule, dbwr.rule)
def test_evaluate(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'Period': '300',
'Statistic': 'Maximum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '30'}
now = timeutils.utcnow()
timeutils.set_time_override(now)
self.addCleanup(timeutils.clear_time_override)
# It's not time to evaluate, so should stay NODATA
last = now - datetime.timedelta(seconds=299)
data = WatchData(25, now - datetime.timedelta(seconds=150))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=[data],
stack_id=self.stack_id,
last_evaluated=last)
actions = self.wr.evaluate()
self.assertEqual('NODATA', self.wr.state)
self.assertEqual([], actions)
# now - last == Period, so should set NORMAL
last = now - datetime.timedelta(seconds=300)
data = WatchData(25, now - datetime.timedelta(seconds=150))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=[data],
stack_id=self.stack_id,
last_evaluated=last)
actions = self.wr.evaluate()
self.assertEqual('NORMAL', self.wr.state)
self.assertEqual(now, self.wr.last_evaluated)
self.assertEqual([], actions)
# Now data breaches Threshold, so should set ALARM
last = now - datetime.timedelta(seconds=300)
data = WatchData(35, now - datetime.timedelta(seconds=150))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=[data],
stack_id=self.stack_id,
last_evaluated=last)
actions = self.wr.evaluate()
self.assertEqual('ALARM', self.wr.state)
self.assertEqual(now, self.wr.last_evaluated)
self.assertEqual([], actions)
def test_evaluate_suspend(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'Period': '300',
'Statistic': 'Maximum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '30'}
now = timeutils.utcnow()
timeutils.set_time_override(now)
self.addCleanup(timeutils.clear_time_override)
# Now data breaches Threshold, but we're suspended
last = now - datetime.timedelta(seconds=300)
data = WatchData(35, now - datetime.timedelta(seconds=150))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=[data],
stack_id=self.stack_id,
last_evaluated=last)
self.wr.state_set(self.wr.SUSPENDED)
actions = self.wr.evaluate()
self.assertEqual(self.wr.SUSPENDED, self.wr.state)
self.assertEqual([], actions)
def test_evaluate_ceilometer_controlled(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'Period': '300',
'Statistic': 'Maximum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '30'}
now = timeutils.utcnow()
timeutils.set_time_override(now)
self.addCleanup(timeutils.clear_time_override)
        # Now data breaches Threshold, but the rule is Ceilometer-controlled
last = now - datetime.timedelta(seconds=300)
data = WatchData(35, now - datetime.timedelta(seconds=150))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=[data],
stack_id=self.stack_id,
last_evaluated=last)
self.wr.state_set(self.wr.CEILOMETER_CONTROLLED)
actions = self.wr.evaluate()
self.assertEqual(self.wr.CEILOMETER_CONTROLLED, self.wr.state)
self.assertEqual([], actions)
def test_rule_actions_alarm_normal(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'AlarmActions': ['DummyAction'],
'Period': '300',
'Statistic': 'Maximum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '30'}
now = timeutils.utcnow()
self._action_set_stubs(now, action_expected=False)
# Set data so rule evaluates to NORMAL state
last = now - datetime.timedelta(seconds=300)
data = WatchData(25, now - datetime.timedelta(seconds=150))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=[data],
stack_id=self.stack_id,
last_evaluated=last)
actions = self.wr.evaluate()
self.assertEqual('NORMAL', self.wr.state)
self.assertEqual([], actions)
self.m.VerifyAll()
def test_rule_actions_alarm_alarm(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'AlarmActions': ['DummyAction'],
'Period': '300',
'Statistic': 'Maximum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '30'}
now = timeutils.utcnow()
self._action_set_stubs(now)
# Set data so rule evaluates to ALARM state
last = now - datetime.timedelta(seconds=300)
data = WatchData(35, now - datetime.timedelta(seconds=150))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=[data],
stack_id=self.stack_id,
last_evaluated=last)
actions = self.wr.evaluate()
self.assertEqual('ALARM', self.wr.state)
self.assertEqual(['DummyAction'], actions)
# re-set last_evaluated so the rule will be evaluated again.
last = now - datetime.timedelta(seconds=300)
self.wr.last_evaluated = last
actions = self.wr.evaluate()
self.assertEqual('ALARM', self.wr.state)
self.assertEqual(['DummyAction'], actions)
self.m.VerifyAll()
def test_rule_actions_alarm_two_actions(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'AlarmActions': ['DummyAction', 'AnotherDummyAction'],
'Period': '300',
'Statistic': 'Maximum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '30'}
now = timeutils.utcnow()
self._action_set_stubs(now)
# Set data so rule evaluates to ALARM state
last = now - datetime.timedelta(seconds=300)
data = WatchData(35, now - datetime.timedelta(seconds=150))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=[data],
stack_id=self.stack_id,
last_evaluated=last)
actions = self.wr.evaluate()
self.assertEqual('ALARM', self.wr.state)
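        # resource_by_refid is stubbed to return the same DummyAction for
        # any refid, so both configured actions surface as 'DummyAction':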
self.assertEqual(['DummyAction', 'DummyAction'], actions)
self.m.VerifyAll()
def test_rule_actions_ok_alarm(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'OKActions': ['DummyAction'],
'Period': '300',
'Statistic': 'Maximum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '30'}
now = timeutils.utcnow()
self._action_set_stubs(now, action_expected=False)
# On creation the rule evaluates to NODATA state
last = now - datetime.timedelta(seconds=300)
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=[],
stack_id=self.stack_id,
last_evaluated=last)
actions = self.wr.evaluate()
self.assertEqual('NODATA', self.wr.state)
self.assertEqual([], actions)
        # Move time forward and add data below threshold so we transition
        # from NODATA -> NORMAL; evaluate() should output a 'DummyAction'
now = now + datetime.timedelta(seconds=300)
self.m.VerifyAll()
self.m.UnsetStubs()
self._action_set_stubs(now)
data = WatchData(25, now - datetime.timedelta(seconds=150))
self.wr.watch_data = [data]
actions = self.wr.evaluate()
self.assertEqual('NORMAL', self.wr.state)
self.assertEqual(['DummyAction'], actions)
self.m.VerifyAll()
def test_rule_actions_nodata(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'InsufficientDataActions': ['DummyAction'],
'Period': '300',
'Statistic': 'Maximum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '30'}
now = timeutils.utcnow()
self._action_set_stubs(now, action_expected=False)
# Set data so rule evaluates to ALARM state
last = now - datetime.timedelta(seconds=300)
data = WatchData(35, now - datetime.timedelta(seconds=150))
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=[data],
stack_id=self.stack_id,
last_evaluated=last)
actions = self.wr.evaluate()
self.assertEqual('ALARM', self.wr.state)
self.assertEqual([], actions)
# Move time forward and don't add data so we transition from
# ALARM -> NODATA, so evaluate() should output a 'DummyAction'
now = now + datetime.timedelta(seconds=300)
self.m.VerifyAll()
self.m.UnsetStubs()
self._action_set_stubs(now)
actions = self.wr.evaluate()
self.assertEqual('NODATA', self.wr.state)
self.assertEqual(['DummyAction'], actions)
self.m.VerifyAll()
def test_create_watch_data(self):
rule = {u'EvaluationPeriods': u'1',
u'AlarmDescription': u'test alarm',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'MetricName': u'CreateDataMetric'}
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name='create_data_test',
stack_id=self.stack_id, rule=rule)
self.wr.store()
data = {u'CreateDataMetric': {"Unit": "Counter",
"Value": "1",
"Dimensions": []}}
self.wr.create_watch_data(data)
obj_wr = watch_rule.WatchRule.get_by_name(self.ctx, 'create_data_test')
obj_wds = [wd for wd in obj_wr.watch_data]
self.assertEqual(data, obj_wds[0].data)
# Note, would be good to write another datapoint and check it
# but sqlite seems to not interpret the backreference correctly
# so dbwr.watch_data is always a list containing only the latest
# datapoint. In non-test use on mysql this is not the case, we
# correctly get a list of all datapoints where watch_rule_id ==
# watch_rule.id, so leave it as a single-datapoint test for now.
def test_create_watch_data_suspended(self):
rule = {u'EvaluationPeriods': u'1',
u'AlarmDescription': u'test alarm',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'MetricName': u'CreateDataMetric'}
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name='create_data_test',
stack_id=self.stack_id, rule=rule,
state=watchrule.WatchRule.SUSPENDED)
self.wr.store()
data = {u'CreateDataMetric': {"Unit": "Counter",
"Value": "1",
"Dimensions": []}}
self.wr.create_watch_data(data)
obj_wr = watch_rule.WatchRule.get_by_name(self.ctx, 'create_data_test')
obj_wds = [wd for wd in obj_wr.watch_data]
self.assertEqual([], obj_wds)
def test_create_watch_data_match(self):
rule = {u'EvaluationPeriods': u'1',
u'AlarmDescription': u'test alarm',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'Dimensions': [{u'Name': 'AutoScalingGroupName',
u'Value': 'group_x'}],
u'MetricName': u'CreateDataMetric'}
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name='create_data_test',
stack_id=self.stack_id, rule=rule)
self.wr.store()
data = {u'CreateDataMetric': {"Unit": "Counter",
"Value": "1",
"Dimensions": [{u'AutoScalingGroupName':
u'group_x'}]}}
self.assertTrue(watchrule.rule_can_use_sample(self.wr, data))
def test_create_watch_data_match_2(self):
rule = {u'EvaluationPeriods': u'1',
u'AlarmDescription': u'test alarm',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'Dimensions': [{u'Name': 'AutoScalingGroupName',
u'Value': 'group_x'}],
u'MetricName': u'CreateDataMetric'}
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name='create_data_test',
stack_id=self.stack_id, rule=rule)
self.wr.store()
data = {u'not_interesting': {"Unit": "Counter",
"Value": "1",
"Dimensions": [
{u'AutoScalingGroupName':
u'group_x'}]},
u'CreateDataMetric': {"Unit": "Counter",
"Value": "1",
"Dimensions": [
{u'AutoScalingGroupName':
u'group_x'}]}}
self.assertTrue(watchrule.rule_can_use_sample(self.wr, data))
def test_create_watch_data_match_3(self):
rule = {u'EvaluationPeriods': u'1',
u'AlarmDescription': u'test alarm',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'Dimensions': [{u'Name': 'AutoScalingGroupName',
u'Value': 'group_x'}],
u'MetricName': u'CreateDataMetric'}
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name='create_data_test',
stack_id=self.stack_id, rule=rule)
self.wr.store()
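        # Note: the literal below repeats the u'CreateDataMetric' key, so
        # only the second (matching) entry survives; that is standard Python
        # dict behaviour, and the entry with the u'not_this' dimension is
        # silently discarded.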
data = {u'CreateDataMetric': {"Unit": "Counter",
"Value": "1",
"Dimensions": [
{u'AutoScalingGroupName':
u'not_this'}]},
u'CreateDataMetric': {"Unit": "Counter",
"Value": "1",
"Dimensions": [
{u'AutoScalingGroupName':
u'group_x'}]}}
self.assertTrue(watchrule.rule_can_use_sample(self.wr, data))
def test_create_watch_data_not_match_metric(self):
rule = {u'EvaluationPeriods': u'1',
u'AlarmDescription': u'test alarm',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'Dimensions': [{u'Name': 'AutoScalingGroupName',
u'Value': 'group_x'}],
u'MetricName': u'CreateDataMetric'}
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name='create_data_test',
stack_id=self.stack_id, rule=rule)
self.wr.store()
data = {u'not_this': {"Unit": "Counter",
"Value": "1",
"Dimensions": [
{u'AutoScalingGroupName':
u'group_x'}]},
u'nor_this': {"Unit": "Counter",
"Value": "1",
"Dimensions": [
{u'AutoScalingGroupName':
u'group_x'}]}}
self.assertFalse(watchrule.rule_can_use_sample(self.wr, data))
def test_create_watch_data_not_match_dimensions(self):
rule = {u'EvaluationPeriods': u'1',
u'AlarmDescription': u'test alarm',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'Dimensions': [{u'Name': 'AutoScalingGroupName',
u'Value': 'group_x'}],
u'MetricName': u'CreateDataMetric'}
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name='create_data_test',
stack_id=self.stack_id, rule=rule)
self.wr.store()
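        # As above, the duplicated u'CreateDataMetric' key means only the
        # last entry (with the non-matching u'wrong_key' dimension) is kept.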
data = {u'CreateDataMetric': {"Unit": "Counter",
"Value": "1",
"Dimensions": [
{u'AutoScalingGroupName':
u'not_this'}]},
u'CreateDataMetric': {"Unit": "Counter",
"Value": "1",
"Dimensions": [
{u'wrong_key':
u'group_x'}]}}
self.assertFalse(watchrule.rule_can_use_sample(self.wr, data))
def test_destroy(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'AlarmActions': ['DummyAction'],
'Period': '300',
'Statistic': 'Maximum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '30'}
last = timeutils.utcnow()
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch_destroy",
rule=rule,
watch_data=[],
stack_id=self.stack_id,
last_evaluated=last)
self.wr.store()
check = watchrule.WatchRule.load(context=self.ctx,
watch_name="testwatch_destroy")
self.assertIsInstance(check, watchrule.WatchRule)
self.wr.destroy()
self.assertRaises(exception.WatchRuleNotFound,
watchrule.WatchRule.load, context=self.ctx,
watch_name="testwatch_destroy")
def test_state_set(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'AlarmActions': ['DummyAction'],
'Period': '300',
'Statistic': 'Maximum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '30'}
last = timeutils.utcnow()
watcher = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch_set_state",
rule=rule,
watch_data=[],
stack_id=self.stack_id,
last_evaluated=last)
watcher.state_set(watcher.SUSPENDED)
self.assertEqual(watcher.SUSPENDED, watcher.state)
check = watchrule.WatchRule.load(context=self.ctx,
watch_name="testwatch_set_state")
self.assertEqual(watchrule.WatchRule.SUSPENDED, check.state)
def test_set_watch_state(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'AlarmActions': ['DummyAction'],
'Period': '300',
'Statistic': 'Maximum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '30'}
now = timeutils.utcnow()
self._action_set_stubs(now)
# Set data so rule evaluates to ALARM state
last = now - datetime.timedelta(seconds=200)
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=[],
stack_id=self.stack_id,
last_evaluated=last)
actions = self.wr.set_watch_state(watchrule.WatchRule.NODATA)
self.assertEqual([], actions)
actions = self.wr.set_watch_state(watchrule.WatchRule.NORMAL)
self.assertEqual([], actions)
actions = self.wr.set_watch_state(watchrule.WatchRule.ALARM)
self.assertEqual(['DummyAction'], actions)
self.m.VerifyAll()
def test_set_watch_state_invalid(self):
rule = {'EvaluationPeriods': '1',
'MetricName': 'test_metric',
'AlarmActions': ['DummyAction'],
'Period': '300',
'Statistic': 'Maximum',
'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
'Threshold': '30'}
now = timeutils.utcnow()
last = now - datetime.timedelta(seconds=200)
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name="testwatch",
rule=rule,
watch_data=[],
stack_id=self.stack_id,
last_evaluated=last)
self.assertRaises(ValueError, self.wr.set_watch_state, None)
self.assertRaises(ValueError, self.wr.set_watch_state, "BADSTATE")
| apache-2.0 |
v2-labs/OSX-ShaderMan | prototype.py | 4 | 54765 | """Main code for ShaderMan.Next. Still called prototype.py for reasons even I don't quite understand :)"""
import os
import glob
import sys
import inspect
import logging
curpath = os.path.dirname(inspect.getfile(sys._getframe(0)))
if curpath=="":
curpath=os.getcwd()
sys.path.append(curpath)
import wx
from wx import glcanvas
productname = "ShaderMan.Next"
try:
from OpenGL.GL import *
except ImportError:
print "The OpenGL extensions do not appear to be installed."
print "This application cannot run."
sys.exit(1)
from core import node # non-visual elements of DAG
from core.panel import * # visual representations of DAG elements and connections
from core.utils import * # some util functions I'm using
from core.shared import * # global variables
#from core.properties import PropertiesFrame # window being used to edit properties of nodes; will be replaced with prefs_window.py when ready
import core.prefs_window as prefs
from core.prefs_window import PropertiesFrame
from core.edit_window import EditDialog # we're using pretty simple edit window to edit the source code for nodes
from core.node_draw import InitNodeDraw # initialization of node paint code - we need to load the settings for colors, fonts etc
import wx.py.shell
class ShellFrame(wx.MiniFrame):
def __init__(self, parent, title, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE):
wx.MiniFrame.__init__(self, parent, -1, title, pos, size, style)
parent.shell = wx.py.shell.Shell(self, -1)
wx.EVT_CLOSE(self, self.onCloseShell)
def onCloseShell(self, evt):
self.Show(False)
class NodeCanvasBase(glcanvas.GLCanvas):
def __init__(self, parent):
glcanvas.GLCanvas.__init__(self, parent, -1)
self.init = False
self.parent = parent
# initial mouse position
self.lastx = self.x = self.lasty = self.y = 30
self.size = None
self.selected = None
self.selection = None
self.temparrow = None
self.insidePreview = False
self.SetSize((200, 200))
self.markedPanels = []
self.menuPanel = None
self.panx = self.pany = 0
self.zoom = 1
self.mx = self.my = self.mlastx = self.mlasty = 0 # for panning the canvas
msz = wx.Display(0).GetGeometry()[2:]
self.maxsize = (-msz[0], -msz[1], msz[0]*2, msz[1]*2)
#print self.maxsize
self.tax = self.tay = self.tax2 = self.tay2 = 0
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseDown)
self.Bind(wx.EVT_LEFT_UP, self.OnMouseUp)
self.Bind(wx.EVT_MOTION, self.OnMouseMotion)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel) # Zoom is partially implemented, but I don't like it.
self.Bind(wx.EVT_MIDDLE_DOWN, self.OnMiddleMouseDown)
self.Bind(wx.EVT_MIDDLE_UP, self.OnMiddleMouseUp)
self.popupID1 = wx.NewId()
self.popupID2 = wx.NewId()
self.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
def CorrectPosition(self, event): # return the event.GetPosition corrected by panx, pany
x, y = event.GetPosition()
return (x+self.panx, y+self.pany)
def OnMiddleMouseDown(self, event):
self.CaptureMouse()
self.mx, self.my = self.mlastx, self.mlasty = event.GetPosition()
def OnMiddleMouseUp(self, event):
if self.HasCapture():
self.ReleaseMouse()
self.Refresh(False)
def OnMouseWheel(self, event):
zoomFactor = event.GetWheelRotation() / event.GetWheelDelta()
if zoomFactor > 0:
self.doZoom(1.0/zoomFactor)
elif zoomFactor < 0:
self.doZoom(zoomFactor)
def doZoom(self, factor): # TODO check with users if they actually like this implementation...
#self.zoom += factor*0.05
z = float(settings.get('fontsize', 10))
z += factor
z = min(20.0, max(4.0, z))
#print z
settings['fontsize'] = str(z)
InitNodeDraw()
for obj in panels+arrows:
obj.refreshFont()
self.Refresh(True)
#event.Skip()
#self.InitGL()
#self.Refresh(False)
def OnEraseBackground(self, event):
pass # Do nothing, to avoid flashing on Windows
def OnSize(self, event):
self.size = self.GetClientSize()
if self.GetContext():
try:
self.SetCurrent()
except:
pass
if self.size.width>0: # <0 happened on Mac sometimes
try:
glViewport(0, 0, self.size.width, self.size.height)
self.InitGL()
except:
pass
self.Refresh(True)
self.Update()
event.Skip()
def OnPaint(self, event):
dc = wx.ClientDC(self)
self.SetCurrent()
if not self.init:
self.InitGL()
self.init = True
self.OnDraw()
event.Skip()
def OnContextMenu(self, event):
# connection menu
self.popupID1 = wx.NewId()
self.popupID2 = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnMenuDeleteConnection, id=self.popupID1)
# panel menu
self.popupID3 = wx.NewId()
self.popupID4 = wx.NewId()
self.popupID5 = wx.NewId()
self.popupID6 = wx.NewId()
self.popupID7 = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnMenuSwitchParameters, id=self.popupID3)
self.Bind(wx.EVT_MENU, self.OnMenuSwitchIcon, id=self.popupID5)
self.Bind(wx.EVT_MENU, self.OnMenuEditCode, id=self.popupID6)
self.Bind(wx.EVT_MENU, self.OnMenuCreateGroup, id=self.popupID7)
# group menu
self.popupID8 = wx.NewId()
self.popupID9 = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnMenuSwitchGroupParameters, id=self.popupID8)
self.Bind(wx.EVT_MENU, self.OnMenuSwitchGroupExpanded, id=self.popupID9)
# different menus for different places to click
pos = self.CorrectPosition(event)
pos = self.ScreenToClient(pos)
x, y = pos
self.menuConnection = None
self.menuPanel = None
self.menuGroup = None
menuType = 0 # 0=insert brick; 1=group operations; 2=panel operations; 3=connection operations
for g in groups:
if g.insideHeader(x, y):
self.menuGroup = g
menuType = 1
for e in panels:
if e.inside(x, y):
self.menuPanel = e
menuType = 2
                # A single out cell can feed multiple connections, so this
                # menu entry isn't supported yet. Definitely will be in the
                # future, so TODO.
#outtest = node_draw.IsArrowStart(e.node, wx.ClientDC(self), x, y)
#if -1 != outtest:
#pname = e.node.out_params[outtest-1]["name"]
#connection = e.node.out_connections.get(pname, None)
#if (connection != None): # there's connection
#self.menuConnection = connection[0]
intest = node_draw.IsArrowEnd(e.node, wx.ClientDC(self), x, y)
if intest>0: # it's -1 if there's nothing or 0 if it's header - but we don't need header workaround here
pname = e.node.in_params[intest-1]["name"]
connection = e.node.in_connections.get(pname, None)
if (connection != None): # there's connection
self.menuConnection = connection # we somehow need to pass the information on the selected connection outside...
menuType = 3
menu = wx.Menu()
if menuType == 3:
item = wx.MenuItem(menu, self.popupID1, "Delete connection")
menu.AppendItem(item)
item.Enable(True)
menu.AppendSeparator()
item = wx.MenuItem(menu, self.popupID2, "Empty")
menu.AppendItem(item)
item.Enable(False)
if menuType == 2:
if self.menuPanel != None: # we've got the panel selected, so we show it's menu
item = wx.MenuItem(menu, -1, self.menuPanel.node.name)
menu.AppendItem(item)
item.Enable(False)
menu.AppendSeparator()
item = wx.MenuItem(menu, self.popupID3, "Show parameters", kind=wx.ITEM_CHECK)
menu.AppendItem(item)
item.Check(self.menuPanel.showParameters)
item.Enable(True)
item = wx.MenuItem(menu, self.popupID4, "Show preview", kind=wx.ITEM_CHECK)
menu.AppendItem(item)
item.Enable(False) # TODO implement preview
menu.AppendSeparator()
item = wx.MenuItem(menu, self.popupID5, "Iconic mode", kind=wx.ITEM_CHECK)
menu.AppendItem(item)
item.Check(self.menuPanel.iconicMode)
item.Enable(self.menuPanel.node.icon != "")
menu.AppendSeparator()
item = wx.MenuItem(menu, self.popupID7, "Create group")
menu.AppendItem(item)
menu.AppendSeparator()
item = wx.MenuItem(menu, self.popupID6, "Edit code")
menu.AppendItem(item)
if menuType == 1:
if self.menuGroup != None:
item = wx.MenuItem(menu, -1, "group%s" % self.menuGroup.id)
menu.AppendItem(item)
item.Enable(False)
menu.AppendSeparator()
item = wx.MenuItem(menu, self.popupID8, "Show parameters", kind=wx.ITEM_CHECK)
menu.AppendItem(item)
item.Check(self.menuGroup.showParameters)
item.Enable(True)
item = wx.MenuItem(menu, self.popupID9, "Show expanded", kind=wx.ITEM_CHECK)
menu.AppendItem(item)
item.Check(self.menuGroup.expanded)
item.Enable(True)
if menuType == 0:
menu = self.parent.brickMenu
self.PopupMenu(menu)
if menu is not self.parent.brickMenu:
menu.Destroy()
self.Refresh(True)
def OnMenuSwitchGroupParameters(self, event):
if self.menuGroup != None:
self.menuGroup.showParameters = event.Checked()
self.Refresh(True)
def OnMenuSwitchGroupExpanded(self, event):
if self.menuGroup != None:
self.menuGroup.SwitchExpanded(event.Checked())
#self.menuGroup.expanded = event.Checked()
#self.menuGroup.calcXY()
#self.menuGroup.updatePanels()
self.Refresh(True)
def OnMenuSwitchIcon(self, event):
if self.menuPanel != None:
ar = self.markedPanels
if self.menuPanel not in ar:
ar.append(self.menuPanel)
for p in ar:
if p.node.icon != "":
p.iconicMode = event.Checked()
p.refreshFont()
for c in p.node.in_connections.itervalues():
c.arrow.refreshFont()
for c in p.node.out_connections.itervalues():
for c2 in c:
c2.arrow.refreshFont()
self.Refresh(True)
del ar
def OnMenuSwitchParameters(self, event):
if self.menuPanel != None:
ar = self.markedPanels
if self.menuPanel not in ar:
ar.append(self.menuPanel)
for p in ar:
p.showParameters = event.Checked()
p.refreshFont()
for c in p.node.in_connections.itervalues():
c.arrow.refreshFont()
for c in p.node.out_connections.itervalues():
for c2 in c:
c2.arrow.refreshFont()
self.Refresh(True)
del ar
def OnMenuCreateGroup(self, event):
if self.menuPanel != None:
group = Group(self.parent)
ar = self.markedPanels
if self.menuPanel not in ar:
ar.append(self.menuPanel)
for p in ar:
group.AddPanel(p)
groups.append(group)
self.Refresh(True)
del ar
def OnMenuEditCode(self, event):
if self.menuPanel != None:
editCode = True
dlg = EditDialog(self, "Edit code for %s" % self.menuPanel.node.name)
c = self.menuPanel.node.code
pc = self.menuPanel.node.precode
if c != "":
ac = c
else:
ac = pc
editCode = False
dlg.SetValue(ac)
if dlg.ShowModal() == wx.ID_OK:
if editCode:
self.menuPanel.node.code = dlg.GetValue()
else:
self.menuPanel.node.precode = dlg.GetValue()
del dlg
if self.CUpdateMenuItem.IsChecked():
wx.PostEvent(self.parent, wx.CommandEvent(wx.wxEVT_COMMAND_MENU_SELECTED, frm.ID_ACTION))
def ActuallyDeleteConnection(self, connection):
if connection != None:
if connection.outputNode != None:
del connection.outputNode.in_connections[connection.outputName]
if connection.inputNode != None:
arr = connection.inputNode.out_connections[connection.inputName]
SafelyDelete(arr, connection)
a = None
for b in arrows:
if b.connection == connection:
a = b
if a != None:
SafelyDelete(arrows, a)
del a
SafelyDelete(connections, connection)
del connection
if self.CUpdateMenuItem.IsChecked():
wx.PostEvent(self.parent, wx.CommandEvent(wx.wxEVT_COMMAND_MENU_SELECTED, frm.ID_ACTION))
def OnMenuDeleteConnection(self, event):
self.ActuallyDeleteConnection(self.menuConnection)
#def OnMenuDoNothing(self, event):
#pass
def OnLeftDClick(self, event):
x, y = self.CorrectPosition(event)#event.GetPosition()
for p in panels:
if p.inside(x, y):
node = p.node
self.parent.pform.AssignNode(node)
self.parent.pform.Show(True)
self.parent.pform.Raise()
for g in groups:
if g.insideHeader(x, y):
g.SwitchExpanded(not g.expanded)
def CoordinateInsidePreview(self, x, y):
offx = self.size.width+self.panx
offy = self.size.height+self.pany
xw = self.maxsize[2]-self.maxsize[0];
yw = self.maxsize[3]-self.maxsize[1];
xw23 = xw*2/3
yw23 = yw*2/3
x1 = offx+(self.maxsize[0]-xw23)/30
x2 = offx+(self.maxsize[2]-xw23)/30
y1 = offy+(self.maxsize[1]-yw23)/30
y2 = offy+(self.maxsize[3]-yw23)/30
return (x in range(x1, x2)) and ((y in range(y1, y2))) # BTW, this only works with integers :)
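        # The rectangle tested above is the whole virtual canvas
        # (self.maxsize) drawn at 1/30 scale, anchored near the bottom-right
        # of the visible view; xw23/yw23 shift the origin by two thirds of
        # the virtual extent, matching the preview rendered in OnDraw.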
def OnMouseDown(self, event):
self.CaptureMouse()
self.x, self.y = self.lastx, self.lasty = self.CorrectPosition(event)#event.GetPosition()
self.mx, self.my = event.GetPosition()
self.insidePreview = self.CoordinateInsidePreview(self.x, self.y)
if self.insidePreview: # code duplication alert :) see the OnMouseMove
offx = self.size.width+self.panx
offy = self.size.height+self.pany
xw23 = (self.maxsize[2]-self.maxsize[0])*2/3
yw23 = (self.maxsize[3]-self.maxsize[1])*2/3
dx, dy = self.CorrectPosition(event)
dx = (dx - offx)*30+xw23
dy = (dy - offy)*30+yw23
self.panx = dx
self.pany = dy
self.InitGL()
self.Refresh(False)
return
self.selected = None
self.selection = None
self.temparrow = None
for g in groups:
if g.insideHeader(self.x, self.y):
self.selected = g
dx = self.x - g.getLeft()
dy = self.y - g.getTop()
self.selected.delta = ((dx, dy))
if g not in self.markedPanels:
del self.markedPanels[:]
self.markedPanels.append(g)
return
for e in panels:
if e.inside(self.x, self.y):
if -1 == node_draw.IsArrowStart(e.node, wx.ClientDC(self), self.x, self.y):
self.selected = e
dx = self.x - e.x
dy = self.y - e.y
self.selected.delta = ((dx, dy))
if e not in self.markedPanels:
del self.markedPanels[:]
self.markedPanels.append(e)
self.parent.pform.AssignNode(e.node)
if self.parent.pform.IsShown():
self.parent.pform.Raise()
else:
self.temparrow = Arrow(self)
self.tax = self.tax2 = self.x
self.tay = self.tay2 = self.y
self.startpanel = e
if (self.selected == None) and (self.temparrow == None):
self.selection = (self.x, self.y, self.x, self.y)
self.Refresh(False)
hatetheglobals = -1
hatemenu = None
def ManuallyAskCallback(self, event):
node = self.stoppanel.node
count = 1
global hatetheglobals
menuItem = hatemenu.FindItemById(event.GetId())
for inp in node.in_params:
pname = str(inp["name"])
if pname == menuItem.GetLabel():
hatetheglobals = count
return
count += 1
def ManuallyAskForInput(self, node):
menu = wx.Menu()
global hatemenu
hatemenu = menu
item = wx.MenuItem(menu, -1, "Connect to:")
menu.AppendItem(item)
item.Enable(False)
menu.AppendSeparator()
for inp in node.in_params:
pname = str(inp["name"])
nid = wx.NewId()
item = wx.MenuItem(menu, nid, pname)
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.ManuallyAskCallback, id=nid)
global hatetheglobals
hatetheglobals = -1
self.PopupMenu(menu)
menu.Destroy()
return hatetheglobals
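        # Returns the 1-based index of the input the user picked, or -1 if
        # the menu was dismissed; the value is smuggled out through the
        # hatetheglobals module global (an acknowledged hack; the
        # class-level defaults above are shadowed by the global statements).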
def OnMouseUp(self, event):
self.insidePreview = False
DropSuccessful = False
if self.temparrow != None:
for e in panels:
if e.inside(self.x, self.y):
stop = node_draw.IsArrowEnd(e.node, wx.ClientDC(self), self.x, self.y)
if -1 != stop:
self.stoppanel = e
if stop == 0: # ask for the connection using popupMenu and some ugly callback hack...
stop = self.ManuallyAskForInput(e.node)
if -1 == stop:
continue
start = node_draw.IsArrowStart(self.startpanel.node, wx.ClientDC(self), self.tax, self.tay)
conn = node.Connection(-1)
connections.append(conn)
conn.assignInput(self.startpanel.node, self.startpanel.node.out_params[start-1]["name"])
prev = self.stoppanel.node.in_connections.get(self.stoppanel.node.in_params[stop-1]["name"], None)
if prev != None:
self.ActuallyDeleteConnection(prev)
conn.assignOutput(self.stoppanel.node, self.stoppanel.node.in_params[stop-1]["name"])
arrows.append(self.temparrow)
self.temparrow.assignConnection(conn)
self.temparrow.refreshFont()
DropSuccessful = True
self.temparrow = None
if self.CUpdateMenuItem.IsChecked():
wx.PostEvent(self.parent, wx.CommandEvent(wx.wxEVT_COMMAND_MENU_SELECTED, frm.ID_ACTION))
if not DropSuccessful:
self.temparrow = None
if self.HasCapture():
self.ReleaseMouse()
self.selected = None
if self.selection != None:
del self.markedPanels[:]
for e in panels+groups:
if (self.selection[0] < e.x) and (self.selection[2] > e.x+e.width) and (self.selection[1] < e.y) and (self.selection[3] > e.y+e.height):
if e.visible:
self.markedPanels.append(e)
self.selection = None
self.Refresh(False)
def OnMouseMotion(self, event):
if event.Dragging() and (event.MiddleIsDown() or (event.LeftIsDown() and event.AltDown())):
self.mlastx, self.mlasty = self.mx, self.my
self.mx, self.my = event.GetPosition()
self.panx += self.mlastx-self.mx
self.pany += self.mlasty-self.my
self.InitGL()
self.Refresh(False)
return
if event.Dragging() and event.LeftIsDown():
self.lastx, self.lasty = self.x, self.y
self.x, self.y = self.CorrectPosition(event)#event.GetPosition()
if self.insidePreview:
offx = self.size.width+self.panx
offy = self.size.height+self.pany
xw23 = (self.maxsize[2]-self.maxsize[0])*2/3
yw23 = (self.maxsize[3]-self.maxsize[1])*2/3
dx, dy = self.CorrectPosition(event)
dx = (dx - offx)*30+xw23
dy = (dy - offy)*30+yw23
self.panx = dx
self.pany = dy
self.InitGL()
self.Refresh(False)
return
if self.selected != None:
if isinstance(self.selected, NodePanel):
self.selected.x = self.x-self.selected.delta[0]
self.selected.y = self.y-self.selected.delta[1]
if self.selected in self.markedPanels: # move all the other panels as well
for p in self.markedPanels:
if p != self.selected:
p.x += (self.selected.x-self.lastx+self.selected.delta[0])
p.y += (self.selected.y-self.lasty+self.selected.delta[1])
if isinstance(self.selected, Group):
x = self.x-self.selected.delta[0]
y = self.y-self.selected.delta[1]
for p in self.selected.panels:
p.x += (x-self.lastx+self.selected.delta[0])
p.y += (y-self.lasty+self.selected.delta[1])
self.selected.x += (x-self.lastx+self.selected.delta[0])
self.selected.y += (y-self.lasty+self.selected.delta[1])
if not self.selected.expanded:
if self.selected in self.markedPanels: # move all the other panels as well
for p in self.markedPanels:
if p != self.selected:
if not p in self.selected.panels: # because we moved'em already
p.x += (x-self.lastx+self.selected.delta[0])
p.y += (y-self.lasty+self.selected.delta[1])
if isinstance(p, Group):
for pp in p.panels:
pp.x += (x-self.lastx+self.selected.delta[0])
pp.y += (y-self.lasty+self.selected.delta[1])
self.Refresh(False)
else:
if self.selection != None:
self.selection = (self.selection[0], self.selection[1], self.x, self.y)
self.Refresh(False)
else:
if self.temparrow != None:
self.tax2 = self.x
self.tay2 = self.y
self.Refresh(False)
class NodeCanvas(NodeCanvasBase):
def InitGL(self):
glDisable( GL_DEPTH_TEST )
glDisable( GL_LIGHTING )
glMatrixMode( GL_PROJECTION )
glLoadIdentity()
#glOrtho( 0.0, self.size.width, self.size.height, 0.0, -1.0, 1.0 )
glOrtho( 0.0+self.panx, self.size.width+self.panx, self.size.height+self.pany, 0.0+self.pany, -1.0, 1.0 )
#print self.zoom
glScalef( self.zoom, self.zoom, 1 )
glMatrixMode( GL_MODELVIEW )
glLoadIdentity()
def OnDraw(self):
if wx.Platform == "__WXMAC__":
bgColor = wx.Color(240, 240, 240)
else:
bgColor = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW)
glClearColor( bgColor.Red()/255.0, bgColor.Green()/255.0, bgColor.Blue()/255.0, 1.0 )
glClear( GL_COLOR_BUFFER_BIT ) # | GL_DEPTH_BUFFER_BIT ) ?
#glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
glEnable(GL_LINE_SMOOTH)
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
#glEnable(GL_POINT_SMOOTH)
#glHint(GL_POINT_SMOOTH_HINT, GL_NICEST)
glLineWidth(1.2)
glPointSize(1)
for a in arrows:
a.paint()
glEnable(GL_TEXTURE_2D)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
for p in panels:
p.paint()
glDisable(GL_TEXTURE_2D)
for g in groups:
g.paint()
if self.temparrow != None:
self.temparrow.paint()
if self.selection != None:
glColor4f(0.4, 0.4, 0.4, 0.3) # grey
glBegin(GL_POLYGON)
glVertex2i( self.selection[0], self.selection[1] )
glVertex2i( self.selection[0], self.selection[3] )
glVertex2i( self.selection[2], self.selection[3] )
glVertex2i( self.selection[2], self.selection[1] )
glEnd()
glColor4f(0.4, 0.4, 0.4, 1.0)
# small preview of bricks
offx = self.size.width+self.panx
offy = self.size.height+self.pany
xw = self.maxsize[2]-self.maxsize[0];
yw = self.maxsize[3]-self.maxsize[1];
xw23 = xw*2/3
yw23 = yw*2/3
glColor4f(0.4, 0.4, 0.4, 0.2) # total view
glBegin(GL_POLYGON)
glVertex2i( offx+(self.maxsize[0]-xw23)/30, offy+(self.maxsize[1]-yw23)/30 )
glVertex2i( offx+(self.maxsize[0]-xw23)/30, offy+(self.maxsize[3]-yw23)/30 )
glVertex2i( offx+(self.maxsize[2]-xw23)/30, offy+(self.maxsize[3]-yw23)/30 )
glVertex2i( offx+(self.maxsize[2]-xw23)/30, offy+(self.maxsize[1]-yw23)/30 )
glEnd()
glColor4f(0.4, 0.4, 0.4, 0.4) # current view
glBegin(GL_POLYGON)
glVertex2i( offx+(self.panx-xw23)/30, offy+(self.pany-yw23)/30 )
glVertex2i( offx+(self.panx-xw23)/30, offy+(offy-yw23)/30 )
glVertex2i( offx+(offx-xw23)/30, offy+(offy-yw23)/30 )
glVertex2i( offx+(offx-xw23)/30, offy+(self.pany-yw23)/30 )
glEnd()
glColor4f(0.4, 0.4, 0.4, 0.8) # bricks
for p in panels:
glBegin(GL_POLYGON)
glVertex2i( offx+(p.x-xw23)/30, offy+(p.y-yw23)/30 )
glVertex2i( offx+(p.x-xw23)/30, offy+(p.y+p.height-yw23)/30 )
glVertex2i( offx+(p.x+p.width-xw23)/30, offy+(p.y+p.height-yw23)/30 )
glVertex2i( offx+(p.x+p.width-xw23)/30, offy+(p.y-yw23)/30 )
glEnd()
glColor4f(0.4, 0.4, 0.4, 1.0)
self.SwapBuffers()
class CanvasDropTarget(wx.PyDropTarget):
def __init__(self, window):
wx.PyDropTarget.__init__(self)
self.window = window
self.df = wx.CustomDataFormat("CanvasDropTarget")
self.data = wx.CustomDataObject(self.df)
self.SetDataObject(self.data)
def OnEnter(self, x, y, d):
return d
def OnLeave(self):
pass
def OnDrop(self, x, y):
return True
def OnDragOver(self, x, y, d):
return d
def OnData(self, x, y, d):
if self.GetData():
data = self.data.GetData()
if data == "wxTreeCtrl":
win = self.window.OnTreeLeftDClick(None)
if win != None:
win.x = x+self.window.c.panx
win.y = y+self.window.c.pany
return d
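# Drag-and-drop flow sketch: MainFrame._startDrag (below) packs the dragged
# widget's window name into a wx.CustomDataObject under the
# "CanvasDropTarget" format; OnData above compares the payload against
# "wxTreeCtrl" and, when it matches, reuses OnTreeLeftDClick to create a
# node panel at the pan-corrected drop position.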
class MainFrame(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title)
self.scenename = None
img = wx.Image(os.path.join(curpath, "core/icon.png"))
self.SetIcon(self.MakeIcon(img))
self.factory = node.Factory() # refactor to scene settings
self.factory.setName("test123") # should be the scene settings?
self.dividerPanel = wx.Panel(self, -1, style=wx.BORDER_NONE)
self.dividerPanel.SetSizeHints(6, -1, 6, -1) #minw, minh, maxw, maxh
if wx.Platform == "__WXMAC__": # should be adjustable from the settings...
bgColor = wx.Color(240, 240, 240)
else:
bgColor = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW)
lhsv = node_draw.RGBtoHSV(bgColor.Get())
lhsv[2] -= 20
lrgb = node_draw.HSVtoRGB(lhsv)
lcolor = wx.Colour(lrgb[0], lrgb[1], lrgb[2])
self.dividerPanel.SetBackgroundColour(lcolor)
self.c = NodeCanvas(self)
self.tree = wx.TreeCtrl(self, -1, style = wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_NO_LINES | wx.TR_FULL_ROW_HIGHLIGHT | wx.BORDER_NONE)
self.tree.SetDimensions(0, 0, 160, -1)
self.tree.SetSizeHints(160, -1, -1, -1) #minw, minh, maxw, maxh
isz = (16,16)
il = wx.ImageList(isz[0], isz[1])
self.fldridx = il.Add(wx.ArtProvider_GetBitmap(wx.ART_FOLDER, wx.ART_OTHER, isz))
self.fldropenidx = il.Add(wx.ArtProvider_GetBitmap(wx.ART_FILE_OPEN, wx.ART_OTHER, isz))
self.fileidx = il.Add(wx.ArtProvider_GetBitmap(wx.ART_NORMAL_FILE, wx.ART_OTHER, isz))
self.tree.SetImageList(il)
self.il = il
#self.tree.SetBackgroundColour(bgColor)
#self.tree.Refresh(True)
self.tree.Bind(wx.EVT_LEFT_DCLICK, self.OnTreeLeftDClick)
#self.ID_MINIM = wx.NewId()
self.ID_ACTION = wx.NewId()
self.ID_VIEWCODE = wx.NewId()
self.ID_IMMEDIATEUPDATE = wx.NewId()
self.ID_MODEPREFERENCES = wx.NewId()
self.ID_LAYOUTNODES = wx.NewId()
self.ID_SHOWSHELL = wx.NewId()
self.modeMenus = {}
self.brickMenu = None
self.brickMenuArr = {}
#self.currentMode = "Renderman SL" # should load from the settings
#self._setMenu()
#self.ActuallySwitchMode()
self.currentMode = ""
self._setMenu()
aTable = wx.AcceleratorTable([
#(wx.ACCEL_NORMAL, ord('M'), self.ID_MINIM),
(wx.ACCEL_NORMAL, wx.WXK_F9, self.ID_ACTION),
(wx.ACCEL_NORMAL, wx.WXK_DELETE, wx.ID_DELETE),
(wx.ACCEL_CTRL, ord('S'), wx.ID_SAVE),
(wx.ACCEL_CTRL, ord('N'), wx.ID_NEW),
(wx.ACCEL_CTRL, ord('O'), wx.ID_OPEN),
(wx.ACCEL_CTRL, ord('A'), wx.ID_SELECTALL),
])
self.SetAcceleratorTable(aTable)
self.shell = ShellFrame(self, "ShaderMan Shell")
self.pform = PropertiesFrame(self)
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(self.tree, 0, wx.EXPAND)
self.sizer.Add(self.dividerPanel, 0, wx.EXPAND)
self.sizer.Add(self.c, 1, wx.EXPAND)
self.SetSizer(self.sizer)
self.sizer.Layout()
self.sx = 0
self.dividerPanel.Bind(wx.EVT_LEFT_DOWN, self.OnSMouseDown)
self.dividerPanel.Bind(wx.EVT_LEFT_UP, self.OnSMouseUp)
self.dividerPanel.Bind(wx.EVT_MOTION, self.OnSMouseMotion)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_SHOW, self.OnSize)
self.Bind(wx.EVT_MOVE, self.OnMove)
#self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnCleanup)
self.tree.Bind(wx.EVT_TREE_BEGIN_DRAG, self._startDrag)
dt = CanvasDropTarget(self)
self.c.SetDropTarget(dt)
def _startDrag(self, event):
button = event.GetEventObject()
item = event.GetItem()
tree = event.GetEventObject()
df = wx.CustomDataFormat("CanvasDropTarget")
ldata = wx.CustomDataObject(df)
ldata.SetData(str(button.GetName()))
dropSource = wx.DropSource(self)
dropSource.SetData(ldata)
result = dropSource.DoDragDrop(True)
def OnSMouseDown(self, evt):
self.dividerPanel.CaptureMouse()
self.sx, y = evt.GetPosition()
evt.Skip()
def OnSMouseUp(self, evt):
if self.dividerPanel.HasCapture():
self.dividerPanel.ReleaseMouse()
self.sx, y = evt.GetPosition()
evt.Skip()
def OnSMouseMotion(self, evt):
if evt.Dragging() and evt.LeftIsDown():
x, y = evt.GetPosition()
w, h = self.GetClientSizeTuple()
cw, ch = self.tree.GetClientSizeTuple()
if cw+x-self.sx>120:
self.tree.SetSizeHints(cw+x-self.sx, -1, -1, -1) #minw, minh, maxw, maxh
self.sizer.Layout()
evt.Skip()
def OnCleanup(self, event):
#if isinstance(self, MainFrame):
try:
del self.filehistory
except:
pass
def ActuallySwitchMode(self):
# check if menu is switched
for mid, mmenu in self.modeMenus.items():
if mmenu.GetLabel() == self.currentMode:
if not (mmenu.IsChecked()):
mmenu.Check(True)
root = opj('%s/modes/%s/nodes' % (curpath, self.currentMode))
if self.brickMenu != None:
del self.brickMenu
self.brickMenu = wx.Menu()
item = wx.MenuItem(self.brickMenu, -1, "Insert node:")
self.brickMenu.AppendItem(item)
item.Enable(False)
self.brickMenu.AppendSeparator()
self.tree.Freeze()
self.tree.DeleteAllItems()
self.brickMenuArr.clear()
self.treeids = {root : self.tree.AddRoot(root)}
self.root = self.treeids[root]
self.tree.SetPyData(self.root, root)
for (dirpath, dirnames, filenames) in os.walk(root):
if dirpath.find(".svn") == -1:
for dirname in dirnames:
if dirname.find(".svn") == -1:
fullpath = os.path.join(dirpath, dirname)
self.treeids[fullpath] = self.tree.AppendItem(self.treeids[dirpath], dirname)
self.tree.SetPyData(self.treeids[fullpath], fullpath)
self.tree.SetItemImage(self.treeids[fullpath], self.fldridx, wx.TreeItemIcon_Normal)
self.tree.SetItemImage(self.treeids[fullpath], self.fldropenidx, wx.TreeItemIcon_Selected)
#for filename in sorted(filenames): # removed for Python 2.3 compatibility
fmenu = wx.Menu()
for filename in filenames:
if filename.endswith(".br"):
i = self.tree.AppendItem(self.treeids[dirpath], filename.replace(".br", ""))
self.tree.SetPyData(i, "%s%s%s" % (dirpath, os.path.sep, filename))
self.tree.SetItemImage(i, self.fileidx, wx.TreeItemIcon_Normal)
self.tree.SetItemImage(i, self.fileidx, wx.TreeItemIcon_Selected)
nid = wx.NewId()
if dirpath==root:
self.brickMenu.Append(nid, filename.replace(".br", ""))
else:
fmenu.Append(nid, filename.replace(".br", ""))
self.brickMenuArr[nid] = os.path.join(dirpath, filename)
self.Bind(wx.EVT_MENU, self.OnNewBrickContextMenu, id=nid)
if dirpath!=root:
self.brickMenu.AppendMenu(-1, dirpath.replace(root+os.path.sep, ""), fmenu)
try:
self.tree.Expand(self.root)
except:
pass
self.tree.Thaw()
self.tree.Refresh(True)
def OnNewBrickContextMenu(self, event):
newname = self.brickMenuArr[event.GetId()]
node1 = node.Node(-1, newname, factory = self.factory)
nodes.append(node1)
pnl = NodePanel(self, x = 20 + self.c.panx, y = 20 + self.c.pany)
panels.append(pnl)
pnl.assignNode(node1)
self.c.Refresh(False)
#event.Skip()
def _setMenu(self):
self.mainmenu = wx.MenuBar()
menu1 = wx.Menu()
menu1.Append(wx.ID_NEW, "New")
menu1.AppendSeparator()
menu1.Append(wx.ID_OPEN, "Open...")
menu1.Append(wx.ID_SAVE, "Save")
menu1.Append(wx.ID_SAVEAS, "Save as...")
if wx.Platform != "__WXMAC__":
menu1.AppendSeparator()
menu1.Append(wx.ID_PREFERENCES, "Preferences...")
if wx.Platform != "__WXMAC__":
menu1.AppendSeparator()
menu1.Append(wx.ID_EXIT, "Exit")
self.mainmenu.Append(menu1, "&File")
self.filehistory = wx.FileHistory()
self.filehistory.UseMenu(menu1)
self.Bind( wx.EVT_MENU_RANGE, self.OnFileHistory, id=wx.ID_FILE1, id2=wx.ID_FILE9 )
menu3 = wx.Menu()
#menu3.Append(wx.ID_UNDO, "Undo")
#menu3.Append(wx.ID_REDO, "Redo")
#menu3.AppendSeparator()
#menu3.Append(wx.ID_CUT, "Cut")
#menu3.Append(wx.ID_COPY, "Copy")
#menu3.Append(wx.ID_PASTE, "Paste")
menu3.Append(wx.ID_DELETE, "Delete")
menu3.AppendSeparator()
menu3.Append(wx.ID_SELECTALL, "Select all")
menu3.AppendSeparator()
#menu3.Append(self.ID_MINIM, "Minimize current node")
#menu3.Append(wx.ID_SELECTALL, "Select all")
menu3.Append(self.ID_LAYOUTNODES, "Compact nodes layout")
self.mainmenu.Append(menu3, "&Edit")
menu4 = wx.Menu()
menu4.Append(self.ID_ACTION, "Render")
menu4.Append(self.ID_VIEWCODE, "View generated code")
menu4.AppendSeparator()
self.c.CUpdateMenuItem = wx.MenuItem(menu4, self.ID_IMMEDIATEUPDATE, "Continuous update", kind=wx.ITEM_CHECK)
menu4.AppendItem(self.c.CUpdateMenuItem)
menu4.Append(self.ID_SHOWSHELL, "Show Shell")
self.Bind(wx.EVT_MENU, self.OnShowShell, id=self.ID_SHOWSHELL)
self.mainmenu.Append(menu4, "&Action!")
# dynamic menu...
menud = wx.Menu()
#directories = filter(lambda x: os.path.isdir(x), sorted(glob.glob(opj('%s/modes/*' % os.getcwd()))))
# removed for Python 2.3 compatibility
directories = filter(lambda x: os.path.isdir(x), glob.glob(opj('%s/modes/*' % curpath)))
directories = map(lambda y: os.path.split(y)[1] , directories)
for d in directories:
nid = wx.NewId()
dd = d
item = wx.MenuItem(menud, nid, dd, kind = wx.ITEM_RADIO)
menud.AppendItem(item)
self.modeMenus[nid] = item
if dd == self.currentMode:
item.Check(True)
self.Bind(wx.EVT_MENU, self.OnSwitchMode, id=nid)
menud.AppendSeparator()
menud.Append(self.ID_MODEPREFERENCES, "Mode preferences...")
self.Bind(wx.EVT_MENU, self.OnModePreferences, id=self.ID_MODEPREFERENCES)
self.mainmenu.Append(menud, "&Mode")
menu2 = wx.Menu()
menu2.Append(wx.ID_HELP, "Help")
menu2.Append(wx.ID_ABOUT, "About...")
self.mainmenu.Append(menu2, "&Help")
self.SetMenuBar(self.mainmenu)
if wx.Platform == "__WXMAC__":
wx.App.SetMacAboutMenuItemId(wx.ID_ABOUT)
wx.App.SetMacPreferencesMenuItemId(wx.ID_PREFERENCES)
'''
add a handler to verify things when
menu items are opened
'''
#if wx.Platform in ['__WXMSW__','__WXGTK__']:
#self.Bind( wx.EVT_MENU_OPEN, self.onMainMenuOpen )
#self.Bind( wx.EVT_MENU_CLOSE, self.onMainMenuClose )
# FILE MENU
self.Bind(wx.EVT_MENU, self.OnNewDocument, id=wx.ID_NEW)
self.Bind(wx.EVT_MENU, self.OnOpenDocument, id=wx.ID_OPEN)
self.Bind(wx.EVT_MENU, self.OnSaveDocument, id=wx.ID_SAVE)
self.Bind(wx.EVT_MENU, self.OnSaveAs, id=wx.ID_SAVEAS)
self.Bind(wx.EVT_MENU, self.OnExit, id=wx.ID_EXIT)
self.Bind(wx.EVT_MENU, self.OnAction, id=self.ID_ACTION)
self.Bind(wx.EVT_MENU, self.OnImmediateUpdate, id=self.ID_IMMEDIATEUPDATE)
self.Bind(wx.EVT_MENU, self.OnViewGeneratedCode, id=self.ID_VIEWCODE)
self.Bind(wx.EVT_MENU, self.OnPreferences, id=wx.ID_PREFERENCES)
self.Bind(wx.EVT_MENU, self.OnLayoutNodes, id=self.ID_LAYOUTNODES)
## EDIT MENU
#self.Bind(wx.EVT_MENU, self.OnUndo, id=wx.ID_UNDO)
#self.Bind(wx.EVT_MENU, self.OnRedo, id=wx.ID_REDO)
self.Bind(wx.EVT_MENU, self.OnDelete, id=wx.ID_DELETE)
self.Bind(wx.EVT_MENU, self.OnSelectAll, id=wx.ID_SELECTALL)
## HELP MENU
self.Bind(wx.EVT_MENU, self.OnAbout, id=wx.ID_ABOUT)
self.Bind(wx.EVT_MENU, self.OnHelp, id=wx.ID_HELP)
def OnShowShell(self, event):
self.shell.Show(True)
def OnLayoutNodes(self, event): # apparently, doesn't work as expected. But left here for [possible] experiments in the future.
import core.topo
#nodes = ['root', 'diff', 'spec', 'op', 'n1', 'n2']
#route = [('diff', 'root'), ('spec', 'root'), ('n1', 'op'), ('op','diff'), ('n2','root')]
#nodes = [ p.node.id for p in panels ]
#route = [ (a.connection.inputNode.id, a.connection.outputNode.id) for a in arrows ]
nodes = [ p.node for p in panels ]
route = [ (a.connection.inputNode, a.connection.outputNode) for a in arrows ]
#print nodes
#print route
sorted_rows = core.topo.toposort(nodes, route) # renamed to avoid shadowing the sorted() builtin
#for row in sorted:
#print ">",
#for item in row:
#print item.id,
#print ""
MARGIN = 35
x = 0
for row in sorted_rows:
maxwidth = max([item.panel.width for item in row])
for item in row:
item.panel.x = x
#item.panel.y = y
#y += item.panel.height + 20
x += maxwidth + MARGIN
self.c.Refresh()
event.Skip()
def OnPreferences(self, event):
if wx.Platform == "__WXMAC__":
deffontsize = 12
else:
deffontsize = 10
curfontsize = settings.get("fontsize", str(deffontsize))
self.preferences = [{'default': curfontsize, 'name': u'Font Size', 'type': u'string'}]
dlg = prefs.PropertiesFrame(None, self.preferences, title = "ShaderMan preferences")
dlg.Bind(wx.EVT_CLOSE, self.SavePreferences)
dlg.Show()
def SavePreferences(self, event):
for s in self.preferences:
if s["name"] == "Font Size":
settings['fontsize'] = s["default"]
#if s["name"] == "Font Name":
#settings['fontname'] = s["default"]
InitNodeDraw()
for obj in panels+arrows:
obj.refreshFont()
self.c.Refresh(True)
event.Skip()
def OnViewGeneratedCode(self, event): # code duplicated from OnAction
imported = __import__("modes.%s" % self.currentMode, globals(), locals(), ("name", "generator"))
rootnodes = filter(self.findRootNode, nodes) # can be selected mode as well?
if len(rootnodes):
cg = rootnodes[0].GenerateCode()
print cg[0]
def OnModePreferences(self, event):
imported = __import__("modes.%s" % self.currentMode, globals(), locals(), ("preferences"))
try:
imported.preferences()
except: # module doesn't require preferences or they're not implemented yet
wx.MessageBox("This module doesn't provide the preferences to edit", "Nothing to do", wx.ICON_INFORMATION)
def OnFileHistory(self, event):
temp = self.filehistory.GetHistoryFile(event.GetId() - wx.ID_FILE1)
if os.path.exists(temp):
self.scenename = temp
self.JustLoadTheData()
self.SetTitle("%s - %s" % (self.scenename, productname))
self.filehistory.AddFileToHistory(self.scenename)
self.UpdateFileHistoryArray()
else:
wx.MessageBox("Scene file %s not found." % temp, "Nothing to do", wx.ICON_INFORMATION)
def OnImmediateUpdate(self, event):
pass
def OnSwitchMode(self, event):
self.currentMode = self.modeMenus[event.GetId()].GetLabel()
self.ActuallySwitchMode()
self.OnNewDocument(None)
event.Skip()
def findRootNode(self, x):
return (x.code != "")
def OnAction(self, event):
imported = __import__("modes.%s" % self.currentMode, globals(), locals(), ("name", "generator"))
rootnodes = filter(self.findRootNode, nodes)
filename = self.factory.getName()
if len(rootnodes):
cg = rootnodes[0].GenerateCode()
imported.generator(self, filename, cg[0])
def OnSelectAll(self, event):
del self.c.markedPanels[:]
self.c.markedPanels = [p for p in panels]
self.c.Refresh(True)
def OnDelete(self, event):
connectedArrows = []
for pnl in self.c.markedPanels:
deleted_node = pnl.node # local name kept distinct from the imported node module
# find connected arrows
for a in arrows:
if (a.connection.inputNode == pnl.node) or (a.connection.outputNode == pnl.node):
if a not in connectedArrows:
connectedArrows.append(a)
SafelyDelete(panels, pnl)
del pnl
SafelyDelete(nodes, deleted_node)
del deleted_node
del self.c.markedPanels[:]
#delete connected arrows
for a in connectedArrows:
self.c.ActuallyDeleteConnection(a.connection)
self.c.Refresh(False)
def OnAbout(self, event):
import core.about as about
dlg = about.AboutBox(self)
dlg.ShowModal()
dlg.Destroy()
def OnHelp(self, event):
pass
def OnNewDocument(self, event):
self.scenename = None
del arrows[:]
del connections[:]
del panels[:]
del nodes[:]
del groups[:]
node.Node._instance_count = 0
node.Connection._instance_count = 0
Group._instance_count = 0
clearImageCache()
self.c.Refresh(False)
def JustLoadTheData(self):
del arrows[:]
del connections[:]
del panels[:]
del nodes[:]
del groups[:]
node.Node._instance_count = 0
node.Connection._instance_count = 0
Group._instance_count = 0
clearImageCache()
res = False
try:
f = open(self.scenename, 'r')
# simple sandbox protection
import copy
gg = globals()
ll = locals()
ng = copy.copy(gg)
nc = copy.copy(ll)
try:
del ng['os'] # we're not allowing os module in files we're loading...
except:
pass
exec(f, ng, nc)
f.close()
del ng
del nc
res = True
finally:
self.c.Refresh(True)
return res
def OnOpenDocument(self, event):
wildcard = "ShaderMan scenes|*.smn|All files (*)|*"
dlg = wx.FileDialog( self, message="Load scene", defaultDir=os.getcwd(), defaultFile="", wildcard=wildcard, style=wx.OPEN )
if dlg.ShowModal() == wx.ID_OK:
self.scenename = dlg.GetPath()
self.JustLoadTheData()
self.SetTitle("%s - %s" % (self.scenename, productname))
self.filehistory.AddFileToHistory(self.scenename)
self.UpdateFileHistoryArray()
dlg.Destroy()
def JustSaveTheData(self):
f = open(self.scenename, 'w')
print >>f, """self.currentMode = "%s"\nself.ActuallySwitchMode()\n""" % self.currentMode
print >>f, "\n".join([str(thing) for thing in nodes+panels+connections+arrows+groups])
f.flush()
f.close()
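# JustLoadTheData() exec()s a saved scene back in, so a .smn file is plain
# Python. Hypothetical sketch of the first lines of such a file (values are
# illustrative only):
#
# self.currentMode = "Renderman SL"
# self.ActuallySwitchMode()
#
# ...followed by one str(...) serialization per node, panel, connection,
# arrow and group, in that order.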
def OnSaveDocument(self, event):
if self.scenename == None:
self.OnSaveAs(event)
else:
self.JustSaveTheData()
def OnSaveAs(self, event):
wildcard = "ShaderMan scenes|*.smn|All files (*)|*"
dlg = wx.FileDialog( self, message="Save scene", defaultDir=os.getcwd(), defaultFile="test.smn", wildcard=wildcard, style=wx.SAVE )
# should be factory.name?
if dlg.ShowModal() == wx.ID_OK:
self.scenename = dlg.GetPath()
self.JustSaveTheData()
self.SetTitle("%s - %s" % (self.scenename, productname))
dlg.Destroy()
def OnTreeLeftDClick(self, event):
itemid = self.tree.GetSelection()
fullpath = self.tree.GetPyData(itemid)
pnl = None
if fullpath.endswith(".br"): # isn't directory or something
node1 = node.Node(-1, fullpath, factory = self.factory)
nodes.append(node1)
pnl = NodePanel(self, x = 20 + self.c.panx, y = 20 + self.c.pany)
panels.append(pnl)
pnl.assignNode(node1)
self.c.Refresh(False)
if event != None:
event.Skip()
return pnl
def OnEraseBackground(self, event):
pass # Do nothing, to avoid flashing on MSW.
def OnExit(self, event):
self.pform.Close()
del self.pform
self.Close()
def OnSize(self, event):
ws, hs = self.GetSize()
settings['width'] = ws
settings['height'] = hs
event.Skip()
def OnMove(self, event):
x, y = self.GetPosition()
settings['left'] = x
settings['top'] = y
event.Skip()
def MakeIcon(self, img):
"""
The various platforms have different requirements for the
icon size...
"""
if "wxMSW" in wx.PlatformInfo:
img = img.Scale(16, 16)
elif "wxGTK" in wx.PlatformInfo:
img = img.Scale(22, 22)
# wxMac can be any size up to 128x128, so leave the source img alone.
icon = wx.IconFromBitmap(img.ConvertToBitmap() )
return icon
def UpdateFileHistoryArray(self):
history = []
for i in range(self.filehistory.GetCount()):
history.append(self.filehistory.GetHistoryFile(i))
settings['history'] = ",".join(history)
def LoadSettings():
filename = os.path.expanduser("~/.ShaderMan")
if os.path.isfile(filename):
raw_settings = open(filename).readlines()
for line in raw_settings:
a = line.strip().split("=")
settings[a[0]] = a[1]
def SaveSettings():
filename = os.path.expanduser("~/.ShaderMan")
h = open(filename, 'w')
for key in settings.keys():
line = "%s=%s\n" % (key, settings[key])
h.write(line)
h.close()
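# The settings file is a flat key=value text file, one pair per line.
# A hypothetical ~/.ShaderMan (values are illustrative; the keys below are
# the ones read elsewhere in this module):
#
# fontsize=10
# width=800
# height=600
# left=100
# top=80
# history=/path/a.smn,/path/b.smn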
if __name__ == '__main__':
LoadSettings()
app = wx.PySimpleApp(redirect=False, useBestVisual=True)
app.SetUseBestVisual(True)
frm = MainFrame(None, productname)
InitNodeDraw()
w = int(settings.get("width", 600))
h = int(settings.get("height", 700))
x = min(int(settings.get("left", 0)), wx.SystemSettings_GetMetric( wx.SYS_SCREEN_X )-w) # because we want to see it on the screen
y = min(int(settings.get("top", 0)), wx.SystemSettings_GetMetric( wx.SYS_SCREEN_Y )-h)
history = settings.get("history", None)
if history != None:
for hh in history.split(","):
frm.filehistory.AddFileToHistory(hh)
frm.SetSize((w,h))
frm.SetPosition((x, y))
frm.Show()
app.SetTopWindow(frm)
if len(sys.argv)>1:
frm.scenename = sys.argv[1]
frm.JustLoadTheData()
frm.SetTitle("%s - %s" % (frm.scenename, productname))
frm.filehistory.AddFileToHistory(frm.scenename)
frm.UpdateFileHistoryArray()
frm.c.Refresh(True)
else:
frm.currentMode = "Renderman SL" # should load from the settings
frm.ActuallySwitchMode()
frm.c.Refresh(True)
app.MainLoop()
SaveSettings()
| bsd-3-clause |
KelSolaar/sIBL_GUI | sibl_gui/components/core/inspector/models.py | 1 | 2265 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**models.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
Defines the :class:`sibl_gui.components.core.inspector.inspector.Inspector`
Component Interface class Models.
**Others:**
"""
from __future__ import unicode_literals
import foundations.verbose
import sibl_gui.ui.models
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "thomas.mansencal@gmail.com"
__status__ = "Production"
__all__ = ["LOGGER", "PlatesModel"]
LOGGER = foundations.verbose.install_logger()
class PlatesModel(sibl_gui.ui.models.GraphModel):
"""
Defines the Model used by the :class:`sibl_gui.components.core.inspector.inspector.Inspector`
Component Interface class.
"""
def __init__(self, parent=None, root_node=None, horizontal_headers=None, vertical_headers=None):
"""
Initializes the class.
:param parent: Object parent.
:type parent: QObject
:param root_node: Root node.
:type root_node: AbstractCompositeNode
:param horizontal_headers: Headers.
:type horizontal_headers: OrderedDict
:param vertical_headers: Headers.
:type vertical_headers: OrderedDict
"""
LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))
sibl_gui.ui.models.GraphModel.__init__(self,
parent,
root_node,
horizontal_headers,
vertical_headers)
def initialize_model(self, root_node):
"""
Initializes the Model using given root node.
:param root_node: Graph root node.
:type root_node: DefaultNode
:return: Method success
:rtype: bool
"""
LOGGER.debug("> Initializing model with '{0}' root node.".format(root_node))
self.beginResetModel()
self.root_node = root_node
self.enable_model_triggers(True)
self.endResetModel()
return True
| gpl-3.0 |
rismalrv/edx-platform | lms/djangoapps/edxnotes/views.py | 72 | 3809 | """
Views related to EdxNotes.
"""
import json
import logging
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.conf import settings
from django.core.urlresolvers import reverse
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.keys import CourseKey
from courseware.courses import get_course_with_access
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from util.json_request import JsonResponse, JsonResponseBadRequest
from edxnotes.exceptions import EdxNotesParseError, EdxNotesServiceUnavailable
from edxnotes.helpers import (
get_notes,
get_id_token,
is_feature_enabled,
search,
get_course_position,
)
log = logging.getLogger(__name__)
@login_required
def edxnotes(request, course_id):
"""
Displays the EdxNotes page.
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, "load", course_key)
if not is_feature_enabled(course):
raise Http404
try:
notes = get_notes(request.user, course)
except EdxNotesServiceUnavailable:
raise Http404
context = {
"course": course,
"search_endpoint": reverse("search_notes", kwargs={"course_id": course_id}),
"notes": notes,
"debug": json.dumps(settings.DEBUG),
'position': None,
}
if not notes:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=2
)
course_module = get_module_for_descriptor(
request.user, request, course, field_data_cache, course_key, course=course
)
position = get_course_position(course_module)
if position:
context.update({
'position': position,
})
return render_to_response("edxnotes/edxnotes.html", context)
@login_required
def search_notes(request, course_id):
"""
Handles search requests.
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, "load", course_key)
if not is_feature_enabled(course):
raise Http404
if "text" not in request.GET:
return HttpResponseBadRequest()
query_string = request.GET["text"]
try:
search_results = search(request.user, course, query_string)
except (EdxNotesParseError, EdxNotesServiceUnavailable) as err:
return JsonResponseBadRequest({"error": err.message}, status=500)
return HttpResponse(search_results)
# pylint: disable=unused-argument
@login_required
def get_token(request, course_id):
"""
Get a JWT ID-Token, in case you need a new one.
"""
return HttpResponse(get_id_token(request.user), content_type='text/plain')
@login_required
def edxnotes_visibility(request, course_id):
"""
Handle ajax call from "Show notes" checkbox.
"""
course_key = CourseKey.from_string(course_id)
course = get_course_with_access(request.user, "load", course_key)
field_data_cache = FieldDataCache([course], course_key, request.user)
course_module = get_module_for_descriptor(
request.user, request, course, field_data_cache, course_key, course=course
)
if not is_feature_enabled(course):
raise Http404
try:
visibility = json.loads(request.body)["visibility"]
course_module.edxnotes_visibility = visibility
course_module.save()
return JsonResponse(status=200)
except (ValueError, KeyError):
log.warning(
"Could not decode request body as JSON and find a boolean visibility field: '%s'", request.body
)
return JsonResponseBadRequest()
| agpl-3.0 |
KirillMysnik/ArcJail | srcds/addons/source-python/plugins/arcjail/modules/games/base_classes/prepare_time.py | 1 | 6961 | # This file is part of ArcJail.
#
# ArcJail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ArcJail is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ArcJail. If not, see <http://www.gnu.org/licenses/>.
from events.manager import event_manager
from listeners.tick import Delay, on_tick_listener_manager
from messages import TextMsg
from ....internal_events import internal_event_manager
from ...overlays import show_overlay
from ...players import broadcast, player_manager
from .. import config_manager, stage, strings_module
from .jail_game import JailGame
class PrepareTime(JailGame):
stage_groups = {
'init': ["prepare-prepare", ],
'destroy': [
"prepare-cancel-delays",
"unsend-popups",
"cancel-delays",
"destroy",
],
'prepare-start': [
'prepare-freeze',
'prepare-register-event-handlers',
'prepare-entry',
],
'abort-prepare-interrupted': ["abort-prepare-interrupted", ],
'prepare-continue': [
"prepare-cancel-countdown",
"prepare-undo-prepare-start",
"register-event-handlers",
"start-notify",
"basegame-entry",
],
}
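# Rough flow implied by the stage groups above (my reading of the code, not
# an authoritative spec): 'init' starts the prepare countdown; a player
# death/hurt/disconnect during it triggers 'abort-prepare-interrupted';
# otherwise the countdown delay fires 'prepare-continue', which unfreezes
# players and enters the base game.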
def __init__(self, leader_player, players, **kwargs):
super().__init__(leader_player, players, **kwargs)
self._prepare_delay = None
self._prepare_countdown = None
@stage('prepare-prepare')
def stage_prepare_prepare(self):
if self._settings.get('prepare', True):
indexes = list(player.index for player in self._players)
if self.leader.index not in indexes:
indexes.append(self.leader.index)
def callback():
self.undo_stages('prepare-start')
self.set_stage_group('prepare-continue')
self._prepare_delay = Delay(
config_manager['prepare_timeout'], callback)
def countdown(ticks_left):
if (ticks_left > 3 or ticks_left < 1 or config_manager[
'countdown_{}_material'.format(ticks_left)] == ""):
TextMsg(str(ticks_left)).send(*indexes)
else:
for player in self._players:
show_overlay(player, config_manager[
'countdown_{}_material'.format(ticks_left)], 1)
if config_manager['countdown_sound'] is not None:
config_manager['countdown_sound'].play(*indexes)
self._prepare_countdown = Delay(1.0, countdown, ticks_left - 1)
countdown(int(config_manager['prepare_timeout']))
broadcast(strings_module['stage_prepare'])
if config_manager['prepare_sound'] is not None:
config_manager['prepare_sound'].play(*indexes)
self.set_stage_group('prepare-start')
else:
self.set_stage_group('prepare-continue')
def _prepare_event_handler_player_death(self, game_event):
player = player_manager.get_by_userid(game_event['userid'])
if player in self._players or player == self.leader:
self.set_stage_group('abort-prepare-interrupted')
def _prepare_event_handler_player_deleted(self, player):
if player in self._players or player == self.leader:
self.set_stage_group('abort-prepare-interrupted')
def _prepare_event_handler_player_hurt(self, game_event):
player = player_manager.get_by_userid(game_event['userid'])
if player in self._players or player == self.leader:
self.set_stage_group('abort-prepare-interrupted')
@stage('prepare-register-event-handlers')
def stage_prepare_register_event_handlers(self):
event_manager.register_for_event(
'player_death', self._prepare_event_handler_player_death)
event_manager.register_for_event(
'player_hurt', self._prepare_event_handler_player_hurt)
internal_event_manager.register_event_handler(
'player_deleted',
self._prepare_event_handler_player_deleted
)
@stage('undo-prepare-register-event-handlers')
def stage_undo_prepare_register_event_handlers(self):
event_manager.unregister_for_event(
'player_death', self._prepare_event_handler_player_death)
event_manager.unregister_for_event(
'player_hurt', self._prepare_event_handler_player_hurt)
internal_event_manager.unregister_event_handler(
'player_deleted',
self._prepare_event_handler_player_deleted
)
@stage('prepare-cancel-delays')
def stage_prepare_cancel_delays(self):
for delay in (self._prepare_delay, self._prepare_countdown):
if delay is not None and delay.running:
delay.cancel()
@stage('prepare-cancel-countdown')
def stage_prepare_cancel_countdown(self):
if self._prepare_countdown is not None:
self._prepare_countdown.cancel()
@stage('prepare-undo-prepare-start')
def stage_prepare_undo_prepare_start(self):
self.undo_stages('prepare-start')
@stage('prepare-entry')
def stage_prepare_entry(self):
pass
def _prepare_freeze_tick_handler(self):
for player in self._players:
weapon = player.active_weapon
if weapon is None:
continue
weapon.next_attack += 1
weapon.next_secondary_fire_attack += 1
@stage('prepare-freeze')
def stage_prepare_freeze(self):
on_tick_listener_manager.register_listener(
self._prepare_freeze_tick_handler)
for player in self._players:
player.stuck = True
@stage('undo-prepare-freeze')
def stage_undo_prepare_freeze(self):
on_tick_listener_manager.unregister_listener(
self._prepare_freeze_tick_handler)
for player in self._players:
player.stuck = False
weapon = player.active_weapon
if weapon is None:
continue
weapon.next_attack = 0
weapon.next_secondary_fire_attack = 0
@stage('abort-prepare-interrupted')
def stage_abort_prepare_interrupted(self):
broadcast(strings_module['abort_prepare_interrupted'])
if config_manager['prepare_sound'] is not None:
config_manager['prepare_sound'].stop()
self.set_stage_group('destroy')
| gpl-3.0 |
MajorMajor807/Cpp_Final_Project | third_party/gtest/test/gtest_catch_exceptions_test.py | 2139 | 9901 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
* Exceptions thrown affect the remainder of the test work flow in the
expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestCase()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest test body '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestCase()'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUp()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDown()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in the test body'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assert_('Unknown C++ exception thrown in the test body'
in EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
FILTER_OUT_SEH_TESTS_FLAG],
env=environ).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-3.0 |
marcellodesales/svnedge-console | ext/windows/pkg-toolkit/pkg/vendor-packages/pkg/search_errors.py | 4 | 4941 | #!/usr/bin/python2.4
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
# __str__ methods defined for subclasses of IndexError should be defined
# for the server implementations. If the client needs different messages
# displayed, catch the exception on the client side and display a custom
# message.
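# A minimal sketch of the client-side pattern described above (the
# do_index_update() and display_custom_message() helpers named here are
# hypothetical):
#
# try:
# do_index_update()
# except InconsistentIndexException, e:
# display_custom_message("Search index is corrupt: %s" % e.cause)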
class IndexingException(Exception):
"""The base class for all exceptions that can occur while indexing."""
def __init__(self, cause):
self.cause = cause
class InconsistentIndexException(IndexingException):
"""This is used when the existing index is found to have inconsistent
versions."""
def __str__(self):
return "Index corrupted, remove all files and " \
"rebuild from scratch by clearing out %s " \
" and restarting the depot." % self.cause
class PartialIndexingException(IndexingException):
"""This is used when the directory the temporary files the indexer
should write to already exists."""
def __str__(self):
return "Unable to build or update search indices. Result of " \
"partial indexing found:%s. Please remove this directory "\
"and start a depot with the --refresh-index flag." % \
self.cause
class ProblematicPermissionsIndexException(IndexingException):
"""This is used when the indexer is unable to create, move, or remove
files or directories it should be able to."""
def __str__(self):
return "Could not remove or create " \
"%s because of\nincorrect " \
"permissions. Please correct this issue then " \
"rebuild the index." % self.cause
class NoIndexException(Exception):
"""This is used when a search is executed while no index exists."""
def __init__(self, index_dir):
self.index_dir = index_dir
def __str__(self):
return "Could not find index to search, looked in: %s" \
% self.index_dir
class IncorrectIndexFileHash(Exception):
"""This is used when the index hash value doesn't match the hash of the
packages installed in the image."""
def __init__(self, existing_val, incoming_val):
Exception.__init__(self)
self.ev = existing_val
self.iv = incoming_val
def __str__(self):
return "existing_val was:%s\nincoming_val was:%s" % \
(self.ev, self.iv)
class MainDictParsingException(Exception):
"""This is used when an error occurred while parsing the main search
dictionary file."""
def __init__(self, split_chars, unquote_list, line, file_pos):
Exception.__init__(self)
self.split_chars = split_chars
self.unquote_list = unquote_list
self.line = line
self.file_pos = file_pos
class EmptyUnquoteList(MainDictParsingException):
"""This is used when the function to parse the main dictionary file
wasn't given enough values in its unquote_list argument."""
def __init__(self, split_chars, line):
MainDictParsingException.__init__(self, split_chars, None, line, None) # sets the attributes __str__ reads
def __str__(self):
return _("Got an empty unquote_list while indexing. split_chars"
" was %(sc)s and line was %(l)s" %
{ "sc": self.split_chars, "l": self.line })
class EmptyMainDictLine(MainDictParsingException):
"""This is used when a blank line in the main dictionary file was
encountered."""
def __init__(self, split_chars, unquote_list):
MainDictParsingException.__init__(self, split_chars, unquote_list, None, None) # sets the attributes __str__ reads
def __str__(self):
return _("Had an empty line in the main dictionary. split_chars"
" is %(sc)s and unquote_list is %(ul)s.%(s)s" %
{ "sc": self.split_chars, "ul": self.unquote_list, "l": s })
| agpl-3.0 |
hfp/tensorflow-xsmm | tensorflow/api_template.__init__.py | 3 | 4370 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bring in all of the public TensorFlow interface into this module."""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import distutils as _distutils
import inspect as _inspect
import os as _os
import site as _site
import sys as _sys
# API IMPORTS PLACEHOLDER
# pylint: disable=g-bad-import-order
from tensorflow.python.tools import component_api_helper as _component_api_helper
_component_api_helper.package_hook(
parent_package_str=__name__,
child_package_str=(
'tensorflow_estimator.python.estimator.api._v2.estimator'))
_current_module = _sys.modules[__name__]
if not hasattr(_current_module, 'estimator'):
_component_api_helper.package_hook(
parent_package_str=__name__,
child_package_str=(
'tensorflow_estimator.python.estimator.api.estimator'))
_component_api_helper.package_hook(
parent_package_str=__name__,
child_package_str=('tensorflow.python.keras.api._v2.keras'))
# Make sure directory containing top level submodules is in
# the __path__ so that "from tensorflow.foo import bar" works.
# We're using bitwise, but there's nothing special about that.
_tf_api_dir = _os.path.dirname(_os.path.dirname(bitwise.__file__)) # pylint: disable=undefined-variable
if not hasattr(_current_module, '__path__'):
__path__ = [_tf_api_dir]
elif _tf_api_dir not in __path__:
__path__.append(_tf_api_dir)
# Enable TF2 behaviors
from tensorflow.python.compat import v2_compat as _compat # pylint: disable=g-import-not-at-top
_compat.enable_v2_behavior()
# Load all plugin libraries from site-packages/tensorflow-plugins if we are
# running under pip.
# TODO(gunan): Enable setting an environment variable to define arbitrary plugin
# directories.
# TODO(gunan): Find a better location for this code snippet.
from tensorflow.python.framework import load_library as _ll
from tensorflow.python.lib.io import file_io as _fi
# Get sitepackages directories for the python installation.
_site_packages_dirs = []
_site_packages_dirs += [_site.USER_SITE]
_site_packages_dirs += [_p for _p in _sys.path if 'site-packages' in _p]
if 'getsitepackages' in dir(_site):
_site_packages_dirs += _site.getsitepackages()
if 'sysconfig' in dir(_distutils):
_site_packages_dirs += [_distutils.sysconfig.get_python_lib()]
_site_packages_dirs = list(set(_site_packages_dirs))
# Find the location of this exact file.
_current_file_location = _inspect.getfile(_inspect.currentframe())
def _running_from_pip_package():
return any(
_current_file_location.startswith(dir_) for dir_ in _site_packages_dirs)
if _running_from_pip_package():
for s in _site_packages_dirs:
# TODO(gunan): Add sanity checks to loaded modules here.
plugin_dir = _os.path.join(s, 'tensorflow-plugins')
if _fi.file_exists(plugin_dir):
_ll.load_library(plugin_dir)
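# Illustrative (hypothetical) layout the check above would match:
# <site-packages>/tensorflow-plugins/libmy_custom_op.so
# The directory itself is handed to load_library(), which loads the plugin
# libraries it contains.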
# These symbols appear because we import the python package which
# in turn imports from tensorflow.core and tensorflow.python. They
# must come from this module. So python adds these symbols for the
# resolution to succeed.
# pylint: disable=undefined-variable
try:
del python
del core
except NameError:
# Don't fail if these modules are not available.
# For e.g. this file will be originally placed under tensorflow/_api/v1 which
# does not have 'python', 'core' directories. Then, it will be copied
# to tensorflow/ which does have these two directories.
pass
# Similarly for compiler. Do it separately to make sure we do this even if the
# others don't exist.
try:
del compiler
except NameError:
pass
# pylint: enable=undefined-variable
| apache-2.0 |
sekikn/ambari | ambari-common/src/main/python/ambari_ws4py/server/wsgirefserver.py | 2 | 5353 | # -*- coding: utf-8 -*-
__doc__ = """
Add WebSocket support to the built-in WSGI server
provided by the :py:mod:`wsgiref`. This is clearly not
meant to be a production server so please consider this
only for testing purpose.
Mostly, this module overrides bits and pieces of
the built-in classes so that it supports the WebSocket
workflow.
.. code-block:: python
from wsgiref.simple_server import make_server
from ambari_ws4py.websocket import EchoWebSocket
from ambari_ws4py.server.wsgirefserver import WSGIServer, WebSocketWSGIRequestHandler
from ambari_ws4py.server.wsgiutils import WebSocketWSGIApplication
server = make_server('', 9000, server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=EchoWebSocket))
server.initialize_websockets_manager()
server.serve_forever()
.. note::
For some reason this server may fail against autobahntestsuite.
"""
import logging
import sys
import itertools
import operator
from wsgiref.handlers import SimpleHandler
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer as _WSGIServer
from wsgiref import util
util._hoppish = {}.__contains__
from ambari_ws4py.manager import WebSocketManager
from ambari_ws4py import format_addresses
from ambari_ws4py.server.wsgiutils import WebSocketWSGIApplication
from ambari_ws4py.compat import get_connection
__all__ = ['WebSocketWSGIHandler', 'WebSocketWSGIRequestHandler',
'WSGIServer']
logger = logging.getLogger('ambari_ws4py')
class WebSocketWSGIHandler(SimpleHandler):
def setup_environ(self):
"""
Setup the environ dictionary and add the
`'ws4py.socket'` key. Its associated value
is the real socket underlying socket.
"""
SimpleHandler.setup_environ(self)
self.environ['ws4py.socket'] = get_connection(self.environ['wsgi.input'])
self.http_version = self.environ['SERVER_PROTOCOL'].rsplit('/')[-1]
def finish_response(self):
"""
Completes the response and performs the following tasks:
- Remove the `'ws4py.socket'` and `'ws4py.websocket'`
environ keys.
- Attach the returned websocket, if any, to the WSGI server
using its ``link_websocket_to_server`` method.
"""
# force execution of the result iterator until first actual content
rest = iter(self.result)
first = list(itertools.islice(rest, 1))
self.result = itertools.chain(first, rest)
# now it's safe to look if environ was modified
ws = None
if self.environ:
self.environ.pop('ws4py.socket', None)
ws = self.environ.pop('ws4py.websocket', None)
try:
SimpleHandler.finish_response(self)
except:
if ws:
ws.close(1011, reason='Something broke')
raise
else:
if ws:
self.request_handler.server.link_websocket_to_server(ws)
class WebSocketWSGIRequestHandler(WSGIRequestHandler):
WebSocketWSGIHandler = WebSocketWSGIHandler
def handle(self):
"""
Unfortunately the base class forces us
to override the whole method to actually provide our wsgi handler.
"""
self.raw_requestline = self.rfile.readline()
if not self.parse_request(): # An error code has been sent, just exit
return
# next line is where we'd expect a configuration key somehow
handler = self.WebSocketWSGIHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
class WSGIServer(_WSGIServer):
def initialize_websockets_manager(self):
"""
Call this to start the underlying websockets
manager. Make sure to call it once your server
is created.
"""
self.manager = WebSocketManager()
self.manager.start()
def shutdown_request(self, request):
"""
The base class would close our socket
if we didn't override it.
"""
pass
def link_websocket_to_server(self, ws):
"""
Call this from your WSGI handler when a websocket
has been created.
"""
self.manager.add(ws)
def server_close(self):
"""
Properly initiate closing handshakes on
all websockets when the WSGI server terminates.
"""
if hasattr(self, 'manager'):
self.manager.close_all()
self.manager.stop()
self.manager.join()
delattr(self, 'manager')
_WSGIServer.server_close(self)
if __name__ == '__main__':
from ambari_ws4py import configure_logger
configure_logger()
from wsgiref.simple_server import make_server
from ambari_ws4py.websocket import EchoWebSocket
server = make_server('', 9000, server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=EchoWebSocket))
server.initialize_websockets_manager()
try:
server.serve_forever()
except KeyboardInterrupt:
server.server_close()
| apache-2.0 |
chenjiafan/pjsip | tests/pjsua/mod_sipp.py | 8 | 8609 | # $Id: mod_sipp.py 5067 2015-04-13 12:28:02Z nanang $
## Automatic test module for SIPp.
##
## This module will need a test driver for each SIPp scenario:
## - For simple scenario, i.e: make/receive call (including auth), this
## test module can auto-generate a default test driver, i.e: make call
## or apply auto answer. Just name the SIPp scenario using "uas" or
## "uac" prefix accordingly.
## - Custom test driver can be defined in a python script file containing
## a list of the PJSUA instances and another list for PJSUA expects/
## commands. The custom test driver file must use the same filename as
## the SIPp XML scenario. See samples of SIPp scenario + its driver
## in tests/pjsua/scripts-sipp/ folder for detail.
##
## Here are defined macros that can be used in the custom driver:
## - $SIPP_PORT : SIPp binding port
## - $SIPP_URI : SIPp SIP URI
## - $PJSUA_PORT[N] : binding port of PJSUA instance #N
## - $PJSUA_URI[N] : SIP URI of PJSUA instance #N
import ctypes
import time
import imp
import sys
import os
import re
import subprocess
from inc_cfg import *
import inc_const
# flags that test is running in Unix
G_INUNIX = False
if sys.platform.lower().find("win32")!=-1 or sys.platform.lower().find("microsoft")!=-1:
G_INUNIX = False
else:
G_INUNIX = True
# /dev/null handle, for redirecting output when SIPP is not in background mode
FDEVNULL = None
# SIPp executable path and param
#SIPP_PATH = '"C:\\devs\\bin\\Sipp_3.2\\sipp.exe"'
SIPP_PATH = 'sipp'
SIPP_PORT = 6000
SIPP_PARAM = "-m 1 -i 127.0.0.1 -p " + str(SIPP_PORT)
SIPP_TIMEOUT = 60
# On BG mode, SIPp doesn't require special terminal
# On non-BG mode, on win, it needs env var: "TERMINFO=c:\cygwin\usr\share\terminfo"
# TODO: on unix with BG mode, waitpid() always fails, need to be fixed
SIPP_BG_MODE = False
#SIPP_BG_MODE = not G_INUNIX
# Will be updated based on the test driver file (a .py file whose the same name as SIPp XML file)
PJSUA_INST_PARAM = []
PJSUA_EXPECTS = []
# Default PJSUA param if test driver is not available:
# - no-tcp as SIPp is on UDP only
# - id, username, and realm: to allow PJSUA sending re-INVITE with auth after receiving 401/407 response
PJSUA_DEF_PARAM = "--null-audio --max-calls=1 --no-tcp --id=sip:a@localhost --username=a --realm=*"
# Get SIPp scenario (XML file)
SIPP_SCEN_XML = ""
if ARGS[1].endswith('.xml'):
SIPP_SCEN_XML = ARGS[1]
else:
exit(-99)
# Functions for resolving macros in the test driver
def resolve_pjsua_port(mo):
return str(PJSUA_INST_PARAM[int(mo.group(1))].sip_port)
def resolve_pjsua_uri(mo):
return PJSUA_INST_PARAM[int(mo.group(1))].uri[1:-1]
def resolve_driver_macros(st):
st = re.sub("\$SIPP_PORT", str(SIPP_PORT), st)
st = re.sub("\$SIPP_URI", "sip:sipp@127.0.0.1:"+str(SIPP_PORT), st)
st = re.sub("\$PJSUA_PORT\[(\d+)\]", resolve_pjsua_port, st)
st = re.sub("\$PJSUA_URI\[(\d+)\]", resolve_pjsua_uri, st)
return st
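# For illustration (hypothetical port values): with SIPP_PORT = 6000 and
# PJSUA instance #0 bound to port 5060,
# resolve_driver_macros("sip:x@127.0.0.1:$PJSUA_PORT[0] $SIPP_URI")
# -> "sip:x@127.0.0.1:5060 sip:sipp@127.0.0.1:6000"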
# Init test driver
if os.access(SIPP_SCEN_XML[:-4]+".py", os.R_OK):
# Load test driver file (the corresponding .py file), if any
cfg_file = imp.load_source("cfg_file", SIPP_SCEN_XML[:-4]+".py")
for ua_idx, ua_param in enumerate(cfg_file.PJSUA):
ua_param = resolve_driver_macros(ua_param)
PJSUA_INST_PARAM.append(InstanceParam("pjsua"+str(ua_idx), ua_param))
PJSUA_EXPECTS = cfg_file.PJSUA_EXPECTS
else:
# Generate default test driver
if os.path.basename(SIPP_SCEN_XML)[0:3] == "uas":
# auto make call when SIPp is as UAS
ua_param = PJSUA_DEF_PARAM + " sip:127.0.0.1:" + str(SIPP_PORT)
else:
# auto answer when SIPp is as UAC
ua_param = PJSUA_DEF_PARAM + " --auto-answer=200"
PJSUA_INST_PARAM.append(InstanceParam("pjsua", ua_param))
# Start SIPp process, returning PID
def start_sipp():
global SIPP_BG_MODE
sipp_proc = None
sipp_param = SIPP_PARAM + " -sf " + SIPP_SCEN_XML
if SIPP_BG_MODE:
sipp_param = sipp_param + " -bg"
if SIPP_TIMEOUT:
sipp_param = sipp_param + " -timeout "+str(SIPP_TIMEOUT)+"s -timeout_error" + " -deadcall_wait "+str(SIPP_TIMEOUT)+"s"
# add target param
sipp_param = sipp_param + " 127.0.0.1:" + str(PJSUA_INST_PARAM[0].sip_port)
# run SIPp
fullcmd = os.path.normpath(SIPP_PATH) + " " + sipp_param
print "Running SIPP: " + fullcmd
if SIPP_BG_MODE:
sipp_proc = subprocess.Popen(fullcmd, bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=G_INUNIX, universal_newlines=False)
else:
# redirect output to a log file (os.devnull would discard it entirely)
global FDEVNULL
#FDEVNULL = open(os.devnull, 'w')
FDEVNULL = open("logs/sipp_output.tmp", 'w')
sipp_proc = subprocess.Popen(fullcmd, shell=G_INUNIX, stdout=FDEVNULL, stderr=FDEVNULL)
if not SIPP_BG_MODE:
if sipp_proc == None or sipp_proc.poll():
return None
return sipp_proc
else:
# get SIPp child process PID
pid = 0
r = re.compile("PID=\[(\d+)\]", re.I)
while True:
line = sipp_proc.stdout.readline()
pid_r = r.search(line)
if pid_r:
pid = int(pid_r.group(1))
break
if sipp_proc.poll() is not None: # process exited without printing a PID
break
if pid != 0:
# Win specific: get process handle from PID, as on win32, os.waitpid() takes process handle instead of pid
if (sys.platform == "win32"):
SYNCHRONIZE = 0x00100000
PROCESS_QUERY_INFORMATION = 0x0400
hnd = ctypes.windll.kernel32.OpenProcess(SYNCHRONIZE | PROCESS_QUERY_INFORMATION, False, pid)
pid = hnd
return pid
# Wait SIPp process to exit, returning SIPp exit code
def wait_sipp(sipp):
if not SIPP_BG_MODE:
global FDEVNULL
sipp.wait()
FDEVNULL.close()
return sipp.returncode
else:
print "Waiting SIPp (PID=" + str(sipp) + ") to exit.."
wait_cnt = 0
while True:
try:
wait_cnt = wait_cnt + 1
[pid_, ret_code] = os.waitpid(sipp, 0)
if sipp == pid_:
#print "SIPP returned ", ret_code
ret_code = ret_code >> 8
# Win specific: Close process handle
if (sys.platform == "win32"):
ctypes.windll.kernel32.CloseHandle(sipp)
return ret_code
except os.error:
if wait_cnt <= 5:
print "Retry ("+str(wait_cnt)+") waiting SIPp.."
else:
return -99
# Execute PJSUA flow
def exec_pjsua_expects(t, sipp):
# Get all PJSUA instances
ua = []
for ua_idx in range(len(PJSUA_INST_PARAM)):
ua.append(t.process[ua_idx])
ua_err_st = ""
while len(PJSUA_EXPECTS):
expect = PJSUA_EXPECTS.pop(0)
ua_idx = expect[0]
expect_st = expect[1]
send_cmd = resolve_driver_macros(expect[2])
# Handle exception in pjsua flow, to avoid zombie SIPp process
try:
if expect_st != "":
ua[ua_idx].expect(expect_st, raise_on_error = True)
if send_cmd != "":
ua[ua_idx].send(send_cmd)
except TestError, e:
ua_err_st = e.desc
break
except:
ua_err_st = "Unknown error"
break
# Need to poll here for handling these cases:
# - If there is no PJSUA EXPECT scenario, we must keep polling the stdout,
# otherwise PJSUA process may stuck (due to stdout pipe buffer full?).
# - last PJSUA_EXPECT contains a pjsua command that needs time to
# finish, for example "v" (re-INVITE), the SIPp XML scenario may expect
# that re-INVITE transaction to be completed and without stdout poll
# PJSUA process may stuck.
# Ideally the poll should be done contiunously until SIPp process is
# terminated.
# Update: now pjsua stdout is polled continuously by a dedicated thread,
# so the poll is no longer needed
#for ua_idx in range(len(ua)):
# ua[ua_idx].expect(inc_const.STDOUT_REFRESH, raise_on_error = False)
return ua_err_st
def sipp_err_to_str(err_code):
if err_code == 0:
return "All calls were successful"
elif err_code == 1:
return "At least one call failed"
elif err_code == 97:
return "exit on internal command. Calls may have been processed"
elif err_code == 99:
return "Normal exit without calls processed"
elif err_code == -1:
return "Fatal error (timeout)"
elif err_code == -2:
return "Fatal error binding a socket"
else:
return "Unknown error"
# Test body function
def TEST_FUNC(t):
sipp_ret_code = 0
ua_err_st = ""
sipp = start_sipp()
if not sipp:
raise TestError("Failed starting SIPp")
ua_err_st = exec_pjsua_expects(t, sipp)
sipp_ret_code = wait_sipp(sipp)
if ua_err_st != "":
raise TestError(ua_err_st)
if sipp_ret_code:
rc = ctypes.c_byte(sipp_ret_code).value
raise TestError("SIPp returned error " + str(rc) + ": " + sipp_err_to_str(rc))
# Here where it all comes together
test = TestParam(SIPP_SCEN_XML[:-4],
PJSUA_INST_PARAM,
TEST_FUNC)
| gpl-2.0 |
infra-structure/pinball | tests/pinball/repository/repository_test.py | 6 | 5251 | # Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation tests for configuration repository."""
import unittest
from pinball.repository.config import JobConfig
from pinball.repository.config import WorkflowScheduleConfig
from pinball.repository.repository import Repository
__author__ = 'Pawel Garbacki'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
_SCHEDULE_TEMPLATE = """{
"emails": [
"some_email@pinterest.com",
"some_other_email@pinterest.com"
],
"overrun_policy": "DELAY",
"recurrence": "%s",
"start_date": "2012-01-01",
"time": "00.00.01.000",
"workflow": "some_workflow"
}"""
_JOB_TEMPLATE = """{
"abort_timeout_sec": 20,
"emails": [
"some_email@pinterest.com",
"some_other_email@pinterest.com"
],
"is_condition": false,
"job": "some_job",
"max_attempts": %d,
"parents": [
"some_parent_job",
"some_other_parent_job"
],
"priority": 123,
"retry_delay_sec": 10,
"template": "some_template",
"template_params": {
"some_param": "some_value"
},
"warn_timeout_sec": 10,
"workflow": "some_workflow"
}"""
class FakeRepository(Repository):
def __init__(self):
self.configs = {}
def _get_config(self, path):
if path == '/workflow/some_workflow/schedule':
return _SCHEDULE_TEMPLATE % "1d"
elif path == '/workflow/some_workflow/job/some_job':
return _JOB_TEMPLATE % 10
assert False, 'unrecognized path %s' % path
def _put_config(self, path, content):
self.configs[path] = content
def _delete_config(self, path):
del self.configs[path]
def _list_directory(self, directory, allow_not_found):
if directory == '/workflow/':
return ['some_workflow/', 'some_other_workflow/']
elif directory == '/workflow/some_other_workflow/':
return ['job/']
elif directory == '/workflow/some_workflow/':
return ['job/']
elif directory == '/workflow/some_workflow/job/':
return ['some_job']
elif directory == '/workflow/some_other_workflow/job/':
return ['some_other_job', 'yet_another_job']
assert False, 'unrecognized directory %s' % directory
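# Path layout this fake repository exercises (inferred from the handlers
# above):
# /workflow/<workflow>/schedule
# /workflow/<workflow>/job/<job>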
class RepositoryTestCase(unittest.TestCase):
def setUp(self):
self._repository = FakeRepository()
def test_get_schedule(self):
schedule_config = self._repository.get_schedule('some_workflow')
self.assertEqual('some_workflow', schedule_config.workflow)
self.assertEqual('1d', schedule_config.recurrence)
def test_put_schedule(self):
schedule_config = WorkflowScheduleConfig.from_json(
_SCHEDULE_TEMPLATE % '1w')
self._repository.put_schedule(schedule_config)
self.assertEqual(1, len(self._repository.configs))
self.assertEqual(
_SCHEDULE_TEMPLATE % '1w',
self._repository.configs['/workflow/some_workflow/schedule'])
def test_delete_schedule(self):
schedule_config = WorkflowScheduleConfig.from_json(
_SCHEDULE_TEMPLATE % 100)
self._repository.put_schedule(schedule_config)
self._repository.delete_schedule('some_workflow')
self.assertEqual({}, self._repository.configs)
def test_get_job(self):
job_config = self._repository.get_job('some_workflow', 'some_job')
self.assertEqual('some_job', job_config.job)
self.assertEqual('some_workflow', job_config.workflow)
self.assertEqual(10, job_config.max_attempts)
def test_put_job(self):
job_config = JobConfig.from_json(_JOB_TEMPLATE % 100)
self._repository.put_job(job_config)
self.assertEqual(1, len(self._repository.configs))
self.assertEqual(
_JOB_TEMPLATE % 100,
self._repository.configs['/workflow/some_workflow/job/some_job'])
def test_delete_job(self):
job_config = JobConfig.from_json(_JOB_TEMPLATE % 100)
self._repository.put_job(job_config)
self._repository.delete_job('some_workflow', 'some_job')
self.assertEqual({}, self._repository.configs)
def test_get_workflow_names(self):
self.assertEqual(['some_workflow', 'some_other_workflow'],
self._repository.get_workflow_names())
def test_get_job_names(self):
self.assertEqual(['some_job'],
self._repository.get_job_names('some_workflow'))
self.assertEqual(['some_other_job', 'yet_another_job'],
self._repository.get_job_names('some_other_workflow'))
| apache-2.0 |
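The FakeRepository above isolates the repository logic from real storage by overriding the private _get/_put/_delete hooks with an in-memory dict. A minimal, self-contained sketch of that test-double pattern — all class and method names here are illustrative, not taken from pinball:
# Minimal sketch of the in-memory test-double pattern used above.
import unittest
class Store(object):
    """Interface whose storage layer subclasses override."""
    def put(self, path, content):
        self._put(path, content)
    def get(self, path):
        return self._get(path)
class FakeStore(Store):
    """Test double: keeps everything in a dict instead of a real backend."""
    def __init__(self):
        self.data = {}
    def _put(self, path, content):
        self.data[path] = content
    def _get(self, path):
        return self.data[path]
class FakeStoreTestCase(unittest.TestCase):
    def test_roundtrip(self):
        store = FakeStore()
        store.put('/workflow/w/schedule', '{"recurrence": "1d"}')
        self.assertEqual('{"recurrence": "1d"}',
                         store.get('/workflow/w/schedule'))
if __name__ == '__main__':
    unittest.main()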
BuildingLink/sentry | src/sentry/security/__init__.py | 3 | 1036 | from __future__ import absolute_import, print_function
import logging
from django.utils import timezone
from .emails import generate_security_email
logger = logging.getLogger('sentry.security')
def capture_security_activity(account, type, actor, ip_address, context=None,
send_email=True, current_datetime=None):
if current_datetime is None:
current_datetime = timezone.now()
logger_context = {
'ip_address': ip_address,
'user_id': account.id,
'actor_id': actor.id,
}
if type == 'mfa-removed' or type == 'mfa-added':
logger_context['authenticator_id'] = context['authenticator'].id
logger.info('user.{}'.format(type), extra=logger_context)
if send_email:
msg = generate_security_email(
account=account,
type=type,
actor=actor,
ip_address=ip_address,
context=context,
current_datetime=current_datetime,
)
msg.send_async([account.email])
| bsd-3-clause |
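capture_security_activity above threads per-event context into each log record through logging's `extra` keyword. A small stdlib-only sketch of that pattern, independent of sentry; the formatter field and logger name are assumptions for illustration:
# Sketch only: %(ip_address)s is resolved from the `extra` dict per record.
import logging
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(message)s ip=%(ip_address)s'))
demo_logger = logging.getLogger('security.demo')
demo_logger.addHandler(handler)
demo_logger.setLevel(logging.INFO)
demo_logger.info('user.mfa-added', extra={'ip_address': '127.0.0.1'})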
sjebbara/gensim | gensim/models/lsi_dispatcher.py | 33 | 7100 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s SIZE_OF_JOBS_QUEUE
Dispatcher process which orchestrates distributed LSI computations. Run this \
script only once, on any node in your cluster.
Example: python -m gensim.models.lsi_dispatcher
"""
from __future__ import with_statement
import os, sys, logging, threading, time
from six import iteritems, itervalues
try:
from Queue import Queue
except ImportError:
from queue import Queue
import Pyro4
from gensim import utils
logger = logging.getLogger("gensim.models.lsi_dispatcher")
# How many jobs (=chunks of N documents) to keep "pre-fetched" in a queue?
# A small number is usually enough, unless iteration over the corpus is very very
# slow (slower than the actual computation of LSI), in which case you can override
# this value from the command line, i.e. run "python ./lsi_dispatcher.py 100"
MAX_JOBS_QUEUE = 10
# timeout for the Queue object put/get blocking methods.
# it should really be infinity, but then keyboard interrupts don't work.
# so this is really just a hack, see http://bugs.python.org/issue1360
HUGE_TIMEOUT = 365 * 24 * 60 * 60 # one year
class Dispatcher(object):
"""
Dispatcher object that communicates and coordinates individual workers.
There should never be more than one dispatcher running at any one time.
"""
def __init__(self, maxsize=0):
"""
Note that the constructor does not fully initialize the dispatcher;
use the `initialize()` function to populate it with workers etc.
"""
self.maxsize = maxsize
self.workers = {}
self.callback = None # a pyro proxy to this object (unknown at init time, but will be set later)
def initialize(self, **model_params):
"""
`model_params` are parameters used to initialize individual workers (gets
handed all the way down to worker.initialize()).
"""
self.jobs = Queue(maxsize=self.maxsize)
self.lock_update = threading.Lock()
self._jobsdone = 0
self._jobsreceived = 0
# locate all available workers and store their proxies, for subsequent RMI calls
self.workers = {}
with utils.getNS() as ns:
self.callback = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher') # = self
for name, uri in iteritems(ns.list(prefix='gensim.lsi_worker')):
try:
worker = Pyro4.Proxy(uri)
workerid = len(self.workers)
# make time consuming methods work asynchronously
logger.info("registering worker #%i from %s" % (workerid, uri))
worker.initialize(workerid, dispatcher=self.callback, **model_params)
self.workers[workerid] = worker
except Pyro4.errors.PyroError:
logger.exception("unresponsive worker at %s, deleting it from the name server" % uri)
ns.remove(name)
if not self.workers:
raise RuntimeError('no workers found; run some lsi_worker scripts on your machines first!')
def getworkers(self):
"""
Return pyro URIs of all registered workers.
"""
return [worker._pyroUri for worker in itervalues(self.workers)]
def getjob(self, worker_id):
logger.info("worker #%i requesting a new job" % worker_id)
job = self.jobs.get(block=True, timeout=1)
logger.info("worker #%i got a new job (%i left)" % (worker_id, self.jobs.qsize()))
return job
def putjob(self, job):
self._jobsreceived += 1
self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)
logger.info("added a new job (len(queue)=%i items)" % self.jobs.qsize())
def getstate(self):
"""
Merge projections from across all workers and return the final projection.
"""
logger.info("end of input, assigning all remaining jobs")
logger.debug("jobs done: %s, jobs received: %s" % (self._jobsdone, self._jobsreceived))
while self._jobsdone < self._jobsreceived:
time.sleep(0.5) # check every half a second
# TODO: merge in parallel, so that we're done in `log_2(workers)` merges,
# and not `workers - 1` merges!
# but merging only takes place once, after all input data has been processed,
# so the overall effect would be small... compared to the amount of coding :-)
logger.info("merging states from %i workers" % len(self.workers))
workers = list(self.workers.items())
result = workers[0][1].getstate()
for workerid, worker in workers[1:]:
logger.info("pulling state from worker %s" % workerid)
result.merge(worker.getstate())
logger.info("sending out merged projection")
return result
def reset(self):
"""
Initialize all workers for a new decomposition.
"""
for workerid, worker in iteritems(self.workers):
logger.info("resetting worker %s" % workerid)
worker.reset()
worker.requestjob()
self._jobsdone = 0
self._jobsreceived = 0
@Pyro4.oneway
@utils.synchronous('lock_update')
def jobdone(self, workerid):
"""
A worker has finished its job. Log this event and then asynchronously
transfer control back to the worker.
In this way, control flow basically oscillates between dispatcher.jobdone()
and worker.requestjob().
"""
self._jobsdone += 1
logger.info("worker #%s finished job #%i" % (workerid, self._jobsdone))
worker = self.workers[workerid]
worker.requestjob() # tell the worker to ask for another job, asynchronously (one-way)
def jobsdone(self):
"""Wrap self._jobsdone, needed for remote access through proxies"""
return self._jobsdone
@Pyro4.oneway
def exit(self):
"""
Terminate all registered workers and then the dispatcher.
"""
for workerid, worker in iteritems(self.workers):
logger.info("terminating worker %s" % workerid)
worker.exit()
logger.info("terminating dispatcher")
os._exit(0) # exit the whole process (not just this thread ala sys.exit())
#endclass Dispatcher
def main():
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger.info("running %s" % " ".join(sys.argv))
program = os.path.basename(sys.argv[0])
# make sure we have enough cmd line parameters
if len(sys.argv) < 1:
print(globals()["__doc__"] % locals())
sys.exit(1)
if len(sys.argv) < 2:
maxsize = MAX_JOBS_QUEUE
else:
maxsize = int(sys.argv[1])
utils.pyro_daemon('gensim.lsi_dispatcher', Dispatcher(maxsize=maxsize))
logger.info("finished running %s" % program)
if __name__ == '__main__':
main()
| gpl-3.0 |
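Dispatcher above coordinates remote workers over Pyro and, in getstate(), simply waits until _jobsdone catches up with _jobsreceived. The same queue handshake in a local, Pyro-free sketch using only the standard library; the squaring "job" is illustrative:
# Local sketch of the dispatcher/worker queue handshake, without Pyro.
import threading
from queue import Queue
jobs = Queue(maxsize=10)
results = []
lock = threading.Lock()
def worker():
    while True:
        job = jobs.get()            # blocks, like Dispatcher.getjob()
        if job is None:             # sentinel: no more work
            break
        with lock:                  # bookkeeping, like jobdone() above
            results.append(job * job)
        jobs.task_done()
threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for job in range(20):
    jobs.put(job)                   # like Dispatcher.putjob()
jobs.join()                         # block until every queued job is done
for _ in threads:
    jobs.put(None)
for t in threads:
    t.join()
print(sorted(results))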
YOTOV-LIMITED/kuma | vendor/packages/logilab/common/sphinx_ext.py | 117 | 3329 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
from logilab.common.decorators import monkeypatch
from sphinx.ext import autodoc
class DocstringOnlyModuleDocumenter(autodoc.ModuleDocumenter):
objtype = 'docstring'
def format_signature(self):
pass
def add_directive_header(self, sig):
pass
def document_members(self, all_members=False):
pass
def resolve_name(self, modname, parents, path, base):
if modname is not None:
return modname, parents + [base]
return (path or '') + base, []
#autodoc.add_documenter(DocstringOnlyModuleDocumenter)
def setup(app):
app.add_autodocumenter(DocstringOnlyModuleDocumenter)
from sphinx.ext.autodoc import (ViewList, Options, AutodocReporter, nodes,
assemble_option_dict, nested_parse_with_titles)
@monkeypatch(autodoc.AutoDirective)
def run(self):
self.filename_set = set() # a set of dependent filenames
self.reporter = self.state.document.reporter
self.env = self.state.document.settings.env
self.warnings = []
self.result = ViewList()
# find out what documenter to call
objtype = self.name[4:]
doc_class = self._registry[objtype]
# process the options with the selected documenter's option_spec
self.genopt = Options(assemble_option_dict(
self.options.items(), doc_class.option_spec))
# generate the output
documenter = doc_class(self, self.arguments[0])
documenter.generate(more_content=self.content)
if not self.result:
return self.warnings
# record all filenames as dependencies -- this will at least
# partially make automatic invalidation possible
for fn in self.filename_set:
self.env.note_dependency(fn)
# use a custom reporter that correctly assigns lines to source
# filename/description and lineno
old_reporter = self.state.memo.reporter
self.state.memo.reporter = AutodocReporter(self.result,
self.state.memo.reporter)
if self.name in ('automodule', 'autodocstring'):
node = nodes.section()
# necessary so that the child nodes get the right source/line set
node.document = self.state.document
nested_parse_with_titles(self.state, self.result, node)
else:
node = nodes.paragraph()
node.document = self.state.document
self.state.nested_parse(self.result, 0, node)
self.state.memo.reporter = old_reporter
return self.warnings + node.children
| mpl-2.0 |
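The module above depends on logilab's @monkeypatch decorator to swap in a new AutoDirective.run at import time. A minimal sketch of how such a decorator can be built; this is an illustration, not logilab's exact implementation:
# Minimal sketch of a @monkeypatch decorator; not logilab's exact code.
def monkeypatch(klass, name=None):
    """Attach the decorated function to `klass` under its own name."""
    def decorator(func):
        setattr(klass, name or func.__name__, func)
        return func
    return decorator
class Greeter(object):
    def greet(self):
        return 'hello'
@monkeypatch(Greeter)
def greet(self):  # replaces Greeter.greet
    return 'patched hello'
print(Greeter().greet())  # -> 'patched hello'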
abhishek-ch/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/formtools/tests/wizard/wizardtests/tests.py | 116 | 16304 | from __future__ import unicode_literals
import os
from django import forms
from django.test import TestCase
from django.test.client import RequestFactory
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.formtools.wizard.views import CookieWizardView
from django.utils._os import upath
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = '__all__'
UserFormSet = forms.models.modelformset_factory(User, form=UserForm, extra=2)
class WizardTests(object):
urls = 'django.contrib.formtools.tests.wizard.wizardtests.urls'
def setUp(self):
self.testuser, created = User.objects.get_or_create(username='testuser1')
self.wizard_step_data[0]['form1-user'] = self.testuser.pk
def test_initial_call(self):
response = self.client.get(self.wizard_url)
wizard = response.context['wizard']
self.assertEqual(response.status_code, 200)
self.assertEqual(wizard['steps'].current, 'form1')
self.assertEqual(wizard['steps'].step0, 0)
self.assertEqual(wizard['steps'].step1, 1)
self.assertEqual(wizard['steps'].last, 'form4')
self.assertEqual(wizard['steps'].prev, None)
self.assertEqual(wizard['steps'].next, 'form2')
self.assertEqual(wizard['steps'].count, 4)
def test_form_post_error(self):
response = self.client.post(self.wizard_url, self.wizard_step_1_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
self.assertEqual(response.context['wizard']['form'].errors,
{'name': ['This field is required.'],
'user': ['This field is required.']})
def test_form_post_success(self):
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
wizard = response.context['wizard']
self.assertEqual(response.status_code, 200)
self.assertEqual(wizard['steps'].current, 'form2')
self.assertEqual(wizard['steps'].step0, 1)
self.assertEqual(wizard['steps'].prev, 'form1')
self.assertEqual(wizard['steps'].next, 'form3')
def test_form_stepback(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(self.wizard_url, {
'wizard_goto_step': response.context['wizard']['steps'].prev})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
def test_template_context(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
self.assertEqual(response.context.get('another_var', None), None)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
self.assertEqual(response.context.get('another_var', None), True)
# ticket #19025: `form` should be included in context
form = response.context_data['wizard']['form']
self.assertEqual(response.context_data['form'], form)
def test_form_finish(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
post_data = self.wizard_step_data[1]
post_data['form2-file1'] = open(upath(__file__), 'rb')
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form3')
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form4')
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
all_data = response.context['form_list']
with open(upath(__file__), 'rb') as f:
self.assertEqual(all_data[1]['file1'].read(), f.read())
all_data[1]['file1'].close()
del all_data[1]['file1']
self.assertEqual(all_data, [
{'name': 'Pony', 'thirsty': True, 'user': self.testuser},
{'address1': '123 Main St', 'address2': 'Djangoland'},
{'random_crap': 'blah blah'},
[{'random_crap': 'blah blah'},
{'random_crap': 'blah blah'}]])
def test_cleaned_data(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
post_data = self.wizard_step_data[1]
with open(upath(__file__), 'rb') as post_file:
post_data['form2-file1'] = post_file
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
all_data = response.context['all_cleaned_data']
with open(upath(__file__), 'rb') as f:
self.assertEqual(all_data['file1'].read(), f.read())
all_data['file1'].close()
del all_data['file1']
self.assertEqual(all_data, {
'name': 'Pony', 'thirsty': True, 'user': self.testuser,
'address1': '123 Main St', 'address2': 'Djangoland',
'random_crap': 'blah blah', 'formset-form4': [
{'random_crap': 'blah blah'},
{'random_crap': 'blah blah'}]})
def test_manipulated_data(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
post_data = self.wizard_step_data[1]
post_data['form2-file1'].close()
post_data['form2-file1'] = open(upath(__file__), 'rb')
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
self.client.cookies.pop('sessionid', None)
self.client.cookies.pop('wizard_cookie_contact_wizard', None)
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
def test_form_refresh(self):
response = self.client.get(self.wizard_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
post_data = self.wizard_step_data[1]
post_data['form2-file1'].close()
post_data['form2-file1'] = open(upath(__file__), 'rb')
response = self.client.post(self.wizard_url, post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form3')
response = self.client.post(self.wizard_url, self.wizard_step_data[2])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form4')
response = self.client.post(self.wizard_url, self.wizard_step_data[0])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(self.wizard_url, self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
@skipIfCustomUser
class SessionWizardTests(WizardTests, TestCase):
wizard_url = '/wiz_session/'
wizard_step_1_data = {
'session_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'session_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'session_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form4',
}
)
@skipIfCustomUser
class CookieWizardTests(WizardTests, TestCase):
wizard_url = '/wiz_cookie/'
wizard_step_1_data = {
'cookie_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'cookie_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'cookie_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form4',
}
)
@skipIfCustomUser
class WizardTestKwargs(TestCase):
wizard_url = '/wiz_other_template/'
wizard_step_1_data = {
'cookie_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'cookie_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'cookie_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form4',
}
)
urls = 'django.contrib.formtools.tests.wizard.wizardtests.urls'
def setUp(self):
self.testuser, created = User.objects.get_or_create(username='testuser1')
self.wizard_step_data[0]['form1-user'] = self.testuser.pk
def test_template(self):
templates = os.path.join(os.path.dirname(upath(__file__)), 'templates')
with self.settings(
TEMPLATE_DIRS=list(settings.TEMPLATE_DIRS) + [templates]):
response = self.client.get(self.wizard_url)
self.assertTemplateUsed(response, 'other_wizard_form.html')
class WizardTestGenericViewInterface(TestCase):
def test_get_context_data_inheritance(self):
class TestWizard(CookieWizardView):
"""
A subclass that implements ``get_context_data`` using the standard
protocol for generic views (accept only **kwargs).
See ticket #17148.
"""
def get_context_data(self, **kwargs):
context = super(TestWizard, self).get_context_data(**kwargs)
context['test_key'] = 'test_value'
return context
factory = RequestFactory()
view = TestWizard.as_view([forms.Form])
response = view(factory.get('/'))
self.assertEqual(response.context_data['test_key'], 'test_value')
def test_get_context_data_with_mixin(self):
class AnotherMixin(object):
def get_context_data(self, **kwargs):
context = super(AnotherMixin, self).get_context_data(**kwargs)
context['another_key'] = 'another_value'
return context
class TestWizard(AnotherMixin, CookieWizardView):
"""
A subclass that implements ``get_context_data`` using the standard
protocol for generic views (accept only **kwargs).
See ticket #17148.
"""
def get_context_data(self, **kwargs):
context = super(TestWizard, self).get_context_data(**kwargs)
context['test_key'] = 'test_value'
return context
factory = RequestFactory()
view = TestWizard.as_view([forms.Form])
response = view(factory.get('/'))
self.assertEqual(response.context_data['test_key'], 'test_value')
self.assertEqual(response.context_data['another_key'], 'another_value')
@skipIfCustomUser
class WizardFormKwargsOverrideTests(TestCase):
def setUp(self):
super(WizardFormKwargsOverrideTests, self).setUp()
self.rf = RequestFactory()
# Create two users so we can filter by is_staff when handing our
# wizard a queryset keyword argument.
self.normal_user = User.objects.create(username='test1', email='normal@example.com')
self.staff_user = User.objects.create(username='test2', email='staff@example.com', is_staff=True)
def test_instance_is_maintained(self):
self.assertEqual(2, User.objects.count())
queryset = User.objects.get(pk=self.staff_user.pk)
class InstanceOverrideWizard(CookieWizardView):
def get_form_kwargs(self, step):
return {'instance': queryset}
view = InstanceOverrideWizard.as_view([UserForm])
response = view(self.rf.get('/'))
form = response.context_data['wizard']['form']
self.assertNotEqual(form.instance.pk, None)
self.assertEqual(form.instance.pk, self.staff_user.pk)
self.assertEqual('staff@example.com', form.initial.get('email', None))
def test_queryset_is_maintained(self):
queryset = User.objects.filter(pk=self.staff_user.pk)
class QuerySetOverrideWizard(CookieWizardView):
def get_form_kwargs(self, step):
return {'queryset': queryset}
view = QuerySetOverrideWizard.as_view([UserFormSet])
response = view(self.rf.get('/'))
formset = response.context_data['wizard']['form']
self.assertNotEqual(formset.queryset, None)
self.assertEqual(formset.initial_form_count(), 1)
self.assertEqual(['staff@example.com'],
list(formset.queryset.values_list('email', flat=True)))
| apache-2.0 |
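WizardTestGenericViewInterface above verifies that get_context_data composes across a mixin chain through cooperative super() calls. The same pattern reduced to plain classes with no Django dependency; all names here are illustrative:
# Plain-Python sketch of the cooperative get_context_data pattern tested above.
class Base(object):
    def get_context_data(self, **kwargs):
        return dict(kwargs)
class AnotherMixin(Base):
    def get_context_data(self, **kwargs):
        context = super(AnotherMixin, self).get_context_data(**kwargs)
        context['another_key'] = 'another_value'
        return context
class View(AnotherMixin):
    def get_context_data(self, **kwargs):
        context = super(View, self).get_context_data(**kwargs)
        context['test_key'] = 'test_value'
        return context
print(View().get_context_data())
# -> {'another_key': 'another_value', 'test_key': 'test_value'}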
fishroot/nemoa | nemoa/dataset/commons/labels/__init__.py | 1 | 2018 | # -*- coding: utf-8 -*-
__author__ = 'Patrick Michl'
__email__ = 'frootlab@gmail.com'
__license__ = 'GPLv3'
import nemoa
import numpy
import importlib
def convert(list, input, output = None, filter = False):
generic_types = ['number', 'string', 'float']
if isinstance(list, (numpy.ndarray)):
list = list.tolist()
input_dtype = 'nparray'
else: input_dtype = 'list'
# 'input'
if input in generic_types:
input_class = 'generic'
input_format = input
elif ':' in input:
input_class = input.lower().split(':')[0].strip()
input_format = input.lower().split(':')[1].strip()
else: raise Warning("""could not convert list:
unknown input format '%s'.""" % input)
# 'output'
if output in generic_types:
output_class = 'generic'
output_format = output
elif not output:
output_class = input_class
output_format = None
elif ':' in input:
output_class = output.lower().split(':')[0].strip()
output_format = output.lower().split(':')[1].strip()
else: raise Warning("""could not convert list:
unknown output format '%s'.""" % output)
# 'input' vs 'output'
if input_class != output_class:
raise Warning("'%s' can not be converted to '%s'"
% (input_class, output_class))
# trivial cases
if input_class == 'generic' or input_format == output_format:
if input_dtype == 'nparray':
return numpy.asarray(list), numpy.asarray([])
else: return list, []
# import annotation module
module_name = input_class.lower()
module = importlib.import_module('nemoa.dataset.commons.labels.'
+ module_name)
converter = getattr(module, module_name)()
output_list, output_lost = converter.convert_list(
list, input_format, output_format, filter)
if input_dtype == 'nparray':
return numpy.asarray(output_list), numpy.asarray(output_lost)
return output_list, output_lost
| gpl-3.0 |
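convert() above locates its annotation backend at runtime via importlib.import_module plus getattr. A condensed sketch of that plugin-lookup idiom, pointed at a stdlib module so it runs without nemoa installed:
# Sketch of runtime plugin lookup via importlib, as used in convert() above.
import importlib
def load_plugin(dotted_path, attr_name):
    """Import `dotted_path` and return its attribute `attr_name`."""
    module = importlib.import_module(dotted_path)
    return getattr(module, attr_name)
# Demo against the stdlib so the sketch is runnable without nemoa installed.
sha256 = load_plugin('hashlib', 'sha256')
print(sha256(b'gene label').hexdigest()[:12])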
nathan-hoad/python-icap | icap/parsing.py | 2 | 10383 | import gzip
from collections import namedtuple
from io import BytesIO, SEEK_END
from werkzeug import cached_property
from .utils import parse_encapsulated_field, convert_offsets_to_sizes
from .errors import (InvalidEncapsulatedHeadersError, MalformedRequestError,
abort)
__all__ = [
'ChunkedMessageParser',
'ICAPRequestParser',
'HTTPMessageParser',
]
# who could resist a class name like this?
BodyPart = namedtuple('BodyPart', 'content header')
class ParseState(object):
empty = 1
started = 2
headers_complete = 3
body_complete = 4
class ChunkParsingError(Exception):
pass
class ChunkedMessageParser(object):
def __init__(self):
from .models import HeadersDict
self.sline = None
self.headers = HeadersDict()
self.state = ParseState.empty
self.body = BytesIO()
self.chunks = []
def started(self, set=False):
if set:
self.state = ParseState.started
return self.state != ParseState.empty
def headers_complete(self, set=False):
if set:
self.state = ParseState.headers_complete
self.on_headers_complete()
return self.state > ParseState.started
def on_headers_complete(self):
pass
def on_complete(self):
pass
def complete(self, set=False):
if set:
self.state = ParseState.body_complete
self.on_complete()
return self.state == ParseState.body_complete
def feed_line(self, line):
if isinstance(line, bytes):
line = line.decode('utf8')
# FIXME: non-crlf-endings
if not line.endswith('\r\n'):
return False
if not self.started():
self.handle_status_line(line)
elif not self.headers_complete():
self.handle_header(line)
return True
def feed_body(self, data):
self.body.write(data)
self.body.seek(0)
try:
while not self.complete():
self.attempt_body_parse()
except ChunkParsingError:
pass
finally:
self.body.seek(0, SEEK_END)
@classmethod
def from_bytes(cls, bytes):
self = cls()
stream = BytesIO(bytes)
while not self.headers_complete():
line = stream.readline()
if not self.feed_line(line):
raise MalformedRequestError('Line not valid: %r' % line)
s = stream.read()
if s:
self.feed_body(s)
else:
self.complete(True)
assert self.complete()
return self
def attempt_body_parse(self):
raise NotImplementedError()
def handle_status_line(self, sline):
assert not self.started()
self.started(True)
self.sline = parse_start_line(sline.strip())
def handle_header(self, header):
# FIXME: non-crlf-endings
if not header.replace('\r\n', ''):
self.headers_complete(True)
return
header = header.replace('\r\n', '')
# multiline headers
if header.startswith(('\t', ' ')):
k = list(self.headers.keys())[-1]
from collections import OrderedDict
raw_v = OrderedDict.__getitem__(self.headers, k)
k, v = raw_v[-1]
# section 4.2 says that we MAY reduce whitespace down to a single
# character, so let's do it.
v = ''.join((v, header.lstrip()))
raw_v[-1] = k, v
else:
k, v = header.split(':', 1)
k = k.rstrip()
v = v.lstrip()
self.headers[k] = v
@cached_property
def is_request(self):
from .models import RequestLine
return isinstance(self.sline, RequestLine)
@cached_property
def is_response(self):
return not self.is_request
class ICAPRequestParser(ChunkedMessageParser):
def on_headers_complete(self):
self.encapsulated_parts = list(
convert_offsets_to_sizes(self.encapsulated_header).items())
parts = self.encapsulated_header
missing_headers = ((self.is_reqmod and 'req-hdr' not in parts) or
(self.is_respmod and 'res-hdr' not in parts))
if missing_headers:
abort(418)
self.request_parser = HTTPMessageParser()
self.response_parser = HTTPMessageParser()
def attempt_body_parse(self):
name, size = self.encapsulated_parts[0]
data = self.body.read(size)
if size > 0 and len(data) != size:
raise ChunkParsingError
if size == -1 and not data:
raise ChunkParsingError
if size == 0:
assert name == 'null-body'
assert not data
if name in ('req-hdr', 'req-body'):
parser = self.request_parser
elif name in ('res-hdr', 'res-body'):
parser = self.response_parser
if name in ('req-hdr', 'res-hdr'):
self.encapsulated_parts.pop(0)
buffer = BytesIO(data)
for line in buffer:
parser.feed_line(line)
assert parser.headers_complete()
elif name in ('req-body', 'res-body'):
self.body.seek(0)
self.body.truncate()
assert parser.headers_complete()
parser.feed_body(data)
if parser.complete():
self.encapsulated_parts.pop(0)
else:
raise ChunkParsingError
else:
if self.is_reqmod:
parser = self.request_parser
else:
parser = self.response_parser
assert parser.headers_complete()
assert name == 'null-body'
self.request_parser.complete(True)
self.response_parser.complete(True)
self.body = BytesIO(self.body.read())
def complete(self, set=False):
if set:
super().complete(set)
return super().complete() or (self.headers_complete() and (
(self.is_reqmod and self.request_parser.complete()) or
(self.is_respmod and self.response_parser.complete()) or
(self.is_options)
))
@classmethod
def from_bytes(cls, bytes):
self = super().from_bytes(bytes)
return self.to_icap()
def to_icap(self):
from .models import ICAPRequest
return ICAPRequest.from_parser(self)
def handle_status_line(self, sline):
super(ICAPRequestParser, self).handle_status_line(sline)
if self.sline.method not in {'OPTIONS', 'REQMOD', 'RESPMOD'}:
abort(501)
@cached_property
def encapsulated_header(self):
try:
e = self.headers['encapsulated']
except KeyError:
if self.is_request and self.is_options:
e = 'null-body=0'
else:
raise InvalidEncapsulatedHeadersError(
'%s object is missing encapsulated header' %
(self.__class__.__name__))
parsed = parse_encapsulated_field(e)
return parsed
@cached_property
def is_reqmod(self):
return self.sline.method == 'REQMOD'
@cached_property
def is_respmod(self):
return self.sline.method == 'RESPMOD'
@cached_property
def is_options(self):
return self.sline.method == 'OPTIONS'
class HTTPMessageParser(ChunkedMessageParser):
payload = b''
def attempt_body_parse(self):
while True:
chunk = self.attempt_parse_chunk()
if chunk is None:
assert self.complete()
break
self.chunks.append(chunk)
@cached_property
def is_gzipped(self):
return 'gzip' in self.headers.get('Content-Encoding', '')
def on_complete(self):
payload = b''.join(b.content for b in self.chunks)
if self.is_gzipped:
# FIXME: this should be done in a thread
payload = gzip.decompress(payload)
self.payload = payload
def attempt_parse_chunk(self):
line = self.body.readline()
# FIXME: non-crlf-endings
if not line.endswith(b'\r\n'):
raise ChunkParsingError
else:
try:
size, header = line.split(b';', 1)
except ValueError:
size = line
header = b''
size = int(size, 16)
if size:
# FIXME: non-crlf-endings
data = self.body.read(size+2) # +2 for CRLF
if len(data) != size+2:
raise ChunkParsingError
# reset the stream so we don't create the same chunk over and over
self.body = BytesIO(self.body.read())
# FIXME: non-crlf-endings
chunk = BodyPart(data[:-2], header.strip())
return chunk
else:
# end of stream, make sure we have trailing newline
s = self.body.readline()
# FIXME: non-crlf-endings
if s != b'\r\n':
raise ChunkParsingError
self.complete(True)
@classmethod
def from_bytes(cls, bytes):
self = super().from_bytes(bytes)
return self.to_http()
def to_http(self):
from .models import HTTPRequest, HTTPResponse
if self.is_request:
cls = HTTPRequest
else:
cls = HTTPResponse
return cls.from_parser(self)
def parse_start_line(sline):
"""Parse the first line from an HTTP/ICAP message and return an instance of
StatusLine or RequestLine.
Will raise MalformedRequestError if there was an error during parsing.
"""
from .models import StatusLine, RequestLine
try:
method, uri, version = parts = sline.split(' ', 2)
except ValueError:
raise MalformedRequestError('Malformed start line: %r' % sline)
if method.upper().startswith(('HTTP', 'ICAP')):
version, code, reason = parts
try:
return StatusLine(version.upper(), code, reason)
except ValueError:
raise MalformedRequestError('Malformed status line: %r' % sline)
else:
return RequestLine(method.upper(), uri, version.upper())
| bsd-3-clause |
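HTTPMessageParser.attempt_parse_chunk above walks HTTP chunked framing: a hex size line with an optional ';' extension, the payload plus CRLF, and a terminating zero-size chunk. A compact, self-contained decoder for the same wire format, assuming well-formed CRLF endings just as the parser above does:
# Compact decoder for the HTTP chunked framing handled above (a sketch).
from io import BytesIO
def decode_chunked(stream):
    """Yield chunk payloads until the terminating zero-size chunk."""
    while True:
        size_line = stream.readline().rstrip(b'\r\n')
        size = int(size_line.split(b';', 1)[0], 16)  # ';' starts extensions
        if size == 0:
            stream.readline()  # trailing CRLF after the last chunk
            return
        data = stream.read(size)
        stream.read(2)         # CRLF that terminates every chunk
        yield data
body = BytesIO(b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n')
print(b''.join(decode_chunked(body)))  # -> b'Wikipedia'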
burzillibus/RobHome | venv/lib/python2.7/site-packages/chardet/__init__.py | 1778 | 1295 | ######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
__version__ = "2.3.0"
from sys import version_info
def detect(aBuf):
if ((version_info < (3, 0) and isinstance(aBuf, unicode)) or
(version_info >= (3, 0) and not isinstance(aBuf, bytes))):
raise ValueError('Expected a bytes object, not a unicode object')
from . import universaldetector
u = universaldetector.UniversalDetector()
u.reset()
u.feed(aBuf)
u.close()
return u.result
| mit |
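A short usage sketch for the detect() entry point above; the exact encoding and confidence returned depend on the detector's heuristics:
# Usage sketch for chardet.detect(); assumes the package above is importable.
import chardet
raw = 'déjà vu'.encode('latin-1')  # bytes in an encoding we pretend not to know
result = chardet.detect(raw)      # dict with 'encoding' and 'confidence' keys
print(result['encoding'], result['confidence'])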
ngokevin/cyder | .virtualenv/lib/python2.6/site-packages/pip-1.0.2-py2.6.egg/pip/req.py | 7 | 64177 | import sys
import os
import shutil
import re
import zipfile
import pkg_resources
import tempfile
from pip.locations import bin_py, running_under_virtualenv
from pip.exceptions import InstallationError, UninstallationError
from pip.vcs import vcs
from pip.log import logger
from pip.util import display_path, rmtree
from pip.util import ask, backup_dir
from pip.util import is_installable_dir, is_local, dist_is_local
from pip.util import renames, normalize_path, egg_link_path
from pip.util import make_path_relative
from pip import call_subprocess
from pip.backwardcompat import (any, copytree, urlparse, urllib,
ConfigParser, string_types, HTTPError,
FeedParser, get_python_version,
b)
from pip.index import Link
from pip.locations import build_prefix
from pip.download import (get_file_content, is_url, url_to_path,
path_to_url, is_archive_file,
unpack_vcs_link, is_vcs_url, is_file_url,
unpack_file_url, unpack_http_url)
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
url=None, update=True):
if isinstance(req, string_types):
req = pkg_resources.Requirement.parse(req)
self.req = req
self.comes_from = comes_from
self.source_dir = source_dir
self.editable = editable
self.url = url
self._egg_info_path = None
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None
# This hold the pkg_resources.Distribution object if this requirement
# conflicts with another installed distribution:
self.conflicts_with = None
self._temp_build_dir = None
self._is_bundle = None
# True if the editable should be updated:
self.update = update
# Set to True after successful installation
self.install_succeeded = None
# UninstallPathSet of uninstalled distribution (for possible rollback)
self.uninstalled = None
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None):
name, url = parse_editable(editable_req, default_vcs)
if url.startswith('file:'):
source_dir = url_to_path(url)
else:
source_dir = None
return cls(name, comes_from, source_dir=source_dir, editable=True, url=url)
@classmethod
def from_line(cls, name, comes_from=None):
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
"""
url = None
name = name.strip()
req = None
path = os.path.normpath(os.path.abspath(name))
link = None
if is_url(name):
link = Link(name)
elif os.path.isdir(path) and (os.path.sep in name or name.startswith('.')):
if not is_installable_dir(path):
raise InstallationError("Directory %r is not installable. File 'setup.py' not found." % name)
link = Link(path_to_url(name))
elif is_archive_file(path):
if not os.path.isfile(path):
logger.warn('Requirement %r looks like a filename, but the file does not exist', name)
link = Link(path_to_url(name))
# If the line has an egg= definition, but isn't editable, pull the requirement out.
# Otherwise, assume the name is the req for the non URL/path/archive case.
if link and req is None:
url = link.url_fragment
req = link.egg_fragment
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', url):
url = path_to_url(os.path.normpath(os.path.abspath(link.path)))
else:
req = name
return cls(req, comes_from, url=url)
def __str__(self):
if self.req:
s = str(self.req)
if self.url:
s += ' from %s' % self.url
else:
s = self.url
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir, unpack=True):
if self._temp_build_dir is not None:
return self._temp_build_dir
if self.req is None:
self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
self._ideal_build_dir = build_dir
return self._temp_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
# FIXME: Is there a better place to create the build_dir? (hg and bzr need this)
if not os.path.exists(build_dir):
_make_build_dir(build_dir)
return os.path.join(build_dir, name)
def correct_build_location(self):
"""If the build location was a temporary directory, this will move it
to a new more permanent location"""
if self.source_dir is not None:
return
assert self.req is not None
assert self._temp_build_dir
old_location = self._temp_build_dir
new_build_dir = self._ideal_build_dir
del self._ideal_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
new_location = os.path.join(new_build_dir, name)
if not os.path.exists(new_build_dir):
logger.debug('Creating directory %s' % new_build_dir)
_make_build_dir(new_build_dir)
if os.path.exists(new_location):
raise InstallationError(
'A package already exists in %s; please remove it to continue'
% display_path(new_location))
logger.debug('Moving package %s from %s to new location %s'
% (self, display_path(old_location), display_path(new_location)))
shutil.move(old_location, new_location)
self._temp_build_dir = new_location
self.source_dir = new_location
self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return self.req.project_name
@property
def url_name(self):
if self.req is None:
return None
return urllib.quote(self.req.unsafe_name)
@property
def setup_py(self):
return os.path.join(self.source_dir, 'setup.py')
def run_egg_info(self, force_root_egg_info=False):
assert self.source_dir
if self.name:
logger.notify('Running setup.py egg_info for package %s' % self.name)
else:
logger.notify('Running setup.py egg_info for package from %s' % self.url)
logger.indent += 2
try:
script = self._run_setup_py
script = script.replace('__SETUP_PY__', repr(self.setup_py))
script = script.replace('__PKG_NAME__', repr(self.name))
# We can't put the .egg-info files at the root, because then the source code will be mistaken
# for an installed egg, causing problems
if self.editable or force_root_egg_info:
egg_base_option = []
else:
egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info')
if not os.path.exists(egg_info_dir):
os.makedirs(egg_info_dir)
egg_base_option = ['--egg-base', 'pip-egg-info']
call_subprocess(
[sys.executable, '-c', script, 'egg_info'] + egg_base_option,
cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False,
command_level=logger.VERBOSE_DEBUG,
command_desc='python setup.py egg_info')
finally:
logger.indent -= 2
if not self.req:
self.req = pkg_resources.Requirement.parse(
"%(Name)s==%(Version)s" % self.pkg_info())
self.correct_build_location()
## FIXME: This is a lame hack, entirely for PasteScript which has
## a self-provided entry point that causes this awkwardness
_run_setup_py = """
__file__ = __SETUP_PY__
from setuptools.command import egg_info
def replacement_run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in egg_info.iter_entry_points('egg_info.writers'):
# require=False is the change we're making:
writer = ep.load(require=False)
if writer:
writer(self, ep.name, egg_info.os.path.join(self.egg_info,ep.name))
self.find_sources()
egg_info.egg_info.run = replacement_run
exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))
"""
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
fp = open(filename, 'r')
data = fp.read()
fp.close()
return data
def egg_info_path(self, filename):
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.source_dir, 'pip-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = []
for root, dirs, files in os.walk(base):
for dir in vcs.dirnames:
if dir in dirs:
dirs.remove(dir)
for dir in dirs:
# Don't search in anything that looks like a virtualenv environment
if (os.path.exists(os.path.join(root, dir, 'bin', 'python'))
or os.path.exists(os.path.join(root, dir, 'Scripts', 'Python.exe'))):
dirs.remove(dir)
# Also don't search through tests
if dir == 'test' or dir == 'tests':
dirs.remove(dir)
filenames.extend([os.path.join(root, dir)
for dir in dirs])
filenames = [f for f in filenames if f.endswith('.egg-info')]
if not filenames:
raise InstallationError('No files/directories in %s (from %s)' % (base, filename))
assert filenames, "No files/directories in %s (from %s)" % (base, filename)
# if we have more than one match, we pick the toplevel one. This can
# easily be the case if there is a dist folder which contains an
# extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(key=lambda x: x.count(os.path.sep) +
(os.path.altsep and
x.count(os.path.altsep) or 0))
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def egg_info_lines(self, filename):
data = self.egg_info_data(filename)
if not data:
return []
result = []
for line in data.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
result.append(line)
return result
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warn('No PKG-INFO file found in %s' % display_path(self.egg_info_path('PKG-INFO')))
p.feed(data or '')
return p.close()
@property
def dependency_links(self):
return self.egg_info_lines('dependency_links.txt')
_requirements_section_re = re.compile(r'\[(.*?)\]')
def requirements(self, extras=()):
in_extra = None
for line in self.egg_info_lines('requires.txt'):
match = self._requirements_section_re.match(line)
if match:
in_extra = match.group(1)
continue
if in_extra and in_extra not in extras:
# Skip requirement for an extra we aren't requiring
continue
yield line
@property
def absolute_versions(self):
for qualifier, version in self.req.specs:
if qualifier == '==':
yield version
@property
def installed_version(self):
return self.pkg_info()['version']
def assert_source_matches_version(self):
assert self.source_dir
if self.comes_from is None:
# We don't check the versions of things explicitly installed.
# This makes, e.g., "pip Package==dev" possible
return
version = self.installed_version
if version not in self.req:
logger.fatal(
'Source in %s has the version %s, which does not match the requirement %s'
% (display_path(self.source_dir), version, self))
raise InstallationError(
'Source in %s has version %s that conflicts with %s'
% (display_path(self.source_dir), version, self))
else:
logger.debug('Source in %s has version %s, which satisfies requirement %s'
% (display_path(self.source_dir), version, self))
def update_editable(self, obtain=True):
if not self.url:
logger.info("Cannot update repository at %s; repository location is unknown" % self.source_dir)
return
assert self.editable
assert self.source_dir
if self.url.startswith('file:'):
# Static paths don't get updated
return
assert '+' in self.url, "bad url: %r" % self.url
if not self.update:
return
vc_type, url = self.url.split('+', 1)
backend = vcs.get_backend(vc_type)
if backend:
vcs_backend = backend(self.url)
if obtain:
vcs_backend.obtain(self.source_dir)
else:
vcs_backend.export(self.source_dir)
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.url, vc_type))
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError("Cannot uninstall requirement %s, not installed" % (self.name,))
dist = self.satisfied_by or self.conflicts_with
paths_to_remove = UninstallPathSet(dist)
pip_egg_info_path = os.path.join(dist.location,
dist.egg_name()) + '.egg-info'
# workaround for http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367
debian_egg_info_path = pip_egg_info_path.replace(
'-py%s' % pkg_resources.PY_MAJOR, '')
easy_install_egg = dist.egg_name() + '.egg'
develop_egg_link = egg_link_path(dist)
pip_egg_info_exists = os.path.exists(pip_egg_info_path)
debian_egg_info_exists = os.path.exists(debian_egg_info_path)
if pip_egg_info_exists or debian_egg_info_exists:
# package installed by pip
if pip_egg_info_exists:
egg_info_path = pip_egg_info_path
else:
egg_info_path = debian_egg_info_path
paths_to_remove.add(egg_info_path)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata('installed-files.txt').splitlines():
path = os.path.normpath(os.path.join(egg_info_path, installed_file))
paths_to_remove.add(path)
if dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
elif dist.location.endswith(easy_install_egg):
# package installed by easy_install
paths_to_remove.add(dist.location)
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif os.path.isfile(develop_egg_link):
# develop egg
fh = open(develop_egg_link, 'r')
link_pointer = os.path.normcase(fh.readline().strip())
fh.close()
assert (link_pointer == dist.location), 'Egg-link %s does not match installed location of %s (at %s)' % (link_pointer, self.name, dist.location)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
paths_to_remove.add(os.path.join(bin_py, script))
if sys.platform == 'win32':
paths_to_remove.add(os.path.join(bin_py, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
config = ConfigParser.SafeConfigParser()
config.readfp(FakeFile(dist.get_metadata_lines('entry_points.txt')))
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
paths_to_remove.add(os.path.join(bin_py, name))
if sys.platform == 'win32':
paths_to_remove.add(os.path.join(bin_py, name) + '.exe')
paths_to_remove.add(os.path.join(bin_py, name) + '.exe.manifest')
paths_to_remove.add(os.path.join(bin_py, name) + '-script.py')
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove
def rollback_uninstall(self):
if self.uninstalled:
self.uninstalled.rollback()
else:
logger.error("Can't rollback %s, nothing uninstalled."
% (self.project_name,))
def commit_uninstall(self):
if self.uninstalled:
self.uninstalled.commit()
else:
logger.error("Can't commit %s, nothing uninstalled."
% (self.project_name,))
def archive(self, build_dir):
assert self.source_dir
create_archive = True
archive_name = '%s-%s.zip' % (self.name, self.installed_version)
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask('The file %s exists. (i)gnore, (w)ipe, (b)ackup '
% display_path(archive_path), ('i', 'w', 'b'))
if response == 'i':
create_archive = False
elif response == 'w':
logger.warn('Deleting %s' % display_path(archive_path))
os.remove(archive_path)
elif response == 'b':
dest_file = backup_dir(archive_path)
logger.warn('Backing up %s to %s'
% (display_path(archive_path), display_path(dest_file)))
shutil.move(archive_path, dest_file)
if create_archive:
zip = zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED)
dir = os.path.normcase(os.path.abspath(self.source_dir))
for dirpath, dirnames, filenames in os.walk(dir):
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
zipdir.external_attr = 0x1ED << 16 # 0o755
zip.writestr(zipdir, '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, self.name + '/' + name)
zip.close()
logger.indent -= 2
logger.notify('Saved %s' % display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix+os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix))
name = name[len(prefix)+1:]
name = name.replace(os.path.sep, '/')
return name
def install(self, install_options, global_options=()):
if self.editable:
self.install_editable(install_options, global_options)
return
temp_location = tempfile.mkdtemp('-record', 'pip-')
record_filename = os.path.join(temp_location, 'install-record.txt')
try:
install_args = [
sys.executable, '-c',
"import setuptools;__file__=%r;"\
"exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py] +\
list(global_options) + [
'install',
'--single-version-externally-managed',
'--record', record_filename]
if running_under_virtualenv():
## FIXME: I'm not sure if this is a reasonable location; probably not
## but we can't put it in the default location, as that is a virtualenv symlink that isn't writable
install_args += ['--install-headers',
os.path.join(sys.prefix, 'include', 'site',
'python' + get_python_version())]
logger.notify('Running setup.py install for %s' % self.name)
logger.indent += 2
try:
call_subprocess(install_args + install_options,
cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False)
finally:
logger.indent -= 2
if not os.path.exists(record_filename):
logger.notify('Record file %s not found' % record_filename)
return
self.install_succeeded = True
f = open(record_filename)
for line in f:
line = line.strip()
if line.endswith('.egg-info'):
egg_info_dir = line
break
else:
logger.warn('Could not find .egg-info directory in install record for %s' % self)
## FIXME: put the record somewhere
## FIXME: should this be an error?
return
f.close()
new_lines = []
f = open(record_filename)
for line in f:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(make_path_relative(filename, egg_info_dir))
f.close()
f = open(os.path.join(egg_info_dir, 'installed-files.txt'), 'w')
f.write('\n'.join(new_lines)+'\n')
f.close()
finally:
if os.path.exists(record_filename):
os.remove(record_filename)
os.rmdir(temp_location)
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if self.is_bundle or os.path.exists(self.delete_marker_filename):
logger.info('Removing source in %s' % self.source_dir)
if self.source_dir:
rmtree(self.source_dir)
self.source_dir = None
if self._temp_build_dir and os.path.exists(self._temp_build_dir):
rmtree(self._temp_build_dir)
self._temp_build_dir = None
def install_editable(self, install_options, global_options=()):
logger.notify('Running setup.py develop for %s' % self.name)
logger.indent += 2
try:
## FIXME: should we do --install-headers here too?
call_subprocess(
[sys.executable, '-c',
"import setuptools; __file__=%r; exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py]
+ list(global_options) + ['develop', '--no-deps'] + list(install_options),
cwd=self.source_dir, filter_stdout=self._filter_install,
show_stdout=False)
finally:
logger.indent -= 2
self.install_succeeded = True
def _filter_install(self, line):
level = logger.NOTIFY
for regex in [r'^running .*', r'^writing .*', '^creating .*', '^[Cc]opying .*',
r'^reading .*', r"^removing .*\.egg-info' \(and everything under it\)$",
r'^byte-compiling ',
# Not sure what this warning is, but it seems harmless:
r"^warning: manifest_maker: standard file '-c' not found$"]:
if re.search(regex, line.strip()):
level = logger.INFO
break
return (level, line)
def check_if_exists(self):
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.conflicts_with appropriately."""
if self.req is None:
return False
try:
self.satisfied_by = pkg_resources.get_distribution(self.req)
except pkg_resources.DistributionNotFound:
return False
except pkg_resources.VersionConflict:
self.conflicts_with = pkg_resources.get_distribution(self.req.project_name)
return True
@property
def is_bundle(self):
if self._is_bundle is not None:
return self._is_bundle
base = self._temp_build_dir
if not base:
## FIXME: this doesn't seem right:
return False
self._is_bundle = (os.path.exists(os.path.join(base, 'pip-manifest.txt'))
or os.path.exists(os.path.join(base, 'pyinstall-manifest.txt')))
return self._is_bundle
def bundle_requirements(self):
for dest_dir in self._bundle_editable_dirs:
package = os.path.basename(dest_dir)
## FIXME: svnism:
for vcs_backend in vcs.backends:
url = rev = None
vcs_bundle_file = os.path.join(
dest_dir, vcs_backend.bundle_file)
if os.path.exists(vcs_bundle_file):
vc_type = vcs_backend.name
fp = open(vcs_bundle_file)
content = fp.read()
fp.close()
url, rev = vcs_backend().parse_vcs_bundle_file(content)
break
if url:
url = '%s+%s@%s' % (vc_type, url, rev)
else:
url = None
yield InstallRequirement(
package, self, editable=True, url=url,
update=False, source_dir=dest_dir)
for dest_dir in self._bundle_build_dirs:
package = os.path.basename(dest_dir)
yield InstallRequirement(
package, self,
source_dir=dest_dir)
def move_bundle_files(self, dest_build_dir, dest_src_dir):
base = self._temp_build_dir
assert base
src_dir = os.path.join(base, 'src')
build_dir = os.path.join(base, 'build')
bundle_build_dirs = []
bundle_editable_dirs = []
for source_dir, dest_dir, dir_collection in [
(src_dir, dest_src_dir, bundle_editable_dirs),
(build_dir, dest_build_dir, bundle_build_dirs)]:
if os.path.exists(source_dir):
for dirname in os.listdir(source_dir):
dest = os.path.join(dest_dir, dirname)
dir_collection.append(dest)
if os.path.exists(dest):
logger.warn('The directory %s (containing package %s) already exists; cannot move source from bundle %s'
% (dest, dirname, self))
continue
if not os.path.exists(dest_dir):
logger.info('Creating directory %s' % dest_dir)
os.makedirs(dest_dir)
shutil.move(os.path.join(source_dir, dirname), dest)
if not os.listdir(source_dir):
os.rmdir(source_dir)
self._temp_build_dir = None
self._bundle_build_dirs = bundle_build_dirs
self._bundle_editable_dirs = bundle_editable_dirs
@property
def delete_marker_filename(self):
assert self.source_dir
return os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
class Requirements(object):
def __init__(self):
self._keys = []
self._dict = {}
def keys(self):
return self._keys
def values(self):
values_list = []
for key in self._keys:
values_list.append(self._dict[key])
return values_list
def __contains__(self, item):
return item in self._keys
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __getitem__(self, key):
return self._dict[key]
def __repr__(self):
        values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
return 'Requirements({%s})' % ', '.join(values)
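# Illustrative sketch (not part of the original source): Requirements is a
# minimal ordered mapping, so iteration follows insertion order. Hypothetical
# usage with placeholder values:
#
#     reqs = Requirements()
#     reqs['Flask'] = 'req-a'
#     reqs['Jinja2'] = 'req-b'
#     assert reqs.keys() == ['Flask', 'Jinja2']
#     assert 'Flask' in reqs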
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, download_cache=None,
upgrade=False, ignore_installed=False,
ignore_dependencies=False):
self.build_dir = build_dir
self.src_dir = src_dir
self.download_dir = download_dir
self.download_cache = download_cache
self.upgrade = upgrade
self.ignore_installed = ignore_installed
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def add_requirement(self, install_req):
name = install_req.name
if not name:
self.unnamed_requirements.append(install_req)
else:
if self.has_requirement(name):
raise InstallationError(
                    'Double requirement given: %s (already in %s, name=%r)'
% (install_req, self.get_requirement(name), name))
self.requirements[name] = install_req
## FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
def has_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements or name in self.requirement_aliases:
return True
return False
@property
def has_requirements(self):
return list(self.requirements.values()) or self.unnamed_requirements
@property
def has_editables(self):
if any(req.editable for req in self.requirements.values()):
return True
if any(req.editable for req in self.unnamed_requirements):
return True
return False
@property
def is_download(self):
if self.download_dir:
self.download_dir = os.path.expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.fatal('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def locate_files(self):
## FIXME: duplicates code from install_files; relevant code should
## probably be factored out into a separate method
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install_needed = True
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
req_to_install.conflicts_with = req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install_needed = False
if req_to_install.satisfied_by:
logger.notify('Requirement already satisfied '
'(use --upgrade to upgrade): %s'
% req_to_install)
if req_to_install.editable:
if req_to_install.source_dir is None:
req_to_install.source_dir = req_to_install.build_location(self.src_dir)
elif install_needed:
req_to_install.source_dir = req_to_install.build_location(self.build_dir, not self.is_download)
if req_to_install.source_dir is not None and not os.path.isdir(req_to_install.source_dir):
raise InstallationError('Could not install requirement %s '
'because source folder %s does not exist '
'(perhaps --no-download was used without first running '
'an equivalent install with --no-install?)'
% (req_to_install, req_to_install.source_dir))
def prepare_files(self, finder, force_root_egg_info=False, bundle=False):
"""Prepare process. Create temp directories, download and/or unpack files."""
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install = True
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
req_to_install.conflicts_with = req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install = False
if req_to_install.satisfied_by:
logger.notify('Requirement already satisfied '
'(use --upgrade to upgrade): %s'
% req_to_install)
if req_to_install.editable:
logger.notify('Obtaining %s' % req_to_install)
elif install:
if req_to_install.url and req_to_install.url.lower().startswith('file:'):
logger.notify('Unpacking %s' % display_path(url_to_path(req_to_install.url)))
else:
logger.notify('Downloading/unpacking %s' % req_to_install)
logger.indent += 2
try:
is_bundle = False
if req_to_install.editable:
if req_to_install.source_dir is None:
location = req_to_install.build_location(self.src_dir)
req_to_install.source_dir = location
else:
location = req_to_install.source_dir
if not os.path.exists(self.build_dir):
_make_build_dir(self.build_dir)
req_to_install.update_editable(not self.is_download)
if self.is_download:
req_to_install.run_egg_info()
req_to_install.archive(self.download_dir)
else:
req_to_install.run_egg_info()
elif install:
##@@ if filesystem packages are not marked
                    ##editable in a req, a non-deterministic error
##occurs when the script attempts to unpack the
##build directory
location = req_to_install.build_location(self.build_dir, not self.is_download)
                    ## FIXME: is the existence of the checkout good enough to use it? I don't think so.
unpack = True
if not os.path.exists(os.path.join(location, 'setup.py')):
## FIXME: this won't upgrade when there's an existing package unpacked in `location`
if req_to_install.url is None:
url = finder.find_requirement(req_to_install, upgrade=self.upgrade)
else:
## FIXME: should req_to_install.url already be a link?
url = Link(req_to_install.url)
assert url
if url:
try:
self.unpack_url(url, location, self.is_download)
except HTTPError:
e = sys.exc_info()[1]
logger.fatal('Could not install requirement %s because of error %s'
% (req_to_install, e))
raise InstallationError(
'Could not install requirement %s because of HTTP error %s for URL %s'
% (req_to_install, e, url))
else:
unpack = False
if unpack:
is_bundle = req_to_install.is_bundle
url = None
if is_bundle:
req_to_install.move_bundle_files(self.build_dir, self.src_dir)
for subreq in req_to_install.bundle_requirements():
reqs.append(subreq)
self.add_requirement(subreq)
elif self.is_download:
req_to_install.source_dir = location
if url and url.scheme in vcs.all_schemes:
req_to_install.run_egg_info()
req_to_install.archive(self.download_dir)
else:
req_to_install.source_dir = location
req_to_install.run_egg_info()
if force_root_egg_info:
# We need to run this to make sure that the .egg-info/
# directory is created for packing in the bundle
req_to_install.run_egg_info(force_root_egg_info=True)
req_to_install.assert_source_matches_version()
#@@ sketchy way of identifying packages not grabbed from an index
if bundle and req_to_install.url:
self.copy_to_build_dir(req_to_install)
install = False
# req_to_install.req is only avail after unpack for URL pkgs
# repeat check_if_exists to uninstall-on-upgrade (#14)
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
req_to_install.conflicts_with = req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install = False
if not is_bundle and not self.is_download:
## FIXME: shouldn't be globally added:
finder.add_dependency_links(req_to_install.dependency_links)
## FIXME: add extras in here:
if not self.ignore_dependencies:
for req in req_to_install.requirements():
try:
name = pkg_resources.Requirement.parse(req).project_name
except ValueError:
e = sys.exc_info()[1]
## FIXME: proper warning
logger.error('Invalid requirement: %r (%s) in requirement %s' % (req, e, req_to_install))
continue
if self.has_requirement(name):
## FIXME: check for conflict
continue
subreq = InstallRequirement(req, req_to_install)
reqs.append(subreq)
self.add_requirement(subreq)
if req_to_install.name not in self.requirements:
self.requirements[req_to_install.name] = req_to_install
else:
self.reqs_to_cleanup.append(req_to_install)
if install:
self.successfully_downloaded.append(req_to_install)
if bundle and (req_to_install.url and req_to_install.url.startswith('file:///')):
self.copy_to_build_dir(req_to_install)
finally:
logger.indent -= 2
def cleanup_files(self, bundle=False):
"""Clean up files, remove builds."""
logger.notify('Cleaning up...')
logger.indent += 2
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
remove_dir = []
if self._pip_has_created_build_dir():
remove_dir.append(self.build_dir)
# The source dir of a bundle can always be removed.
if bundle:
remove_dir.append(self.src_dir)
for dir in remove_dir:
if os.path.exists(dir):
logger.info('Removing temporary dir %s...' % dir)
rmtree(dir)
logger.indent -= 2
def _pip_has_created_build_dir(self):
return (self.build_dir == build_prefix and
os.path.exists(os.path.join(self.build_dir, PIP_DELETE_MARKER_FILENAME)))
def copy_to_build_dir(self, req_to_install):
        target_dir = self.src_dir if req_to_install.editable else self.build_dir
logger.info("Copying %s to %s" % (req_to_install.name, target_dir))
dest = os.path.join(target_dir, req_to_install.name)
copytree(req_to_install.source_dir, dest)
call_subprocess(["python", "%s/setup.py" % dest, "clean"], cwd=dest,
command_desc='python setup.py clean')
def unpack_url(self, link, location, only_download=False):
if only_download:
location = self.download_dir
if is_vcs_url(link):
return unpack_vcs_link(link, location, only_download)
elif is_file_url(link):
return unpack_file_url(link, location)
else:
if self.download_cache:
self.download_cache = os.path.expanduser(self.download_cache)
return unpack_http_url(link, location, self.download_cache, only_download)
def install(self, install_options, global_options=()):
"""Install everything in this set (after having downloaded and unpacked the packages)"""
to_install = [r for r in self.requirements.values()
if self.upgrade or not r.satisfied_by]
if to_install:
logger.notify('Installing collected packages: %s' % ', '.join([req.name for req in to_install]))
logger.indent += 2
try:
for requirement in to_install:
if requirement.conflicts_with:
logger.notify('Found existing installation: %s'
% requirement.conflicts_with)
logger.indent += 2
try:
requirement.uninstall(auto_confirm=True)
finally:
logger.indent -= 2
try:
requirement.install(install_options, global_options)
except:
# if install did not succeed, rollback previous uninstall
if requirement.conflicts_with and not requirement.install_succeeded:
requirement.rollback_uninstall()
raise
else:
if requirement.conflicts_with and requirement.install_succeeded:
requirement.commit_uninstall()
requirement.remove_temporary_source()
finally:
logger.indent -= 2
self.successfully_installed = to_install
def create_bundle(self, bundle_filename):
## FIXME: can't decide which is better; zip is easier to read
## random files from, but tar.bz2 is smaller and not as lame a
## format.
## FIXME: this file should really include a manifest of the
## packages, maybe some other metadata files. It would make
## it easier to detect as well.
zip = zipfile.ZipFile(bundle_filename, 'w', zipfile.ZIP_DEFLATED)
vcs_dirs = []
for dir, basename in (self.build_dir, 'build'), (self.src_dir, 'src'):
dir = os.path.normcase(os.path.abspath(dir))
for dirpath, dirnames, filenames in os.walk(dir):
for backend in vcs.backends:
vcs_backend = backend()
vcs_url = vcs_rev = None
if vcs_backend.dirname in dirnames:
for vcs_dir in vcs_dirs:
if dirpath.startswith(vcs_dir):
# vcs bundle file already in parent directory
break
else:
vcs_url, vcs_rev = vcs_backend.get_info(
os.path.join(dir, dirpath))
vcs_dirs.append(dirpath)
vcs_bundle_file = vcs_backend.bundle_file
vcs_guide = vcs_backend.guide % {'url': vcs_url,
'rev': vcs_rev}
dirnames.remove(vcs_backend.dirname)
break
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zip.writestr(basename + '/' + name + '/', '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, basename + '/' + name)
if vcs_url:
name = os.path.join(dirpath, vcs_bundle_file)
name = self._clean_zip_name(name, dir)
zip.writestr(basename + '/' + name, vcs_guide)
zip.writestr('pip-manifest.txt', self.bundle_requirements())
zip.close()
BUNDLE_HEADER = '''\
# This is a pip bundle file, that contains many source packages
# that can be installed as a group. You can install this like:
# pip this_file.zip
# The rest of the file contains a list of all the packages included:
'''
def bundle_requirements(self):
parts = [self.BUNDLE_HEADER]
for req in [req for req in self.requirements.values()
if not req.comes_from]:
parts.append('%s==%s\n' % (req.name, req.installed_version))
parts.append('# These packages were installed to satisfy the above requirements:\n')
for req in [req for req in self.requirements.values()
if req.comes_from]:
parts.append('%s==%s\n' % (req.name, req.installed_version))
## FIXME: should we do something with self.unnamed_requirements?
return ''.join(parts)
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix+os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix))
name = name[len(prefix)+1:]
name = name.replace(os.path.sep, '/')
return name
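# Sketch (added for illustration): _clean_zip_name turns an absolute path
# into a zip-entry name relative to the bundle root. Hypothetical call on a
# RequirementSet `rs` with POSIX paths:
#
#     rs._clean_zip_name('/tmp/build/pkg/setup.py', '/tmp/build')
#     # -> 'pkg/setup.py'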
def _make_build_dir(build_dir):
os.makedirs(build_dir)
_write_delete_marker_message(os.path.join(build_dir, PIP_DELETE_MARKER_FILENAME))
def _write_delete_marker_message(filepath):
marker_fp = open(filepath, 'w')
marker_fp.write(DELETE_MARKER_MESSAGE)
marker_fp.close()
_scheme_re = re.compile(r'^(http|https|file):', re.I)
def parse_requirements(filename, finder=None, comes_from=None, options=None):
skip_match = None
skip_regex = options.skip_requirements_regex
if skip_regex:
skip_match = re.compile(skip_regex)
filename, content = get_file_content(filename, comes_from=comes_from)
for line_number, line in enumerate(content.splitlines()):
line_number += 1
line = line.strip()
if not line or line.startswith('#'):
continue
if skip_match and skip_match.search(line):
continue
if line.startswith('-r') or line.startswith('--requirement'):
if line.startswith('-r'):
req_url = line[2:].strip()
else:
req_url = line[len('--requirement'):].strip().strip('=')
if _scheme_re.search(filename):
# Relative to a URL
                req_url = urlparse.urljoin(filename, req_url)
elif not _scheme_re.search(req_url):
req_url = os.path.join(os.path.dirname(filename), req_url)
for item in parse_requirements(req_url, finder, comes_from=filename, options=options):
yield item
elif line.startswith('-Z') or line.startswith('--always-unzip'):
# No longer used, but previously these were used in
# requirement files, so we'll ignore.
pass
elif line.startswith('-f') or line.startswith('--find-links'):
if line.startswith('-f'):
line = line[2:].strip()
else:
line = line[len('--find-links'):].strip().lstrip('=')
## FIXME: it would be nice to keep track of the source of
## the find_links:
if finder:
finder.find_links.append(line)
elif line.startswith('-i') or line.startswith('--index-url'):
if line.startswith('-i'):
line = line[2:].strip()
else:
line = line[len('--index-url'):].strip().lstrip('=')
if finder:
finder.index_urls = [line]
elif line.startswith('--extra-index-url'):
line = line[len('--extra-index-url'):].strip().lstrip('=')
if finder:
finder.index_urls.append(line)
else:
comes_from = '-r %s (line %s)' % (filename, line_number)
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
req = InstallRequirement.from_editable(
line, comes_from=comes_from, default_vcs=options.default_vcs)
else:
req = InstallRequirement.from_line(line, comes_from)
yield req
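# Illustration (not part of the original source): parse_requirements yields
# one InstallRequirement per requirement line and recurses into nested files.
# A hypothetical requirements.txt it can consume:
#
#     # comments and blank lines are skipped
#     -r base-requirements.txt
#     --find-links http://example.com/packages
#     -e git+http://example.com/repo.git#egg=MyPkg
#     SomePackage==1.0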
def parse_editable(editable_req, default_vcs=None):
"""Parses svn+http://blahblah@rev#egg=Foobar into a requirement
(Foobar) and a URL"""
url = editable_req
if os.path.isdir(url) and os.path.exists(os.path.join(url, 'setup.py')):
# Treating it as code that has already been checked out
url = path_to_url(url)
if url.lower().startswith('file:'):
return None, url
for version_control in vcs:
if url.lower().startswith('%s:' % version_control):
url = '%s+%s' % (version_control, url)
if '+' not in url:
if default_vcs:
url = default_vcs + '+' + url
else:
raise InstallationError(
'--editable=%s should be formatted with svn+URL, git+URL, hg+URL or bzr+URL' % editable_req)
vc_type = url.split('+', 1)[0].lower()
if not vcs.get_backend(vc_type):
raise InstallationError(
'For --editable=%s only svn (svn+URL), Git (git+URL), Mercurial (hg+URL) and Bazaar (bzr+URL) is currently supported' % editable_req)
match = re.search(r'(?:#|#.*?&)egg=([^&]*)', editable_req)
if (not match or not match.group(1)) and vcs.get_backend(vc_type):
parts = [p for p in editable_req.split('#', 1)[0].split('/') if p]
if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif parts[-1] == 'trunk':
req = parts[-2]
else:
raise InstallationError(
'--editable=%s is not the right format; it must have #egg=Package'
% editable_req)
else:
req = match.group(1)
## FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req, url
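# Sketch (added for illustration): expected behaviour of parse_editable on a
# hypothetical VCS URL; note that the trailing -dev is stripped from the egg
# name while the URL is returned untouched:
#
#     parse_editable('git+http://example.com/repo#egg=MyPkg-dev')
#     # -> ('MyPkg', 'git+http://example.com/repo#egg=MyPkg-dev')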
class UninstallPathSet(object):
"""A set of file paths to be removed in the uninstallation of a
requirement."""
def __init__(self, dist):
self.paths = set()
self._refuse = set()
self.pth = {}
self.dist = dist
self.save_dir = None
self._moved_paths = []
def _permitted(self, path):
"""
Return True if the given path is one we are permitted to
remove/modify, False otherwise.
"""
return is_local(path)
def _can_uninstall(self):
if not dist_is_local(self.dist):
logger.notify("Not uninstalling %s at %s, outside environment %s"
% (self.dist.project_name, normalize_path(self.dist.location), sys.prefix))
return False
return True
def add(self, path):
path = normalize_path(path)
if not os.path.exists(path):
return
if self._permitted(path):
self.paths.add(path)
else:
self._refuse.add(path)
def add_pth(self, pth_file, entry):
pth_file = normalize_path(pth_file)
if self._permitted(pth_file):
if pth_file not in self.pth:
self.pth[pth_file] = UninstallPthEntries(pth_file)
self.pth[pth_file].add(entry)
else:
self._refuse.add(pth_file)
def compact(self, paths):
"""Compact a path set to contain the minimal number of paths
necessary to contain all paths in the set. If /a/path/ and
/a/path/to/a/file.txt are both in the set, leave only the
shorter path."""
short_paths = set()
for path in sorted(paths, key=len):
if not any([(path.startswith(shortpath) and
path[len(shortpath.rstrip(os.path.sep))] == os.path.sep)
for shortpath in short_paths]):
short_paths.add(path)
return short_paths
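    # Illustration (not in the original source): nested paths collapse onto
    # their parent. `ups` is a hypothetical UninstallPathSet:
    #
    #     paths = set(['/a/path/', '/a/path/to/a/file.txt', '/b/x.py'])
    #     assert ups.compact(paths) == set(['/a/path/', '/b/x.py'])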
def _stash(self, path):
return os.path.join(
self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep))
def remove(self, auto_confirm=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self._can_uninstall():
return
logger.notify('Uninstalling %s:' % self.dist.project_name)
logger.indent += 2
paths = sorted(self.compact(self.paths))
try:
if auto_confirm:
response = 'y'
else:
for path in paths:
logger.notify(path)
response = ask('Proceed (y/n)? ', ('y', 'n'))
if self._refuse:
logger.notify('Not removing or modifying (outside of prefix):')
for path in self.compact(self._refuse):
logger.notify(path)
if response == 'y':
self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
prefix='pip-')
for path in paths:
new_path = self._stash(path)
logger.info('Removing file or directory %s' % path)
self._moved_paths.append(path)
renames(path, new_path)
for pth in self.pth.values():
pth.remove()
logger.notify('Successfully uninstalled %s' % self.dist.project_name)
finally:
logger.indent -= 2
def rollback(self):
"""Rollback the changes previously made by remove()."""
if self.save_dir is None:
logger.error("Can't roll back %s; was not uninstalled" % self.dist.project_name)
return False
logger.notify('Rolling back uninstall of %s' % self.dist.project_name)
for path in self._moved_paths:
tmp_path = self._stash(path)
logger.info('Replacing %s' % path)
renames(tmp_path, path)
        for pth in self.pth.values():
            pth.rollback()
def commit(self):
"""Remove temporary save dir: rollback will no longer be possible."""
if self.save_dir is not None:
rmtree(self.save_dir)
self.save_dir = None
self._moved_paths = []
class UninstallPthEntries(object):
def __init__(self, pth_file):
if not os.path.isfile(pth_file):
raise UninstallationError("Cannot remove entries from nonexistent file %s" % pth_file)
self.file = pth_file
self.entries = set()
self._saved_lines = None
def add(self, entry):
entry = os.path.normcase(entry)
# On Windows, os.path.normcase converts the entry to use
# backslashes. This is correct for entries that describe absolute
# paths outside of site-packages, but all the others use forward
# slashes.
if sys.platform == 'win32' and not os.path.splitdrive(entry)[0]:
entry = entry.replace('\\', '/')
self.entries.add(entry)
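    # Sketch of the normalization above with hypothetical win32 values:
    # drive-less entries keep forward slashes, absolute drive paths keep
    # the backslashes produced by normcase():
    #
    #     entries.add('./mypkg')    # stored as './mypkg'
    #     entries.add('C:\\x\\y')   # stored as 'c:\\x\\y'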
def remove(self):
logger.info('Removing pth entries from %s:' % self.file)
fh = open(self.file, 'rb')
# windows uses '\r\n' with py3k, but uses '\n' with py2.x
lines = fh.readlines()
self._saved_lines = lines
fh.close()
if any(b('\r\n') in line for line in lines):
endline = '\r\n'
else:
endline = '\n'
for entry in self.entries:
try:
logger.info('Removing entry: %s' % entry)
lines.remove(b(entry + endline))
except ValueError:
pass
fh = open(self.file, 'wb')
fh.writelines(lines)
fh.close()
def rollback(self):
if self._saved_lines is None:
logger.error('Cannot roll back changes to %s, none were made' % self.file)
return False
logger.info('Rolling %s back to previous state' % self.file)
fh = open(self.file, 'wb')
fh.writelines(self._saved_lines)
fh.close()
return True
class FakeFile(object):
"""Wrap a list of lines in an object with readline() to make
ConfigParser happy."""
def __init__(self, lines):
self._gen = (l for l in lines)
def readline(self):
try:
try:
return next(self._gen)
except NameError:
return self._gen.next()
except StopIteration:
return ''
def __iter__(self):
return self._gen
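# Illustration (not part of the original source): FakeFile lets ConfigParser
# read from a list of lines, e.g. output captured from a subprocess:
#
#     from ConfigParser import ConfigParser
#     cp = ConfigParser()
#     cp.readfp(FakeFile(['[section]\n', 'key = value\n']))
#     assert cp.get('section', 'key') == 'value'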
| bsd-3-clause |
deandunbar/bitwave | hackathon_version/venv/lib/python2.7/site-packages/django/contrib/gis/gdal/driver.py | 104 | 2477 | # prerequisite imports
from ctypes import c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils import six
from django.utils.encoding import force_bytes
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Dr_* routines are relevant here.
class Driver(GDALBase):
"Wraps an OGR Data Source Driver."
# Case-insensitive aliases for OGR Drivers.
_alias = {'esri': 'ESRI Shapefile',
'shp': 'ESRI Shapefile',
'shape': 'ESRI Shapefile',
'tiger': 'TIGER',
'tiger/line': 'TIGER',
}
def __init__(self, dr_input):
"Initializes an OGR driver on either a string or integer input."
if isinstance(dr_input, six.string_types):
# If a string name of the driver was passed in
self._register()
# Checking the alias dictionary (case-insensitive) to see if an alias
# exists for the given driver.
if dr_input.lower() in self._alias:
name = self._alias[dr_input.lower()]
else:
name = dr_input
# Attempting to get the OGR driver by the string name.
dr = capi.get_driver_by_name(force_bytes(name))
elif isinstance(dr_input, int):
self._register()
dr = capi.get_driver(dr_input)
elif isinstance(dr_input, c_void_p):
dr = dr_input
else:
raise OGRException('Unrecognized input type for OGR Driver: %s' % str(type(dr_input)))
# Making sure we get a valid pointer to the OGR Driver
if not dr:
raise OGRException('Could not initialize OGR Driver on input: %s' % str(dr_input))
self.ptr = dr
def __str__(self):
"Returns the string name of the OGR Driver."
return capi.get_driver_name(self.ptr)
def _register(self):
"Attempts to register all the data source drivers."
# Only register all if the driver count is 0 (or else all drivers
# will be registered over and over again)
if not self.driver_count:
capi.register_all()
# Driver properties
@property
def driver_count(self):
"Returns the number of OGR data source drivers registered."
return capi.get_driver_count()
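# Hypothetical usage sketch (not part of Django; requires OGR/GDAL to be
# installed): aliases are resolved case-insensitively, so both lines below
# construct the same driver:
#
#     drv = Driver('shp')  # alias for 'ESRI Shapefile'
#     assert str(drv) == str(Driver('ESRI Shapefile'))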
| mit |
doomsterinc/odoo | openerp/workflow/__init__.py | 378 | 3793 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.workflow.service import WorkflowService
# The new API is in openerp.workflow.workflow_service
# OLD API of the Workflow
def clear_cache(cr, uid):
WorkflowService.clear_cache(cr.dbname)
def trg_write(uid, res_type, res_id, cr):
"""
    Reevaluates the specified workflow instance. Thus, if any condition for
    a transition has changed in the backend, running ``trg_write`` will move
    the workflow over that transition.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
return WorkflowService.new(cr, uid, res_type, res_id).write()
def trg_trigger(uid, res_type, res_id, cr):
"""
Activate a trigger.
If a workflow instance is waiting for a trigger from another model, then this
trigger can be activated if its conditions are met.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
return WorkflowService.new(cr, uid, res_type, res_id).trigger()
def trg_delete(uid, res_type, res_id, cr):
"""
Delete a workflow instance
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
return WorkflowService.new(cr, uid, res_type, res_id).delete()
def trg_create(uid, res_type, res_id, cr):
"""
Create a new workflow instance
:param res_type: the model name
    :param res_id: the model instance id to own the created workflow instance
:param cr: a database cursor
"""
return WorkflowService.new(cr, uid, res_type, res_id).create()
def trg_validate(uid, res_type, res_id, signal, cr):
"""
Fire a signal on a given workflow instance
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:signal: the signal name to be fired
:param cr: a database cursor
"""
assert isinstance(signal, basestring)
return WorkflowService.new(cr, uid, res_type, res_id).validate(signal)
def trg_redirect(uid, res_type, res_id, new_rid, cr):
"""
Re-bind a workflow instance to another instance of the same model.
Make all workitems which are waiting for a (subflow) workflow instance
for the old resource point to the (first active) workflow instance for
the new resource.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
    :param new_rid: the model instance id to own the workflow instance
:param cr: a database cursor
"""
assert isinstance(new_rid, (long, int))
return WorkflowService.new(cr, uid, res_type, res_id).redirect(new_rid)
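# Illustrative sketch (not part of the original module): a typical lifecycle
# using the old API above, assuming an open cursor `cr`, uid 1, and a
# hypothetical model, record id and signal name:
#
#     trg_create(1, 'sale.order', order_id, cr)               # instantiate
#     trg_validate(1, 'sale.order', order_id, 'confirm', cr)  # fire a signal
#     trg_write(1, 'sale.order', order_id, cr)                # re-evaluate
#     trg_delete(1, 'sale.order', order_id, cr)               # drop instance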
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
SamiHiltunen/invenio-classifier | invenio_classifier/normalizer.py | 2 | 18618 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2008, 2009, 2010, 2011, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Classifier text normalizer.
This module provides methods to clean the text lines. Currently, the methods
are tuned to work with the output of `pdftotext` and documents in the HEP
field.
This modules uses the refextract module of BibEdit in order to find the
references section and to replace Unicode characters.
"""
import re
from flask import current_app
from .find import (
find_end_of_reference_section,
find_reference_section,
)
from six import iteritems
_washing_regex = []
def get_washing_regex():
"""Return a washing regex list."""
global _washing_regex
if len(_washing_regex):
return _washing_regex
washing_regex = [
# Replace non and anti with non- and anti-. This allows a better
# detection of keywords such as nonabelian.
(re.compile(r"(\snon)[- ](\w+)"), r"\1\2"),
(re.compile(r"(\santi)[- ](\w+)"), r"\1\2"),
# Remove all leading numbers (e.g. 2-pion -> pion).
(re.compile(r"\s\d-"), " "),
# Remove multiple spaces.
(re.compile(r" +"), " "),
]
# Remove spaces in particle names.
# Particles with -/+/*
washing_regex += [
(re.compile(r"(\W%s) ([-+*])" % name), r"\1\2")
for name in ("c", "muon", "s", "B", "D", "K", "Lambda",
"Mu", "Omega", "Pi", "Sigma", "Tau", "W", "Xi")
]
# Particles followed by numbers
washing_regex += [
(re.compile(r"(\W%s) ([0-9]\W)" % name), r"\1\2")
for name in ("a", "b", "c", "f", "h", "s", "B", "D", "H",
"K", "L", "Phi", "Pi", "Psi", "Rho", "Stor", "UA",
"Xi", "Z")
]
washing_regex += [(re.compile(r"(\W%s) ?\( ?([0-9]+) ?\)[A-Z]?" % name),
r"\1(\2)")
for name in ("CP", "E", "G", "O", "S", "SL", "SO",
"Spin", "SU", "U", "W", "Z")]
# Particles with '
washing_regex += [(re.compile(r"(\W%s) ('\W)" % name), r"\1\2")
for name in ("Eta", "W", "Z")]
# Particles with (N)
washing_regex += [(re.compile(r"(\W%s) ?\( ?N ?\)[A-Z]?" % name), r"\1(N)")
for name in ("CP", "GL", "O", "SL", "SO", "Sp", "Spin",
"SU", "U", "W", "Z")]
# All names followed by ([0-9]{3,4})
washing_regex.append((re.compile(r"([A-Za-z]) (\([0-9]{3,4}\)\+?)\s"),
r"\1\2 "))
# Some weird names followed by ([0-9]{3,4})
washing_regex += [(re.compile(r"\(%s\) (\([0-9]{3,4}\))" % name),
r"\1\2 ")
for name in ("a0", "Ds1", "Ds2", "K\*")]
washing_regex += [
        # Remove all lone operators (usually these are errors
        # introduced by pdftotext).
(re.compile(r" [+*] "), r" "),
# Remove multiple spaces.
(re.compile(r" +"), " "),
# Remove multiple line breaks.
(re.compile(r"\n+"), r"\n"),
]
_washing_regex = washing_regex
return _washing_regex
def normalize_fulltext(fulltext):
"""Return a 'cleaned' version of the output provided by pdftotext."""
# We recognize keywords by the spaces. We need these to match the
# first and last words of the document.
fulltext = " " + fulltext + " "
# Replace some weird unicode characters.
fulltext = replace_undesirable_characters(fulltext)
# Replace the greek characters by their name.
fulltext = _replace_greek_characters(fulltext)
washing_regex = get_washing_regex()
# Apply the regular expressions to the fulltext.
for regex, replacement in washing_regex:
fulltext = regex.sub(replacement, fulltext)
return fulltext
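# Sketch (added for illustration) of the washing effect; exact spacing
# follows from the regex list above, including the padding added around the
# input:
#
#     normalize_fulltext('a non abelian model of the K *')
#     # -> ' a nonabelian model of the K* '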
def cut_references(text_lines):
"""Return the text lines with the references cut."""
ref_sect_start = find_reference_section(text_lines)
if ref_sect_start is not None:
start = ref_sect_start["start_line"]
end = find_end_of_reference_section(text_lines, start,
ref_sect_start["marker"],
ref_sect_start["marker_pattern"])
del text_lines[start:end + 1]
    else:
        current_app.logger.warning("Found no references to remove.")
    return text_lines
_GREEK_REPLACEMENTS = {
u'\u00AF': u' ',
u'\u00B5': u' Mu ',
u'\u00D7': u' x ',
u'\u0391': u' Alpha ',
u'\u0392': u' Beta ',
u'\u0393': u' Gamma ',
u'\u0394': u' Delta ',
u'\u0395': u' Epsilon ',
u'\u0396': u' Zeta ',
u'\u0397': u' Eta ',
u'\u0398': u' Theta ',
u'\u0399': u' Iota ',
u'\u039A': u' Kappa ',
u'\u039B': u' Lambda ',
u'\u039C': u' Mu ',
u'\u039D': u' Nu ',
u'\u039E': u' Xi ',
u'\u039F': u' Omicron ',
u'\u03A0': u' Pi ',
u'\u03A1': u' Rho ',
u'\u03A3': u' Sigma ',
u'\u03A4': u' Tau ',
u'\u03A5': u' Upsilon ',
u'\u03A6': u' Phi ',
u'\u03A7': u' Chi ',
u'\u03A8': u' Psi ',
u'\u03A9': u' Omega ',
u'\u03B1': u' Alpha ',
u'\u03B2': u' Beta ',
u'\u03B3': u' Gamma ',
u'\u03B4': u' Delta ',
u'\u03B5': u' Epsilon ',
u'\u03B6': u' Zeta ',
u'\u03B7': u' Eta ',
u'\u03B8': u' Theta ',
u'\u03B9': u' Iota ',
u'\u03BA': u' Kappa ',
u'\u03BB': u' Lambda ',
u'\u03BC': u' Mu ',
u'\u03BD': u' Nu ',
u'\u03BE': u' Xi ',
u'\u03BF': u' Omicron ',
u'\u03C0': u' Pi ',
u'\u03C1': u' Rho ',
    u'\u03C2': u' Sigma ',
u'\u03C3': u' Sigma ',
u'\u03C4': u' Tau ',
u'\u03C5': u' Upsilon ',
u'\u03C6': u' Phi ',
u'\u03C7': u' Chi ',
u'\u03C8': u' Psi ',
u'\u03C9': u' Omega ',
u'\u03CA': u' Iota ',
u'\u03CB': u' Upsilon ',
u'\u03CC': u' Omicron ',
u'\u03CD': u' Upsilon ',
u'\u03CE': u' Omega ',
u'\u03CF': u' Kai ',
u'\u03D0': u' Beta ',
u'\u03D1': u' Theta ',
u'\u03D2': u' Upsilon ',
u'\u03D3': u' Upsilon ',
u'\u03D4': u' Upsilon ',
u'\u03D5': u' Phi ',
u'\u03D6': u' Pi ',
u'\u03D7': u' Kai ',
u'\u03D8': u' Koppa ',
u'\u03D9': u' Koppa ',
u'\u03DA': u' Stigma ',
u'\u03DB': u' Stigma ',
u'\u03DC': u' Digamma ',
u'\u03DD': u' Digamma ',
u'\u03DE': u' Koppa ',
u'\u03DF': u' Koppa ',
u'\u03E0': u' Sampi ',
u'\u03E1': u' Sampi ',
u'\u2010': u'-',
u'\u2011': u'-',
u'\u2012': u'-',
u'\u2013': u'-',
u'\u2014': u'-',
u'\u2015': u'-',
u'\u2019': u"'",
u'\u2032': u"'",
u'\u2126': u' Omega ',
u'\u2206': u' Delta ',
u'\u2212': u'-',
u'\u2215': u"/",
u'\u2216': u"\\",
u'\u2217': u"*",
u'\u221D': u' Alpha ',
}
# a dictionary of undesirable characters and their replacements:
UNDESIRABLE_CHAR_REPLACEMENTS = {
# Control characters not allowed in XML:
u'\u2028': u"",
u'\u2029': u"",
u'\u202A': u"",
u'\u202B': u"",
u'\u202C': u"",
u'\u202D': u"",
u'\u202E': u"",
u'\u206A': u"",
u'\u206B': u"",
u'\u206C': u"",
u'\u206D': u"",
u'\u206E': u"",
u'\u206F': u"",
u'\uFFF9': u"",
u'\uFFFA': u"",
u'\uFFFB': u"",
u'\uFFFC': u"",
u'\uFEFF': u"",
# Remove the result of a bad UTF-8 character
u'\uFFFF': u"",
# Language Tag Code Points:
u"\U000E0000": u"",
u"\U000E0001": u"",
u"\U000E0002": u"",
u"\U000E0003": u"",
u"\U000E0004": u"",
u"\U000E0005": u"",
u"\U000E0006": u"",
u"\U000E0007": u"",
u"\U000E0008": u"",
u"\U000E0009": u"",
u"\U000E000A": u"",
u"\U000E000B": u"",
u"\U000E000C": u"",
u"\U000E000D": u"",
u"\U000E000E": u"",
u"\U000E000F": u"",
u"\U000E0010": u"",
u"\U000E0011": u"",
u"\U000E0012": u"",
u"\U000E0013": u"",
u"\U000E0014": u"",
u"\U000E0015": u"",
u"\U000E0016": u"",
u"\U000E0017": u"",
u"\U000E0018": u"",
u"\U000E0019": u"",
u"\U000E001A": u"",
u"\U000E001B": u"",
u"\U000E001C": u"",
u"\U000E001D": u"",
u"\U000E001E": u"",
u"\U000E001F": u"",
u"\U000E0020": u"",
u"\U000E0021": u"",
u"\U000E0022": u"",
u"\U000E0023": u"",
u"\U000E0024": u"",
u"\U000E0025": u"",
u"\U000E0026": u"",
u"\U000E0027": u"",
u"\U000E0028": u"",
u"\U000E0029": u"",
u"\U000E002A": u"",
u"\U000E002B": u"",
u"\U000E002C": u"",
u"\U000E002D": u"",
u"\U000E002E": u"",
u"\U000E002F": u"",
u"\U000E0030": u"",
u"\U000E0031": u"",
u"\U000E0032": u"",
u"\U000E0033": u"",
u"\U000E0034": u"",
u"\U000E0035": u"",
u"\U000E0036": u"",
u"\U000E0037": u"",
u"\U000E0038": u"",
u"\U000E0039": u"",
u"\U000E003A": u"",
u"\U000E003B": u"",
u"\U000E003C": u"",
u"\U000E003D": u"",
u"\U000E003E": u"",
u"\U000E003F": u"",
u"\U000E0040": u"",
u"\U000E0041": u"",
u"\U000E0042": u"",
u"\U000E0043": u"",
u"\U000E0044": u"",
u"\U000E0045": u"",
u"\U000E0046": u"",
u"\U000E0047": u"",
u"\U000E0048": u"",
u"\U000E0049": u"",
u"\U000E004A": u"",
u"\U000E004B": u"",
u"\U000E004C": u"",
u"\U000E004D": u"",
u"\U000E004E": u"",
u"\U000E004F": u"",
u"\U000E0050": u"",
u"\U000E0051": u"",
u"\U000E0052": u"",
u"\U000E0053": u"",
u"\U000E0054": u"",
u"\U000E0055": u"",
u"\U000E0056": u"",
u"\U000E0057": u"",
u"\U000E0058": u"",
u"\U000E0059": u"",
u"\U000E005A": u"",
u"\U000E005B": u"",
u"\U000E005C": u"",
u"\U000E005D": u"",
u"\U000E005E": u"",
u"\U000E005F": u"",
u"\U000E0060": u"",
u"\U000E0061": u"",
u"\U000E0062": u"",
u"\U000E0063": u"",
u"\U000E0064": u"",
u"\U000E0065": u"",
u"\U000E0066": u"",
u"\U000E0067": u"",
u"\U000E0068": u"",
u"\U000E0069": u"",
u"\U000E006A": u"",
u"\U000E006B": u"",
u"\U000E006C": u"",
u"\U000E006D": u"",
u"\U000E006E": u"",
u"\U000E006F": u"",
u"\U000E0070": u"",
u"\U000E0071": u"",
u"\U000E0072": u"",
u"\U000E0073": u"",
u"\U000E0074": u"",
u"\U000E0075": u"",
u"\U000E0076": u"",
u"\U000E0077": u"",
u"\U000E0078": u"",
u"\U000E0079": u"",
u"\U000E007A": u"",
u"\U000E007B": u"",
u"\U000E007C": u"",
u"\U000E007D": u"",
u"\U000E007E": u"",
u"\U000E007F": u"",
# Musical Notation Scoping
u"\U0001D173": u"",
u"\U0001D174": u"",
u"\U0001D175": u"",
u"\U0001D176": u"",
u"\U0001D177": u"",
u"\U0001D178": u"",
u"\U0001D179": u"",
u"\U0001D17A": u"",
u'\u0000': u"", # NULL
u'\u0001': u"", # START OF HEADING
# START OF TEXT & END OF TEXT:
u'\u0002': u"",
u'\u0003': u"",
u'\u0004': u"", # END OF TRANSMISSION
# ENQ and ACK
u'\u0005': u"",
u'\u0006': u"",
u'\u0007': u"", # BELL
u'\u0008': u"", # BACKSPACE
# SHIFT-IN & SHIFT-OUT
u'\u000E': u"",
u'\u000F': u"",
# Other controls:
u'\u0010': u"", # DATA LINK ESCAPE
u'\u0011': u"", # DEVICE CONTROL ONE
u'\u0012': u"", # DEVICE CONTROL TWO
u'\u0013': u"", # DEVICE CONTROL THREE
u'\u0014': u"", # DEVICE CONTROL FOUR
u'\u0015': u"", # NEGATIVE ACK
u'\u0016': u"", # SYNCRONOUS IDLE
u'\u0017': u"", # END OF TRANSMISSION BLOCK
u'\u0018': u"", # CANCEL
u'\u0019': u"", # END OF MEDIUM
u'\u001A': u"", # SUBSTITUTE
u'\u001B': u"", # ESCAPE
u'\u001C': u"", # INFORMATION SEPARATOR FOUR (file separator)
u'\u001D': u"", # INFORMATION SEPARATOR THREE (group separator)
u'\u001E': u"", # INFORMATION SEPARATOR TWO (record separator)
u'\u001F': u"", # INFORMATION SEPARATOR ONE (unit separator)
# \r -> remove it
u'\r': u"",
# Strange parantheses - change for normal:
u'\x1c': u'(',
u'\x1d': u')',
# Some ff from tex:
u'\u0013\u0010': u'\u00ED',
u'\x0b': u'ff',
# fi from tex:
u'\x0c': u'fi',
# ligatures from TeX:
u'\ufb00': u'ff',
u'\ufb01': u'fi',
u'\ufb02': u'fl',
u'\ufb03': u'ffi',
u'\ufb04': u'ffl',
# Superscripts from TeX
u'\u2212': u'-',
u'\u2013': u'-',
# Word style speech marks:
u'\u201c ': u'"',
u'\u201d': u'"',
u'\u201c': u'"',
# pdftotext has problems with umlaut and prints it as diaeresis
# followed by a letter:correct it
# (Optional space between char and letter - fixes broken
# line examples)
u'\u00A8 a': u'\u00E4',
u'\u00A8 e': u'\u00EB',
u'\u00A8 i': u'\u00EF',
u'\u00A8 o': u'\u00F6',
u'\u00A8 u': u'\u00FC',
u'\u00A8 y': u'\u00FF',
u'\u00A8 A': u'\u00C4',
u'\u00A8 E': u'\u00CB',
u'\u00A8 I': u'\u00CF',
u'\u00A8 O': u'\u00D6',
u'\u00A8 U': u'\u00DC',
u'\u00A8 Y': u'\u0178',
u'\xA8a': u'\u00E4',
u'\xA8e': u'\u00EB',
u'\xA8i': u'\u00EF',
u'\xA8o': u'\u00F6',
u'\xA8u': u'\u00FC',
u'\xA8y': u'\u00FF',
u'\xA8A': u'\u00C4',
u'\xA8E': u'\u00CB',
u'\xA8I': u'\u00CF',
u'\xA8O': u'\u00D6',
u'\xA8U': u'\u00DC',
u'\xA8Y': u'\u0178',
# More umlaut mess to correct:
u'\x7fa': u'\u00E4',
u'\x7fe': u'\u00EB',
u'\x7fi': u'\u00EF',
u'\x7fo': u'\u00F6',
u'\x7fu': u'\u00FC',
u'\x7fy': u'\u00FF',
u'\x7fA': u'\u00C4',
u'\x7fE': u'\u00CB',
u'\x7fI': u'\u00CF',
u'\x7fO': u'\u00D6',
u'\x7fU': u'\u00DC',
u'\x7fY': u'\u0178',
u'\x7f a': u'\u00E4',
u'\x7f e': u'\u00EB',
u'\x7f i': u'\u00EF',
u'\x7f o': u'\u00F6',
u'\x7f u': u'\u00FC',
u'\x7f y': u'\u00FF',
u'\x7f A': u'\u00C4',
u'\x7f E': u'\u00CB',
u'\x7f I': u'\u00CF',
u'\x7f O': u'\u00D6',
u'\x7f U': u'\u00DC',
u'\x7f Y': u'\u0178',
# pdftotext: fix accute accent:
u'\x13a': u'\u00E1',
u'\x13e': u'\u00E9',
u'\x13i': u'\u00ED',
u'\x13o': u'\u00F3',
u'\x13u': u'\u00FA',
u'\x13y': u'\u00FD',
u'\x13A': u'\u00C1',
u'\x13E': u'\u00C9',
u'\x13I': u'\u00CD',
u'\x13ı': u'\u00ED', # Lower case turkish 'i' (dotless i)
u'\x13O': u'\u00D3',
u'\x13U': u'\u00DA',
u'\x13Y': u'\u00DD',
u'\x13 a': u'\u00E1',
u'\x13 e': u'\u00E9',
u'\x13 i': u'\u00ED',
u'\x13 o': u'\u00F3',
u'\x13 u': u'\u00FA',
u'\x13 y': u'\u00FD',
u'\x13 A': u'\u00C1',
u'\x13 E': u'\u00C9',
u'\x13 I': u'\u00CD',
u'\x13 ı': u'\u00ED',
u'\x13 O': u'\u00D3',
u'\x13 U': u'\u00DA',
u'\x13 Y': u'\u00DD',
u'\u00B4 a': u'\u00E1',
u'\u00B4 e': u'\u00E9',
u'\u00B4 i': u'\u00ED',
u'\u00B4 o': u'\u00F3',
u'\u00B4 u': u'\u00FA',
u'\u00B4 y': u'\u00FD',
u'\u00B4 A': u'\u00C1',
u'\u00B4 E': u'\u00C9',
u'\u00B4 I': u'\u00CD',
u'\u00B4 ı': u'\u00ED',
u'\u00B4 O': u'\u00D3',
u'\u00B4 U': u'\u00DA',
u'\u00B4 Y': u'\u00DD',
u'\u00B4a': u'\u00E1',
u'\u00B4e': u'\u00E9',
u'\u00B4i': u'\u00ED',
u'\u00B4o': u'\u00F3',
u'\u00B4u': u'\u00FA',
u'\u00B4y': u'\u00FD',
u'\u00B4A': u'\u00C1',
u'\u00B4E': u'\u00C9',
u'\u00B4I': u'\u00CD',
u'\u00B4ı': u'\u00ED',
u'\u00B4O': u'\u00D3',
u'\u00B4U': u'\u00DA',
u'\u00B4Y': u'\u00DD',
# pdftotext: fix grave accent:
u'\u0060 a': u'\u00E0',
u'\u0060 e': u'\u00E8',
u'\u0060 i': u'\u00EC',
u'\u0060 o': u'\u00F2',
u'\u0060 u': u'\u00F9',
u'\u0060 A': u'\u00C0',
u'\u0060 E': u'\u00C8',
u'\u0060 I': u'\u00CC',
u'\u0060 O': u'\u00D2',
u'\u0060 U': u'\u00D9',
u'\u0060a': u'\u00E0',
u'\u0060e': u'\u00E8',
u'\u0060i': u'\u00EC',
u'\u0060o': u'\u00F2',
u'\u0060u': u'\u00F9',
u'\u0060A': u'\u00C0',
u'\u0060E': u'\u00C8',
u'\u0060I': u'\u00CC',
u'\u0060O': u'\u00D2',
u'\u0060U': u'\u00D9',
u'a´': u'á',
u'i´': u'í',
u'e´': u'é',
u'u´': u'ú',
u'o´': u'ó',
# \02C7 : caron
u'\u02C7C': u'\u010C',
u'\u02C7c': u'\u010D',
u'\u02C7S': u'\u0160',
u'\u02C7s': u'\u0161',
u'\u02C7Z': u'\u017D',
u'\u02C7z': u'\u017E',
# \027 : aa (a with ring above)
u'\u02DAa': u'\u00E5',
u'\u02DAA': u'\u00C5',
# \030 : cedilla
u'\u0327c': u'\u00E7',
u'\u0327C': u'\u00C7',
u'¸c': u'ç',
# \02DC : tilde
u'\u02DCn': u'\u00F1',
u'\u02DCN': u'\u00D1',
u'\u02DCo': u'\u00F5',
u'\u02DCO': u'\u00D5',
u'\u02DCa': u'\u00E3',
u'\u02DCA': u'\u00C3',
u'\u02DCs': u'\u0303s', # Combining tilde with 's'
# Circumflex accent (caret accent)
u'aˆ': u'â',
u'iˆ': u'î',
u'eˆ': u'ê',
u'uˆ': u'û',
u'oˆ': u'ô',
u'ˆa': u'â',
u'ˆi': u'î',
u'ˆe': u'ê',
u'ˆu': u'û',
u'ˆo': u'ô',
}
UNDESIRABLE_STRING_REPLACEMENTS = [
(u'\u201c ', '"'),
]
def replace_undesirable_characters(line):
"""
Replace certain bad characters in a text line.
@param line: (string) the text line in which bad characters are to
be replaced.
@return: (string) the text line after the bad characters have been
replaced.
"""
# These are separate because we want a particular order
for bad_string, replacement in UNDESIRABLE_STRING_REPLACEMENTS:
line = line.replace(bad_string, replacement)
for bad_char, replacement in iteritems(UNDESIRABLE_CHAR_REPLACEMENTS):
line = line.replace(bad_char, replacement)
return line
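# Illustration (not part of the original source): TeX ligatures and stray
# control characters left behind by pdftotext are repaired, e.g.:
#
#     replace_undesirable_characters(u'e\ufb00ective\r')
#     # -> u'effective'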
def _replace_greek_characters(line):
"""Replace greek characters in a string."""
for greek_char, replacement in iteritems(_GREEK_REPLACEMENTS):
try:
line = line.replace(greek_char, replacement)
except UnicodeDecodeError:
current_app.logger.exception("Unicode decoding error.")
return ""
return line
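# Sketch (added for illustration): Greek letters become spelled-out names,
# each padded with spaces, so downstream tokenization sees word boundaries:
#
#     _replace_greek_characters(u'\u03c0 decay')
#     # -> u' Pi  decay'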
| gpl-2.0 |
jkrishnavs/linux | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
teosz/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/inputstream.py | 618 | 30855 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from six.moves import http_client
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
"""Buffering for streams that do not have buffering of their own
The buffer is implemented as a list of chunks on the assumption that
joining many strings will be slow since it is O(n**2)
"""
def __init__(self, stream):
self.stream = stream
self.buffer = []
self.position = [-1, 0] # chunk number, offset
def tell(self):
pos = 0
for chunk in self.buffer[:self.position[0]]:
pos += len(chunk)
pos += self.position[1]
return pos
def seek(self, pos):
assert pos <= self._bufferedBytes()
offset = pos
i = 0
while len(self.buffer[i]) < offset:
offset -= len(self.buffer[i])
i += 1
self.position = [i, offset]
def read(self, bytes):
if not self.buffer:
return self._readStream(bytes)
elif (self.position[0] == len(self.buffer) and
self.position[1] == len(self.buffer[-1])):
return self._readStream(bytes)
else:
return self._readFromBuffer(bytes)
def _bufferedBytes(self):
return sum([len(item) for item in self.buffer])
def _readStream(self, bytes):
data = self.stream.read(bytes)
self.buffer.append(data)
self.position[0] += 1
self.position[1] = len(data)
return data
def _readFromBuffer(self, bytes):
remainingBytes = bytes
rv = []
bufferIndex = self.position[0]
bufferOffset = self.position[1]
while bufferIndex < len(self.buffer) and remainingBytes != 0:
assert remainingBytes > 0
bufferedData = self.buffer[bufferIndex]
if remainingBytes <= len(bufferedData) - bufferOffset:
bytesToRead = remainingBytes
self.position = [bufferIndex, bufferOffset + bytesToRead]
else:
bytesToRead = len(bufferedData) - bufferOffset
self.position = [bufferIndex, len(bufferedData)]
bufferIndex += 1
rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
remainingBytes -= bytesToRead
bufferOffset = 0
if remainingBytes:
rv.append(self._readStream(remainingBytes))
return b"".join(rv)
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
if isinstance(source, http_client.HTTPResponse):
# Work around Python bug #20007: read(0) closes the connection.
# http://bugs.python.org/issue20007
isUnicode = False
elif hasattr(source, "read"):
isUnicode = isinstance(source.read(0), text_type)
else:
isUnicode = isinstance(source, text_type)
if isUnicode:
if encoding is not None:
raise TypeError("Cannot explicitly set an encoding with a unicode string")
return HTMLUnicodeInputStream(source)
else:
return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
class HTMLUnicodeInputStream(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Craziness
if len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
else:
self.reportCharacterErrors = self.characterErrorsUCS2
self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
# List of where new lines occur
self.newLines = [0]
self.charEncoding = ("utf-8", "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
self.reportCharacterErrors(data)
# Replace invalid characters
# Note U+0000 is dealt with in the tokenizer
data = self.replaceCharactersRegexp.sub("\ufffd", data)
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for i in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert(ord(c) < 128)
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not None:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
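# Illustrative sketch (not part of the original module): typical consumption
# of the unicode stream. All names used below are defined in this file; the
# example makes no external assumptions.
def _example_unicode_stream():
    stream = HTMLUnicodeInputStream("<p>hello\nworld</p>")
    first = stream.char()            # "<"
    tag = stream.charsUntil(">")     # "p" - stops before the ">"
    line, col = stream.position()    # (line, col) of the current position
    return first, tag, (line, col)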
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
parseMeta - Look for a <meta> element containing encoding information
"""
# Raw Stream - for unicode objects this will encode to utf-8 and set
# self.charEncoding as appropriate
self.rawStream = self.openStream(source)
HTMLUnicodeInputStream.__init__(self, self.rawStream)
self.charEncoding = (codecName(encoding), "certain")
# Encoding Information
# Number of bytes to use when looking for a meta element with
# encoding information
self.numBytesMeta = 512
# Number of bytes to use when using detecting encoding using chardet
self.numBytesChardet = 100
# Encoding to use if no other information can be found
self.defaultEncoding = "windows-1252"
# Detect encoding iff no explicit "transport level" encoding is supplied
if (self.charEncoding[0] is None):
self.charEncoding = self.detectEncoding(parseMeta, chardet)
# Call superclass
self.reset()
def reset(self):
self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
'replace')
HTMLUnicodeInputStream.reset(self)
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object or a byte string; byte strings are wrapped in an in-memory BytesIO.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except Exception:
stream = BufferedStream(stream)
return stream
def detectEncoding(self, parseMeta=True, chardet=True):
# First look for a BOM
# This will also read past the BOM if present
encoding = self.detectBOM()
confidence = "certain"
# If there is no BOM need to look for meta elements with encoding
# information
if encoding is None and parseMeta:
encoding = self.detectEncodingMeta()
confidence = "tentative"
# Guess with chardet, if available
if encoding is None and chardet:
confidence = "tentative"
try:
try:
from charade.universaldetector import UniversalDetector
except ImportError:
from chardet.universaldetector import UniversalDetector
buffers = []
detector = UniversalDetector()
while not detector.done:
buffer = self.rawStream.read(self.numBytesChardet)
assert isinstance(buffer, bytes)
if not buffer:
break
buffers.append(buffer)
detector.feed(buffer)
detector.close()
encoding = detector.result['encoding']
self.rawStream.seek(0)
except ImportError:
pass
# If all else fails use the default encoding
if encoding is None:
confidence = "tentative"
encoding = self.defaultEncoding
# Substitute for equivalent encodings:
encodingSub = {"iso-8859-1": "windows-1252"}
if encoding.lower() in encodingSub:
encoding = encodingSub[encoding.lower()]
return encoding, confidence
def changeEncoding(self, newEncoding):
assert self.charEncoding[1] != "certain"
newEncoding = codecName(newEncoding)
if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
newEncoding = "utf-8"
if newEncoding is None:
return
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certain")
else:
self.rawStream.seek(0)
self.reset()
self.charEncoding = (newEncoding, "certain")
raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
assert isinstance(string, bytes)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
self.rawStream.seek(encoding and seek or 0)
return encoding
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
assert isinstance(buffer, bytes)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
encoding = "utf-8"
return encoding
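# Illustrative sketch: with no explicit encoding and no BOM, the encoding is
# sniffed from the <meta> tag, so the confidence is only "tentative". The
# exact codec name returned depends on the encodings table this module imports.
def _example_binary_stream():
    stream = HTMLBinaryInputStream(b'<meta charset="utf-8"><p>ok</p>')
    return stream.charEncoding       # typically ("utf-8", "tentative")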
class EncodingBytes(bytes):
"""String-like object with an associated position and various extra methods
If the position is ever greater than the string length then an exception is
raised"""
def __new__(self, value):
assert isinstance(value, bytes)
return bytes.__new__(self, value.lower())
def __init__(self, value):
self._position = -1
def __iter__(self):
return self
def __next__(self):
p = self._position = self._position + 1
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
return self[p:p + 1]
def next(self):
# Py2 compat
return self.__next__()
def previous(self):
p = self._position
if p >= len(self):
raise StopIteration
elif p < 0:
raise TypeError
self._position = p = p - 1
return self[p:p + 1]
def setPosition(self, position):
if self._position >= len(self):
raise StopIteration
self._position = position
def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None
position = property(getPosition, setPosition)
def getCurrentByte(self):
return self[self.position:self.position + 1]
currentByte = property(getCurrentByte)
def skip(self, chars=spaceCharactersBytes):
"""Skip past a list of characters"""
p = self.position # use property for the error-checking
while p < len(self):
c = self[p:p + 1]
if c not in chars:
self._position = p
return c
p += 1
self._position = p
return None
def skipUntil(self, chars):
p = self.position
while p < len(self):
c = self[p:p + 1]
if c in chars:
self._position = p
return c
p += 1
self._position = p
return None
def matchBytes(self, bytes):
"""Look for a sequence of bytes at the start of a string. If the bytes
are found return True and advance the position to the byte after the
match. Otherwise return False and leave the position alone"""
p = self.position
data = self[p:p + len(bytes)]
rv = data.startswith(bytes)
if rv:
self.position += len(bytes)
return rv
def jumpTo(self, bytes):
"""Look for the next sequence of bytes matching a given sequence. If
a match is found advance the position to the last byte of the match"""
newPosition = self[self.position:].find(bytes)
if newPosition > -1:
# XXX: This is ugly, but I can't see a nicer way to fix this.
if self._position == -1:
self._position = 0
self._position += (newPosition + len(bytes) - 1)
return True
else:
raise StopIteration
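# Illustrative sketch: EncodingBytes lowercases its content and starts just
# before the first byte, so callers advance it with next()/skipUntil() before
# matching. The frozenset literal below is only for brevity.
def _example_encoding_bytes():
    data = EncodingBytes(b"X<META charset=utf-8>")
    next(data)                           # position 0, byte b"x" (lowercased)
    data.skipUntil(frozenset([b"<"]))    # stop on the b"<"
    matched = data.matchBytes(b"<meta")  # True; position moves past the match
    return matched, data.currentByte     # (True, b" ")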
class EncodingParser(object):
"""Mini parser for detecting character encoding from meta elements"""
def __init__(self, data):
"""string - the data to work on for encoding detection"""
self.data = EncodingBytes(data)
self.encoding = None
def getEncoding(self):
methodDispatch = (
(b"<!--", self.handleComment),
(b"<meta", self.handleMeta),
(b"</", self.handlePossibleEndTag),
(b"<!", self.handleOther),
(b"<?", self.handleOther),
(b"<", self.handlePossibleStartTag))
for byte in self.data:
keepParsing = True
for key, method in methodDispatch:
if self.data.matchBytes(key):
try:
keepParsing = method()
break
except StopIteration:
keepParsing = False
break
if not keepParsing:
break
return self.encoding
def handleComment(self):
"""Skip over comments"""
return self.data.jumpTo(b"-->")
def handleMeta(self):
if self.data.currentByte not in spaceCharactersBytes:
# <meta> not followed by whitespace: not a valid meta element, keep going
return True
# We have a valid meta element we want to search for attributes
hasPragma = False
pendingEncoding = None
while True:
# Try to find the next attribute after the current position
attr = self.getAttribute()
if attr is None:
return True
else:
if attr[0] == b"http-equiv":
hasPragma = attr[1] == b"content-type"
if hasPragma and pendingEncoding is not None:
self.encoding = pendingEncoding
return False
elif attr[0] == b"charset":
tentativeEncoding = attr[1]
codec = codecName(tentativeEncoding)
if codec is not None:
self.encoding = codec
return False
elif attr[0] == b"content":
contentParser = ContentAttrParser(EncodingBytes(attr[1]))
tentativeEncoding = contentParser.parse()
if tentativeEncoding is not None:
codec = codecName(tentativeEncoding)
if codec is not None:
if hasPragma:
self.encoding = codec
return False
else:
pendingEncoding = codec
def handlePossibleStartTag(self):
return self.handlePossibleTag(False)
def handlePossibleEndTag(self):
next(self.data)
return self.handlePossibleTag(True)
def handlePossibleTag(self, endTag):
data = self.data
if data.currentByte not in asciiLettersBytes:
# If the next byte is not an ascii letter either ignore this
# fragment (possible start tag case) or treat it according to
# handleOther
if endTag:
data.previous()
self.handleOther()
return True
c = data.skipUntil(spacesAngleBrackets)
if c == b"<":
# return to the first step in the overall "two step" algorithm
# reprocessing the < byte
data.previous()
else:
# Read all attributes
attr = self.getAttribute()
while attr is not None:
attr = self.getAttribute()
return True
def handleOther(self):
return self.data.jumpTo(b">")
def getAttribute(self):
"""Return a name,value pair for the next attribute in the stream,
if one is found, or None"""
data = self.data
# Step 1 (skip chars)
c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
assert c is None or len(c) == 1
# Step 2
if c in (b">", None):
return None
# Step 3
attrName = []
attrValue = []
# Step 4 attribute name
while True:
if c == b"=" and attrName:
break
elif c in spaceCharactersBytes:
# Step 6!
c = data.skip()
break
elif c in (b"/", b">"):
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrName.append(c.lower())
elif c is None:
return None
else:
attrName.append(c)
# Step 5
c = next(data)
# Step 7
if c != b"=":
data.previous()
return b"".join(attrName), b""
# Step 8
next(data)
# Step 9
c = data.skip()
# Step 10
if c in (b"'", b'"'):
# 10.1
quoteChar = c
while True:
# 10.2
c = next(data)
# 10.3
if c == quoteChar:
next(data)
return b"".join(attrName), b"".join(attrValue)
# 10.4
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
# 10.5
else:
attrValue.append(c)
elif c == b">":
return b"".join(attrName), b""
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
# Step 11
while True:
c = next(data)
if c in spacesAngleBrackets:
return b"".join(attrName), b"".join(attrValue)
elif c in asciiUppercaseBytes:
attrValue.append(c.lower())
elif c is None:
return None
else:
attrValue.append(c)
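# Illustrative sketch: feeding a small document prefix to the mini parser.
# The return value is a Python codec name looked up in this module's
# encodings table (typically "utf-8" for the label below).
def _example_encoding_parser():
    parser = EncodingParser(b'<html><meta charset="utf-8">')
    return parser.getEncoding()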
class ContentAttrParser(object):
def __init__(self, data):
assert isinstance(data, bytes)
self.data = data
def parse(self):
try:
# Check if the attr name is charset
# otherwise return
self.data.jumpTo(b"charset")
self.data.position += 1
self.data.skip()
if not self.data.currentByte == b"=":
# If there is no = sign keep looking for attrs
return None
self.data.position += 1
self.data.skip()
# Look for an encoding between matching quote marks
if self.data.currentByte in (b'"', b"'"):
quoteMark = self.data.currentByte
self.data.position += 1
oldPosition = self.data.position
if self.data.jumpTo(quoteMark):
return self.data[oldPosition:self.data.position]
else:
return None
else:
# Unquoted value
oldPosition = self.data.position
try:
self.data.skipUntil(spaceCharactersBytes)
return self.data[oldPosition:self.data.position]
except StopIteration:
# Return the whole remaining value
return self.data[oldPosition:]
except StopIteration:
return None
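# Illustrative sketch: extracting the charset from an HTTP-style content
# attribute value. The input must already be wrapped in EncodingBytes.
def _example_content_attr():
    parser = ContentAttrParser(EncodingBytes(b"text/html; charset=utf-8"))
    return parser.parse()            # b"utf-8"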
def codecName(encoding):
"""Return the python codec name corresponding to an encoding or None if the
string doesn't correspond to a valid encoding."""
if isinstance(encoding, bytes):
try:
encoding = encoding.decode("ascii")
except UnicodeDecodeError:
return None
if encoding:
canonicalName = ascii_punctuation_re.sub("", encoding).lower()
return encodings.get(canonicalName, None)
else:
return None
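# Illustrative sketch: codecName canonicalises encoding labels by stripping
# ASCII punctuation and lowercasing before the table lookup, so variant
# spellings of a label resolve to one codec (or None if unknown).
def _example_codec_name():
    return codecName("UTF_8"), codecName(b"no-such-encoding")  # (codec, None)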
| mpl-2.0 |
ovnicraft/odoo | openerp/tools/translate.py | 43 | 45413 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import codecs
import csv
import fnmatch
import inspect
import locale
import os
import openerp.sql_db as sql_db
import re
import logging
import tarfile
import tempfile
import threading
from babel.messages import extract
from collections import defaultdict
from datetime import datetime
from lxml import etree
from os.path import join
import config
import misc
from misc import SKIPPED_ELEMENT_TYPES
import osutil
import openerp
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# used to notify web client that these translations should be loaded in the UI
WEB_TRANSLATION_COMMENT = "openerp-web"
SKIPPED_ELEMENTS = ('script', 'style')
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
'ar_SA': 'Arabic_Saudi Arabia',
'eu_ES': 'Basque_Spain',
'be_BY': 'Belarusian_Belarus',
'bs_BA': 'Bosnian_Bosnia and Herzegovina',
'bg_BG': 'Bulgarian_Bulgaria',
'ca_ES': 'Catalan_Spain',
'hr_HR': 'Croatian_Croatia',
'zh_CN': 'Chinese_China',
'zh_TW': 'Chinese_Taiwan',
'cs_CZ': 'Czech_Czech Republic',
'da_DK': 'Danish_Denmark',
'nl_NL': 'Dutch_Netherlands',
'et_EE': 'Estonian_Estonia',
'fa_IR': 'Farsi_Iran',
'ph_PH': 'Filipino_Philippines',
'fi_FI': 'Finnish_Finland',
'fr_FR': 'French_France',
'fr_BE': 'French_France',
'fr_CH': 'French_France',
'fr_CA': 'French_France',
'ga': 'Scottish Gaelic',
'gl_ES': 'Galician_Spain',
'ka_GE': 'Georgian_Georgia',
'de_DE': 'German_Germany',
'el_GR': 'Greek_Greece',
'gu': 'Gujarati_India',
'he_IL': 'Hebrew_Israel',
'hi_IN': 'Hindi',
'hu': 'Hungarian_Hungary',
'is_IS': 'Icelandic_Iceland',
'id_ID': 'Indonesian_indonesia',
'it_IT': 'Italian_Italy',
'ja_JP': 'Japanese_Japan',
'kn_IN': 'Kannada',
'km_KH': 'Khmer',
'ko_KR': 'Korean_Korea',
'lo_LA': 'Lao_Laos',
'lt_LT': 'Lithuanian_Lithuania',
'lat': 'Latvian_Latvia',
'ml_IN': 'Malayalam_India',
'mi_NZ': 'Maori',
'mn': 'Cyrillic_Mongolian',
'no_NO': 'Norwegian_Norway',
'nn_NO': 'Norwegian-Nynorsk_Norway',
'pl': 'Polish_Poland',
'pt_PT': 'Portuguese_Portugal',
'pt_BR': 'Portuguese_Brazil',
'ro_RO': 'Romanian_Romania',
'ru_RU': 'Russian_Russia',
'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
'sk_SK': 'Slovak_Slovakia',
'sl_SI': 'Slovenian_Slovenia',
# Should find more specific locales for Spanish-speaking countries,
# but this fallback is better than nothing
'es_AR': 'Spanish_Spain',
'es_BO': 'Spanish_Spain',
'es_CL': 'Spanish_Spain',
'es_CO': 'Spanish_Spain',
'es_CR': 'Spanish_Spain',
'es_DO': 'Spanish_Spain',
'es_EC': 'Spanish_Spain',
'es_ES': 'Spanish_Spain',
'es_GT': 'Spanish_Spain',
'es_HN': 'Spanish_Spain',
'es_MX': 'Spanish_Spain',
'es_NI': 'Spanish_Spain',
'es_PA': 'Spanish_Spain',
'es_PE': 'Spanish_Spain',
'es_PR': 'Spanish_Spain',
'es_PY': 'Spanish_Spain',
'es_SV': 'Spanish_Spain',
'es_UY': 'Spanish_Spain',
'es_VE': 'Spanish_Spain',
'sv_SE': 'Swedish_Sweden',
'ta_IN': 'English_Australia',
'th_TH': 'Thai_Thailand',
'tr_TR': 'Turkish_Turkey',
'uk_UA': 'Ukrainian_Ukraine',
'vi_VN': 'Vietnamese_Viet Nam',
'tlh_TLH': 'Klingon',
}
# These are not all english small words, just those that could potentially be isolated within views
ENGLISH_SMALL_WORDS = set("as at by do go if in me no of ok on or to up us we".split())
class UNIX_LINE_TERMINATOR(csv.excel):
lineterminator = '\n'
csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
#
# Warning: better use self.pool.get('ir.translation')._get_source if you can
#
def translate(cr, name, source_type, lang, source=None):
if source and name:
cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s and src=%s', (lang, source_type, str(name), source))
elif name:
cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s', (lang, source_type, str(name)))
elif source:
cr.execute('select value from ir_translation where lang=%s and type=%s and src=%s', (lang, source_type, source))
res_trans = cr.fetchone()
res = res_trans and res_trans[0] or False
return res
class GettextAlias(object):
def _get_db(self):
# find current DB based on thread/worker db name (see netsvc)
db_name = getattr(threading.currentThread(), 'dbname', None)
if db_name:
return sql_db.db_connect(db_name)
def _get_cr(self, frame, allow_create=True):
# try, in order: cr, cursor, self.env.cr, self.cr,
# request.env.cr
if 'cr' in frame.f_locals:
return frame.f_locals['cr'], False
if 'cursor' in frame.f_locals:
return frame.f_locals['cursor'], False
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
return s.env.cr, False
if hasattr(s, 'cr'):
return s.cr, False
try:
from openerp.http import request
return request.env.cr, False
except RuntimeError:
pass
if allow_create:
# create a new cursor
db = self._get_db()
if db is not None:
return db.cursor(), True
return None, False
def _get_uid(self, frame):
# try, in order: uid, user, self.env.uid
if 'uid' in frame.f_locals:
return frame.f_locals['uid']
if 'user' in frame.f_locals:
return int(frame.f_locals['user']) # user may be a record
s = frame.f_locals.get('self')
return s.env.uid
def _get_lang(self, frame):
# try, in order: context.get('lang'), kwargs['context'].get('lang'),
# self.env.lang, self.localcontext.get('lang'), request.env.lang
lang = None
if frame.f_locals.get('context'):
lang = frame.f_locals['context'].get('lang')
if not lang:
kwargs = frame.f_locals.get('kwargs', {})
if kwargs.get('context'):
lang = kwargs['context'].get('lang')
if not lang:
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
lang = s.env.lang
if not lang:
if hasattr(s, 'localcontext'):
lang = s.localcontext.get('lang')
if not lang:
try:
from openerp.http import request
lang = request.env.lang
except RuntimeError:
pass
if not lang:
# Last resort: attempt to guess the language of the user
# Pitfall: some operations are performed in sudo mode, and we
# don't know the originial uid, so the language may
# be wrong when the admin language differs.
pool = getattr(s, 'pool', None)
(cr, dummy) = self._get_cr(frame, allow_create=False)
uid = self._get_uid(frame)
if pool and cr and uid:
lang = pool['res.users'].context_get(cr, uid)['lang']
return lang
def __call__(self, source):
res = source
cr = None
is_new_cr = False
try:
frame = inspect.currentframe()
if frame is None:
return source
frame = frame.f_back
if not frame:
return source
lang = self._get_lang(frame)
if lang:
cr, is_new_cr = self._get_cr(frame)
if cr:
# Try to use ir.translation to benefit from global cache if possible
registry = openerp.registry(cr.dbname)
res = registry['ir.translation']._get_source(cr, SUPERUSER_ID, None, ('code','sql_constraint'), lang, source)
else:
_logger.debug('no context cursor detected, skipping translation for "%r"', source)
else:
_logger.debug('no translation language detected, skipping translation for "%r" ', source)
except Exception:
_logger.debug('translation went wrong for "%r", skipped', source)
# if so, double-check the root/base translations filenames
finally:
if cr and is_new_cr:
cr.close()
return res
_ = GettextAlias()
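# Usage sketch (assumption: called from model code, where the calling frame
# carries a cursor and user): _() resolves the current language from the frame
# and queries ir.translation; outside such a frame it falls back to returning
# the source string unchanged.
def _example_gettext():
    return _('Document saved.')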
def quote(s):
"""Returns quoted PO term string, with special PO characters escaped"""
assert r"\n" not in s, "Translation terms may not include escaped newlines ('\\n'), please use only literal newlines! (in '%s')" % s
return '"%s"' % s.replace('\\','\\\\') \
.replace('"','\\"') \
.replace('\n', '\\n"\n"')
re_escaped_char = re.compile(r"(\\.)")
re_escaped_replacements = {'n': '\n', }
def _sub_replacement(match_obj):
return re_escaped_replacements.get(match_obj.group(1)[1], match_obj.group(1)[1])
def unquote(str):
"""Returns unquoted PO term string, with special PO characters unescaped"""
return re_escaped_char.sub(_sub_replacement, str[1:-1])
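# Illustrative round-trip: quote() escapes backslashes, double quotes and
# newlines for PO output, and unquote() reverses one quoted PO line.
def _example_po_quoting():
    quoted = quote('He said "hi"')       # '"He said \\"hi\\""'
    return unquote(quoted)               # 'He said "hi"'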
# class to handle po files
class TinyPoFile(object):
def __init__(self, buffer):
self.buffer = buffer
def warn(self, msg, *args):
_logger.warning(msg, *args)
def __iter__(self):
self.buffer.seek(0)
self.lines = self._get_lines()
self.lines_count = len(self.lines)
self.first = True
self.extra_lines= []
return self
def _get_lines(self):
lines = self.buffer.readlines()
# remove the BOM (Byte Order Mark):
if len(lines):
lines[0] = unicode(lines[0], 'utf8').lstrip(unicode(codecs.BOM_UTF8, "utf8"))
lines.append('') # ensure that the file ends with at least an empty line
return lines
def cur_line(self):
return self.lines_count - len(self.lines)
def next(self):
trans_type = name = res_id = source = trad = None
if self.extra_lines:
trans_type, name, res_id, source, trad, comments = self.extra_lines.pop(0)
if not res_id:
res_id = '0'
else:
comments = []
targets = []
line = None
fuzzy = False
while not line:
if 0 == len(self.lines):
raise StopIteration()
line = self.lines.pop(0).strip()
while line.startswith('#'):
if line.startswith('#~ '):
break
if line.startswith('#.'):
line = line[2:].strip()
if not line.startswith('module:'):
comments.append(line)
elif line.startswith('#:'):
# Process the `reference` comments. Each line can specify
# multiple targets (e.g. model, view, code, selection,
# ...). For each target, we will return an additional
# entry.
for lpart in line[2:].strip().split(' '):
trans_info = lpart.strip().split(':',2)
if trans_info and len(trans_info) == 2:
# the translation trans_type is missing; this is expected, as it
# is not part of the GetText standard. Default: 'code'
trans_info[:0] = ['code']
if trans_info and len(trans_info) == 3:
# this is a ref line holding the destination info (model, field, record)
targets.append(trans_info)
elif line.startswith('#,') and (line[2:].strip() == 'fuzzy'):
fuzzy = True
line = self.lines.pop(0).strip()
if not self.lines:
raise StopIteration()
while not line:
# allow empty lines between comments and msgid
line = self.lines.pop(0).strip()
if line.startswith('#~ '):
while line.startswith('#~ ') or not line.strip():
if 0 == len(self.lines):
raise StopIteration()
line = self.lines.pop(0)
# This has been a deprecated entry, don't return anything
return self.next()
if not line.startswith('msgid'):
raise Exception("malformed file: bad line: %s" % line)
source = unquote(line[6:])
line = self.lines.pop(0).strip()
if not source and self.first:
self.first = False
# if the source is "" and it's the first msgid, it's the special
# msgstr carrying the metadata about the translation and the
# translator; we skip it
self.extra_lines = []
while line:
line = self.lines.pop(0).strip()
return self.next()
while not line.startswith('msgstr'):
if not line:
raise Exception('malformed file at %d'% self.cur_line())
source += unquote(line)
line = self.lines.pop(0).strip()
trad = unquote(line[7:])
line = self.lines.pop(0).strip()
while line:
trad += unquote(line)
line = self.lines.pop(0).strip()
if targets and not fuzzy:
# Use the first target for the current entry (returned at the
# end of this next() call), and keep the others to generate
# additional entries (returned the next next() calls).
trans_type, name, res_id = targets.pop(0)
for t, n, r in targets:
if t == trans_type == 'code': continue
self.extra_lines.append((t, n, r, source, trad, comments))
if name is None:
if not fuzzy:
self.warn('Missing "#:" formatted comment at line %d for the following source:\n\t%s',
self.cur_line(), source[:30])
return self.next()
return trans_type, name, res_id, source, trad, '\n'.join(comments)
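# Usage sketch: TinyPoFile is an iterator; each entry yields
# (type, name, res_id, source, translation, comments). The path argument is a
# placeholder for any module .po file.
def _example_iter_po(po_path):
    with open(po_path) as handle:
        return [(name, src, trad)
                for ttype, name, res_id, src, trad, comments in TinyPoFile(handle)]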
def write_infos(self, modules):
import openerp.release as release
self.buffer.write("# Translation of %(project)s.\n" \
"# This file contains the translation of the following modules:\n" \
"%(modules)s" \
"#\n" \
"msgid \"\"\n" \
"msgstr \"\"\n" \
'''"Project-Id-Version: %(project)s %(version)s\\n"\n''' \
'''"Report-Msgid-Bugs-To: \\n"\n''' \
'''"POT-Creation-Date: %(now)s\\n"\n''' \
'''"PO-Revision-Date: %(now)s\\n"\n''' \
'''"Last-Translator: <>\\n"\n''' \
'''"Language-Team: \\n"\n''' \
'''"MIME-Version: 1.0\\n"\n''' \
'''"Content-Type: text/plain; charset=UTF-8\\n"\n''' \
'''"Content-Transfer-Encoding: \\n"\n''' \
'''"Plural-Forms: \\n"\n''' \
"\n"
% { 'project': release.description,
'version': release.version,
'modules': reduce(lambda s, m: s + "#\t* %s\n" % m, modules, ""),
'now': datetime.utcnow().strftime('%Y-%m-%d %H:%M')+"+0000",
}
)
def write(self, modules, tnrs, source, trad, comments=None):
plural = len(modules) > 1 and 's' or ''
self.buffer.write("#. module%s: %s\n" % (plural, ', '.join(modules)))
if comments:
self.buffer.write(''.join(('#. %s\n' % c for c in comments)))
code = False
for typy, name, res_id in tnrs:
self.buffer.write("#: %s:%s:%s\n" % (typy, name, res_id))
if typy == 'code':
code = True
if code:
# only strings in python code are python formated
self.buffer.write("#, python-format\n")
if not isinstance(trad, unicode):
trad = unicode(trad, 'utf8')
if not isinstance(source, unicode):
source = unicode(source, 'utf8')
msg = "msgid %s\n" \
"msgstr %s\n\n" \
% (quote(source), quote(trad))
self.buffer.write(msg.encode('utf8'))
# Methods to export the translation file
def trans_export(lang, modules, buffer, format, cr):
def _process(format, modules, rows, buffer, lang):
if format == 'csv':
writer = csv.writer(buffer, 'UNIX')
# write header first
writer.writerow(("module","type","name","res_id","src","value"))
for module, type, name, res_id, src, trad, comments in rows:
# Comments are ignored by the CSV writer
writer.writerow((module, type, name, res_id, src, trad))
elif format == 'po':
writer = TinyPoFile(buffer)
writer.write_infos(modules)
# we now group the translations by source. That means one translation per source.
grouped_rows = {}
for module, type, name, res_id, src, trad, comments in rows:
row = grouped_rows.setdefault(src, {})
row.setdefault('modules', set()).add(module)
if not row.get('translation') and trad != src:
row['translation'] = trad
row.setdefault('tnrs', []).append((type, name, res_id))
row.setdefault('comments', set()).update(comments)
for src, row in sorted(grouped_rows.items()):
if not lang:
# translation template, so no translation value
row['translation'] = ''
elif not row.get('translation'):
row['translation'] = src
writer.write(row['modules'], row['tnrs'], src, row['translation'], row['comments'])
elif format == 'tgz':
rows_by_module = {}
for row in rows:
module = row[0]
rows_by_module.setdefault(module, []).append(row)
tmpdir = tempfile.mkdtemp()
for mod, modrows in rows_by_module.items():
tmpmoddir = join(tmpdir, mod, 'i18n')
os.makedirs(tmpmoddir)
pofilename = (lang if lang else mod) + ".po" + ('t' if not lang else '')
buf = file(join(tmpmoddir, pofilename), 'w')
_process('po', [mod], modrows, buf, lang)
buf.close()
tar = tarfile.open(fileobj=buffer, mode='w|gz')
tar.add(tmpdir, '')
tar.close()
else:
raise Exception(_('Unrecognized extension: must be one of '
'.csv, .po, or .tgz (received .%s).') % format)
translations = trans_generate(lang, modules, cr)
modules = set(t[0] for t in translations)
_process(format, modules, translations, buffer, lang)
del translations
def trans_parse_xsl(de):
return list(set(trans_parse_xsl_aux(de, False)))
def trans_parse_xsl_aux(de, t):
res = []
for n in de:
t = t or n.get("t")
if t:
if isinstance(n, SKIPPED_ELEMENT_TYPES) or n.tag.startswith('{http://www.w3.org/1999/XSL/Transform}'):
continue
if n.text:
l = n.text.strip().replace('\n',' ')
if len(l):
res.append(l.encode("utf8"))
if n.tail:
l = n.tail.strip().replace('\n',' ')
if len(l):
res.append(l.encode("utf8"))
res.extend(trans_parse_xsl_aux(n, t))
return res
def trans_parse_rml(de):
res = []
for n in de:
for m in n:
if isinstance(m, SKIPPED_ELEMENT_TYPES) or not m.text:
continue
string_list = [s.replace('\n', ' ').strip() for s in re.split('\[\[.+?\]\]', m.text)]
for s in string_list:
if s:
res.append(s.encode("utf8"))
res.extend(trans_parse_rml(n))
return res
def _push(callback, term, source_line):
""" Sanity check before pushing translation terms """
term = (term or "").strip().encode('utf8')
# Avoid non-char tokens like ':' '...' '.00' etc.
if len(term) > 8 or any(x.isalpha() for x in term):
callback(term, source_line)
def trans_parse_view(element, callback):
""" Helper method to recursively walk an etree document representing a
regular view and call ``callback(term)`` for each translatable term
that is found in the document.
:param ElementTree element: root of etree document to extract terms from
:param callable callback: a callable in the form ``f(term, source_line)``,
that will be called for each extracted term.
"""
for el in element.iter():
if (not isinstance(el, SKIPPED_ELEMENT_TYPES)
and el.tag.lower() not in SKIPPED_ELEMENTS
and el.get("translation", '').strip() != "off"
and el.text):
_push(callback, el.text, el.sourceline)
if el.tail:
_push(callback, el.tail, el.sourceline)
for attr in ('string', 'help', 'sum', 'confirm', 'placeholder'):
value = el.get(attr)
if value:
_push(callback, value, el.sourceline)
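# Usage sketch: collecting the translatable terms of a view arch with a
# simple callback; the arch string below is illustrative.
def _example_view_terms():
    terms = []
    root = etree.XML('<form string="Partner Form"><field name="name"/></form>')
    trans_parse_view(root, lambda term, sourceline: terms.append(term))
    return terms                      # ['Partner Form']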
# tests whether an object is in a list of modules
def in_modules(object_name, modules):
if 'all' in modules:
return True
module_dict = {
'ir': 'base',
'res': 'base',
'workflow': 'base',
}
module = object_name.split('.')[0]
module = module_dict.get(module, module)
return module in modules
def _extract_translatable_qweb_terms(element, callback):
""" Helper method to walk an etree document representing
a QWeb template, and call ``callback(term)`` for each
translatable term that is found in the document.
:param etree._Element element: root of etree document to extract terms from
:param Callable callback: a callable in the form ``f(term, source_line)``,
that will be called for each extracted term.
"""
# not using elementTree.iterparse because we need to skip sub-trees in case
# the ancestor element had a reason to be skipped
for el in element:
if isinstance(el, SKIPPED_ELEMENT_TYPES): continue
if (el.tag.lower() not in SKIPPED_ELEMENTS
and "t-js" not in el.attrib
and not ("t-jquery" in el.attrib and "t-operation" not in el.attrib)
and el.get("t-translation", '').strip() != "off"):
_push(callback, el.text, el.sourceline)
for att in ('title', 'alt', 'label', 'placeholder'):
if att in el.attrib:
_push(callback, el.attrib[att], el.sourceline)
_extract_translatable_qweb_terms(el, callback)
_push(callback, el.tail, el.sourceline)
def babel_extract_qweb(fileobj, keywords, comment_tags, options):
"""Babel message extractor for qweb template files.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should
be recognized as translation functions
:param comment_tags: a list of translator tags to search for and
include in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)``
tuples
:rtype: Iterable
"""
result = []
def handle_text(text, lineno):
result.append((lineno, None, text, []))
tree = etree.parse(fileobj)
_extract_translatable_qweb_terms(tree.getroot(), handle_text)
return result
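# Usage sketch: the extractor can be driven directly on one template file;
# in normal use Babel invokes it through the extraction method map by its
# dotted path (openerp.tools.translate:babel_extract_qweb).
def _example_extract_qweb(template_path):
    with open(template_path) as fileobj:
        return babel_extract_qweb(fileobj, keywords=None, comment_tags=None,
                                  options=None)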
def trans_generate(lang, modules, cr):
dbname = cr.dbname
registry = openerp.registry(dbname)
trans_obj = registry['ir.translation']
model_data_obj = registry['ir.model.data']
uid = 1
query = 'SELECT name, model, res_id, module' \
' FROM ir_model_data'
query_models = """SELECT m.id, m.model, imd.module
FROM ir_model AS m, ir_model_data AS imd
WHERE m.id = imd.res_id AND imd.model = 'ir.model' """
if 'all_installed' in modules:
query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') '
query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') "
query_param = None
if 'all' not in modules:
query += ' WHERE module IN %s'
query_models += ' AND imd.module in %s'
query_param = (tuple(modules),)
query += ' ORDER BY module, model, name'
query_models += ' ORDER BY module, model'
cr.execute(query, query_param)
_to_translate = set()
def push_translation(module, type, name, id, source, comments=None):
# empty and one-letter terms are ignored, they probably are not meant to be
# translated, and would be very hard to translate anyway.
if not source or len(source.strip()) <= 1:
return
tnx = (module, source, name, id, type, tuple(comments or ()))
_to_translate.add(tnx)
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
def push(mod, type, name, res_id, term):
term = (term or '').strip()
if len(term) > 2 or term in ENGLISH_SMALL_WORDS:
push_translation(mod, type, name, res_id, term)
def get_root_view(xml_id):
view = model_data_obj.xmlid_to_object(cr, uid, xml_id)
if view:
while view.mode != 'primary':
view = view.inherit_id
xml_id = view.get_external_id(cr, uid).get(view.id, xml_id)
return xml_id
for (xml_name,model,res_id,module) in cr.fetchall():
module = encode(module)
model = encode(model)
xml_name = "%s.%s" % (module, encode(xml_name))
if model not in registry:
_logger.error("Unable to find object %r", model)
continue
Model = registry[model]
if not Model._translate:
# explicitly disabled
continue
obj = Model.browse(cr, uid, res_id)
if not obj.exists():
_logger.warning("Unable to find object %r with id %d", model, res_id)
continue
if model=='ir.ui.view':
d = etree.XML(encode(obj.arch))
if obj.type == 'qweb':
view_id = get_root_view(xml_name)
push_qweb = lambda t,l: push(module, 'view', 'website', view_id, t)
_extract_translatable_qweb_terms(d, push_qweb)
else:
push_view = lambda t,l: push(module, 'view', obj.model, xml_name, t)
trans_parse_view(d, push_view)
elif model=='ir.actions.wizard':
pass # TODO Can model really be 'ir.actions.wizard' ?
elif model=='ir.model.fields':
try:
field_name = encode(obj.name)
except AttributeError, exc:
_logger.error("name error in %s: %s", xml_name, str(exc))
continue
objmodel = registry.get(obj.model)
if (objmodel is None or field_name not in objmodel._columns
or not objmodel._translate):
continue
field_def = objmodel._columns[field_name]
name = "%s,%s" % (encode(obj.model), field_name)
push_translation(module, 'field', name, 0, encode(field_def.string))
if field_def.help:
push_translation(module, 'help', name, 0, encode(field_def.help))
if field_def.translate:
ids = objmodel.search(cr, uid, [])
obj_values = objmodel.read(cr, uid, ids, [field_name])
for obj_value in obj_values:
res_id = obj_value['id']
if obj.name in ('ir.model', 'ir.ui.menu'):
res_id = 0
model_data_ids = model_data_obj.search(cr, uid, [
('model', '=', model),
('res_id', '=', res_id),
])
if not model_data_ids:
push_translation(module, 'model', name, 0, encode(obj_value[field_name]))
if hasattr(field_def, 'selection') and isinstance(field_def.selection, (list, tuple)):
for dummy, val in field_def.selection:
push_translation(module, 'selection', name, 0, encode(val))
elif model=='ir.actions.report.xml':
name = encode(obj.report_name)
fname = ""
if obj.report_rml:
fname = obj.report_rml
parse_func = trans_parse_rml
report_type = "report"
elif obj.report_xsl:
fname = obj.report_xsl
parse_func = trans_parse_xsl
report_type = "xsl"
if fname and obj.report_type in ('pdf', 'xsl'):
try:
report_file = misc.file_open(fname)
try:
d = etree.parse(report_file)
for t in parse_func(d.iter()):
push_translation(module, report_type, name, 0, t)
finally:
report_file.close()
except (IOError, etree.XMLSyntaxError):
_logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
for field_name, field_def in obj._columns.items():
if model == 'ir.model' and field_name == 'name' and obj.name == obj.model:
# ignore model name if it is the technical one, nothing to translate
continue
if field_def.translate:
name = model + "," + field_name
try:
term = obj[field_name] or ''
except Exception:
term = ''
push_translation(module, 'model', name, xml_name, encode(term))
# End of data for ir.model.data query results
cr.execute(query_models, query_param)
def push_constraint_msg(module, term_type, model, msg):
if not hasattr(msg, '__call__'):
push_translation(encode(module), term_type, encode(model), 0, encode(msg))
def push_local_constraints(module, model, cons_type='sql_constraints'):
"""Climb up the class hierarchy and ignore inherited constraints
from other modules"""
term_type = 'sql_constraint' if cons_type == 'sql_constraints' else 'constraint'
msg_pos = 2 if cons_type == 'sql_constraints' else 1
for cls in model.__class__.__mro__:
if getattr(cls, '_module', None) != module:
continue
constraints = getattr(cls, '_local_' + cons_type, [])
for constraint in constraints:
push_constraint_msg(module, term_type, model._name, constraint[msg_pos])
for (_, model, module) in cr.fetchall():
if model not in registry:
_logger.error("Unable to find object %r", model)
continue
model_obj = registry[model]
if model_obj._constraints:
push_local_constraints(module, model_obj, 'constraints')
if model_obj._sql_constraints:
push_local_constraints(module, model_obj, 'sql_constraints')
installed_modules = map(
lambda m: m['name'],
registry['ir.module.module'].search_read(cr, uid, [('state', '=', 'installed')], fields=['name']))
path_list = [(path, True) for path in openerp.modules.module.ad_paths]
# Also scan these non-addon paths
for bin_path in ['osv', 'report', 'modules', 'service', 'tools']:
path_list.append((os.path.join(config.config['root_path'], bin_path), True))
# non-recursive scan for individual files in root directory but without
# scanning subdirectories that may contain addons
path_list.append((config.config['root_path'], False))
_logger.debug("Scanning modules at paths: %s", path_list)
def get_module_from_path(path):
for (mp, rec) in path_list:
if rec and path.startswith(mp) and os.path.dirname(path) != mp:
path = path[len(mp)+1:]
return path.split(os.path.sep)[0]
return 'base' # files that are not in a module are considered as being in 'base' module
def verified_module_filepaths(fname, path, root):
fabsolutepath = join(root, fname)
frelativepath = fabsolutepath[len(path):]
display_path = "addons%s" % frelativepath
module = get_module_from_path(fabsolutepath)
if ('all' in modules or module in modules) and module in installed_modules:
if os.path.sep != '/':
display_path = display_path.replace(os.path.sep, '/')
return module, fabsolutepath, frelativepath, display_path
return None, None, None, None
def babel_extract_terms(fname, path, root, extract_method="python", trans_type='code',
extra_comments=None, extract_keywords={'_': None}):
module, fabsolutepath, _, display_path = verified_module_filepaths(fname, path, root)
extra_comments = extra_comments or []
if not module: return
src_file = open(fabsolutepath, 'r')
try:
for extracted in extract.extract(extract_method, src_file,
keywords=extract_keywords):
# Babel 0.9.6 yields lineno, message, comments
# Babel 1.3 yields lineno, message, comments, context
lineno, message, comments = extracted[:3]
push_translation(module, trans_type, display_path, lineno,
encode(message), comments + extra_comments)
except Exception:
_logger.exception("Failed to extract terms from %s", fabsolutepath)
finally:
src_file.close()
for (path, recursive) in path_list:
_logger.debug("Scanning files of modules at %s", path)
for root, dummy, files in osutil.walksymlinks(path):
for fname in fnmatch.filter(files, '*.py'):
babel_extract_terms(fname, path, root)
# mako provides a babel extractor: http://docs.makotemplates.org/en/latest/usage.html#babel
for fname in fnmatch.filter(files, '*.mako'):
babel_extract_terms(fname, path, root, 'mako', trans_type='report')
# Javascript source files in the static/src/js directory, rest is ignored (libs)
if fnmatch.fnmatch(root, '*/static/src/js*'):
for fname in fnmatch.filter(files, '*.js'):
babel_extract_terms(fname, path, root, 'javascript',
extra_comments=[WEB_TRANSLATION_COMMENT],
extract_keywords={'_t': None, '_lt': None})
# QWeb template files
if fnmatch.fnmatch(root, '*/static/src/xml*'):
for fname in fnmatch.filter(files, '*.xml'):
babel_extract_terms(fname, path, root, 'openerp.tools.translate:babel_extract_qweb',
extra_comments=[WEB_TRANSLATION_COMMENT])
if not recursive:
# due to topdown, first iteration is in first level
break
out = []
# translate strings marked as to be translated
for module, source, name, id, type, comments in sorted(_to_translate):
trans = '' if not lang else trans_obj._get_source(cr, uid, name, type, lang, source)
out.append((module, type, name, id, source, encode(trans) or '', comments))
return out
def trans_load(cr, filename, lang, verbose=True, module_name=None, context=None):
try:
fileobj = misc.file_open(filename)
_logger.info("loading %s", filename)
fileformat = os.path.splitext(filename)[-1][1:].lower()
result = trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, module_name=module_name, context=context)
fileobj.close()
return result
except IOError:
if verbose:
_logger.error("couldn't read translation file %s", filename)
return None
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, module_name=None, context=None):
"""Populates the ir_translation table."""
if verbose:
_logger.info('loading translation file for language %s', lang)
if context is None:
context = {}
db_name = cr.dbname
registry = openerp.registry(db_name)
lang_obj = registry.get('res.lang')
trans_obj = registry.get('ir.translation')
iso_lang = misc.get_iso_codes(lang)
try:
ids = lang_obj.search(cr, SUPERUSER_ID, [('code','=', lang)])
if not ids:
# lets create the language with locale information
lang_obj.load_lang(cr, SUPERUSER_ID, lang=lang, lang_name=lang_name)
# Parse also the POT: it will possibly provide additional targets.
# (Because the POT comments are correct on Launchpad but not the
# PO comments due to a Launchpad limitation. See LP bug 933496.)
pot_reader = []
# now, the serious things: we read the language file
fileobj.seek(0)
if fileformat == 'csv':
reader = csv.reader(fileobj, quotechar='"', delimiter=',')
# read the first line of the file (it contains columns titles)
for row in reader:
fields = row
break
elif fileformat == 'po':
reader = TinyPoFile(fileobj)
fields = ['type', 'name', 'res_id', 'src', 'value', 'comments']
# Make a reader for the POT file and be somewhat defensive for the
# stable branch.
if fileobj.name.endswith('.po'):
try:
# Normally the path looks like /path/to/xxx/i18n/lang.po
# and we try to find the corresponding
# /path/to/xxx/i18n/xxx.pot file.
# (Sometimes we have 'i18n_extra' instead of just 'i18n')
addons_module_i18n, _ = os.path.split(fileobj.name)
addons_module, i18n_dir = os.path.split(addons_module_i18n)
addons, module = os.path.split(addons_module)
pot_handle = misc.file_open(os.path.join(
addons, module, i18n_dir, module + '.pot'))
pot_reader = TinyPoFile(pot_handle)
except:
pass
else:
_logger.error('Bad file format: %s', fileformat)
raise Exception(_('Bad file format'))
# Read the POT references, and keep them indexed by source string.
class Target(object):
def __init__(self):
self.value = None
self.targets = set() # set of (type, name, res_id)
self.comments = None
pot_targets = defaultdict(Target)
for type, name, res_id, src, _, comments in pot_reader:
if type is not None:
target = pot_targets[src]
target.targets.add((type, name, res_id))
target.comments = comments
# read the rest of the file
irt_cursor = trans_obj._get_import_cursor(cr, SUPERUSER_ID, context=context)
def process_row(row):
"""Process a single PO (or POT) entry."""
# dictionary which holds values for this line of the csv file
# {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
# 'src': ..., 'value': ..., 'module':...}
dic = dict.fromkeys(('type', 'name', 'res_id', 'src', 'value',
'comments', 'imd_model', 'imd_name', 'module'))
dic['lang'] = lang
dic.update(zip(fields, row))
# discard the target from the POT targets.
src = dic['src']
if src in pot_targets:
target = pot_targets[src]
target.value = dic['value']
target.targets.discard((dic['type'], dic['name'], dic['res_id']))
# This would skip terms that fail to specify a res_id
res_id = dic['res_id']
if not res_id:
return
if isinstance(res_id, (int, long)) or \
(isinstance(res_id, basestring) and res_id.isdigit()):
dic['res_id'] = int(res_id)
if module_name:
dic['module'] = module_name
else:
# res_id is an xml id
dic['res_id'] = None
dic['imd_model'] = dic['name'].split(',')[0]
if '.' in res_id:
dic['module'], dic['imd_name'] = res_id.split('.', 1)
else:
dic['module'], dic['imd_name'] = module_name, res_id
irt_cursor.push(dic)
# First process the entries from the PO file (doing so also fills/removes
# the entries from the POT file).
for row in reader:
process_row(row)
# Then process the entries implied by the POT file (which is more
# correct w.r.t. the targets) if some of them remain.
pot_rows = []
for src, target in pot_targets.iteritems():
if target.value:
for type, name, res_id in target.targets:
pot_rows.append((type, name, res_id, src, target.value, target.comments))
pot_targets.clear()
for row in pot_rows:
process_row(row)
irt_cursor.finish()
trans_obj.clear_caches()
if verbose:
_logger.info("translation file loaded succesfully")
except IOError:
filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
_logger.exception("couldn't read translation file %s", filename)
def get_locales(lang=None):
if lang is None:
lang = locale.getdefaultlocale()[0]
if os.name == 'nt':
lang = _LOCALE2WIN32.get(lang, lang)
def process(enc):
ln = locale._build_localename((lang, enc))
yield ln
nln = locale.normalize(ln)
if nln != ln:
yield nln
for x in process('utf8'): yield x
prefenc = locale.getpreferredencoding()
if prefenc:
for x in process(prefenc): yield x
prefenc = {
'latin1': 'latin9',
'iso-8859-1': 'iso8859-15',
'cp1252': '1252',
}.get(prefenc.lower())
if prefenc:
for x in process(prefenc): yield x
yield lang
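# Usage sketch: candidate locale names for one language, most specific
# (UTF-8) first; the exact list depends on the host's locale aliases.
def _example_locale_candidates():
    return list(get_locales('fr_FR'))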
def resetlocale():
# locale.resetlocale is bugged with some locales.
for ln in get_locales():
try:
return locale.setlocale(locale.LC_ALL, ln)
except locale.Error:
continue
def load_language(cr, lang):
"""Loads a translation terms for a language.
Used mainly to automate language loading at db initialization.
:param lang: language ISO code with optional _underscore_ and l10n flavor (ex: 'fr', 'fr_BE', but not 'fr-BE')
:type lang: str
"""
registry = openerp.registry(cr.dbname)
language_installer = registry['base.language.install']
oid = language_installer.create(cr, SUPERUSER_ID, {'lang': lang})
language_installer.lang_install(cr, SUPERUSER_ID, [oid], context=None)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
AnotherIvan/calibre | manual/epub.py | 13 | 3510 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from sphinx.builders.epub import EpubBuilder
from calibre.ebooks.oeb.base import OPF, DC
from calibre.ebooks.oeb.polish.container import get_container, OEB_DOCS
from calibre.ebooks.oeb.polish.check.links import check_links, UnreferencedResource
from calibre.ebooks.oeb.polish.pretty import pretty_html_tree, pretty_opf
from calibre.utils.magick.draw import identify_data
class EPUBHelpBuilder(EpubBuilder):
name = 'myepub'
def build_epub(self, outdir, outname):
EpubBuilder.build_epub(self, outdir, outname)
container = get_container(os.path.join(outdir, outname))
self.fix_epub(container)
container.commit()
def fix_epub(self, container):
' Fix all the brokenness that sphinx\'s epub builder creates '
for name, mt in container.mime_map.iteritems():
if mt in OEB_DOCS:
self.workaround_ade_quirks(container, name)
pretty_html_tree(container, container.parsed(name))
container.dirty(name)
self.fix_opf(container)
def workaround_ade_quirks(self, container, name):
root = container.parsed(name)
# ADE blows up floating images if their sizes are not specified
for img in root.xpath('//*[local-name() = "img" and (@class = "float-right-img" or @class = "float-left-img")]'):
if 'style' not in img.attrib:
imgname = container.href_to_name(img.get('src'), name)
width, height, fmt = identify_data(container.raw_data(imgname))
img.set('style', 'width: %dpx; height: %dpx' % (width, height))
def fix_opf(self, container):
spine_names = {n for n, l in container.spine_names}
spine = container.opf_xpath('//opf:spine')[0]
rmap = {v:k for k, v in container.manifest_id_map.iteritems()}
# Add unreferenced text files to the spine
for name, mt in container.mime_map.iteritems():
if mt in OEB_DOCS and name not in spine_names:
spine_names.add(name)
container.insert_into_xml(spine, spine.makeelement(OPF('itemref'), idref=rmap[name]))
# Remove duplicate entries from spine
seen = set()
for item, name, linear in container.spine_iter:
if name in seen:
container.remove_from_xml(item)
seen.add(name)
# Ensure that the meta cover tag is correct
cover_id = rmap['_static/' + self.config.epub_cover[0]]
for meta in container.opf_xpath('//opf:meta[@name="cover"]'):
meta.set('content', cover_id)
# Add description metadata
metadata = container.opf_xpath('//opf:metadata')[0]
container.insert_into_xml(metadata, metadata.makeelement(DC('description')))
metadata[-1].text = 'Comprehensive documentation for calibre'
# Remove search.html since it is useless in EPUB
container.remove_item('search.html')
# Remove unreferenced files
for error in check_links(container):
if error.__class__ is UnreferencedResource:
container.remove_item(error.name)
# Pretty print the OPF
pretty_opf(container.parsed(container.opf_name))
container.dirty(container.opf_name)
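# Registration sketch (assumption): when this file is loaded as a Sphinx
# extension, the builder would be wired up roughly like this; calibre's
# actual conf.py may register it differently.
def setup(app):
    app.add_builder(EPUBHelpBuilder)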
| gpl-3.0 |
drawks/ansible | lib/ansible/plugins/action/cli_config.py | 42 | 1287 | #
# Copyright 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
self._config_module = True
if self._play_context.connection != 'network_cli':
return {'failed': True, 'msg': 'Connection type %s is not valid for cli_config module' % self._play_context.connection}
return super(ActionModule, self).run(task_vars=task_vars)
| gpl-3.0 |
trishnaguha/ansible | lib/ansible/modules/network/f5/bigip_monitor_external.py | 14 | 23275 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_monitor_external
short_description: Manages external LTM monitors on a BIG-IP
description:
- Manages external LTM monitors on a BIG-IP.
version_added: 2.6
options:
name:
description:
- Specifies the name of the monitor.
required: True
description:
description:
- The description of the monitor.
version_added: 2.7
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(external)
parent on the C(Common) partition.
default: /Common/external
arguments:
description:
- Specifies any command-line arguments that the script requires.
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'. Note that if an IP address is specified, the port must be a value
between 1 and 65535.
external_program:
description:
- Specifies the name of the file for the monitor to use. In order to reference
a file, you must first import it using options on the System > File Management > External
Monitor Program File List > Import screen. The BIG-IP system automatically
places the file in the proper location on the file system.
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run. If this parameter is not provided when creating
a new monitor, then the default value will be 5. This value B(must)
be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request.
- If the target responds within the set time period, it is considered up.
- If the target does not respond within the set time period, it is considered
down.
- You can change this number to any number you want, however, it should be
3 times the interval number of seconds plus 1 second.
- If this parameter is not provided when creating a new monitor, then the
default value will be C(16).
variables:
description:
- Specifies any variables that the script requires.
- Note that double quotes in values will be suppressed.
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the monitor exists.
- When C(absent), ensures the monitor is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create an external monitor
bigip_monitor_external:
name: foo
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Create an external monitor with variables
bigip_monitor_external:
name: foo
timeout: 10
variables:
var1: foo
var2: bar
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Add a variable to an existing set
bigip_monitor_external:
name: foo
timeout: 10
variables:
var1: foo
var2: bar
cat: dog
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: str
sample: external
description:
description: The description of the monitor.
returned: changed
type: str
sample: Important Monitor
ip:
description: The new IP of IP/port definition.
returned: changed
type: str
sample: 10.12.13.14
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import iteritems
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import compare_dictionary
from library.module_utils.network.f5.ipaddress import is_valid_ip
from library.module_utils.network.f5.compare import cmp_str_with_none
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import compare_dictionary
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
from ansible.module_utils.network.f5.compare import cmp_str_with_none
class Parameters(AnsibleF5Parameters):
api_map = {
'defaultsFrom': 'parent',
'apiRawValues': 'variables',
'run': 'external_program',
'args': 'arguments',
}
api_attributes = [
'defaultsFrom',
'interval',
'timeout',
'destination',
'run',
'args',
'description',
]
returnables = [
'parent',
'ip',
'port',
'interval',
'timeout',
'variables',
'external_program',
'arguments',
'description',
]
updatables = [
'destination',
'interval',
'timeout',
'variables',
'external_program',
'arguments',
'description',
]
@property
def destination(self):
if self.ip is None and self.port is None:
return None
destination = '{0}:{1}'.format(self.ip, self.port)
return destination
@destination.setter
def destination(self, value):
ip, port = value.split(':')
self._values['ip'] = ip
self._values['port'] = port
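# Example (a sketch of the round trip): ip='10.1.1.1' and port=80 compose
# the destination '10.1.1.1:80'; assigning destination = '*:*' splits it
# back into ip='*' and port='*'.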
@property
def interval(self):
if self._values['interval'] is None:
return None
# Per BZ617284, the BIG-IP UI does not raise a warning about this,
# so we validate the range here.
if int(self._values['interval']) < 1 or int(self._values['interval']) > 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def ip(self):
if self._values['ip'] is None:
return None
if self._values['ip'] in ['*', '0.0.0.0']:
return '*'
elif is_valid_ip(self._values['ip']):
return self._values['ip']
else:
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
@property
def type(self):
return 'external'
class ApiParameters(Parameters):
@property
def description(self):
if self._values['description'] in [None, 'none']:
return None
return self._values['description']
@property
def variables(self):
if self._values['variables'] is None:
return None
pattern = r'^userDefined\s(?P<key>.*)'
result = {}
for k, v in iteritems(self._values['variables']):
matches = re.match(pattern, k)
if not matches:
raise F5ModuleError(
"Unable to find the variable 'key' in the API payload."
)
key = matches.group('key')
result[key] = v
return result
class ModuleParameters(Parameters):
@property
def description(self):
if self._values['description'] is None:
return None
elif self._values['description'] in ['none', '']:
return ''
return self._values['description']
@property
def variables(self):
if self._values['variables'] is None:
return None
result = {}
for k, v in iteritems(self._values['variables']):
result[k] = str(v).replace('"', '')
return result
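# Example (sketch): variables={'greeting': 'say "hi"'} is normalized to
# {'greeting': 'say hi'}, matching the documented suppression of double
# quotes in values.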
@property
def external_program(self):
if self._values['external_program'] is None:
return None
return fq_name(self.partition, self._values['external_program'])
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None and self.want.port is None:
return None
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.ip is None:
self.want.update({'ip': self.have.ip})
if self.want.port in [None, '*'] and self.want.ip != '*':
raise F5ModuleError(
"Specifying an IP address requires that a port number be specified"
)
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
@property
def variables(self):
if self.want.variables is None:
return None
if self.have.variables is None:
return dict(
variables=self.want.variables
)
result = dict()
different = compare_dictionary(self.want.variables, self.have.variables)
if not different:
return None
for k, v in iteritems(self.want.variables):
if k in self.have.variables and v != self.have.variables[k]:
result[k] = v
elif k not in self.have.variables:
result[k] = v
for k, v in iteritems(self.have.variables):
if k not in self.want.variables:
result[k] = "none"
if result:
result = dict(
variables=result
)
return result
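# Worked example (sketch): want.variables={'var1': 'foo'} against
# have.variables={'var1': 'foo', 'var2': 'bar'} yields roughly
# {'variables': {'var2': 'none'}} -- keys removed by the user are sent
# back to the device with the literal value "none".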
@property
def description(self):
return cmp_str_with_none(self.want.description, self.have.description)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or ('code' in response and response['code'] == 404):
return False
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
self._set_default_creation_values()
if self.module.check_mode:
return True
self.create_on_device()
return True
def _set_default_creation_values(self):
if self.want.timeout is None:
self.want.update({'timeout': 16})
if self.want.interval is None:
self.want.update({'interval': 5})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if self.want.variables:
self.set_variable_on_device(self.want.variables)
def set_variable_on_device(self, commands):
command = ' '.join(['user-defined {0} \\\"{1}\\\"'.format(k, v) for k, v in iteritems(commands)])
command = 'tmsh modify ltm monitor external {0} {1}'.format(self.want.name, command)
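# Example (sketch): for variables {'var1': 'foo'} on monitor 'm1', the
# assembled command is:
#   tmsh modify ltm monitor external m1 user-defined var1 \"foo\"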
uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "{0}"'.format(command)
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
if params:
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if self.changes.variables:
self.set_variable_on_device(self.changes.variables)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
# surface the device's response when the delete fails
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/external/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/external'),
description=dict(),
arguments=dict(),
ip=dict(),
port=dict(),
external_program=dict(),
interval=dict(type='int'),
timeout=dict(type='int'),
state=dict(
default='present',
choices=['present', 'absent']
),
variables=dict(type='dict'),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 |
espadrine/opera | chromium/src/third_party/jinja2/ext.py | 114 | 23910 | # -*- coding: utf-8 -*-
"""
jinja2.ext
~~~~~~~~~~
Jinja extensions make it possible to add custom tags, similar to the way
Django custom tags work. By default two example extensions exist: an i18n and a cache
extension.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from collections import deque
from jinja2 import nodes
from jinja2.defaults import *
from jinja2.environment import Environment
from jinja2.runtime import Undefined, concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup, next
# the only really useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non-unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
rv.identifier = rv.__module__ + '.' + rv.__name__
return rv
class Extension(object):
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
storing the configuration values on the environment. Because this way the
environment ends up acting as central configuration storage, the
attributes may clash, which is why extensions have to ensure that the names
they choose for configuration are not too generic. ``prefix``, for example,
is a terrible name while ``fragment_cache_prefix``, on the other hand, is a good
name, as it includes the name of the extension (fragment cache).
"""
__metaclass__ = ExtensionRegistry
#: if this extension parses this is the list of tags it's listening to.
tags = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
#: priority.
#:
#: .. versionadded:: 2.4
priority = 100
def __init__(self, environment):
self.environment = environment
def bind(self, environment):
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(self, source, name, filename=None):
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(self, stream):
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
In the `ext` folder of the Jinja2 source distribution there is a file
called `inlinegettext.py` which implements a filter that utilizes this
method.
"""
return stream
def parse(self, parser):
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(self, name, lineno=None):
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code.
::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(self, name, args=None, kwargs=None, dyn_args=None,
dyn_kwargs=None, lineno=None):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
dyn_args, dyn_kwargs, lineno=lineno)
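# A minimal extension sketch (illustrative only, not part of jinja2): it adds
# an {% upper %}...{% endupper %} block tag that upper-cases its rendered
# body, wiring `parse` and `call_method` together as described above.
class _UpperExampleExtension(Extension):
    tags = set(['upper'])

    def parse(self, parser):
        # the stream points at the tag name token that matched
        lineno = next(parser.stream).lineno
        body = parser.parse_statements(['name:endupper'], drop_needle=True)
        return nodes.CallBlock(self.call_method('_upper'),
                               [], [], body).set_lineno(lineno)

    def _upper(self, caller):
        # `caller` renders the block body; enable the extension via
        # Environment(extensions=[_UpperExampleExtension]).
        return caller().upper()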
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
return __context.call(__context.resolve('gettext'), *args, **kwargs)
def _make_new_gettext(func):
@contextfunction
def gettext(__context, __string, **variables):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
variables.setdefault('num', __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return ngettext
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja2."""
tags = set(['trans'])
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
# %}{{ count }} foos{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment):
Extension.__init__(self, environment)
environment.globals['_'] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False
)
def _install(self, translations, newstyle=None):
gettext = getattr(translations, 'ugettext', None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, 'ungettext', None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
lambda x: x,
lambda s, p, n: (n != 1 and (p,) or (s,))[0],
newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
if newstyle is not None:
self.environment.newstyle_gettext = newstyle
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
self.environment.globals.update(
gettext=gettext,
ngettext=ngettext
)
def _uninstall(self, translations):
for key in 'gettext', 'ngettext':
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
if isinstance(source, basestring):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser):
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
num_called_num = False
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
plural_expr = None
variables = {}
while parser.stream.current.type != 'block_end':
if variables:
parser.stream.expect('comma')
# skip colon for python compatibility
if parser.stream.skip_if('colon'):
break
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
# expressions
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
plural_expr = var
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural = plural_names = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], 'load')
num_called_num = singular_names[0] == 'num'
# if we have a pluralize block, we parse that too
if parser.stream.current.test('name:pluralize'):
have_plural = True
next(parser.stream)
if parser.stream.current.type != 'block_end':
name = parser.stream.expect('name')
if name.value not in variables:
parser.fail('unknown variable %r for pluralization' %
name.value, name.lineno,
exc=TemplateAssertionError)
plural_expr = variables[name.value]
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for var in referenced:
if var not in variables:
variables[var] = nodes.Name(var, 'load')
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail('pluralize without variables', lineno)
node = self._make_node(singular, plural, variables, plural_expr,
bool(referenced),
num_called_num and have_plural)
node.set_lineno(lineno)
return node
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf)
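# Example (sketch): for `{% trans %}Hello {{ user }}!{% endtrans %}` this
# returns (['user'], 'Hello %(user)s!'); literal percent signs in the data
# are doubled above so later string formatting leaves them intact.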
def _make_node(self, singular, plural, variables, plural_expr,
vars_referenced, num_called_num):
"""Generates a useful node from the data provided."""
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not self.environment.newstyle_gettext:
singular = singular.replace('%%', '%')
if plural:
plural = plural.replace('%%', '%')
# singular only:
if plural_expr is None:
gettext = nodes.Name('gettext', 'load')
node = nodes.Call(gettext, [nodes.Const(singular)],
[], None, None)
# singular and plural
else:
ngettext = nodes.Name('ngettext', 'load')
node = nodes.Call(ngettext, [
nodes.Const(singular),
nodes.Const(plural),
plural_expr
], [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if self.environment.newstyle_gettext:
for key, value in variables.iteritems():
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == 'num':
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(node, nodes.Dict([
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]))
return nodes.Output([node])
class ExprStmtExtension(Extension):
"""Adds a `do` tag to Jinja2 that works like the print statement just
that it doesn't print the return value.
"""
tags = set(['do'])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
tags = set(['break', 'continue'])
def parse(self, parser):
token = next(parser.stream)
if token.value == 'break':
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
class WithExtension(Extension):
"""Adds support for a django-like with block."""
tags = set(['with'])
def parse(self, parser):
node = nodes.Scope(lineno=next(parser.stream).lineno)
assignments = []
while parser.stream.current.type != 'block_end':
lineno = parser.stream.current.lineno
if assignments:
parser.stream.expect('comma')
target = parser.parse_assign_target()
parser.stream.expect('assign')
expr = parser.parse_expression()
assignments.append(nodes.Assign(target, expr, lineno=lineno))
node.body = assignments + \
list(parser.parse_statements(('name:endwith',),
drop_needle=True))
return node
class AutoEscapeExtension(Extension):
"""Changes auto escape rules for a scope."""
tags = set(['autoescape'])
def parse(self, parser):
node = nodes.ScopedEvalContextModifier(lineno=next(parser.stream).lineno)
node.options = [
nodes.Keyword('autoescape', parser.parse_expression())
]
node.body = parser.parse_statements(('name:endautoescape',),
drop_needle=True)
return nodes.Scope([node])
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
allows Babel to figure out what you really meant if you are using
gettext functions that allow keyword arguments for placeholder expansion.
If you don't want that behavior, set the `babel_style` parameter to
`False`, which causes only strings to be returned and parameters to
always be stored in tuples. As a consequence, invalid gettext calls (calls without a single
string parameter or string parameters after non-string parameters) are
skipped.
This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string itself (a ``unicode`` object, or a tuple
of ``unicode`` objects for functions with multiple string arguments).
This extraction function operates on the AST and is therefore unable
to extract any comments. For comment support you have to use the babel
extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
if not isinstance(node.node, nodes.Name) or \
node.node.name not in gettext_functions:
continue
strings = []
for arg in node.args:
if isinstance(arg, nodes.Const) and \
isinstance(arg.value, basestring):
strings.append(arg.value)
else:
strings.append(None)
for arg in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
strings = tuple(x for x in strings if x is not None)
if not strings:
continue
else:
if len(strings) == 1:
strings = strings[0]
else:
strings = tuple(strings)
yield node.lineno, node.node.name, strings
class _CommentFinder(object):
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(self, tokens, comment_tags):
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset):
try:
for _, token_type, token_value in \
reversed(self.tokens[self.offset:offset]):
if token_type in ('comment', 'linecomment'):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
.. versionchanged:: 2.5.1
The `newstyle_gettext` flag can be set to `True` to enable newstyle
gettext calls.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions = set()
for extension in options.get('extensions', '').split(','):
extension = extension.strip()
if not extension:
continue
extensions.add(import_string(extension))
if InternationalizationExtension not in extensions:
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
return options.get(key, str(default)).lower() in ('1', 'on', 'yes', 'true')
environment = Environment(
options.get('block_start_string', BLOCK_START_STRING),
options.get('block_end_string', BLOCK_END_STRING),
options.get('variable_start_string', VARIABLE_START_STRING),
options.get('variable_end_string', VARIABLE_END_STRING),
options.get('comment_start_string', COMMENT_START_STRING),
options.get('comment_end_string', COMMENT_END_STRING),
options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
getbool(options, 'trim_blocks', TRIM_BLOCKS),
NEWLINE_SEQUENCE, frozenset(extensions),
cache_size=0,
auto_reload=False
)
if getbool(options, 'newstyle_gettext'):
environment.newstyle_gettext = True
source = fileobj.read().decode(options.get('encoding', 'utf-8'))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError:
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno)
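# Usage sketch (illustrative paths only): Babel invokes this extractor via a
# mapping configuration, e.g. a babel.cfg section such as:
#   [jinja2: **/templates/**.html]
#   extensions = jinja2.ext.do, jinja2.ext.loopcontrols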
#: nicer import names
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
| bsd-3-clause |
Jusedawg/SickRage | lib/sqlalchemy/ext/declarative/__init__.py | 77 | 47619 | # ext/declarative/__init__.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Synopsis
========
SQLAlchemy object-relational configuration involves the
combination of :class:`.Table`, :func:`.mapper`, and class
objects to define a mapped class.
:mod:`~sqlalchemy.ext.declarative` allows all three to be
expressed at once within the class declaration. As much as
possible, regular SQLAlchemy schema and ORM constructs are
used directly, so that configuration between "classical" ORM
usage and declarative remain highly similar.
As a simple example::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class SomeClass(Base):
__tablename__ = 'some_table'
id = Column(Integer, primary_key=True)
name = Column(String(50))
Above, the :func:`declarative_base` callable returns a new base class from
which all mapped classes should inherit. When the class definition is
completed, a new :class:`.Table` and :func:`.mapper` will have been generated.
The resulting table and mapper are accessible via
``__table__`` and ``__mapper__`` attributes on the
``SomeClass`` class::
# access the mapped Table
SomeClass.__table__
# access the Mapper
SomeClass.__mapper__
Defining Attributes
===================
In the previous example, the :class:`.Column` objects are
automatically named with the name of the attribute to which they are
assigned.
To name columns explicitly with a name distinct from their mapped attribute,
just give the column a name. Below, column "some_table_id" is mapped to the
"id" attribute of `SomeClass`, but in SQL will be represented as
"some_table_id"::
class SomeClass(Base):
__tablename__ = 'some_table'
id = Column("some_table_id", Integer, primary_key=True)
Attributes may be added to the class after its construction, and they will be
added to the underlying :class:`.Table` and
:func:`.mapper` definitions as appropriate::
SomeClass.data = Column('data', Unicode)
SomeClass.related = relationship(RelatedInfo)
Classes which are constructed using declarative can interact freely
with classes that are mapped explicitly with :func:`.mapper`.
It is recommended, though not required, that all tables
share the same underlying :class:`~sqlalchemy.schema.MetaData` object,
so that string-configured :class:`~sqlalchemy.schema.ForeignKey`
references can be resolved without issue.
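For example, a classically mapped class can point its :class:`.Table` at the
declarative ``Base.metadata`` (a sketch; assumes ``Table``, ``Column``,
``Integer`` and :func:`.mapper` are imported)::
    plain_table = Table('plain', Base.metadata,
        Column('id', Integer, primary_key=True)
    )
    class Plain(object):
        pass
    mapper(Plain, plain_table)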
Accessing the MetaData
=======================
The :func:`declarative_base` base class contains a
:class:`.MetaData` object where newly defined
:class:`.Table` objects are collected. This object is
intended to be accessed directly for
:class:`.MetaData`-specific operations, such as issuing
CREATE statements for all tables::
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
:func:`declarative_base` can also receive a pre-existing
:class:`.MetaData` object, which allows a
declarative setup to be associated with an already
existing traditional collection of :class:`~sqlalchemy.schema.Table`
objects::
mymetadata = MetaData()
Base = declarative_base(metadata=mymetadata)
.. _declarative_configuring_relationships:
Configuring Relationships
=========================
Relationships to other classes are done in the usual way, with the added
feature that the class specified to :func:`~sqlalchemy.orm.relationship`
may be a string name. The "class registry" associated with ``Base``
is used at mapper compilation time to resolve the name into the actual
class object, which is expected to have been defined once the mapper
configuration is used::
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(50))
addresses = relationship("Address", backref="user")
class Address(Base):
__tablename__ = 'addresses'
id = Column(Integer, primary_key=True)
email = Column(String(50))
user_id = Column(Integer, ForeignKey('users.id'))
Column constructs, since they are just that, are immediately usable,
as below where we define a primary join condition on the ``Address``
class using them::
class Address(Base):
__tablename__ = 'addresses'
id = Column(Integer, primary_key=True)
email = Column(String(50))
user_id = Column(Integer, ForeignKey('users.id'))
user = relationship(User, primaryjoin=user_id == User.id)
In addition to the main argument for :func:`~sqlalchemy.orm.relationship`,
other arguments which depend upon the columns present on an as-yet
undefined class may also be specified as strings. These strings are
evaluated as Python expressions. The full namespace available within
this evaluation includes all classes mapped for this declarative base,
as well as the contents of the ``sqlalchemy`` package, including
expression functions like :func:`~sqlalchemy.sql.expression.desc` and
:attr:`~sqlalchemy.sql.expression.func`::
class User(Base):
# ....
addresses = relationship("Address",
order_by="desc(Address.email)",
primaryjoin="Address.user_id==User.id")
For the case where more than one module contains a class of the same name,
string class names can also be specified as module-qualified paths
within any of these string expressions::
class User(Base):
# ....
addresses = relationship("myapp.model.address.Address",
order_by="desc(myapp.model.address.Address.email)",
primaryjoin="myapp.model.address.Address.user_id=="
"myapp.model.user.User.id")
The qualified path can be any partial path that removes ambiguity between
the names. For example, to disambiguate between
``myapp.model.address.Address`` and ``myapp.model.lookup.Address``,
we can specify ``address.Address`` or ``lookup.Address``::
class User(Base):
# ....
addresses = relationship("address.Address",
order_by="desc(address.Address.email)",
primaryjoin="address.Address.user_id=="
"User.id")
.. versionadded:: 0.8
module-qualified paths can be used when specifying string arguments
with Declarative, in order to specify specific modules.
Two alternatives also exist to using string-based attributes. A lambda
can also be used, which will be evaluated after all mappers have been
configured::
class User(Base):
# ...
addresses = relationship(lambda: Address,
order_by=lambda: desc(Address.email),
primaryjoin=lambda: Address.user_id==User.id)
Or, the relationship can be added to the class explicitly after the classes
are available::
User.addresses = relationship(Address,
primaryjoin=Address.user_id==User.id)
.. _declarative_many_to_many:
Configuring Many-to-Many Relationships
======================================
Many-to-many relationships are also declared in the same way
with declarative as with traditional mappings. The
``secondary`` argument to
:func:`.relationship` is as usual passed a
:class:`.Table` object, which is typically declared in the
traditional way. The :class:`.Table` usually shares
the :class:`.MetaData` object used by the declarative base::
keywords = Table(
'keywords', Base.metadata,
Column('author_id', Integer, ForeignKey('authors.id')),
Column('keyword_id', Integer, ForeignKey('keywords.id'))
)
class Author(Base):
__tablename__ = 'authors'
id = Column(Integer, primary_key=True)
keywords = relationship("Keyword", secondary=keywords)
Like other :func:`~sqlalchemy.orm.relationship` arguments, a string is accepted
as well, passing the string name of the table as defined in the
``Base.metadata.tables`` collection::
class Author(Base):
__tablename__ = 'authors'
id = Column(Integer, primary_key=True)
keywords = relationship("Keyword", secondary="keywords")
As with traditional mapping, it's generally not a good idea to use
a :class:`.Table` as the "secondary" argument which is also mapped to
a class, unless the :func:`.relationship` is declared with ``viewonly=True``.
Otherwise, the unit-of-work system may attempt duplicate INSERT and
DELETE statements against the underlying table.
.. _declarative_sql_expressions:
Defining SQL Expressions
========================
See :ref:`mapper_sql_expressions` for examples on declaratively
mapping attributes to SQL expressions.
.. _declarative_table_args:
Table Configuration
===================
Table arguments other than the name, metadata, and mapped Column
arguments are specified using the ``__table_args__`` class attribute.
This attribute accommodates both positional as well as keyword
arguments that are normally sent to the
:class:`~sqlalchemy.schema.Table` constructor.
The attribute can be specified in one of two forms. One is as a
dictionary::
class MyClass(Base):
__tablename__ = 'sometable'
__table_args__ = {'mysql_engine':'InnoDB'}
The other, a tuple, where each argument is positional
(usually constraints)::
class MyClass(Base):
__tablename__ = 'sometable'
__table_args__ = (
ForeignKeyConstraint(['id'], ['remote_table.id']),
UniqueConstraint('foo'),
)
Keyword arguments can be specified with the above form by
specifying the last argument as a dictionary::
class MyClass(Base):
__tablename__ = 'sometable'
__table_args__ = (
ForeignKeyConstraint(['id'], ['remote_table.id']),
UniqueConstraint('foo'),
{'autoload':True}
)
Using a Hybrid Approach with __table__
=======================================
As an alternative to ``__tablename__``, a direct
:class:`~sqlalchemy.schema.Table` construct may be used. The
:class:`~sqlalchemy.schema.Column` objects, which in this case require
their names, will be added to the mapping just like a regular mapping
to a table::
class MyClass(Base):
__table__ = Table('my_table', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50))
)
``__table__`` provides a more focused point of control for establishing
table metadata, while still getting most of the benefits of using declarative.
An application that uses reflection might want to load table metadata elsewhere
and pass it to declarative classes::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
Base.metadata.reflect(some_engine)
class User(Base):
__table__ = metadata.tables['user']
class Address(Base):
__table__ = metadata.tables['address']
Some configuration schemes may find it more appropriate to use ``__table__``,
such as those which already take advantage of the data-driven nature of
:class:`.Table` to customize and/or automate schema definition.
Note that when the ``__table__`` approach is used, the object is immediately
usable as a plain :class:`.Table` within the class declaration body itself,
as a Python class is only another syntactical block. Below this is illustrated
by using the ``id`` column in the ``primaryjoin`` condition of a
:func:`.relationship`::
class MyClass(Base):
__table__ = Table('my_table', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50))
)
widgets = relationship(Widget,
primaryjoin=Widget.myclass_id==__table__.c.id)
Similarly, mapped attributes which refer to ``__table__`` can be placed inline,
as below where we assign the ``name`` column to the attribute ``_name``,
generating a synonym for ``name``::
from sqlalchemy.ext.declarative import synonym_for
class MyClass(Base):
__table__ = Table('my_table', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50))
)
_name = __table__.c.name
@synonym_for("_name")
def name(self):
return "Name: %s" % _name
Using Reflection with Declarative
=================================
It's easy to set up a :class:`.Table` that uses ``autoload=True``
in conjunction with a mapped class::
class MyClass(Base):
__table__ = Table('mytable', Base.metadata,
autoload=True, autoload_with=some_engine)
However, one improvement that can be made here is to not
require the :class:`.Engine` to be available when classes are
being first declared. To achieve this, use the
:class:`.DeferredReflection` mixin, which sets up mappings
only after a special ``prepare(engine)`` step is called::
from sqlalchemy.ext.declarative import declarative_base, DeferredReflection
Base = declarative_base(cls=DeferredReflection)
class Foo(Base):
__tablename__ = 'foo'
bars = relationship("Bar")
class Bar(Base):
__tablename__ = 'bar'
# illustrate overriding of "bar.foo_id" to have
# a foreign key constraint otherwise not
# reflected, such as when using MySQL
foo_id = Column(Integer, ForeignKey('foo.id'))
Base.prepare(e)
.. versionadded:: 0.8
Added :class:`.DeferredReflection`.
Mapper Configuration
====================
Declarative makes use of the :func:`~.orm.mapper` function internally
when it creates the mapping to the declared table. The options
for :func:`~.orm.mapper` are passed directly through via the
``__mapper_args__`` class attribute. As always, arguments which reference
locally mapped columns can reference them directly from within the
class declaration::
from datetime import datetime
class Widget(Base):
__tablename__ = 'widgets'
id = Column(Integer, primary_key=True)
timestamp = Column(DateTime, nullable=False)
__mapper_args__ = {
'version_id_col': timestamp,
'version_id_generator': lambda v:datetime.now()
}
.. _declarative_inheritance:
Inheritance Configuration
=========================
Declarative supports all three forms of inheritance as intuitively
as possible. The ``inherits`` mapper keyword argument is not needed
as declarative will determine this from the class itself. The various
"polymorphic" keyword arguments are specified using ``__mapper_args__``.
Joined Table Inheritance
~~~~~~~~~~~~~~~~~~~~~~~~
Joined table inheritance is defined as a subclass that defines its own
table::
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'polymorphic_identity': 'engineer'}
id = Column(Integer, ForeignKey('people.id'), primary_key=True)
primary_language = Column(String(50))
Note that above, the ``Engineer.id`` attribute, since it shares the
same attribute name as the ``Person.id`` attribute, will in fact
represent the ``people.id`` and ``engineers.id`` columns together,
with the "Engineer.id" column taking precedence if queried directly.
To provide the ``Engineer`` class with an attribute that represents
only the ``engineers.id`` column, give it a different attribute name::
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'polymorphic_identity': 'engineer'}
engineer_id = Column('id', Integer, ForeignKey('people.id'),
primary_key=True)
primary_language = Column(String(50))
.. versionchanged:: 0.7 joined table inheritance favors the subclass
column over that of the superclass, such as querying above
for ``Engineer.id``. Prior to 0.7 this was the reverse.
.. _declarative_single_table:
Single Table Inheritance
~~~~~~~~~~~~~~~~~~~~~~~~
Single table inheritance is defined as a subclass that does not have
its own table; you just leave out the ``__table__`` and ``__tablename__``
attributes::
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language = Column(String(50))
When the above mappers are configured, the ``Person`` class is mapped
to the ``people`` table *before* the ``primary_language`` column is
defined, and this column will not be included in its own mapping.
When ``Engineer`` then defines the ``primary_language`` column, the
column is added to the ``people`` table so that it is included in the
mapping for ``Engineer`` and is also part of the table's full set of
columns. Columns which are not mapped to ``Person`` are also excluded
from any other single or joined inheriting classes using the
``exclude_properties`` mapper argument. Below, ``Manager`` will have
all the attributes of ``Person`` and ``Manager`` but *not* the
``primary_language`` attribute of ``Engineer``::
class Manager(Person):
__mapper_args__ = {'polymorphic_identity': 'manager'}
golf_swing = Column(String(50))
The attribute exclusion logic is provided by the
``exclude_properties`` mapper argument, and declarative's default
behavior can be disabled by passing an explicit ``exclude_properties``
collection (empty or otherwise) to the ``__mapper_args__``.
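For example, the default exclusion can be disabled with an explicit empty
collection (a sketch)::
    class Manager(Person):
        __mapper_args__ = {
            'polymorphic_identity': 'manager',
            'exclude_properties': []
        }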
Resolving Column Conflicts
^^^^^^^^^^^^^^^^^^^^^^^^^^
Note above that the ``primary_language`` and ``golf_swing`` columns
are "moved up" to be applied to ``Person.__table__``, as a result of their
declaration on a subclass that has no table of its own. A tricky case
comes up when two subclasses want to specify *the same* column, as below::
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__mapper_args__ = {'polymorphic_identity': 'engineer'}
start_date = Column(DateTime)
class Manager(Person):
__mapper_args__ = {'polymorphic_identity': 'manager'}
start_date = Column(DateTime)
Above, the ``start_date`` column declared on both ``Engineer`` and ``Manager``
will result in an error::
sqlalchemy.exc.ArgumentError: Column 'start_date' on class
<class '__main__.Manager'> conflicts with existing
column 'people.start_date'
In a situation like this, Declarative can't be sure
of the intent, especially if the ``start_date`` columns had, for example,
different types. A situation like this can be resolved by using
:class:`.declared_attr` to define the :class:`.Column` conditionally, taking
care to return the **existing column** via the parent ``__table__`` if it
already exists::
from sqlalchemy.ext.declarative import declared_attr
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__mapper_args__ = {'polymorphic_identity': 'engineer'}
@declared_attr
def start_date(cls):
"Start date column, if not present already."
return Person.__table__.c.get('start_date', Column(DateTime))
class Manager(Person):
__mapper_args__ = {'polymorphic_identity': 'manager'}
@declared_attr
def start_date(cls):
"Start date column, if not present already."
return Person.__table__.c.get('start_date', Column(DateTime))
Above, when ``Manager`` is mapped, the ``start_date`` column is
already present on the ``Person`` class. Declarative lets us return
that :class:`.Column` as a result in this case, where it knows to skip
re-assigning the same column. If the mapping is mis-configured such
that the ``start_date`` column is accidentally re-assigned to a
different table (such as, if we changed ``Manager`` to be joined
inheritance without fixing ``start_date``), an error is raised which
indicates an existing :class:`.Column` is trying to be re-assigned to
a different owning :class:`.Table`.
.. versionadded:: 0.8 :class:`.declared_attr` can be used on a non-mixin
class, and the returned :class:`.Column` or other mapped attribute
will be applied to the mapping as any other attribute. Previously,
the resulting attribute would be ignored, and also result in a warning
being emitted when a subclass was created.
.. versionadded:: 0.8 :class:`.declared_attr`, when used either with a
mixin or non-mixin declarative class, can return an existing
:class:`.Column` already assigned to the parent :class:`.Table`,
to indicate that the re-assignment of the :class:`.Column` should be
skipped, however should still be mapped on the target class,
in order to resolve duplicate column conflicts.
The same concept can be used with mixin classes (see
:ref:`declarative_mixins`)::
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class HasStartDate(object):
@declared_attr
def start_date(cls):
return cls.__table__.c.get('start_date', Column(DateTime))
class Engineer(HasStartDate, Person):
__mapper_args__ = {'polymorphic_identity': 'engineer'}
class Manager(HasStartDate, Person):
__mapper_args__ = {'polymorphic_identity': 'manager'}
The above mixin checks the local ``__table__`` attribute for the column.
Because we're using single table inheritance, we're sure that in this case,
``cls.__table__`` refers to ``Person.__table__``. If we were mixing joined-
and single-table inheritance, we might want our mixin to check more carefully
if ``cls.__table__`` is really the :class:`.Table` we're looking for.
Concrete Table Inheritance
~~~~~~~~~~~~~~~~~~~~~~~~~~
Concrete is defined as a subclass which has its own table and sets the
``concrete`` keyword argument to ``True``::
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
name = Column(String(50))
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'concrete':True}
id = Column(Integer, primary_key=True)
primary_language = Column(String(50))
name = Column(String(50))
Usage of an abstract base class is a little less straightforward as it
requires use of :func:`~sqlalchemy.orm.util.polymorphic_union`,
which needs to be created with the :class:`.Table` objects
before the class is built::
engineers = Table('engineers', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50)),
Column('primary_language', String(50))
)
managers = Table('managers', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50)),
Column('golf_swing', String(50))
)
punion = polymorphic_union({
'engineer':engineers,
'manager':managers
}, 'type', 'punion')
class Person(Base):
__table__ = punion
__mapper_args__ = {'polymorphic_on':punion.c.type}
class Engineer(Person):
__table__ = engineers
__mapper_args__ = {'polymorphic_identity':'engineer', 'concrete':True}
class Manager(Person):
__table__ = managers
__mapper_args__ = {'polymorphic_identity':'manager', 'concrete':True}
.. _declarative_concrete_helpers:
Using the Concrete Helpers
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Helper classes provide a simpler pattern for concrete inheritance.
With these objects, the ``__declare_first__`` helper is used to configure the
"polymorphic" loader for the mapper after all subclasses have been declared.
.. versionadded:: 0.7.3
An abstract base can be declared using the
:class:`.AbstractConcreteBase` class::
from sqlalchemy.ext.declarative import AbstractConcreteBase
class Employee(AbstractConcreteBase, Base):
pass
To have a concrete ``employee`` table, use :class:`.ConcreteBase` instead::
from sqlalchemy.ext.declarative import ConcreteBase
class Employee(ConcreteBase, Base):
__tablename__ = 'employee'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee',
'concrete':True}
Either ``Employee`` base can be used in the normal fashion::
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
class Engineer(Employee):
__tablename__ = 'engineer'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
engineer_info = Column(String(40))
__mapper_args__ = {'polymorphic_identity':'engineer',
'concrete':True}
The :class:`.AbstractConcreteBase` class is itself mapped, and can be
used as a target of relationships::
class Company(Base):
__tablename__ = 'company'
id = Column(Integer, primary_key=True)
employees = relationship("Employee",
primaryjoin="Company.id == Employee.company_id")
.. versionchanged:: 0.9.3 Support for use of :class:`.AbstractConcreteBase`
as the target of a :func:`.relationship` has been improved.
It can also be queried directly::
for employee in session.query(Employee).filter(Employee.name == 'qbert'):
print(employee)
.. _declarative_mixins:
Mixin and Custom Base Classes
==============================
A common need when using :mod:`~sqlalchemy.ext.declarative` is to
share some functionality, such as a set of common columns, some common
table options, or other mapped properties, across many
classes. The standard Python idiom for this is to have the classes
inherit from a base which includes these common features.
When using :mod:`~sqlalchemy.ext.declarative`, this idiom is allowed
via the usage of a custom declarative base class, as well as a "mixin" class
which is inherited from in addition to the primary base. Declarative
includes several helper features to make this work in terms of how
mappings are declared. An example of some commonly mixed-in
idioms is below::
from sqlalchemy.ext.declarative import declared_attr
class MyMixin(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
__table_args__ = {'mysql_engine': 'InnoDB'}
__mapper_args__= {'always_refresh': True}
id = Column(Integer, primary_key=True)
class MyModel(MyMixin, Base):
name = Column(String(1000))
Where above, the class ``MyModel`` will contain an "id" column
as the primary key, a ``__tablename__`` attribute that derives
from the name of the class itself, as well as ``__table_args__``
and ``__mapper_args__`` defined by the ``MyMixin`` mixin class.
There's no fixed convention for whether ``MyMixin`` precedes
``Base`` or not. Normal Python method resolution rules apply, and
the above example would work just as well with::
class MyModel(Base, MyMixin):
name = Column(String(1000))
This works because ``Base`` here doesn't define any of the
variables that ``MyMixin`` defines, i.e. ``__tablename__``,
``__table_args__``, ``id``, etc. If the ``Base`` did define
an attribute of the same name, the class placed first in the
inherits list would determine which attribute is used on the
newly defined class.
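As a hypothetical sketch of that rule using two mixins which both define
``__table_args__``::

    class OtherMixin(object):
        __table_args__ = {'mysql_engine': 'MyISAM'}

    class MyOtherModel(MyMixin, OtherMixin, Base):
        name = Column(String(1000))

    # MyMixin precedes OtherMixin in the MRO, so its __table_args__ wins
    assert MyOtherModel.__table_args__ == {'mysql_engine': 'InnoDB'}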
Augmenting the Base
~~~~~~~~~~~~~~~~~~~
In addition to using a pure mixin, most of the techniques in this
section can also be applied to the base class itself, for patterns that
should apply to all classes derived from a particular base. This is achieved
using the ``cls`` argument of the :func:`.declarative_base` function::
from sqlalchemy.ext.declarative import declared_attr
class Base(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
__table_args__ = {'mysql_engine': 'InnoDB'}
id = Column(Integer, primary_key=True)
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base(cls=Base)
class MyModel(Base):
name = Column(String(1000))
Where above, ``MyModel`` and all other classes that derive from ``Base`` will
have a table name derived from the class name, an ``id`` primary key column,
as well as the "InnoDB" engine for MySQL.
Mixing in Columns
~~~~~~~~~~~~~~~~~
The most basic way to specify a column on a mixin is by simple
declaration::
class TimestampMixin(object):
created_at = Column(DateTime, default=func.now())
class MyModel(TimestampMixin, Base):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
name = Column(String(1000))
Where above, all declarative classes that include ``TimestampMixin``
will also have a column ``created_at`` that applies a timestamp to
all row insertions.
Those familiar with the SQLAlchemy expression language know that
the object identity of clause elements defines their role in a schema.
Two ``Table`` objects ``a`` and ``b`` may both have a column called
``id``, but the way these are differentiated is that ``a.c.id``
and ``b.c.id`` are two distinct Python objects, referencing their
parent tables ``a`` and ``b`` respectively.
In the case of the mixin column, it seems that only one
:class:`.Column` object is explicitly created, yet the ultimate
``created_at`` column above must exist as a distinct Python object
for each separate destination class. To accomplish this, the declarative
extension creates a **copy** of each :class:`.Column` object encountered on
a class that is detected as a mixin.
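A short sketch (hypothetical model names) illustrating the copy::

    class ModelA(TimestampMixin, Base):
        __tablename__ = 'model_a'
        id = Column(Integer, primary_key=True)

    class ModelB(TimestampMixin, Base):
        __tablename__ = 'model_b'
        id = Column(Integer, primary_key=True)

    # each Table owns a distinct copy of the mixin's Column
    assert ModelA.__table__.c.created_at is not ModelB.__table__.c.created_at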
This copy mechanism is limited to simple columns that have no foreign
keys, as a :class:`.ForeignKey` itself contains references to columns
which can't be properly recreated at this level. For columns that
have foreign keys, as well as for the variety of mapper-level constructs
that require destination-explicit context, the
:class:`~.declared_attr` decorator is provided so that
patterns common to many classes can be defined as callables::
from sqlalchemy.ext.declarative import declared_attr
class ReferenceAddressMixin(object):
@declared_attr
def address_id(cls):
return Column(Integer, ForeignKey('address.id'))
class User(ReferenceAddressMixin, Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
Where above, the ``address_id`` class-level callable is executed at the
point at which the ``User`` class is constructed, and the declarative
extension can use the resulting :class:`.Column` object as returned by
the method without the need to copy it.
.. versionchanged:: > 0.6.5
``sqlalchemy.util.classproperty`` was renamed to
:class:`~.declared_attr`.
Columns generated by :class:`~.declared_attr` can also be
referenced by ``__mapper_args__`` to a limited degree, currently
by ``polymorphic_on`` and ``version_id_col``, by specifying the
:class:`.declared_attr` object itself in the dictionary - the declarative extension
will resolve them at class construction time::
class MyMixin:
@declared_attr
def type_(cls):
return Column(String(50))
__mapper_args__= {'polymorphic_on':type_}
class MyModel(MyMixin, Base):
__tablename__='test'
id = Column(Integer, primary_key=True)
Mixing in Relationships
~~~~~~~~~~~~~~~~~~~~~~~
Relationships created by :func:`~sqlalchemy.orm.relationship` are provided
with declarative mixin classes exclusively using the
:class:`.declared_attr` approach, eliminating any ambiguity
which could arise when copying a relationship and its possibly column-bound
contents. Below is an example which combines a foreign key column and a
relationship so that two classes ``Foo`` and ``Bar`` can both be configured to
reference a common target class via many-to-one::
class RefTargetMixin(object):
@declared_attr
def target_id(cls):
return Column('target_id', ForeignKey('target.id'))
@declared_attr
def target(cls):
return relationship("Target")
class Foo(RefTargetMixin, Base):
__tablename__ = 'foo'
id = Column(Integer, primary_key=True)
class Bar(RefTargetMixin, Base):
__tablename__ = 'bar'
id = Column(Integer, primary_key=True)
class Target(Base):
__tablename__ = 'target'
id = Column(Integer, primary_key=True)
Using Advanced Relationship Arguments (e.g. ``primaryjoin``, etc.)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
:func:`~sqlalchemy.orm.relationship` definitions which require explicit
``primaryjoin``, ``order_by`` etc. expressions should, in all but the
simplest cases, use **late bound** forms for these arguments,
meaning either the string form or a lambda.
The reason for this is that the related :class:`.Column` objects which are to
be configured using ``@declared_attr`` are not available to another
``@declared_attr`` attribute; while the methods will work and return new
:class:`.Column` objects, those are not the :class:`.Column` objects that
Declarative will be using as it calls the methods on its own, thus using
*different* :class:`.Column` objects.
The canonical example is the primaryjoin condition that depends upon
another mixed-in column::
class RefTargetMixin(object):
@declared_attr
def target_id(cls):
return Column('target_id', ForeignKey('target.id'))
@declared_attr
def target(cls):
return relationship(Target,
primaryjoin=Target.id==cls.target_id # this is *incorrect*
)
Mapping a class using the above mixin, we will get an error like::
sqlalchemy.exc.InvalidRequestError: this ForeignKey's parent column is not
yet associated with a Table.
This is because the ``target_id`` :class:`.Column` we've called upon in our ``target()``
method is not the same :class:`.Column` that declarative is actually going to map
to our table.
The condition above is resolved using a lambda::
class RefTargetMixin(object):
@declared_attr
def target_id(cls):
return Column('target_id', ForeignKey('target.id'))
@declared_attr
def target(cls):
return relationship(Target,
primaryjoin=lambda: Target.id==cls.target_id
)
or alternatively, the string form (which ultimately generates a lambda)::
class RefTargetMixin(object):
@declared_attr
def target_id(cls):
return Column('target_id', ForeignKey('target.id'))
@declared_attr
def target(cls):
return relationship("Target",
primaryjoin="Target.id==%s.target_id" % cls.__name__
)
Mixing in deferred(), column_property(), and other MapperProperty classes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Like :func:`~sqlalchemy.orm.relationship`, all
:class:`~sqlalchemy.orm.interfaces.MapperProperty` subclasses such as
:func:`~sqlalchemy.orm.deferred`, :func:`~sqlalchemy.orm.column_property`,
etc. ultimately involve references to columns, and therefore, when
used with declarative mixins, have the :class:`.declared_attr`
requirement so that no reliance on copying is needed::
class SomethingMixin(object):
@declared_attr
def dprop(cls):
return deferred(Column(Integer))
class Something(SomethingMixin, Base):
__tablename__ = "something"
Mixing in Association Proxy and Other Attributes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Mixins can specify user-defined attributes as well as other extension
units such as :func:`.association_proxy`. The usage of
:class:`.declared_attr` is required in those cases where the attribute must
be tailored specifically to the target subclass. An example is when
constructing multiple :func:`.association_proxy` attributes which each
target a different type of child object. Below is an
:func:`.association_proxy` / mixin example which provides a scalar list of
string values to an implementing class::
from sqlalchemy import Column, Integer, ForeignKey, String
from sqlalchemy.orm import relationship
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declarative_base, declared_attr
Base = declarative_base()
class HasStringCollection(object):
@declared_attr
def _strings(cls):
class StringAttribute(Base):
__tablename__ = cls.string_table_name
id = Column(Integer, primary_key=True)
value = Column(String(50), nullable=False)
parent_id = Column(Integer,
ForeignKey('%s.id' % cls.__tablename__),
nullable=False)
def __init__(self, value):
self.value = value
return relationship(StringAttribute)
@declared_attr
def strings(cls):
return association_proxy('_strings', 'value')
class TypeA(HasStringCollection, Base):
__tablename__ = 'type_a'
string_table_name = 'type_a_strings'
id = Column(Integer(), primary_key=True)
class TypeB(HasStringCollection, Base):
__tablename__ = 'type_b'
string_table_name = 'type_b_strings'
id = Column(Integer(), primary_key=True)
Above, the ``HasStringCollection`` mixin produces a :func:`.relationship`
which refers to a newly generated class called ``StringAttribute``. The
``StringAttribute`` class is generated with its own :class:`.Table`
definition, local to each parent class that makes use of the
``HasStringCollection`` mixin. It also produces an :func:`.association_proxy`
object which proxies references to the ``strings`` attribute onto the ``value``
attribute of each ``StringAttribute`` instance.
``TypeA`` or ``TypeB`` can be instantiated given the constructor
argument ``strings``, a list of strings::
ta = TypeA(strings=['foo', 'bar'])
tb = TypeB(strings=['bat', 'bar'])
This list will generate a collection
of ``StringAttribute`` objects, which are persisted into either the
``type_a_strings`` or ``type_b_strings`` table::
>>> print(ta._strings)
[<__main__.StringAttribute object at 0x10151cd90>,
<__main__.StringAttribute object at 0x10151ce10>]
When constructing the :func:`.association_proxy`, the
:class:`.declared_attr` decorator must be used so that a distinct
:func:`.association_proxy` object is created for each of the ``TypeA``
and ``TypeB`` classes.
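A sketch of that distinction, building on the classes above::

    # each class received its own association_proxy object
    assert TypeA.strings is not TypeB.strings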
.. versionadded:: 0.8 :class:`.declared_attr` is usable with non-mapped
attributes, including user-defined attributes as well as
:func:`.association_proxy`.
Controlling table inheritance with mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``__tablename__`` attribute in conjunction with the hierarchy of
classes involved in a declarative mixin scenario controls what type of
table inheritance, if any,
is configured by the declarative extension.
If the ``__tablename__`` is computed by a mixin, you may need to
control which classes get the computed attribute in order to get the
type of table inheritance you require.
For example, if you had a mixin that computes ``__tablename__`` but
where you wanted to use that mixin in a single table inheritance
hierarchy, you can explicitly specify ``__tablename__`` as ``None`` to
indicate that the class should not have a table mapped::
from sqlalchemy.ext.declarative import declared_attr
class Tablename:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
class Person(Tablename, Base):
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language = Column(String(50))
Alternatively, you can make the mixin intelligent enough to only
return a ``__tablename__`` in the event that no table is already
mapped in the inheritance hierarchy. To help with this, a
:func:`~sqlalchemy.ext.declarative.has_inherited_table` helper
function is provided that returns ``True`` if a parent class already
has a mapped table.
As an example, here's a mixin that will only allow single table
inheritance::
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.declarative import has_inherited_table
class Tablename(object):
@declared_attr
def __tablename__(cls):
if has_inherited_table(cls):
return None
return cls.__name__.lower()
class Person(Tablename, Base):
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
primary_language = Column(String(50))
__mapper_args__ = {'polymorphic_identity': 'engineer'}
Combining Table/Mapper Arguments from Multiple Mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the case of ``__table_args__`` or ``__mapper_args__``
specified with declarative mixins, you may want to combine
some parameters from several mixins with those you wish to
define on the class itself. The
:class:`.declared_attr` decorator can be used
here to create user-defined combination routines that pull
from multiple collections::
from sqlalchemy.ext.declarative import declared_attr
class MySQLSettings(object):
__table_args__ = {'mysql_engine':'InnoDB'}
class MyOtherMixin(object):
__table_args__ = {'info':'foo'}
class MyModel(MySQLSettings, MyOtherMixin, Base):
__tablename__='my_model'
@declared_attr
def __table_args__(cls):
args = dict()
args.update(MySQLSettings.__table_args__)
args.update(MyOtherMixin.__table_args__)
return args
id = Column(Integer, primary_key=True)
Creating Indexes with Mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To define a named, potentially multicolumn :class:`.Index` that applies to all
tables derived from a mixin, use the "inline" form of :class:`.Index` and
establish it as part of ``__table_args__``::
class MyMixin(object):
a = Column(Integer)
b = Column(Integer)
@declared_attr
def __table_args__(cls):
return (Index('test_idx_%s' % cls.__tablename__, 'a', 'b'),)
class MyModel(MyMixin, Base):
__tablename__ = 'atable'
c = Column(Integer,primary_key=True)
Special Directives
==================
``__declare_last__()``
~~~~~~~~~~~~~~~~~~~~~~
The ``__declare_last__()`` hook allows definition of
a class level function that is automatically called by the
:meth:`.MapperEvents.after_configured` event, which occurs after mappings are
assumed to be completed and the 'configure' step has finished::
class MyClass(Base):
@classmethod
def __declare_last__(cls):
""
# do something with mappings
.. versionadded:: 0.7.3
``__declare_first__()``
~~~~~~~~~~~~~~~~~~~~~~~
Like ``__declare_last__()``, but is called at the beginning of mapper configuration
via the :meth:`.MapperEvents.before_configured` event::
class MyClass(Base):
@classmethod
def __declare_first__(cls):
""
# do something before mappings are configured
.. versionadded:: 0.9.3
.. _declarative_abstract:
``__abstract__``
~~~~~~~~~~~~~~~~~~~
``__abstract__`` causes declarative to skip the production
of a table or mapper for the class entirely. A class can be added within a
hierarchy in the same way as a mixin (see :ref:`declarative_mixins`), allowing
subclasses to extend just from the special class::
class SomeAbstractBase(Base):
__abstract__ = True
def some_helpful_method(self):
""
@declared_attr
def __mapper_args__(cls):
return {"helpful mapper arguments":True}
class MyMappedClass(SomeAbstractBase):
""
One possible use of ``__abstract__`` is to use a distinct
:class:`.MetaData` for different bases::
Base = declarative_base()
class DefaultBase(Base):
__abstract__ = True
metadata = MetaData()
class OtherBase(Base):
__abstract__ = True
metadata = MetaData()
Above, classes which inherit from ``DefaultBase`` will use one
:class:`.MetaData` as the registry of tables, and those which inherit from
``OtherBase`` will use a different one. The tables themselves can then be
created perhaps within distinct databases::
DefaultBase.metadata.create_all(some_engine)
OtherBase.metadata.create_all(some_other_engine)
.. versionadded:: 0.7.3
Class Constructor
=================
As a convenience feature, the :func:`declarative_base` sets a default
constructor on classes which takes keyword arguments, and assigns them
to the named attributes::
e = Engineer(primary_language='python')
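The effect is roughly equivalent to assigning each keyword as an
attribute after construction; a keyword which does not correspond to an
attribute on the class raises ``TypeError``. A sketch of the
equivalence::

    e = Engineer()
    e.primary_language = 'python'

    Engineer(nonexistent_attribute='x')  # hypothetical name; raises TypeError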
Sessions
========
Note that ``declarative`` does nothing special with sessions, and is
only intended as an easier way to configure mappers and
:class:`~sqlalchemy.schema.Table` objects. A typical application
setup using :class:`~sqlalchemy.orm.scoping.scoped_session` might look like::
engine = create_engine('postgresql://scott:tiger@localhost/test')
Session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Mapped instances then make usage of
:class:`~sqlalchemy.orm.session.Session` in the usual way.
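A brief, hypothetical round trip using the setup above, assuming the
``Engineer`` class from the earlier examples::

    Base.metadata.create_all(engine)

    session = Session()
    session.add(Engineer(name='Kim', primary_language='python'))
    session.commit()

    for engineer in session.query(Engineer).filter_by(name='Kim'):
        print(engineer.primary_language)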
"""
from .api import declarative_base, synonym_for, comparable_using, \
instrument_declarative, ConcreteBase, AbstractConcreteBase, \
DeclarativeMeta, DeferredReflection, has_inherited_table,\
declared_attr, as_declarative
__all__ = ['declarative_base', 'synonym_for', 'has_inherited_table',
'comparable_using', 'instrument_declarative', 'declared_attr',
'ConcreteBase', 'AbstractConcreteBase', 'DeclarativeMeta',
'DeferredReflection']
| gpl-3.0 |
KNMI/VERCE | verce-hpc-pe/src/networkx/algorithms/simple_paths.py | 33 | 3813 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 by
# Sergio Nery Simoes <sergionery@gmail.com>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Sérgio Nery Simões <sergionery@gmail.com>',
'Aric Hagberg <aric.hagberg@gmail.com>'])
__all__ = ['all_simple_paths']
def all_simple_paths(G, source, target, cutoff=None):
"""Generate all simple paths in the graph G from source to target.
A simple path is a path with no repeated nodes.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
target : node
Ending node for path
cutoff : integer, optional
Depth to stop the search. Only paths of length <= cutoff are returned.
Returns
-------
path_generator: generator
A generator that produces lists of simple paths. If there are no paths
between the source and target within the given cutoff the generator
produces no output.
Examples
--------
>>> G = nx.complete_graph(4)
>>> for path in nx.all_simple_paths(G, source=0, target=3):
... print(path)
...
[0, 1, 2, 3]
[0, 1, 3]
[0, 2, 1, 3]
[0, 2, 3]
[0, 3]
>>> paths = nx.all_simple_paths(G, source=0, target=3, cutoff=2)
>>> print(list(paths))
[[0, 1, 3], [0, 2, 3], [0, 3]]
Notes
-----
This algorithm uses a modified depth-first search to generate the
paths [1]_. A single path can be found in `O(V+E)` time but the
number of simple paths in a graph can be very large, e.g. `O(n!)` in
the complete graph of order n.
References
----------
.. [1] R. Sedgewick, "Algorithms in C, Part 5: Graph Algorithms",
Addison Wesley Professional, 3rd ed., 2001.
See Also
--------
all_shortest_paths, shortest_path
"""
if source not in G:
raise nx.NetworkXError('source node %s not in graph'%source)
if target not in G:
raise nx.NetworkXError('target node %s not in graph'%target)
if cutoff is None:
cutoff = len(G)-1
if G.is_multigraph():
return _all_simple_paths_multigraph(G, source, target, cutoff=cutoff)
else:
return _all_simple_paths_graph(G, source, target, cutoff=cutoff)
def _all_simple_paths_graph(G, source, target, cutoff=None):
if cutoff < 1:
return
visited = [source]
stack = [iter(G[source])]
while stack:
children = stack[-1]
child = next(children, None)
if child is None:
stack.pop()
visited.pop()
elif len(visited) < cutoff:
if child == target:
yield visited + [target]
elif child not in visited:
visited.append(child)
stack.append(iter(G[child]))
else: # len(visited) == cutoff; only a direct step to the target can complete a path
if child == target or target in children:
yield visited + [target]
stack.pop()
visited.pop()
def _all_simple_paths_multigraph(G, source, target, cutoff=None):
if cutoff < 1:
return
visited = [source]
stack = [(v for u,v in G.edges(source))]
while stack:
children = stack[-1]
child = next(children, None)
if child is None:
stack.pop()
visited.pop()
elif len(visited) < cutoff:
if child == target:
yield visited + [target]
elif child not in visited:
visited.append(child)
stack.append((v for u,v in G.edges(child)))
else: # len(visited) == cutoff; each remaining parallel edge into the target yields a path
count = ([child]+list(children)).count(target)
for i in range(count):
yield visited + [target]
stack.pop()
visited.pop()
| mit |
Yen-Chung-En/2015cdb_W12 | static/Brython3.1.1-20150328-091302/Lib/gc.py | 743 | 3548 | """This module provides access to the garbage collector for reference cycles.
enable() -- Enable automatic garbage collection.
disable() -- Disable automatic garbage collection.
isenabled() -- Returns true if automatic collection is enabled.
collect() -- Do a full collection right now.
get_count() -- Return the current collection counts.
set_debug() -- Set debugging flags.
get_debug() -- Get debugging flags.
set_threshold() -- Set the collection thresholds.
get_threshold() -- Return the current collection thresholds.
get_objects() -- Return a list of all objects tracked by the collector.
is_tracked() -- Returns true if a given object is tracked.
get_referrers() -- Return the list of objects that refer to an object.
get_referents() -- Return the list of objects that an object refers to.
"""
DEBUG_COLLECTABLE = 2
DEBUG_LEAK = 38
DEBUG_SAVEALL = 32
DEBUG_STATS = 1
DEBUG_UNCOLLECTABLE = 4
class __loader__:
pass
callbacks = []
def collect(*args,**kw):
"""collect([generation]) -> n
With no arguments, run a full collection. The optional argument
may be an integer specifying which generation to collect. A ValueError
is raised if the generation number is invalid.
The number of unreachable objects is returned.
"""
pass
def disable(*args,**kw):
"""disable() -> None
Disable automatic garbage collection.
"""
pass
def enable(*args,**kw):
"""enable() -> None
Enable automatic garbage collection.
"""
pass
garbage = []
def get_count(*args,**kw):
"""get_count() -> (count0, count1, count2)
Return the current collection counts
"""
pass
def get_debug(*args,**kw):
"""get_debug() -> flags
Get the garbage collection debugging flags.
"""
pass
def get_objects(*args,**kw):
"""get_objects() -> [...]
Return a list of objects tracked by the collector (excluding the list
returned).
"""
pass
def get_referents(*args,**kw):
"""get_referents(*objs) -> list Return the list of objects that are directly referred to by objs."""
pass
def get_referrers(*args,**kw):
"""get_referrers(*objs) -> list Return the list of objects that directly refer to any of objs."""
pass
def get_threshold(*args,**kw):
"""get_threshold() -> (threshold0, threshold1, threshold2)
Return the current collection thresholds
"""
pass
def is_tracked(*args,**kw):
"""is_tracked(obj) -> bool
Returns true if the object is tracked by the garbage collector.
Simple atomic objects will return false.
"""
pass
def isenabled(*args,**kw):
"""isenabled() -> status
Returns true if automatic garbage collection is enabled.
"""
pass
def set_debug(*args,**kw):
"""set_debug(flags) -> None
Set the garbage collection debugging flags. Debugging information is
written to sys.stderr.
flags is an integer and can have the following bits turned on:
DEBUG_STATS - Print statistics during collection.
DEBUG_COLLECTABLE - Print collectable objects found.
DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects found.
DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
DEBUG_LEAK - Debug leaking programs (everything but STATS).
"""
pass
def set_threshold(*args,**kw):
"""set_threshold(threshold0, [threshold1, threshold2]) -> None
Sets the collection thresholds. Setting threshold0 to zero disables
collection.
"""
pass
| agpl-3.0 |
topicusonderwijs/zxing-ios | cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/packaging/__init__.py | 34 | 10758 | """SCons.Tool.Packaging
SCons Packaging Tool.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/__init__.py 5023 2010/06/14 22:05:46 scons"
import SCons.Environment
from SCons.Variables import *
from SCons.Errors import *
from SCons.Util import is_List, make_path_relative
from SCons.Warnings import warn, Warning
import os, imp
import SCons.Defaults
__all__ = [ 'src_targz', 'src_tarbz2', 'src_zip', 'tarbz2', 'targz', 'zip', 'rpm', 'msi', 'ipk' ]
#
# Utility and Builder function
#
def Tag(env, target, source, *more_tags, **kw_tags):
""" Tag a file with the given arguments, just sets the accordingly named
attribute on the file object.
TODO: FIXME
"""
if not target:
target=source
first_tag=None
else:
first_tag=source
if first_tag:
kw_tags[first_tag[0]] = ''
if len(kw_tags) == 0 and len(more_tags) == 0:
raise UserError("No tags given.")
# XXX: sanity checks
for x in more_tags:
kw_tags[x] = ''
if not SCons.Util.is_List(target):
target=[target]
else:
# hmm, sometimes the target list, is a list of a list
# make sure it is flattened prior to processing.
# TODO: perhaps some bug ?!?
target=env.Flatten(target)
for t in target:
for (k,v) in kw_tags.items():
# all file tags have to start with PACKAGING_, so we can later
# differentiate between "normal" object attributes and the
# packaging attributes. As the user should not be bothered with
# that, the prefix will be added here if missing.
#if not k.startswith('PACKAGING_'):
if k[:10] != 'PACKAGING_':
k='PACKAGING_'+k
setattr(t, k, v)
def Package(env, target=None, source=None, **kw):
""" Entry point for the package tool.
"""
# check if we need to find the source files ourself
if not source:
source = env.FindInstalledFiles()
if len(source)==0:
raise UserError("No source for Package() given")
# decide which types of packages shall be built. Can be defined through
# four mechanisms: command line argument, keyword argument,
# environment argument and default selection( zip or tar.gz ) in that
# order.
try: kw['PACKAGETYPE']=env['PACKAGETYPE']
except KeyError: pass
if not kw.get('PACKAGETYPE'):
from SCons.Script import GetOption
kw['PACKAGETYPE'] = GetOption('package_type')
if kw['PACKAGETYPE'] is None:
if 'Tar' in env['BUILDERS']:
kw['PACKAGETYPE']='targz'
elif 'Zip' in env['BUILDERS']:
kw['PACKAGETYPE']='zip'
else:
raise UserError("No type for Package() given")
PACKAGETYPE=kw['PACKAGETYPE']
if not is_List(PACKAGETYPE):
PACKAGETYPE=PACKAGETYPE.split(',')
# load the needed packagers.
def load_packager(type):
try:
file,path,desc=imp.find_module(type, __path__)
return imp.load_module(type, file, path, desc)
except ImportError, e:
raise EnvironmentError("packager %s not available: %s"%(type,str(e)))
packagers=list(map(load_packager, PACKAGETYPE))
# set up targets and the PACKAGEROOT
try:
# fill up the target list with a default target name until the PACKAGETYPE
# list is of the same size as the target list.
if not target: target = []
size_diff = len(PACKAGETYPE)-len(target)
default_name = "%(NAME)s-%(VERSION)s"
if size_diff>0:
default_target = default_name%kw
target.extend( [default_target]*size_diff )
if 'PACKAGEROOT' not in kw:
kw['PACKAGEROOT'] = default_name%kw
except KeyError, e:
raise SCons.Errors.UserError( "Missing Packagetag '%s'"%e.args[0] )
# setup the source files
source=env.arg2nodes(source, env.fs.Entry)
# call the packager to setup the dependencies.
targets=[]
try:
for packager in packagers:
t=[target.pop(0)]
t=packager.package(env,t,source, **kw)
targets.extend(t)
assert( len(target) == 0 )
except KeyError, e:
raise SCons.Errors.UserError( "Missing Packagetag '%s' for %s packager"\
% (e.args[0],packager.__name__) )
except TypeError, e:
# this exception means that a needed argument for the packager is
# missing. As our packagers get their "tags" as named function
# arguments we need to find out which one is missing.
from inspect import getargspec
args,varargs,varkw,defaults=getargspec(packager.package)
if defaults is not None:
args=args[:-len(defaults)] # throw away arguments with default values
args.remove('env')
args.remove('target')
args.remove('source')
# now remove any args for which we have a value in kw.
args=[x for x in args if x not in kw]
if len(args)==0:
raise # must be a different error, so reraise
elif len(args)==1:
raise SCons.Errors.UserError( "Missing Packagetag '%s' for %s packager"\
% (args[0],packager.__name__) )
else:
raise SCons.Errors.UserError( "Missing Packagetags '%s' for %s packager"\
% (", ".join(args),packager.__name__) )
target=env.arg2nodes(target, env.fs.Entry)
targets.extend(env.Alias( 'package', targets ))
return targets
#
# SCons tool initialization functions
#
added = None
def generate(env):
from SCons.Script import AddOption
global added
if not added:
added = 1
AddOption('--package-type',
dest='package_type',
default=None,
type="string",
action="store",
help='The type of package to create.')
try:
env['BUILDERS']['Package']
env['BUILDERS']['Tag']
except KeyError:
env['BUILDERS']['Package'] = Package
env['BUILDERS']['Tag'] = Tag
def exists(env):
return 1
# XXX
def options(opts):
opts.AddVariables(
EnumVariable( 'PACKAGETYPE',
'the type of package to create.',
None, allowed_values=list(map( str, __all__ )),
ignorecase=2
)
)
#
# Internal utility functions
#
def copy_attr(f1, f2):
""" copies the special packaging file attributes from f1 to f2.
"""
#pattrs = [x for x in dir(f1) if not hasattr(f2, x) and\
# x.startswith('PACKAGING_')]
copyit = lambda x: not hasattr(f2, x) and x[:10] == 'PACKAGING_'
pattrs = list(filter(copyit, dir(f1)))
for attr in pattrs:
setattr(f2, attr, getattr(f1, attr))
def putintopackageroot(target, source, env, pkgroot, honor_install_location=1):
""" Uses the CopyAs builder to copy all source files to the directory given
in pkgroot.
If honor_install_location is set and the copied source file has an
PACKAGING_INSTALL_LOCATION attribute, the PACKAGING_INSTALL_LOCATION is
used as the new name of the source file under pkgroot.
The source file will not be copied if it is already under the pkgroot
directory.
All attributes of the source file will be copied to the new file.
"""
# make sure the packageroot is a Dir object.
if SCons.Util.is_String(pkgroot): pkgroot=env.Dir(pkgroot)
if not SCons.Util.is_List(source): source=[source]
new_source = []
for file in source:
if SCons.Util.is_String(file): file = env.File(file)
if file.is_under(pkgroot):
new_source.append(file)
else:
if hasattr(file, 'PACKAGING_INSTALL_LOCATION') and\
honor_install_location:
new_name=make_path_relative(file.PACKAGING_INSTALL_LOCATION)
else:
new_name=make_path_relative(file.get_path())
new_file=pkgroot.File(new_name)
new_file=env.CopyAs(new_file, file)[0]
copy_attr(file, new_file)
new_source.append(new_file)
return (target, new_source)
def stripinstallbuilder(target, source, env):
""" strips the install builder action from the source list and stores
the final installation location as the "PACKAGING_INSTALL_LOCATION" of
the source of the source file. This effectively removes the final installed
files from the source list while remembering the installation location.
It also warns about files which have no install builder attached.
"""
def has_no_install_location(file):
return not (file.has_builder() and\
hasattr(file.builder, 'name') and\
(file.builder.name=="InstallBuilder" or\
file.builder.name=="InstallAsBuilder"))
if len(list(filter(has_no_install_location, source))):
warn(Warning, "there are files to package which have no\
InstallBuilder attached, this might lead to irreproducible packages")
n_source=[]
for s in source:
if has_no_install_location(s):
n_source.append(s)
else:
for ss in s.sources:
n_source.append(ss)
copy_attr(s, ss)
setattr(ss, 'PACKAGING_INSTALL_LOCATION', s.get_path())
return (target, n_source)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
HalcyonChimera/osf.io | tests/test_cas_authentication.py | 4 | 14397 | # -*- coding: utf-8 -*-
import furl
import responses
import mock
from nose.tools import * # noqa: F403
import unittest
from framework.auth import cas
from tests.base import OsfTestCase, fake
from osf_tests.factories import UserFactory
def make_successful_response(user):
return cas.CasResponse(
authenticated=True,
user=user._id,
attributes={
'accessToken': fake.md5()
}
)
def make_failure_response():
return cas.CasResponse(
authenticated=False,
user=None,
)
def make_external_response(release=True, unicode=False):
attributes = {
'accessToken': fake.md5(),
}
if release:
attributes.update({
'given-names': fake.first_name() if not unicode else u'нет',
'family-name': fake.last_name() if not unicode else u'Да',
})
return cas.CasResponse(
authenticated=True,
user='OrcidProfile#{}'.format(fake.numerify('####-####-####-####')),
attributes=attributes
)
def generate_external_user_with_resp(service_url, user=True, release=True):
"""
Generate mock user, external credential and cas response for tests.
:param service_url: the service url
:param user: set to `False` if user does not exist
:param release: set to `False` if attributes are not released due to privacy settings
:return: existing user object or new user, valid external credential, valid cas response
"""
cas_resp = make_external_response(release=release)
validated_credentials = cas.validate_external_credential(cas_resp.user)
if user:
user = UserFactory.build()
user.external_identity = {
validated_credentials['provider']: {
validated_credentials['id']: 'VERIFIED'
}
}
user.save()
return user, validated_credentials, cas_resp
else:
user = {
'external_id_provider': validated_credentials['provider'],
'external_id': validated_credentials['id'],
'fullname': '',
'access_token': cas_resp.attributes['accessToken'],
'service_url': service_url,
}
return user, validated_credentials, cas_resp
RESPONSE_TEMPLATE = """
<cas:serviceResponse xmlns:cas='http://www.yale.edu/tp/cas'>
<cas:authenticationSuccess>
<cas:user>{user_id}</cas:user>
<cas:attributes>
<cas:isFromNewLogin>true</cas:isFromNewLogin>
<cas:authenticationDate>Tue May 19 02:20:19 UTC 2015</cas:authenticationDate>
<cas:givenName>{given_name}</cas:givenName>
<cas:familyName>{family_name}</cas:familyName>
<cas:longTermAuthenticationRequestTokenUsed>true</cas:longTermAuthenticationRequestTokenUsed>
<cas:accessToken>{access_token}</cas:accessToken>
<cas:username>{username}</cas:username>
</cas:attributes>
</cas:authenticationSuccess>
</cas:serviceResponse>
"""
def make_service_validation_response_body(user, access_token=None):
token = access_token or fake.md5()
return RESPONSE_TEMPLATE.format(
user_id=user._id,
given_name=user.given_name,
family_name=user.family_name,
username=user.username,
access_token=token
)
def test_parse_authorization_header():
token = fake.md5()
valid = 'Bearer {}'.format(token)
assert_equal(cas.parse_auth_header(valid), token)
missing_token = 'Bearer '
with assert_raises(cas.CasTokenError):
cas.parse_auth_header(missing_token)
class TestCASClient(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.base_url = 'http://accounts.test.test'
self.client = cas.CasClient(self.base_url)
@responses.activate
def test_service_validate(self):
user = UserFactory()
url = furl.furl(self.base_url)
url.path.segments.extend(('p3', 'serviceValidate',))
service_url = 'http://test.osf.io'
ticket = fake.md5()
body = make_service_validation_response_body(user, ticket)
responses.add(
responses.Response(
responses.GET,
url.url,
body=body,
status=200,
)
)
resp = self.client.service_validate(ticket, service_url)
assert_true(resp.authenticated)
@responses.activate
def test_service_validate_invalid_ticket_raises_error(self):
url = furl.furl(self.base_url)
url.path.segments.extend(('p3', 'serviceValidate',))
service_url = 'http://test.osf.io'
# Return error response
responses.add(
responses.Response(
responses.GET,
url.url,
body='invalid ticket...',
status=500,
)
)
with assert_raises(cas.CasHTTPError):
self.client.service_validate('invalid', service_url)
@responses.activate
def test_profile_invalid_access_token_raises_error(self):
url = furl.furl(self.base_url)
url.path.segments.extend(('oauth2', 'profile',))
responses.add(
responses.Response(
responses.GET,
url.url,
status=500,
)
)
with assert_raises(cas.CasHTTPError):
self.client.profile('invalid-access-token')
@responses.activate
def test_application_token_revocation_succeeds(self):
url = self.client.get_auth_token_revocation_url()
client_id= 'fake_id'
client_secret = 'fake_secret'
responses.add(
responses.Response(
responses.POST,
url,
body={'client_id': client_id,
'client_secret': client_secret},
status=204
)
)
res = self.client.revoke_application_tokens(client_id, client_secret)
assert_equal(res, True)
@responses.activate
def test_application_token_revocation_fails(self):
url = self.client.get_auth_token_revocation_url()
client_id= 'fake_id'
client_secret = 'fake_secret'
responses.add(
responses.Response(
responses.POST,
url,
body={'client_id': client_id,
'client_secret': client_secret},
status=400
)
)
with assert_raises(cas.CasHTTPError):
res = self.client.revoke_application_tokens(client_id, client_secret)
@unittest.skip('finish me')
def test_profile_valid_access_token_returns_cas_response(self):
assert 0
@unittest.skip('finish me')
def test_get_login_url(self):
assert 0
@unittest.skip('finish me')
def test_get_logout_url(self):
assert 0
class TestCASTicketAuthentication(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = UserFactory()
@mock.patch('framework.auth.cas.get_user_from_cas_resp')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_success(self, mock_service_validate, mock_get_user_from_cas_resp):
mock_service_validate.return_value = make_successful_response(self.user)
mock_get_user_from_cas_resp.return_value = (self.user, None, 'authenticate')
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(resp.status_code, 302)
assert_equal(mock_service_validate.call_count, 1)
assert_equal(mock_get_user_from_cas_resp.call_count, 1)
@mock.patch('framework.auth.cas.get_user_from_cas_resp')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_failure(self, mock_service_validate, mock_get_user_from_cas_resp):
mock_service_validate.return_value = make_failure_response()
mock_get_user_from_cas_resp.return_value = (None, None, None)
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(resp.status_code, 302)
assert_equal(mock_service_validate.call_count, 1)
assert_equal(mock_get_user_from_cas_resp.call_count, 0)
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_invalidates_verification_key(self, mock_service_validate):
self.user.verification_key = fake.md5()
self.user.save()
mock_service_validate.return_value = make_successful_response(self.user)
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
self.user.reload()
assert_true(self.user.verification_key is None)
class TestCASExternalLogin(OsfTestCase):
def setUp(self):
super(TestCASExternalLogin, self).setUp()
self.user = UserFactory()
def test_get_user_from_cas_resp_already_authorized(self):
mock_response = make_external_response()
validated_creds = cas.validate_external_credential(mock_response.user)
self.user.external_identity = {
validated_creds['provider']: {
validated_creds['id']: 'VERIFIED'
}
}
self.user.save()
user, external_credential, action = cas.get_user_from_cas_resp(mock_response)
assert_equal(user._id, self.user._id)
assert_equal(external_credential, validated_creds)
assert_equal(action, 'authenticate')
def test_get_user_from_cas_resp_not_authorized(self):
user, external_credential, action = cas.get_user_from_cas_resp(make_external_response())
assert_equal(user, None)
assert_true(external_credential is not None)
assert_equal(action, 'external_first_login')
@mock.patch('framework.auth.cas.get_user_from_cas_resp')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_with_user(self, mock_service_validate, mock_get_user_from_cas_resp):
mock_response = make_external_response()
mock_service_validate.return_value = mock_response
validated_creds = cas.validate_external_credential(mock_response.user)
self.user.external_identity = {
validated_creds['provider']: {
validated_creds['id']: 'VERIFIED'
}
}
self.user.save()
mock_get_user_from_cas_resp.return_value = (self.user, validated_creds, 'authenticate')
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(mock_service_validate.call_count, 1)
assert_true(mock_get_user_from_cas_resp.call_count, 1)
assert_equal(resp.status_code, 302)
assert_in('/logout?service=', resp.headers['Location'])
assert_in('/login?service=', resp.headers['Location'])
@mock.patch('framework.auth.cas.get_user_from_cas_resp')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_no_user(self, mock_service_validate, mock_get_user_from_cas_resp):
mock_response = make_external_response()
mock_service_validate.return_value = mock_response
validated_creds = cas.validate_external_credential(mock_response.user)
mock_get_user_from_cas_resp.return_value = (None, validated_creds, 'external_first_login')
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(mock_service_validate.call_count, 1)
assert_true(mock_get_user_from_cas_resp.call_count, 1)
assert_equal(resp.status_code, 302)
assert_equal(resp.location, '/external-login/email')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_generates_new_verification_key(self, mock_service_validate):
self.user.verification_key = fake.md5()
self.user.save()
mock_response = make_external_response()
validated_creds = cas.validate_external_credential(mock_response.user)
self.user.external_identity = {
validated_creds['provider']: {
validated_creds['id']: 'VERIFIED'
}
}
self.user.save()
mock_service_validate.return_value = mock_response
ticket = fake.md5()
service_url = 'http://localhost:5000/'
verification_key = self.user.verification_key
resp = cas.make_response_from_ticket(ticket, service_url)
self.user.reload()
assert_not_equal(self.user.verification_key, verification_key)
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_handles_unicode(self, mock_service_validate):
mock_response = make_external_response(unicode=True)
mock_service_validate.return_value = mock_response
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(resp.status_code, 302)
assert_equal(mock_service_validate.call_count, 1)
first_call_args = mock_service_validate.call_args[0]
assert_equal(first_call_args[0], ticket)
assert_equal(first_call_args[1], 'http://localhost:5000/')
@mock.patch('framework.auth.cas.CasClient.service_validate')
def test_make_response_from_ticket_handles_non_unicode(self, mock_service_validate):
mock_response = make_external_response()
mock_service_validate.return_value = mock_response
ticket = fake.md5()
service_url = 'http://localhost:5000/'
resp = cas.make_response_from_ticket(ticket, service_url)
assert_equal(resp.status_code, 302)
assert_equal(mock_service_validate.call_count, 1)
first_call_args = mock_service_validate.call_args[0]
assert_equal(first_call_args[0], ticket)
assert_equal(first_call_args[1], 'http://localhost:5000/')
| apache-2.0 |
JohnGriffiths/dipy | scratch/very_scratch/eddy_currents.py | 22 | 1282 | import numpy as np
import dipy as dp
import nibabel as ni
dname = '/home/eg01/Data_Backup/Data/Eleftherios/CBU090133_METHODS/20090227_145404/Series_003_CBU_DTI_64D_iso_1000'
#dname = '/home/eg01/Data_Backup/Data/Frank_Eleftherios/frank/20100511_m030y_cbu100624/08_ep2d_advdiff_101dir_DSI'
data,affine,bvals,gradients=dp.load_dcm_dir(dname)
'''
rot=np.array([[1,0,0,0],
[0,np.cos(np.pi/2),-np.sin(np.pi/2),0],
[0,np.sin(np.pi/2), np.cos(np.pi/2),0],
[0,0,0,1]])
from scipy.ndimage import affine_transform as aff
naffine=np.dot(affine,rot)
'''
data[:,:,:,1]
source=ni.Nifti1Image(data[:,:,:,1],affine)
target=ni.Nifti1Image(data[:,:,:,0],affine)
#similarity 'cc', 'cr', 'crl1', 'mi', je', 'ce', 'nmi', 'smi'. 'cr'
similarity='cr'
#interp 'pv', 'tri'
interp = 'tri'
#subsampling None or sequence (3,)
subsampling=None
#search 'affine', 'rigid', 'similarity' or ['rigid','affine']
search='affine'
#optimizer 'simplex', 'powell', 'steepest', 'cg', 'bfgs' or
#sequence of optimizers
optimizer= 'powell'
T=dp.volume_register(source,target,similarity,\
interp,subsampling,search,)
sourceT=dp.volume_transform(source, T.inv(), reference=target)
s=source.get_data()
t=target.get_data()
sT=sourceT.get_data()
| bsd-3-clause |
staticlibs/android-ndk-r9d-arm-linux-androideabi-4.8 | lib/python2.7/sqlite3/test/transactions.py | 127 | 7347 | #-*- coding: ISO-8859-1 -*-
# pysqlite2/test/transactions.py: tests transactions
#
# Copyright (C) 2005-2007 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import sys
import os, unittest
import sqlite3 as sqlite
def get_db_path():
return "sqlite_testdb"
class TransactionTests(unittest.TestCase):
def setUp(self):
try:
os.remove(get_db_path())
except OSError:
pass
self.con1 = sqlite.connect(get_db_path(), timeout=0.1)
self.cur1 = self.con1.cursor()
self.con2 = sqlite.connect(get_db_path(), timeout=0.1)
self.cur2 = self.con2.cursor()
def tearDown(self):
self.cur1.close()
self.con1.close()
self.cur2.close()
self.con2.close()
try:
os.unlink(get_db_path())
except OSError:
pass
def CheckDMLdoesAutoCommitBefore(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.cur1.execute("create table test2(j)")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
def CheckInsertStartsTransaction(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 0)
def CheckUpdateStartsTransaction(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.con1.commit()
self.cur1.execute("update test set i=6")
self.cur2.execute("select i from test")
res = self.cur2.fetchone()[0]
self.assertEqual(res, 5)
def CheckDeleteStartsTransaction(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.con1.commit()
self.cur1.execute("delete from test")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
def CheckReplaceStartsTransaction(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.con1.commit()
self.cur1.execute("replace into test(i) values (6)")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
self.assertEqual(res[0][0], 5)
def CheckToggleAutoCommit(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.con1.isolation_level = None
self.assertEqual(self.con1.isolation_level, None)
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
self.con1.isolation_level = "DEFERRED"
self.assertEqual(self.con1.isolation_level , "DEFERRED")
self.cur1.execute("insert into test(i) values (5)")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
def CheckRaiseTimeout(self):
if sqlite.sqlite_version_info < (3, 2, 2):
# This will fail (hang) on earlier versions of sqlite.
# Determine exact version it was fixed. 3.2.1 hangs.
return
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
try:
self.cur2.execute("insert into test(i) values (5)")
self.fail("should have raised an OperationalError")
except sqlite.OperationalError:
pass
except:
self.fail("should have raised an OperationalError")
def CheckLocking(self):
"""
This tests the improved concurrency with pysqlite 2.3.4. You needed
to roll back con2 before you could commit con1.
"""
if sqlite.sqlite_version_info < (3, 2, 2):
# This will fail (hang) on earlier versions of sqlite.
# Determine exact version it was fixed. 3.2.1 hangs.
return
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
try:
self.cur2.execute("insert into test(i) values (5)")
self.fail("should have raised an OperationalError")
except sqlite.OperationalError:
pass
except:
self.fail("should have raised an OperationalError")
# NO self.con2.rollback() HERE!!!
self.con1.commit()
def CheckRollbackCursorConsistency(self):
"""
Checks if cursors on the connection are set into a "reset" state
when a rollback is done on the connection.
"""
con = sqlite.connect(":memory:")
cur = con.cursor()
cur.execute("create table test(x)")
cur.execute("insert into test(x) values (5)")
cur.execute("select 1 union select 2 union select 3")
con.rollback()
try:
cur.fetchall()
self.fail("InterfaceError should have been raised")
except sqlite.InterfaceError, e:
pass
except:
self.fail("InterfaceError should have been raised")
class SpecialCommandTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
self.cur = self.con.cursor()
def CheckVacuum(self):
self.cur.execute("create table test(i)")
self.cur.execute("insert into test(i) values (5)")
self.cur.execute("vacuum")
def CheckDropTable(self):
self.cur.execute("create table test(i)")
self.cur.execute("insert into test(i) values (5)")
self.cur.execute("drop table test")
def CheckPragma(self):
self.cur.execute("create table test(i)")
self.cur.execute("insert into test(i) values (5)")
self.cur.execute("pragma count_changes=1")
def tearDown(self):
self.cur.close()
self.con.close()
def suite():
default_suite = unittest.makeSuite(TransactionTests, "Check")
special_command_suite = unittest.makeSuite(SpecialCommandTests, "Check")
return unittest.TestSuite((default_suite, special_command_suite))
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
| gpl-2.0 |
FrankWang33/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
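# Test errors are recorded only once every testing_freq batches; stack,
# tile and pad them below so the test curve lines up point-for-point with
# the per-batch training curve.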
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
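# Assemble all filters into one mosaic image ("bigpic"), leaving a
# 1-pixel border between tiles; when channels are not combined into RGB,
# each channel of a filter is drawn side by side in grayscale.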
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
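# (Assumes channel 0 is Y, channel 1 is U and channel 2 is V; the
# coefficients below are the standard BT.709 YUV -> RGB conversion.)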
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
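# PRED_IDX is the column of `preds` treated as the positive-class score
# when binarizing below (binary-classification assumption).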
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
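# NOTE: `gfile` (a Google filesystem wrapper) and `StringIO` are assumed
# to be pulled in by the `from python_util.util import *` above; neither
# is imported explicitly in this file.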
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
yinquan529/platform-external-chromium_org | tools/PRESUBMIT.py | 43 | 1548 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for bisect/perf trybot.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
import imp
import os
def _ExamineConfigFiles(input_api):
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith('run-bisect-perf-regression.cfg') and
not f.LocalPath().endswith('run-perf-test.cfg')):
continue
try:
cfg_file = imp.load_source('config', os.path.basename(f.LocalPath()))
for k, v in cfg_file.config.iteritems():
if v:
return f.LocalPath()
except (IOError, AttributeError, TypeError):
return f.LocalPath()
return None
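# A compliant config file is expected to look like (hypothetical field names):
# config = {'command': '', 'good_revision': '', 'bad_revision': ''}
# i.e. a dict whose values are all empty, so _ExamineConfigFiles returns None.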
def _CheckNoChangesToBisectConfigFile(input_api, output_api):
results = _ExamineConfigFiles(input_api)
if results:
return [output_api.PresubmitError(
'The bisection config file should only contain a config dict with '
'empty fields. Changes to this file should never be submitted.',
items=[results])]
return []
def CommonChecks(input_api, output_api):
results = []
results.extend(_CheckNoChangesToBisectConfigFile(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
| bsd-3-clause |
thesuperzapper/tensorflow | tensorflow/python/kernel_tests/sparse_serialization_ops_test.py | 99 | 8330 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SerializeSparse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SerializeSparseTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
if dtype is None:
dtype = dtypes.int32
return sparse_tensor_lib.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
[2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def testSerializeDeserializeMany(self):
with self.test_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = sparse_ops.serialize_sparse(sp_input0)
serialized1 = sparse_ops.serialize_sparse(sp_input1)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = sparse_ops.deserialize_many_sparse(
serialized_concat, dtype=dtypes.int32)
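# deserialize_many_sparse prepends a minibatch dimension: indices coming
# from input i are prefixed with i, which the assertions below rely on.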
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
self.assertAllEqual(combined_values[:6], sp_input0[1])
self.assertAllEqual(combined_values[6:], sp_input1[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
def testFeedSerializeDeserializeMany(self):
with self.test_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = sparse_ops.serialize_sparse(sp_input0)
serialized1 = sparse_ops.serialize_sparse(sp_input1)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = sparse_ops.deserialize_many_sparse(
serialized_concat, dtype=dtypes.int32)
combined_indices, combined_values, combined_shape = sess.run(
sp_deserialized, {sp_input0: input0_val,
sp_input1: input1_val})
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
self.assertAllEqual(combined_values[:6], input0_val[1])
self.assertAllEqual(combined_values[6:], input1_val[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
def testSerializeManyDeserializeManyRoundTrip(self):
with self.test_session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
serialized = sparse_ops.serialize_many_sparse(sparse_tensor)
deserialized = sparse_ops.deserialize_many_sparse(
serialized, dtype=dtypes.string)
serialized_value, deserialized_value = sess.run(
[serialized, deserialized],
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertEqual(serialized_value.shape, (4, 3))
self.assertAllEqual(deserialized_value.indices, indices_value)
self.assertAllEqual(deserialized_value.values, values_value)
self.assertAllEqual(deserialized_value.dense_shape, shape_value)
def testDeserializeFailsWrongType(self):
with self.test_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
serialized0 = sparse_ops.serialize_sparse(sp_input0)
serialized1 = sparse_ops.serialize_sparse(sp_input1)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = sparse_ops.deserialize_many_sparse(
serialized_concat, dtype=dtypes.int64)
with self.assertRaisesOpError(
r"Requested SparseTensor of type int64 but "
r"SparseTensor\[0\].values.dtype\(\) == int32"):
sess.run(sp_deserialized,
{sp_input0: input0_val,
sp_input1: input1_val})
def testDeserializeFailsInconsistentRank(self):
with self.test_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
sp_input1 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
serialized0 = sparse_ops.serialize_sparse(sp_input0)
serialized1 = sparse_ops.serialize_sparse(sp_input1)
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = sparse_ops.deserialize_many_sparse(
serialized_concat, dtype=dtypes.int32)
with self.assertRaisesOpError(
r"Inconsistent rank across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
sess.run(sp_deserialized,
{sp_input0: input0_val,
sp_input1: input1_val})
def testDeserializeFailsInvalidProto(self):
with self.test_session(use_gpu=False) as sess:
sp_input0 = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
serialized0 = sparse_ops.serialize_sparse(sp_input0)
serialized1 = ["a", "b", "c"]
serialized_concat = array_ops.stack([serialized0, serialized1])
sp_deserialized = sparse_ops.deserialize_many_sparse(
serialized_concat, dtype=dtypes.int32)
with self.assertRaisesOpError(
r"Could not parse serialized_sparse\[1, 0\]"):
sess.run(sp_deserialized, {sp_input0: input0_val})
if __name__ == "__main__":
test.main()
| apache-2.0 |
telefonicaid/fiware-cloto | fiware_cloto/environments/log.py | 4 | 1219 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright 2014 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
#
__author__ = 'gjp'
import logging
from fiware_cloto.cloto_settings.settings import LOGGING_PATH
logger = logging.getLogger('RuleEngine')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(LOGGING_PATH + '/RuleEngine.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s policymanager.cloto [-] %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
| apache-2.0 |
marado/youtube-dl | youtube_dl/extractor/npo.py | 1 | 12656 | from __future__ import unicode_literals
from .subtitles import SubtitlesInfoExtractor
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
parse_duration,
qualities,
strip_jsonp,
unified_strdate,
url_basename,
)
class NPOBaseIE(SubtitlesInfoExtractor):
def _get_token(self, video_id):
token_page = self._download_webpage(
'http://ida.omroep.nl/npoplayer/i.js',
video_id, note='Downloading token')
return self._search_regex(
r'npoplayer\.token = "(.+?)"', token_page, 'token')
class NPOIE(NPOBaseIE):
IE_NAME = 'npo.nl'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/(?!live|radio)[^/]+/[^/]+/(?P<id>[^/?]+)'
_TESTS = [
{
'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
'md5': '4b3f9c429157ec4775f2c9cb7b911016',
'info_dict': {
'id': 'VPWON_1220719',
'ext': 'm4v',
'title': 'Nieuwsuur',
'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
'upload_date': '20140622',
},
},
{
'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
'info_dict': {
'id': 'VARA_101191800',
'ext': 'm4v',
'title': 'De Mega Mike & Mega Thomas show',
'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
'upload_date': '20090227',
'duration': 2400,
},
},
{
'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht',
'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
'upload_date': '20130225',
'duration': 3000,
},
},
{
'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
'info_dict': {
'id': 'WO_VPRO_043706',
'ext': 'wmv',
'title': 'De nieuwe mens - Deel 1',
'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
'duration': 4680,
},
'params': {
# mplayer mms download
'skip_download': True,
}
},
# non asf in streams
{
'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
'md5': 'b3da13de374cbe2d5332a7e910bef97f',
'info_dict': {
'id': 'WO_NOS_762771',
'ext': 'mp4',
'title': 'Hoe gaat Europa verder na Parijs?',
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._get_info(video_id)
def _get_info(self, video_id):
metadata = self._download_json(
'http://e.omroep.nl/metadata/aflevering/%s' % video_id,
video_id,
# We have to remove the javascript callback
transform_source=strip_jsonp,
)
token = self._get_token(video_id)
formats = []
pubopties = metadata.get('pubopties')
if pubopties:
quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
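# qualities() ranks format ids by list position: entries later in the
# list above are preferred.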
for format_id in pubopties:
format_info = self._download_json(
'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s'
% (video_id, format_id, token),
video_id, 'Downloading %s JSON' % format_id)
if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
continue
streams = format_info.get('streams')
if streams:
video_info = self._download_json(
streams[0] + '&type=json',
video_id, 'Downloading %s stream JSON' % format_id)
else:
video_info = format_info
video_url = video_info.get('url')
if not video_url:
continue
if format_id == 'adaptive':
formats.extend(self._extract_m3u8_formats(video_url, video_id))
else:
formats.append({
'url': video_url,
'format_id': format_id,
'quality': quality(format_id),
})
streams = metadata.get('streams')
if streams:
for i, stream in enumerate(streams):
stream_url = stream.get('url')
if not stream_url:
continue
if '.asf' not in stream_url:
formats.append({
'url': stream_url,
'quality': stream.get('kwaliteit'),
})
continue
asx = self._download_xml(
stream_url, video_id,
'Downloading stream %d ASX playlist' % i,
transform_source=fix_xml_ampersands)
ref = asx.find('./ENTRY/Ref')
if ref is None:
continue
video_url = ref.get('href')
if not video_url:
continue
formats.append({
'url': video_url,
'ext': stream.get('formaat', 'asf'),
'quality': stream.get('kwaliteit'),
})
self._sort_formats(formats)
subtitles = {}
if metadata.get('tt888') == 'ja':
subtitles['nl'] = 'http://e.omroep.nl/tt888/%s' % video_id
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return
subtitles = self.extract_subtitles(video_id, subtitles)
return {
'id': video_id,
'title': metadata['titel'],
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'upload_date': unified_strdate(metadata.get('gidsdatum')),
'duration': parse_duration(metadata.get('tijdsduur')),
'formats': formats,
'subtitles': subtitles,
}
class NPOLiveIE(NPOBaseIE):
IE_NAME = 'npo.nl:live'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P<id>.+)'
_TEST = {
'url': 'http://www.npo.nl/live/npo-1',
'info_dict': {
'id': 'LI_NEDERLAND1_136692',
'display_id': 'npo-1',
'ext': 'mp4',
'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'Livestream',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
live_id = self._search_regex(
r'data-prid="([^"]+)"', webpage, 'live id')
metadata = self._download_json(
'http://e.omroep.nl/metadata/%s' % live_id,
display_id, transform_source=strip_jsonp)
token = self._get_token(display_id)
formats = []
streams = metadata.get('streams')
if streams:
for stream in streams:
stream_type = stream.get('type').lower()
if stream_type == 'ss':
continue
stream_info = self._download_json(
'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp'
% (stream.get('url'), token),
display_id, 'Downloading %s JSON' % stream_type)
if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0):
continue
stream_url = self._download_json(
stream_info['stream'], display_id,
'Downloading %s URL' % stream_type,
transform_source=strip_jsonp)
if stream_type == 'hds':
f4m_formats = self._extract_f4m_formats(stream_url, display_id)
# The f4m downloader fetches only a piece of the live stream
for f4m_format in f4m_formats:
f4m_format['preference'] = -1
formats.extend(f4m_formats)
elif stream_type == 'hls':
formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4'))
else:
formats.append({
'url': stream_url,
})
self._sort_formats(formats)
return {
'id': live_id,
'display_id': display_id,
'title': self._live_title(metadata['titel']),
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'formats': formats,
'is_live': True,
}
class NPORadioIE(InfoExtractor):
IE_NAME = 'npo.nl:radio'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)/?$'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-1',
'info_dict': {
'id': 'radio-1',
'ext': 'mp3',
'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
@staticmethod
def _html_get_attribute_regex(attribute):
return r'{0}\s*=\s*\'([^\']+)\''.format(attribute)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
self._html_get_attribute_regex('data-channel'), webpage, 'title')
stream = self._parse_json(
self._html_search_regex(self._html_get_attribute_regex('data-streams'), webpage, 'data-streams'),
video_id)
codec = stream.get('codec')
return {
'id': video_id,
'url': stream['url'],
'title': self._live_title(title),
'acodec': codec,
'ext': codec,
'is_live': True,
}
class NPORadioFragmentIE(InfoExtractor):
IE_NAME = 'npo.nl:radio:fragment'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-5/fragment/174356',
'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2',
'info_dict': {
'id': '174356',
'ext': 'mp3',
'title': 'Jubileumconcert Willeke Alberti',
},
}
def _real_extract(self, url):
audio_id = self._match_id(url)
webpage = self._download_webpage(url, audio_id)
title = self._html_search_regex(
r'href="/radio/[^/]+/fragment/%s" title="([^"]+)"' % audio_id,
webpage, 'title')
audio_url = self._search_regex(
r"data-streams='([^']+)'", webpage, 'audio url')
return {
'id': audio_id,
'url': audio_url,
'title': title,
}
class TegenlichtVproIE(NPOIE):
IE_NAME = 'tegenlicht.vpro.nl'
_VALID_URL = r'https?://tegenlicht\.vpro\.nl/afleveringen/.*?'
_TESTS = [
{
'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht',
'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
'upload_date': '20130225',
},
},
]
def _real_extract(self, url):
name = url_basename(url)
webpage = self._download_webpage(url, name)
urn = self._html_search_meta('mediaurn', webpage)
info_page = self._download_json(
'http://rs.vpro.nl/v2/api/media/%s.json' % urn, name)
return self._get_info(info_page['mid'])
| unlicense |
jjmleiro/hue | desktop/core/ext-py/Pygments-1.3.1/pygments/lexers/functional.py | 55 | 28768 | # -*- coding: utf-8 -*-
"""
pygments.lexers.functional
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for functional languages.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, \
String, Number, Punctuation, Literal, Generic
__all__ = ['SchemeLexer', 'CommonLispLexer', 'HaskellLexer', 'LiterateHaskellLexer',
'OcamlLexer', 'ErlangLexer', 'ErlangShellLexer']
class SchemeLexer(RegexLexer):
"""
A Scheme lexer, parsing a stream and outputting the tokens
needed to highlight Scheme code.
This lexer could most probably be subclassed fairly easily to parse
other Lisp dialects such as Common Lisp, Emacs Lisp or AutoLisp.
This parser is checked with pastes from the LISP pastebin
at http://paste.lisp.org/ to cover as much syntax as possible.
It supports the full Scheme syntax as defined in R5RS.
*New in Pygments 0.6.*
"""
name = 'Scheme'
aliases = ['scheme', 'scm']
filenames = ['*.scm']
mimetypes = ['text/x-scheme', 'application/x-scheme']
# list of known keywords and builtins taken from vim 6.4 scheme.vim
# syntax file.
keywords = [
'lambda', 'define', 'if', 'else', 'cond', 'and', 'or', 'case', 'let',
'let*', 'letrec', 'begin', 'do', 'delay', 'set!', '=>', 'quote',
'quasiquote', 'unquote', 'unquote-splicing', 'define-syntax',
'let-syntax', 'letrec-syntax', 'syntax-rules'
]
builtins = [
'*', '+', '-', '/', '<', '<=', '=', '>', '>=', 'abs', 'acos', 'angle',
'append', 'apply', 'asin', 'assoc', 'assq', 'assv', 'atan',
'boolean?', 'caaaar', 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr',
'caar', 'cadaar', 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr',
'cadr', 'call-with-current-continuation', 'call-with-input-file',
'call-with-output-file', 'call-with-values', 'call/cc', 'car',
'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr',
'cdr', 'ceiling', 'char->integer', 'char-alphabetic?', 'char-ci<=?',
'char-ci<?', 'char-ci=?', 'char-ci>=?', 'char-ci>?', 'char-downcase',
'char-lower-case?', 'char-numeric?', 'char-ready?', 'char-upcase',
'char-upper-case?', 'char-whitespace?', 'char<=?', 'char<?', 'char=?',
'char>=?', 'char>?', 'char?', 'close-input-port', 'close-output-port',
'complex?', 'cons', 'cos', 'current-input-port', 'current-output-port',
'denominator', 'display', 'dynamic-wind', 'eof-object?', 'eq?',
'equal?', 'eqv?', 'eval', 'even?', 'exact->inexact', 'exact?', 'exp',
'expt', 'floor', 'for-each', 'force', 'gcd', 'imag-part',
'inexact->exact', 'inexact?', 'input-port?', 'integer->char',
'integer?', 'interaction-environment', 'lcm', 'length', 'list',
'list->string', 'list->vector', 'list-ref', 'list-tail', 'list?',
'load', 'log', 'magnitude', 'make-polar', 'make-rectangular',
'make-string', 'make-vector', 'map', 'max', 'member', 'memq', 'memv',
'min', 'modulo', 'negative?', 'newline', 'not', 'null-environment',
'null?', 'number->string', 'number?', 'numerator', 'odd?',
'open-input-file', 'open-output-file', 'output-port?', 'pair?',
'peek-char', 'port?', 'positive?', 'procedure?', 'quotient',
'rational?', 'rationalize', 'read', 'read-char', 'real-part', 'real?',
'remainder', 'reverse', 'round', 'scheme-report-environment',
'set-car!', 'set-cdr!', 'sin', 'sqrt', 'string', 'string->list',
'string->number', 'string->symbol', 'string-append', 'string-ci<=?',
'string-ci<?', 'string-ci=?', 'string-ci>=?', 'string-ci>?',
'string-copy', 'string-fill!', 'string-length', 'string-ref',
'string-set!', 'string<=?', 'string<?', 'string=?', 'string>=?',
'string>?', 'string?', 'substring', 'symbol->string', 'symbol?',
'tan', 'transcript-off', 'transcript-on', 'truncate', 'values',
'vector', 'vector->list', 'vector-fill!', 'vector-length',
'vector-ref', 'vector-set!', 'vector?', 'with-input-from-file',
'with-output-to-file', 'write', 'write-char', 'zero?'
]
# valid names for identifiers
# well, names just may not consist entirely of numbers,
# but this should be good enough for now
valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~|-]+'
tokens = {
'root' : [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
# support for uncommon kinds of numbers -
# have to figure out what the characters mean
#(r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"#\\([()/'\".'_!§$%& ?=+-]{1}|[a-zA-Z0-9]+)", String.Char),
# constants
(r'(#t|#f)', Name.Constant),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# highlight the keywords
('(%s)' % '|'.join([
re.escape(entry) + ' ' for entry in keywords]),
Keyword
),
# first variable in a quoted form like
# '(this is syntactic sugar)
(r"(?<='\()" + valid_name, Name.Variable),
(r"(?<=#\()" + valid_name, Name.Variable),
# highlight the builtins
("(?<=\()(%s)" % '|'.join([
re.escape(entry) + ' ' for entry in builtins]),
Name.Builtin
),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
}
class CommonLispLexer(RegexLexer):
"""
A Common Lisp lexer.
*New in Pygments 0.9.*
"""
name = 'Common Lisp'
aliases = ['common-lisp', 'cl']
filenames = ['*.cl', '*.lisp', '*.el'] # use for Elisp too
mimetypes = ['text/x-common-lisp']
flags = re.IGNORECASE | re.MULTILINE
### couple of useful regexes
# characters that are not macro-characters and can be used to begin a symbol
nonmacro = r'\\.|[a-zA-Z0-9!$%&*+-/<=>?@\[\]^_{}~]'
constituent = nonmacro + '|[#.:]'
terminated = r'(?=[ "()\'\n,;`])' # whitespace or terminating macro characters
### symbol token, reverse-engineered from hyperspec
# Take a deep breath...
symbol = r'(\|[^|]+\||(?:%s)(?:%s)*)' % (nonmacro, constituent)
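# i.e. either a |-quoted symbol, or a non-macro character followed by
# any number of constituent characters.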
def __init__(self, **options):
from pygments.lexers._clbuiltins import BUILTIN_FUNCTIONS, \
SPECIAL_FORMS, MACROS, LAMBDA_LIST_KEYWORDS, DECLARATIONS, \
BUILTIN_TYPES, BUILTIN_CLASSES
self.builtin_function = BUILTIN_FUNCTIONS
self.special_forms = SPECIAL_FORMS
self.macros = MACROS
self.lambda_list_keywords = LAMBDA_LIST_KEYWORDS
self.declarations = DECLARATIONS
self.builtin_types = BUILTIN_TYPES
self.builtin_classes = BUILTIN_CLASSES
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Variable:
if value in self.builtin_function:
yield index, Name.Builtin, value
continue
if value in self.special_forms:
yield index, Keyword, value
continue
if value in self.macros:
yield index, Name.Builtin, value
continue
if value in self.lambda_list_keywords:
yield index, Keyword, value
continue
if value in self.declarations:
yield index, Keyword, value
continue
if value in self.builtin_types:
yield index, Keyword.Type, value
continue
if value in self.builtin_classes:
yield index, Name.Class, value
continue
yield index, token, value
tokens = {
'root' : [
('', Text, 'body'),
],
'multiline-comment' : [
(r'#\|', Comment.Multiline, '#push'), # (cf. Hyperspec 2.4.8.19)
(r'\|#', Comment.Multiline, '#pop'),
(r'[^|#]+', Comment.Multiline),
(r'[|#]', Comment.Multiline),
],
'commented-form' : [
(r'\(', Comment.Preproc, '#push'),
(r'\)', Comment.Preproc, '#pop'),
(r'[^()]+', Comment.Preproc),
],
'body' : [
# whitespace
(r'\s+', Text),
# single-line comment
(r';.*$', Comment.Single),
# multi-line comment
(r'#\|', Comment.Multiline, 'multiline-comment'),
# encoding comment (?)
(r'#\d*Y.*$', Comment.Special),
# strings and characters
(r'"(\\.|[^"\\])*"', String),
# quoting
(r":" + symbol, String.Symbol),
(r"'" + symbol, String.Symbol),
(r"'", Operator),
(r"`", Operator),
# decimal numbers
(r'[-+]?\d+\.?' + terminated, Number.Integer),
(r'[-+]?\d+/\d+' + terminated, Number),
(r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' \
+ terminated, Number.Float),
# sharpsign strings and characters
(r"#\\." + terminated, String.Char),
(r"#\\" + symbol, String.Char),
# vector
(r'#\(', Operator, 'body'),
# bitstring
(r'#\d*\*[01]*', Literal.Other),
# uninterned symbol
(r'#:' + symbol, String.Symbol),
# read-time and load-time evaluation
(r'#[.,]', Operator),
# function shorthand
(r'#\'', Name.Function),
# binary rational
(r'#[bB][+-]?[01]+(/[01]+)?', Number),
# octal rational
(r'#[oO][+-]?[0-7]+(/[0-7]+)?', Number.Oct),
# hex rational
(r'#[xX][+-]?[0-9a-fA-F]+(/[0-9a-fA-F]+)?', Number.Hex),
# radix rational
(r'#\d+[rR][+-]?[0-9a-zA-Z]+(/[0-9a-zA-Z]+)?', Number),
# complex
(r'(#[cC])(\()', bygroups(Number, Punctuation), 'body'),
# array
(r'(#\d+[aA])(\()', bygroups(Literal.Other, Punctuation), 'body'),
# structure
(r'(#[sS])(\()', bygroups(Literal.Other, Punctuation), 'body'),
# path
(r'#[pP]?"(\\.|[^"])*"', Literal.Other),
# reference
(r'#\d+=', Operator),
(r'#\d+#', Operator),
# read-time comment
(r'#+nil' + terminated + '\s*\(', Comment.Preproc, 'commented-form'),
# read-time conditional
(r'#[+-]', Operator),
# special operators that should have been parsed already
(r'(,@|,|\.)', Operator),
# special constants
(r'(t|nil)' + terminated, Name.Constant),
# functions and variables
(r'\*' + symbol + '\*', Name.Variable.Global),
(symbol, Name.Variable),
# parentheses
(r'\(', Punctuation, 'body'),
(r'\)', Punctuation, '#pop'),
],
}
class HaskellLexer(RegexLexer):
"""
A Haskell lexer based on the lexemes defined in the Haskell 98 Report.
*New in Pygments 0.8.*
"""
name = 'Haskell'
aliases = ['haskell', 'hs']
filenames = ['*.hs']
mimetypes = ['text/x-haskell']
reserved = ['case','class','data','default','deriving','do','else',
'if','in','infix[lr]?','instance',
'let','newtype','of','then','type','where','_']
ascii = ['NUL','SOH','[SE]TX','EOT','ENQ','ACK',
'BEL','BS','HT','LF','VT','FF','CR','S[OI]','DLE',
'DC[1-4]','NAK','SYN','ETB','CAN',
'EM','SUB','ESC','[FGRU]S','SP','DEL']
tokens = {
'root': [
# Whitespace:
(r'\s+', Text),
#(r'--\s*|.*$', Comment.Doc),
(r'--(?![!#$%&*+./<=>?@\^|_~]).*?$', Comment.Single),
(r'{-', Comment.Multiline, 'comment'),
# Lexemes:
# Identifiers
(r'\bimport\b', Keyword.Reserved, 'import'),
(r'\bmodule\b', Keyword.Reserved, 'module'),
(r'\berror\b', Name.Exception),
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'^[_a-z][\w\']*', Name.Function),
(r'[_a-z][\w\']*', Name),
(r'[A-Z][\w\']*', Keyword.Type),
# Operators
(r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator
(r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials
(r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Character/String Literals
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
# Special
(r'\[\]', Keyword.Type),
(r'\(\)', Name.Builtin),
(r'[][(),;`{}]', Punctuation),
],
'import': [
# Import statements
(r'\s+', Text),
# after "funclist" state
(r'\)', Punctuation, '#pop'),
(r'qualified\b', Keyword),
# import X as Y
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(as)(\s+)([A-Z][a-zA-Z0-9_.]*)',
bygroups(Name.Namespace, Text, Keyword, Text, Name), '#pop'),
# import X hiding (functions)
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(hiding)(\s+)(\()',
bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'),
# import X (functions)
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
# import X
(r'[a-zA-Z0-9_.]+', Name.Namespace, '#pop'),
],
'module': [
(r'\s+', Text),
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
(r'[A-Z][a-zA-Z0-9_.]*', Name.Namespace, '#pop'),
],
'funclist': [
(r'\s+', Text),
(r'[A-Z][a-zA-Z0-9_]*', Keyword.Type),
(r'[_a-z][\w\']+', Name.Function),
(r'--.*$', Comment.Single),
(r'{-', Comment.Multiline, 'comment'),
(r',', Punctuation),
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
# (HACK, but it makes sense to push two instances, believe me)
(r'\(', Punctuation, ('funclist', 'funclist')),
(r'\)', Punctuation, '#pop:2'),
],
'comment': [
# Multiline Comments
(r'[^-{}]+', Comment.Multiline),
(r'{-', Comment.Multiline, '#push'),
(r'-}', Comment.Multiline, '#pop'),
(r'[-{}]', Comment.Multiline),
],
'character': [
# Allows multi-chars, incorrectly.
(r"[^\\']", String.Char),
(r"\\", String.Escape, 'escape'),
("'", String.Char, '#pop'),
],
'string': [
(r'[^\\"]+', String),
(r"\\", String.Escape, 'escape'),
('"', String, '#pop'),
],
'escape': [
(r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
(r'\^[][A-Z@\^_]', String.Escape, '#pop'),
('|'.join(ascii), String.Escape, '#pop'),
(r'o[0-7]+', String.Escape, '#pop'),
(r'x[\da-fA-F]+', String.Escape, '#pop'),
(r'\d+', String.Escape, '#pop'),
(r'\s+\\', String.Escape, '#pop'),
],
}
line_re = re.compile('.*?\n')
bird_re = re.compile(r'(>[ \t]*)(.*\n)')
class LiterateHaskellLexer(Lexer):
"""
For Literate Haskell (Bird-style or LaTeX) source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
*New in Pygments 0.9.*
"""
name = 'Literate Haskell'
aliases = ['lhs', 'literate-haskell']
filenames = ['*.lhs']
mimetypes = ['text/x-literate-haskell']
def get_tokens_unprocessed(self, text):
hslexer = HaskellLexer(**self.options)
style = self.options.get('litstyle')
if style is None:
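# Old-style boolean conditional: "latex" if the first non-whitespace
# character is '%' or a backslash, else "bird".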
style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird'
code = ''
insertions = []
if style == 'bird':
# bird-style
for match in line_re.finditer(text):
line = match.group()
m = bird_re.match(line)
if m:
insertions.append((len(code),
[(0, Comment.Special, m.group(1))]))
code += m.group(2)
else:
insertions.append((len(code), [(0, Text, line)]))
else:
# latex-style
from pygments.lexers.text import TexLexer
lxlexer = TexLexer(**self.options)
codelines = 0
latex = ''
for match in line_re.finditer(text):
line = match.group()
if codelines:
if line.lstrip().startswith('\\end{code}'):
codelines = 0
latex += line
else:
code += line
elif line.lstrip().startswith('\\begin{code}'):
codelines = 1
latex += line
insertions.append((len(code),
list(lxlexer.get_tokens_unprocessed(latex))))
latex = ''
else:
latex += line
insertions.append((len(code),
list(lxlexer.get_tokens_unprocessed(latex))))
for item in do_insertions(insertions, hslexer.get_tokens_unprocessed(code)):
yield item
class OcamlLexer(RegexLexer):
"""
For the OCaml language.
*New in Pygments 0.7.*
"""
name = 'OCaml'
aliases = ['ocaml']
filenames = ['*.ml', '*.mli', '*.mll', '*.mly']
mimetypes = ['text/x-ocaml']
keywords = [
'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
'downto', 'else', 'end', 'exception', 'external', 'false',
'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
'type', 'val', 'virtual', 'when', 'while', 'with'
]
keyopts = [
'!=','#','&','&&','\(','\)','\*','\+',',','-',
'-\.','->','\.','\.\.',':','::',':=',':>',';',';;','<',
'<-','=','>','>]','>}','\?','\?\?','\[','\[<','\[>','\[\|',
']','_','`','{','{<','\|','\|]','}','~'
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ['and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = ['unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array']
tokens = {
'escape-sequence': [
(r'\\[\"\'ntbr]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b([A-Z][A-Za-z0-9_\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
(r'\b([A-Z][A-Za-z0-9_\']*)', Name.Class),
(r'\(\*', Comment, 'comment'),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'(%s)' % '|'.join(keyopts), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
(r'\d[\d_]*', Number.Integer),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Binary),
(r'-?\d[\d_]*(\.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'string': [
(r'[^\\"]+', String.Double),
include('escape-sequence'),
(r'\\\n', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][A-Za-z0-9_\']*', Name.Class, '#pop'),
(r'[a-z][a-z0-9_\']*', Name, '#pop'),
],
}
class ErlangLexer(RegexLexer):
"""
For the Erlang functional programming language.
Blame Jeremy Thurgood (http://jerith.za.net/).
*New in Pygments 0.9.*
"""
name = 'Erlang'
aliases = ['erlang']
filenames = ['*.erl', '*.hrl']
mimetypes = ['text/x-erlang']
keywords = [
'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',
'let', 'of', 'query', 'receive', 'try', 'when',
]
builtins = [ # See erlang(3) man page
'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list',
'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions',
'byte_size', 'cancel_timer', 'check_process_code', 'delete_module',
'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit',
'float', 'float_to_list', 'fun_info', 'fun_to_list',
'function_exported', 'garbage_collect', 'get', 'get_keys',
'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary',
'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean',
'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list',
'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record',
'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom',
'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom',
'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple',
'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5',
'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor',
'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2',
'pid_to_list', 'port_close', 'port_command', 'port_connect',
'port_control', 'port_call', 'port_info', 'port_to_list',
'process_display', 'process_flag', 'process_info', 'purge_module',
'put', 'read_timer', 'ref_to_list', 'register', 'resume_process',
'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie',
'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor',
'spawn_opt', 'split_binary', 'start_timer', 'statistics',
'suspend_process', 'system_flag', 'system_info', 'system_monitor',
'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered',
'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list',
'universaltime_to_localtime', 'unlink', 'unregister', 'whereis'
]
operators = r'(\+|-|\*|/|<|>|=|==|/=|=:=|=/=|=<|>=|\+\+|--|<-|!)'
word_operators = [
'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor',
'div', 'not', 'or', 'orelse', 'rem', 'xor'
]
atom_re = r"(?:[a-z][a-zA-Z0-9_]*|'[^\n']*[^\\]')"
variable_re = r'(?:[A-Z_][a-zA-Z0-9_]*)'
escape_re = r'(?:\\(?:[bdefnrstv\'"\\/]|[0-7][0-7]?[0-7]?|\^[a-zA-Z]))'
macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'
base_re = r'(?:[2-9]|[12][0-9]|3[0-6])'
tokens = {
'root': [
(r'\s+', Text),
(r'%.*\n', Comment),
('(' + '|'.join(keywords) + r')\b', Keyword),
('(' + '|'.join(builtins) + r')\b', Name.Builtin),
('(' + '|'.join(word_operators) + r')\b', Operator.Word),
(r'^-', Punctuation, 'directive'),
(operators, Operator),
(r'"', String, 'string'),
(r'<<', Name.Label),
(r'>>', Name.Label),
(r'('+atom_re+')(:)', bygroups(Name.Namespace, Punctuation)),
(r'^('+atom_re+r')(\s*)(\()', bygroups(Name.Function, Text, Punctuation)),
(r'[+-]?'+base_re+r'#[0-9a-zA-Z]+', Number.Integer),
(r'[+-]?\d+\.\d+', Number.Float),
(r'[+-]?\d+', Number.Integer),
(r'[][:_@\".{}()|;,]', Punctuation),
(variable_re, Name.Variable),
(atom_re, Name),
(r'\?'+macro_re, Name.Constant),
(r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char),
(r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label),
],
'string': [
(escape_re, String.Escape),
(r'"', String, '#pop'),
(r'~[0-9.*]*[~#+bBcdefginpPswWxX]', String.Interpol),
(r'[^"\\~]+', String),
(r'~', String),
],
'directive': [
(r'(define)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'),
(r'(record)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'),
(atom_re, Name.Entity, '#pop'),
],
}
class ErlangShellLexer(Lexer):
"""
Shell sessions in erl (for Erlang code).
*New in Pygments 1.1.*
"""
name = 'Erlang erl session'
aliases = ['erl']
filenames = ['*.erl-sh']
mimetypes = ['text/x-erl-shellsession']
_prompt_re = re.compile(r'\d+>(?=\s|\Z)')
def get_tokens_unprocessed(self, text):
erlexer = ErlangLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if line.startswith('*'):
yield match.start(), Generic.Traceback, line
else:
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode)):
yield item
| apache-2.0 |
kustodian/ansible | lib/ansible/module_utils/redhat.py | 141 | 10351 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), James Laska
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import shutil
import tempfile
import types
from ansible.module_utils.six.moves import configparser
class RegistrationBase(object):
def __init__(self, module, username=None, password=None):
self.module = module
self.username = username
self.password = password
def configure(self):
raise NotImplementedError("Must be implemented by a sub-class")
def enable(self):
# Remove any existing redhat.repo
redhat_repo = '/etc/yum.repos.d/redhat.repo'
if os.path.isfile(redhat_repo):
os.unlink(redhat_repo)
def register(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unregister(self):
raise NotImplementedError("Must be implemented by a sub-class")
def unsubscribe(self):
raise NotImplementedError("Must be implemented by a sub-class")
def update_plugin_conf(self, plugin, enabled=True):
plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
if os.path.isfile(plugin_conf):
tmpfd, tmpfile = tempfile.mkstemp()
shutil.copy2(plugin_conf, tmpfile)
cfg = configparser.ConfigParser()
cfg.read([tmpfile])
            if enabled:
                cfg.set('main', 'enabled', '1')
            else:
                cfg.set('main', 'enabled', '0')
fd = open(tmpfile, 'w+')
cfg.write(fd)
fd.close()
self.module.atomic_move(tmpfile, plugin_conf)
def subscribe(self, **kwargs):
raise NotImplementedError("Must be implemented by a sub-class")
class Rhsm(RegistrationBase):
def __init__(self, module, username=None, password=None):
RegistrationBase.__init__(self, module, username, password)
self.config = self._read_config()
self.module = module
def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
'''
Load RHSM configuration from /etc/rhsm/rhsm.conf.
Returns:
* ConfigParser object
'''
# Read RHSM defaults ...
cp = configparser.ConfigParser()
cp.read(rhsm_conf)
# Add support for specifying a default value w/o having to standup some configuration
# Yeah, I know this should be subclassed ... but, oh well
def get_option_default(self, key, default=''):
sect, opt = key.split('.', 1)
if self.has_section(sect) and self.has_option(sect, opt):
return self.get(sect, opt)
else:
return default
cp.get_option = types.MethodType(get_option_default, cp, configparser.ConfigParser)
return cp
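        # Illustrative only (not part of the original module): with the helper
        # patched in above, a dotted key resolves to a [section]/option pair
        # and falls back to the supplied default (values are hypothetical):
        #
        #   cfg = self._read_config()
        #   cfg.get_option('server.hostname', 'default.example.com')
        #   cfg.get_option('rhsm.not_set', '')   # -> '' (the default)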
def enable(self):
'''
Enable the system to receive updates from subscription-manager.
This involves updating affected yum plugins and removing any
conflicting yum repositories.
'''
RegistrationBase.enable(self)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', True)
def configure(self, **kwargs):
'''
Configure the system as directed for registration with RHN
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'config']
        # Pass supplied **kwargs as parameters to subscription-manager. Ignore
        # non-configuration parameters and replace '_' with '.'. For example,
        # 'rhsm_baseurl' becomes '--rhsm.baseurl'.
for k, v in kwargs.items():
if re.search(r'^(system|rhsm)_', k):
args.append('--%s=%s' % (k.replace('_', '.'), v))
self.module.run_command(args, check_rc=True)
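        # Worked example (hypothetical values; not in the original file): a
        # call like rhsm.configure(rhsm_baseurl='https://cdn.example.com',
        # other='x') keeps only keys matching ^(system|rhsm)_ and runs:
        #   subscription-manager config --rhsm.baseurl=https://cdn.example.com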
@property
def is_registered(self):
'''
        Determine whether the current system is registered to RHN.
Returns:
* Boolean - whether the current system is currently registered to
RHN.
'''
args = ['subscription-manager', 'identity']
rc, stdout, stderr = self.module.run_command(args, check_rc=False)
if rc == 0:
return True
else:
return False
def register(self, username, password, autosubscribe, activationkey):
'''
Register the current system to the provided RHN server
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'register']
# Generate command arguments
if activationkey:
            # Pass the key as its own argv entry; with a list command,
            # embedded quotes would be forwarded literally.
            args.extend(['--activationkey', activationkey])
else:
if autosubscribe:
args.append('--autosubscribe')
if username:
args.extend(['--username', username])
if password:
args.extend(['--password', password])
# Do the needful...
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def unsubscribe(self):
'''
Unsubscribe a system from all subscribed channels
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unsubscribe', '--all']
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def unregister(self):
'''
Unregister a currently registered system
Raises:
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unregister']
        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
self.update_plugin_conf('rhnplugin', False)
self.update_plugin_conf('subscription-manager', False)
def subscribe(self, regexp):
'''
Subscribe current system to available pools matching the specified
regular expression
Raises:
* Exception - if error occurs while running command
'''
# Available pools ready for subscription
available_pools = RhsmPools(self.module)
for pool in available_pools.filter(regexp):
pool.subscribe()
class RhsmPool(object):
'''
Convenience class for housing subscription information
'''
def __init__(self, module, **kwargs):
self.module = module
for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return str(self.__getattribute__('_name'))
def subscribe(self):
args = "subscription-manager subscribe --pool %s" % self.PoolId
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
if rc == 0:
return True
else:
return False
class RhsmPools(object):
"""
This class is used for manipulating pools subscriptions with RHSM
"""
def __init__(self, module):
self.module = module
self.products = self._load_product_list()
def __iter__(self):
return self.products.__iter__()
def _load_product_list(self):
"""
Loads list of all available pools for system in data structure
"""
args = "subscription-manager list --available"
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
products = []
for line in stdout.split('\n'):
# Remove leading+trailing whitespace
line = line.strip()
# An empty line implies the end of an output group
if len(line) == 0:
continue
# If a colon ':' is found, parse
elif ':' in line:
(key, value) = line.split(':', 1)
key = key.strip().replace(" ", "") # To unify
value = value.strip()
if key in ['ProductName', 'SubscriptionName']:
# Remember the name for later processing
products.append(RhsmPool(self.module, _name=value, key=value))
elif products:
# Associate value with most recently recorded product
products[-1].__setattr__(key, value)
# FIXME - log some warning?
# else:
# warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
return products
def filter(self, regexp='^$'):
'''
Return a list of RhsmPools whose name matches the provided regular expression
'''
r = re.compile(regexp)
for product in self.products:
if r.search(product._name):
yield product
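# Usage sketch (not part of the original module; 'module' stands for a live
# AnsibleModule instance and the pool name is hypothetical):
#
#   rhsm = Rhsm(module, username='admin', password='secret')
#   rhsm.subscribe(regexp='RHEL')
#
# subscribe() iterates RhsmPools(module).filter('RHEL') and calls
# pool.subscribe() on every matching pool, as defined above.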
| gpl-3.0 |
CMLL/taiga-back | taiga/projects/history/migrations/0007_set_bloked_note_and_is_blocked_in_snapshots.py | 16 | 1624 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.core.exceptions import ObjectDoesNotExist
from taiga.projects.history.services import (make_key_from_model_object,
get_model_from_key,
get_pk_from_key)
def set_current_values_of_blocked_note_and_is_blocked_to_the_last_snapshot(apps, schema_editor):
HistoryEntry = apps.get_model("history", "HistoryEntry")
for history_entry in HistoryEntry.objects.filter(is_snapshot=True).order_by("created_at"):
model = get_model_from_key(history_entry.key)
pk = get_pk_from_key(history_entry.key)
try:
obj = model.objects.get(pk=pk)
save = False
if hasattr(obj, "is_blocked") and "is_blocked" not in history_entry.snapshot:
history_entry.snapshot["is_blocked"] = obj.is_blocked
save = True
if hasattr(obj, "blocked_note") and "blocked_note" not in history_entry.snapshot:
history_entry.snapshot["blocked_note"] = obj.blocked_note
save = True
if save:
history_entry.save()
        except ObjectDoesNotExist:
            # The snapshotted object no longer exists; skip it.
            pass
class Migration(migrations.Migration):
dependencies = [
('history', '0006_fix_json_field_not_null'),
('userstories', '0009_remove_userstory_is_archived'),
('tasks', '0005_auto_20150114_0954'),
('issues', '0004_auto_20150114_0954'),
]
operations = [
migrations.RunPython(set_current_values_of_blocked_note_and_is_blocked_to_the_last_snapshot),
]
| agpl-3.0 |
thfabian/serialbox2 | src/serialbox-python/serialbox/slice.py | 2 | 1163 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
##
## S E R I A L B O X
##
## This file is distributed under terms of BSD license.
## See LICENSE.txt for more information.
##
##===------------------------------------------------------------------------------------------===##
##
## This file contains the slicing functionality of the serializer.
##
##===------------------------------------------------------------------------------------------===##
class _Slice(object):
"""Specification of the slice indices which are used for partial loading of serialized data.
To avoid instantiation, use the global object serialbox.Slice.
>>> Slice[:, :, 5]
(slice(None, None, None), slice(None, None, None), 5)
See `here <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_.
"""
def __init__(self):
pass
def __getitem__(self, slice):
""" Define a slice.
:return: Tuple of slices
        :rtype: tuple[slice]
"""
return slice
Slice = _Slice()
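def _demo_slice():
    # Illustration only (not part of the original file): the tuple returned by
    # Slice[...] can be passed straight to a numpy array's __getitem__.
    # numpy is assumed to be available for this demo.
    import numpy as np
    field = np.arange(27).reshape(3, 3, 3)
    assert (field[Slice[:, :, 1]] == field[:, :, 1]).all()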
| bsd-2-clause |
fredericlepied/ansible | lib/ansible/modules/storage/zfs/zpool_facts.py | 9 | 6172 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam Števko <adam.stevko@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zpool_facts
short_description: Gather facts about ZFS pools.
description:
- Gather facts from ZFS pool properties.
version_added: "2.3"
author: Adam Števko (@xen0l)
options:
name:
description:
- ZFS pool name.
aliases: [ "pool", "zpool" ]
required: false
parsable:
description:
- Specifies if property values should be displayed in machine
friendly format.
type: bool
default: False
required: false
properties:
description:
- Specifies which dataset properties should be queried in comma-separated format.
For more information about dataset properties, check zpool(1M) man page.
aliases: [ "props" ]
default: all
required: false
'''
EXAMPLES = '''
# Gather facts about ZFS pool rpool
zpool_facts: pool=rpool
# Gather space usage about all imported ZFS pools
zpool_facts: properties='free,size'
debug: msg='ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.'
with_items: '{{ ansible_zfs_pools }}'
'''
RETURN = '''
name:
description: ZFS pool name
returned: always
type: string
sample: rpool
parsable:
description: if parsable output should be provided in machine friendly format.
returned: if 'parsable' is set to True
type: boolean
sample: True
zfs_pools:
description: ZFS pool facts
returned: always
type: string
sample:
{
"allocated": "3.46G",
"altroot": "-",
"autoexpand": "off",
"autoreplace": "off",
"bootfs": "rpool/ROOT/openindiana",
"cachefile": "-",
"capacity": "6%",
"comment": "-",
"dedupditto": "0",
"dedupratio": "1.00x",
"delegation": "on",
"expandsize": "-",
"failmode": "wait",
"feature@async_destroy": "enabled",
"feature@bookmarks": "enabled",
"feature@edonr": "enabled",
"feature@embedded_data": "active",
"feature@empty_bpobj": "active",
"feature@enabled_txg": "active",
"feature@extensible_dataset": "enabled",
"feature@filesystem_limits": "enabled",
"feature@hole_birth": "active",
"feature@large_blocks": "enabled",
"feature@lz4_compress": "active",
"feature@multi_vdev_crash_dump": "enabled",
"feature@sha512": "enabled",
"feature@skein": "enabled",
"feature@spacemap_histogram": "active",
"fragmentation": "3%",
"free": "46.3G",
"freeing": "0",
"guid": "15729052870819522408",
"health": "ONLINE",
"leaked": "0",
"listsnapshots": "off",
"name": "rpool",
"readonly": "off",
"size": "49.8G",
"version": "-"
}
'''
from collections import defaultdict
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
class ZPoolFacts(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.parsable = module.params['parsable']
self.properties = module.params['properties']
self._pools = defaultdict(dict)
self.facts = []
def pool_exists(self):
cmd = [self.module.get_bin_path('zpool')]
cmd.append('list')
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def get_facts(self):
cmd = [self.module.get_bin_path('zpool')]
cmd.append('get')
cmd.append('-H')
if self.parsable:
cmd.append('-p')
cmd.append('-o')
cmd.append('name,property,value')
cmd.append(self.properties)
if self.name:
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
for line in out.splitlines():
                pool, prop, value = line.split('\t')  # 'prop' avoids shadowing the builtin
                self._pools[pool].update({prop: value})
for k, v in iteritems(self._pools):
v.update({'name': k})
self.facts.append(v)
return {'ansible_zfs_pools': self.facts}
else:
self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name,
stderr=err,
rc=rc)
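def _demo_parse_zpool_output():
    # Self-contained sketch (not part of the original module) of how
    # ZPoolFacts.get_facts() turns `zpool get -H -o name,property,value`
    # output into per-pool dicts; the sample lines are hypothetical.
    sample = 'rpool\tsize\t49.8G\nrpool\thealth\tONLINE'
    pools = defaultdict(dict)
    for line in sample.splitlines():
        pool, prop, value = line.split('\t')
        pools[pool].update({prop: value})
    assert pools['rpool'] == {'size': '49.8G', 'health': 'ONLINE'}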
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=False, aliases=['pool', 'zpool'], type='str'),
parsable=dict(required=False, default=False, type='bool'),
properties=dict(required=False, default='all', type='str'),
),
supports_check_mode=True
)
zpool_facts = ZPoolFacts(module)
result = {}
result['changed'] = False
result['name'] = zpool_facts.name
if zpool_facts.parsable:
result['parsable'] = zpool_facts.parsable
if zpool_facts.name is not None:
if zpool_facts.pool_exists():
result['ansible_facts'] = zpool_facts.get_facts()
else:
module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name)
else:
result['ansible_facts'] = zpool_facts.get_facts()
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/resources/types/mobile_device_constant.py | 1 | 2416 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import mobile_device_type
__protobuf__ = proto.module(
package='google.ads.googleads.v6.resources',
marshal='google.ads.googleads.v6',
manifest={
'MobileDeviceConstant',
},
)
class MobileDeviceConstant(proto.Message):
r"""A mobile device constant.
Attributes:
resource_name (str):
Output only. The resource name of the mobile device
constant. Mobile device constant resource names have the
form:
``mobileDeviceConstants/{criterion_id}``
id (int):
Output only. The ID of the mobile device
constant.
name (str):
Output only. The name of the mobile device.
manufacturer_name (str):
Output only. The manufacturer of the mobile
device.
operating_system_name (str):
Output only. The operating system of the
mobile device.
type_ (google.ads.googleads.v6.enums.types.MobileDeviceTypeEnum.MobileDeviceType):
Output only. The type of mobile device.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
id = proto.Field(
proto.INT64,
number=7,
optional=True,
)
name = proto.Field(
proto.STRING,
number=8,
optional=True,
)
manufacturer_name = proto.Field(
proto.STRING,
number=9,
optional=True,
)
operating_system_name = proto.Field(
proto.STRING,
number=10,
optional=True,
)
type_ = proto.Field(
proto.ENUM,
number=6,
enum=mobile_device_type.MobileDeviceTypeEnum.MobileDeviceType,
)
__all__ = tuple(sorted(__protobuf__.manifest))
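# Construction sketch (illustrative; not part of the generated file). Fields
# are set through keyword arguments as with any proto-plus message; the
# values below are hypothetical.
#
#   device = MobileDeviceConstant(
#       resource_name='mobileDeviceConstants/600000',
#       id=600000,
#       name='Example Phone',
#   )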
| apache-2.0 |
Asana/bazel | third_party/protobuf/3.0.0/python/google/protobuf/internal/api_implementation.py | 83 | 4872 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Determine which implementation of the protobuf API is used in this process.
"""
import os
import warnings
import sys
try:
# pylint: disable=g-import-not-at-top
from google.protobuf.internal import _api_implementation
# The compile-time constants in the _api_implementation module can be used to
# switch to a certain implementation of the Python API at build time.
_api_version = _api_implementation.api_version
_proto_extension_modules_exist_in_build = True
except ImportError:
_api_version = -1 # Unspecified by compiler flags.
_proto_extension_modules_exist_in_build = False
if _api_version == 1:
raise ValueError('api_version=1 is no longer supported.')
if _api_version < 0: # Still unspecified?
try:
# The presence of this module in a build allows the proto implementation to
# be upgraded merely via build deps rather than a compiler flag or the
# runtime environment variable.
# pylint: disable=g-import-not-at-top
from google.protobuf import _use_fast_cpp_protos
# Work around a known issue in the classic bootstrap .par import hook.
if not _use_fast_cpp_protos:
raise ImportError('_use_fast_cpp_protos import succeeded but was None')
del _use_fast_cpp_protos
_api_version = 2
except ImportError:
if _proto_extension_modules_exist_in_build:
if sys.version_info[0] >= 3: # Python 3 defaults to C++ impl v2.
_api_version = 2
# TODO(b/17427486): Make Python 2 default to C++ impl v2.
_default_implementation_type = (
'python' if _api_version <= 0 else 'cpp')
# This environment variable can be used to switch to a certain implementation
# of the Python API, overriding the compile-time constants in the
# _api_implementation module. Right now only 'python' and 'cpp' are valid
# values. Any other value will be ignored.
_implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION',
_default_implementation_type)
if _implementation_type != 'python':
_implementation_type = 'cpp'
if 'PyPy' in sys.version and _implementation_type == 'cpp':
warnings.warn('PyPy does not work yet with cpp protocol buffers. '
'Falling back to the python implementation.')
_implementation_type = 'python'
# This environment variable can be used to switch between the two
# 'cpp' implementations, overriding the compile-time constants in the
# _api_implementation module. Right now only '2' is supported. Any other
# value will cause an error to be raised.
_implementation_version_str = os.getenv(
'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION', '2')
if _implementation_version_str != '2':
raise ValueError(
'unsupported PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION: "' +
_implementation_version_str + '" (supported versions: 2)'
)
_implementation_version = int(_implementation_version_str)
# Usage of this function is discouraged. Clients shouldn't care which
# implementation of the API is in use. Note that there is no guarantee
# that differences between APIs will be maintained.
# Please don't use this function if possible.
def Type():
return _implementation_type
# See comment on 'Type' above.
def Version():
return _implementation_version
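# Usage sketch (not part of the original module). Callers that really must
# know the active backend (discouraged, per the comments above) can do:
#
#   from google.protobuf.internal import api_implementation
#   if api_implementation.Type() == 'cpp':
#       pass  # C++-accelerated runtime, version api_implementation.Version()
#
# The backend can also be forced through the environment before start-up:
#   PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python python app.py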
| apache-2.0 |
spencerjanssen/Flexget | gen-changelog.py | 19 | 1769 | # Writes a changelog in trac WikiFormatting based on a git log
from __future__ import unicode_literals, division, absolute_import
import codecs
from itertools import ifilter
import os
import re
import subprocess
import sys
from bs4 import BeautifulSoup
import dateutil.parser
import requests
out_path = 'ChangeLog'
if len(sys.argv) > 1:
dir_name = os.path.dirname(sys.argv[1])
if dir_name and not os.path.isdir(dir_name):
print 'Output dir doesn\'t exist: %s' % sys.argv[1]
sys.exit(1)
out_path = sys.argv[1]
ua_response = requests.get('http://flexget.com/wiki/UpgradeActions')
ua_soup = BeautifulSoup(ua_response.text, 'html5lib')
# 1.0.3280 was last revision on svn
git_log_output = subprocess.check_output(['git', 'log', '--pretty=%n---%n.%d%n%ci%n%h%n%s%n%-b%n---%n',
'--topo-order', '--decorate=full','refs/tags/1.0.3280..HEAD'])
git_log_iter = ifilter(None, git_log_output.decode('utf-8').splitlines())
with codecs.open(out_path, 'w', encoding='utf-8') as out_file:
for line in git_log_iter:
assert line == '---'
        tag = re.search(r'refs/tags/([\d.]+)', next(git_log_iter))
date = dateutil.parser.parse(next(git_log_iter))
commit_hash = next(git_log_iter)
body = list(iter(git_log_iter.next, '---'))
if tag:
ver = tag.group(1)
ua_link = ''
result = ua_soup.find('h3', text=re.compile(' %s$' % re.escape(ver)))
if result:
ua_link = '^[wiki:UpgradeActions#%s upgrade actions]^ ' % result['id']
out_file.write('\n=== %s (%s) %s===\n\n' % (ver, date.strftime('%Y.%m.%d'), ua_link))
out_file.write(' * (%s) %s\n' % (commit_hash, '[[BR]]\n '.join(body)))
| mit |
ravindrapanda/tensorflow | tensorflow/contrib/nccl/python/ops/nccl_ops.py | 23 | 8121 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for GPU collective operations implemented using NVIDIA nccl."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.contrib.nccl.ops import gen_nccl_ops
from tensorflow.contrib.util import loader
from tensorflow.python.eager import context
from tensorflow.python.framework import device
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_nccl_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile('_nccl_ops.so'))
def all_sum(tensors):
"""Returns a list of tensors with the all-reduce sum across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to sum; must be assigned
to GPU devices.
Returns:
List of tensors, each with the sum of the input tensors, where tensor i has
the same device as `tensors[i]`.
"""
return _apply_all_reduce('sum', tensors)
@ops.RegisterGradient('NcclAllReduce')
def _all_sum_grad(op, grad):
"""The gradients for `all_sum`.
Args:
op: The `all_sum` `Operation` that we are differentiating.
grad: Gradient with respect to the output of the `all_sum` op.
Returns:
The gradient with respect to the output of `all_sum`.
Raises:
LookupError: If `reduction` is not `sum`.
"""
if op.get_attr('reduction') != 'sum':
raise LookupError('No gradient defined for NcclAllReduce except sum.')
_check_device(grad, expected=op.device)
num_devices = op.get_attr('num_devices')
shared_name = op.get_attr('shared_name') + '_grad'
with ops.device(op.device):
return gen_nccl_ops.nccl_all_reduce(
input=grad,
reduction='sum',
num_devices=num_devices,
shared_name=shared_name)
def all_prod(tensors):
"""Returns a list of tensors with the all-reduce product across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to multiply; must be assigned
to GPU devices.
Returns:
List of tensors, each with the product of the input tensors, where tensor i
has the same device as `tensors[i]`.
"""
return _apply_all_reduce('prod', tensors)
def all_min(tensors):
"""Returns a list of tensors with the all-reduce min across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to reduce; must be assigned
to GPU devices.
Returns:
List of tensors, each with the minimum of the input tensors, where tensor i
has the same device as `tensors[i]`.
"""
return _apply_all_reduce('min', tensors)
def all_max(tensors):
"""Returns a list of tensors with the all-reduce max across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to reduce; must be assigned
to GPU devices.
Returns:
List of tensors, each with the maximum of the input tensors, where tensor i
has the same device as `tensors[i]`.
"""
return _apply_all_reduce('max', tensors)
def reduce_sum(tensors):
"""Returns a tensor with the reduce sum across `tensors`.
The computation is done with a reduce operation, so only one tensor is
returned.
Args:
tensors: The input tensors across which to sum; must be assigned
to GPU devices.
Returns:
A tensor containing the sum of the input tensors.
Raises:
LookupError: If context is not currently using a GPU device.
"""
return _apply_reduce('sum', tensors)
@ops.RegisterGradient('NcclReduce')
def _reduce_sum_grad(op, grad):
"""The gradients for input `Operation` of `reduce_sum`.
Args:
op: The `sum send` `Operation` that we are differentiating.
grad: Gradient with respect to the output of the `reduce_sum` op.
Returns:
The gradient with respect to the input of `reduce_sum` op.
Raises:
LookupError: If the reduction attribute of op is not `sum`.
"""
if op.get_attr('reduction') != 'sum':
raise LookupError('No gradient defined for NcclReduce except sum.')
_check_device(grad, expected=op.device)
with ops.device(op.device):
result = gen_nccl_ops.nccl_broadcast(input=grad, shape=grad.shape)
return [result] * len(op.inputs)
def broadcast(tensor):
"""Returns a tensor that can be efficiently transferred to other devices.
Args:
tensor: The tensor to send; must be assigned to a GPU device.
Returns:
A tensor with the value of `src_tensor`, which can be used as input to
ops on other GPU devices.
"""
_check_graph_mode()
_check_device(tensor)
with ops.device(tensor.device):
return gen_nccl_ops.nccl_broadcast(input=tensor, shape=tensor.shape)
@ops.RegisterGradient('NcclBroadcast')
def _broadcast_grad(op, accumulated_grad):
"""The gradients for input `Operation` of `broadcast`.
Args:
op: The `broadcast send` `Operation` that we are differentiating.
accumulated_grad: Accumulated gradients with respect to the output of the
`broadcast` op.
Returns:
Gradients with respect to the input of `broadcast`.
"""
# Grab inputs of accumulated_grad and replace accumulation with reduce_sum.
grads = [t for t in accumulated_grad.op.inputs]
for t in grads:
_check_device(t)
with ops.device(op.device):
return gen_nccl_ops.nccl_reduce(input=grads, reduction='sum')
def _apply_all_reduce(reduction, tensors):
"""Helper function for all_* functions."""
if not tensors:
raise ValueError('Must pass >0 tensors to all reduce operations')
_check_graph_mode()
shared_name = _get_shared_name()
res = []
for t in tensors:
_check_device(t)
with ops.device(t.device):
res.append(
gen_nccl_ops.nccl_all_reduce(
input=t,
reduction=reduction,
num_devices=len(tensors),
shared_name=shared_name))
return res
def _apply_reduce(reduction, tensors):
"""Helper function for reduce_* functions."""
if not tensors:
raise ValueError('Must pass >0 tensors to reduce operations')
_check_graph_mode()
for t in tensors:
_check_device(t)
result = gen_nccl_ops.nccl_reduce(input=tensors, reduction=reduction)
try:
next(t for t in tensors if t.device == result.device)
except StopIteration:
raise ValueError('One input tensor must be assigned to current device')
return result
_lock = threading.Lock()
_shared_name_counter = 0
def _get_shared_name():
global _shared_name_counter
with _lock:
val = _shared_name_counter
_shared_name_counter += 1
return 'c%s' % val
def _check_device(tensor, expected=None):
if not device.canonical_name(tensor.device):
raise ValueError('Device assignment required for nccl collective ops')
if expected and expected != tensor.device:
raise ValueError('Expected device %s, got %s' % (expected, tensor.device))
def _check_graph_mode():
if context.in_eager_mode():
raise ValueError('Nccl ops are not supported in eager mode')
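# Usage sketch (illustrative; device names and shapes are hypothetical, and
# 'tf' stands for an imported tensorflow used in graph mode):
#
#   tensors = []
#   for d in ['/gpu:0', '/gpu:1']:
#       with tf.device(d):
#           tensors.append(tf.ones([4]))
#   summed = all_sum(tensors)   # one result tensor per input device
#
# As the docstrings above stress, all returned tensors must be evaluated
# together or the underlying all-reduce will hang.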
| apache-2.0 |
lalanza808/rharvest | rharvest.py | 1 | 2791 | #!/usr/bin/env python
"""
Image scraper utilizing the Reddit Python API, PRAW.
Requires the PRAW library (install it with pip if missing).
Given one or more subreddits, it identifies pictures in each sub
and downloads them to the specified directory.
"""
__author__ = 'lance - github.com/lalanza808'
################################################################################
# Library Import
# Builtin libs
from urllib import urlretrieve
from os import path,system,getenv
from time import strftime, sleep
import ConfigParser
import argparse
# 3rd party libs
try:
import praw
except ImportError:
print("\nPython library PRAW not installed.\n\nTry:\n\nsudo pip install praw")
exit()
################################################################################
# Variable Declaration
# Open config
configfile = '{}/.config/rharvest.conf'.format(getenv('HOME'))
configparse = ConfigParser.ConfigParser()
configparse.readfp(open(configfile))
savedir = configparse.get('rharvest', 'savedir')
maxthreads = configparse.get('rharvest', 'maxthreads')
useragent = configparse.get('rharvest', 'useragent')
curtime = strftime(str(configparse.get('rharvest', 'timeformat')))
counter = 1
args = ''
parser = argparse.ArgumentParser(description='Command line Reddit image scraper. Supply the subreddit(s) separated by commas (no space), supply the number of images.')
parser.add_argument('--sub', '-s', type=str, dest='subreddit', help='The SubReddit name(s), separated by commas. Eg: minimalwallpaper,gifs,funny', required=True)
parser.add_argument('--count', '-c', type=int, dest='count', help='The amount of images you wish to download', required=True)
parser.add_argument('--dest', '-d', type=str, dest='dest', help='Save location. Set manually here or it defaults to value in config file', required=False)
args = parser.parse_args()
if args.dest == None:
savedir = configparse.get('rharvest', 'savedir')
else:
savedir = args.dest
def main():
CreateFolders(args.subreddit)
def CreateFolders(subs):
subs = subs.split(',')
for sub in subs:
if not path.exists("{}/{}/{}".format(savedir, sub, curtime)):
print("Creating new directory : {}/{}/{}".format(savedir, sub, str(curtime)))
sleep(2)
system("mkdir -p {}/{}/{}".format(savedir, sub, curtime))
DownloadImages(sub)
def DownloadImages(sub):
global counter
ua = praw.Reddit(useragent)
    # Request twice as many posts as asked for, since only direct image links are kept.
    subscrape = ua.get_subreddit(sub).get_hot(limit=(args.count + args.count))
for image in subscrape:
extension = image.url[-4:]
if extension == '.jpg' or extension == '.png' or extension == '.gif':
img = path.basename(image.url)
dlas = "{}/{}/{}/{}.{}".format(savedir, sub, curtime, str(counter), img)
urlretrieve(image.url, dlas)
print "Downloading {} as {}".format(img, dlas)
counter += 1
if __name__ == "__main__":
main()
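# Example ~/.config/rharvest.conf (hypothetical values; the keys are exactly
# the ones read by the ConfigParser lookups above):
#
#   [rharvest]
#   savedir = /home/user/Pictures/reddit
#   maxthreads = 4
#   useragent = rharvest image scraper
#   timeformat = %Y-%m-%d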
| mit |
StackPointCloud/ansible-modules-extras | cloud/amazon/ec2_vpc_route_table_facts.py | 65 | 3706 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_route_table_facts
short_description: Gather facts about ec2 VPC route tables in AWS
description:
- Gather facts about ec2 VPC route tables in AWS
version_added: "2.0"
author: "Rob White (@wimnat)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all VPC route tables
- ec2_vpc_route_table_facts:
# Gather facts about a particular VPC route table using route table ID
- ec2_vpc_route_table_facts:
filters:
route-table-id: rtb-00112233
# Gather facts about any VPC route table with a tag key Name and value Example
- ec2_vpc_route_table_facts:
filters:
"tag:Name": Example
# Gather facts about any VPC route table within VPC with ID vpc-abcdef00
- ec2_vpc_route_table_facts:
filters:
vpc-id: vpc-abcdef00
'''
try:
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_route_table_info(route_table):
# Add any routes to array
routes = []
for route in route_table.routes:
routes.append(route.__dict__)
route_table_info = { 'id': route_table.id,
'routes': routes,
'tags': route_table.tags,
'vpc_id': route_table.vpc_id
}
return route_table_info
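# Shape sketch of the dict built above (illustrative values only; 'routes'
# holds the raw __dict__ of each boto route object):
#
#   {'id': 'rtb-00112233',
#    'routes': [{'destination_cidr_block': '10.0.0.0/16', ...}],
#    'tags': {'Name': 'Example'},
#    'vpc_id': 'vpc-abcdef00'}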
def list_ec2_vpc_route_tables(connection, module):
filters = module.params.get("filters")
route_table_dict_array = []
try:
all_route_tables = connection.get_all_route_tables(filters=filters)
except BotoServerError as e:
module.fail_json(msg=e.message)
for route_table in all_route_tables:
route_table_dict_array.append(get_route_table_info(route_table))
module.exit_json(route_tables=route_table_dict_array)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters = dict(default=None, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
list_ec2_vpc_route_tables(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
airspeed-velocity/asv | test/test_feed.py | 2 | 3410 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import sys
import datetime
import xml.etree.ElementTree as etree
import xml.dom.minidom
import pytest
from asv import feed
try:
import feedparser
HAVE_FEEDPARSER = True
except ImportError:
HAVE_FEEDPARSER = False
try:
import feedvalidator
HAVE_FEEDVALIDATOR = True
except ImportError:
HAVE_FEEDVALIDATOR = False
def prettify_xml(text):
return xml.dom.minidom.parseString(text).toprettyxml()
def dummy_feed_xml():
entry_1 = feed.FeedEntry(title='Some title', updated=datetime.datetime(1993, 1, 1))
entry_2 = feed.FeedEntry(title='Another title', updated=datetime.datetime(1990, 1, 1),
link='http://foo', content='More text', id_context=['something'],
id_date=datetime.datetime(2000, 1, 1))
stream = io.BytesIO()
feed.write_atom(stream, [entry_1, entry_2], author='Me', title='Feed title',
address='baz.com')
return stream.getvalue()
def test_dummy_xml():
xml = dummy_feed_xml()
text = xml.decode('utf-8').replace('>', '>\n')
expected = """\
<?xml version='1.0' encoding='utf-8'?>
<feed xmlns="http://www.w3.org/2005/Atom">
<id>
tag:baz.com,1970-01-01:/82438e6f2527536e1271ba04e05f31b7fcbef238753fb5069b1fd52a9242173a</id>
<author>
<name>
Me</name>
</author>
<title xml:lang="en">
Feed title</title>
<updated>
1993-01-01T00:00:00Z</updated>
<entry>
<id>
tag:baz.com,1993-01-01:/9c12e06399d193907df13570525d9887b7f8e8f5ff23ddd7e9938416d490ff78</id>
<title xml:lang="en">
Some title</title>
<updated>
1993-01-01T00:00:00Z</updated>
<content xml:lang="en">
</content>
</entry>
<entry>
<id>
tag:baz.com,2000-01-01:/abd78e0420c232c75f3e7582946dac13e18a54b0b5542fbc3159458f8b16fd4f</id>
<title xml:lang="en">
Another title</title>
<updated>
1990-01-01T00:00:00Z</updated>
<link href="http://foo" />
<content type="html" xml:lang="en">
More text</content>
</entry>
</feed>
"""
expected2 = expected.replace('type="html" xml:lang="en"', 'xml:lang="en" type="html"')
assert text == expected or text == expected2
@pytest.mark.skipif(not HAVE_FEEDPARSER, reason="test requires feedparser module")
def test_feedparser():
# Check the result parses as a feed
xml = dummy_feed_xml()
feed = feedparser.parse(xml)
assert feed['entries'][0]['title'] == 'Some title'
assert feed['entries'][1]['content'][0]['type'] == 'text/html'
assert feed['entries'][1]['content'][0]['value'] == 'More text'
assert feed['entries'][1]['links'] == [{'href': 'http://foo',
'type': 'text/html',
'rel': 'alternate'}]
@pytest.mark.skipif(not HAVE_FEEDVALIDATOR, reason="test requires feedvalidator module")
def test_feedvalidator():
xml = prettify_xml(dummy_feed_xml())
result = feedvalidator.validateString(xml)
ok_messages = (feedvalidator.ValidValue, feedvalidator.MissingSelf)
assert result['feedType'] == feedvalidator.TYPE_ATOM
for message in result['loggedEvents']:
if not isinstance(message, ok_messages):
print(xml)
print(message.params)
assert isinstance(message, ok_messages), message
| bsd-3-clause |
dzimine/mistral | mistral/openstack/common/py3kcompat/urlutils.py | 13 | 1744 | #
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Python2/Python3 compatibility layer for OpenStack
"""
import six
if six.PY3:
# python3
import urllib.error
import urllib.parse
import urllib.request
urlencode = urllib.parse.urlencode
urljoin = urllib.parse.urljoin
quote = urllib.parse.quote
parse_qsl = urllib.parse.parse_qsl
unquote = urllib.parse.unquote
urlparse = urllib.parse.urlparse
urlsplit = urllib.parse.urlsplit
urlunsplit = urllib.parse.urlunsplit
SplitResult = urllib.parse.SplitResult
urlopen = urllib.request.urlopen
URLError = urllib.error.URLError
pathname2url = urllib.request.pathname2url
else:
# python2
import urllib
import urllib2
import urlparse
urlencode = urllib.urlencode
quote = urllib.quote
unquote = urllib.unquote
parse = urlparse
parse_qsl = parse.parse_qsl
urljoin = parse.urljoin
urlparse = parse.urlparse
urlsplit = parse.urlsplit
urlunsplit = parse.urlunsplit
SplitResult = parse.SplitResult
urlopen = urllib2.urlopen
URLError = urllib2.URLError
pathname2url = urllib.pathname2url
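# Usage sketch (not part of the original module): importing through this
# compat layer keeps caller code identical on Python 2 and 3.
#
#   from mistral.openstack.common.py3kcompat import urlutils
#   parts = urlutils.urlparse('http://example.com/path?a=1')
#   query = dict(urlutils.parse_qsl(parts.query))   # {'a': '1'}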
| apache-2.0 |
CMSS-BCRDB/RDS | trove/tests/unittests/common/test_wsgi.py | 5 | 1549 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import trove.common.wsgi as wsgi
import webob
import testtools
from testtools.matchers import Equals, Is, Not
class TestWsgi(testtools.TestCase):
def test_process_request(self):
middleware = wsgi.ContextMiddleware("test_trove")
req = webob.BaseRequest({})
token = 'MI23fdf2defg123'
user_id = 'test_user_id'
req.headers = {
'X-User': 'do not use - deprecated',
'X-User-ID': user_id,
'X-Auth-Token': token,
'X-Service-Catalog': '[]'
}
req.environ = {}
# invocation
middleware.process_request(req)
# assertions
ctx = req.environ[wsgi.CONTEXT_KEY]
self.assertThat(ctx, Not(Is(None)))
self.assertThat(ctx.user, Equals(user_id))
self.assertThat(ctx.auth_token, Equals(token))
self.assertEqual(0, len(ctx.service_catalog))
| apache-2.0 |
mfraezz/osf.io | osf_tests/test_generate_sitemap.py | 9 | 7286 | import os
import pytest
import mock
import shutil
import tempfile
import xml
from future.moves.urllib.parse import urljoin
from scripts import generate_sitemap
from osf_tests.factories import (AuthUserFactory, ProjectFactory, RegistrationFactory, CollectionFactory,
PreprintFactory, PreprintProviderFactory, EmbargoFactory, UnconfirmedUserFactory)
from website import settings
def get_all_sitemap_urls():
# Create temporary directory for the sitemaps to be generated
generate_sitemap.main()
# Parse the generated XML sitemap file
with open(os.path.join(settings.STATIC_FOLDER, 'sitemaps/sitemap_0.xml')) as f:
tree = xml.etree.ElementTree.parse(f)
shutil.rmtree(settings.STATIC_FOLDER)
# Get all the urls in the sitemap
# Note: namespace was defined in the XML file, therefore necessary to include in tag
namespace = '{http://www.sitemaps.org/schemas/sitemap/0.9}'
urls = [element.text for element in tree.iter(namespace + 'loc')]
return urls
@pytest.mark.django_db
class TestGenerateSitemap:
@pytest.fixture(autouse=True)
def user_admin_project_public(self):
return AuthUserFactory()
@pytest.fixture(autouse=True)
def user_unconfirmed(self):
return UnconfirmedUserFactory()
@pytest.fixture(autouse=True)
def user_admin_project_private(self):
return AuthUserFactory()
@pytest.fixture(autouse=True)
def project_registration_public(self, user_admin_project_public):
return ProjectFactory(creator=user_admin_project_public, is_public=True)
@pytest.fixture(autouse=True)
def project_preprint_osf(self, user_admin_project_public):
return ProjectFactory(creator=user_admin_project_public, is_public=True)
@pytest.fixture(autouse=True)
def project_preprint_other(self, user_admin_project_public):
return ProjectFactory(creator=user_admin_project_public, is_public=True)
@pytest.fixture(autouse=True)
def project_private(self, user_admin_project_private):
return ProjectFactory(creator=user_admin_project_private, is_public=False)
@pytest.fixture(autouse=True)
def project_deleted(self, user_admin_project_public):
return ProjectFactory(creator=user_admin_project_public, is_deleted=True)
@pytest.fixture(autouse=True)
def registration_active(self, user_admin_project_public, project_registration_public):
return RegistrationFactory(project=project_registration_public,
creator=user_admin_project_public,
is_public=True)
@pytest.fixture(autouse=True)
def registration_embargoed(self, user_admin_project_public, project_registration_public):
return RegistrationFactory(project=project_registration_public,
creator=user_admin_project_public,
embargo=EmbargoFactory(user=user_admin_project_public))
@pytest.fixture(autouse=True)
def collection(self, user_admin_project_public):
return CollectionFactory(creator=user_admin_project_public)
@pytest.fixture(autouse=True)
def provider_osf(self):
# Note: at least a provider whose _id == 'osf' have to exist for the script to work
return PreprintProviderFactory(_id='osf', name='osfprovider')
@pytest.fixture(autouse=True)
def provider_other(self):
return PreprintProviderFactory(_id='adl', name='anotherprovider')
@pytest.fixture(autouse=True)
def preprint_osf(self, project_preprint_osf, user_admin_project_public, provider_osf):
return PreprintFactory(project=project_preprint_osf,
creator=user_admin_project_public,
provider=provider_osf)
@pytest.fixture(autouse=True)
def preprint_other(self, project_preprint_other, user_admin_project_public, provider_other):
return PreprintFactory(project=project_preprint_other,
creator=user_admin_project_public,
provider=provider_other)
@pytest.fixture(autouse=True)
def all_included_links(self, user_admin_project_public, user_admin_project_private, project_registration_public,
project_preprint_osf, project_preprint_other,
registration_active, provider_other, preprint_osf,
preprint_other):
# Return urls of all fixtures
urls_to_include = [item['loc'] for item in settings.SITEMAP_STATIC_URLS]
urls_to_include.extend([
user_admin_project_public.url,
user_admin_project_private.url,
project_registration_public.url,
project_preprint_osf.url,
project_preprint_other.url,
registration_active.url,
'/preprints/{}/'.format(preprint_osf._id),
'/preprints/{}/{}/'.format(provider_other._id, preprint_other._id),
'/{}/download/?format=pdf'.format(preprint_osf._id),
'/{}/download/?format=pdf'.format(preprint_other._id)
])
urls_to_include = [urljoin(settings.DOMAIN, item) for item in urls_to_include]
return urls_to_include
@pytest.fixture()
def create_tmp_directory(self):
return tempfile.mkdtemp()
def test_all_links_included(self, all_included_links, create_tmp_directory):
with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
urls = get_all_sitemap_urls()
urls_to_include = all_included_links
assert len(urls_to_include) == len(urls)
assert set(urls_to_include) == set(urls)
def test_unconfirmed_user_not_included(self, create_tmp_directory, user_unconfirmed):
with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
urls = get_all_sitemap_urls()
assert urljoin(settings.DOMAIN, user_unconfirmed.url) not in urls
def test_collection_link_not_included(self, collection, create_tmp_directory):
with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
urls = get_all_sitemap_urls()
assert urljoin(settings.DOMAIN, collection.url) not in urls
def test_private_project_link_not_included(self, project_private, create_tmp_directory):
with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
urls = get_all_sitemap_urls()
assert urljoin(settings.DOMAIN, project_private.url) not in urls
def test_embargoed_registration_link_not_included(self, registration_embargoed, create_tmp_directory):
with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
urls = get_all_sitemap_urls()
assert urljoin(settings.DOMAIN, registration_embargoed.url) not in urls
def test_deleted_project_link_not_included(self, project_deleted, create_tmp_directory):
with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
urls = get_all_sitemap_urls()
assert urljoin(settings.DOMAIN, project_deleted.url) not in urls
| apache-2.0 |
viveksh13/gymkhana | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/_collections.py | 484 | 10454 | from collections import Mapping, MutableMapping
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
from .packages.six import iterkeys, itervalues, PY3
__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
Every time an item is evicted from the container,
        ``dispose_func(value)`` is called.
"""
ContainerCls = OrderedDict
def __init__(self, maxsize=10, dispose_func=None):
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
# If we didn't evict an existing value, we might have to evict the
# least recently used item from the beginning of the container.
if len(self._container) > self._maxsize:
_key, evicted_value = self._container.popitem(last=False)
if self.dispose_func and evicted_value is not _Null:
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self.lock:
# Copy pointers to all values, then wipe the mapping
values = list(itervalues(self._container))
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self):
with self.lock:
return list(iterkeys(self._container))
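def _demo_recently_used_container():
    # Illustrative only (not part of the original module): a two-slot LRU that
    # reports evicted values through dispose_func.
    evicted = []
    lru = RecentlyUsedContainer(maxsize=2, dispose_func=evicted.append)
    lru['a'] = 1
    lru['b'] = 2
    _ = lru['a']    # touching 'a' makes 'b' the least recently used entry
    lru['c'] = 3    # exceeds maxsize, so 'b' is evicted and disposed
    assert evicted == [2]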
_dict_setitem = dict.__setitem__
_dict_getitem = dict.__getitem__
_dict_delitem = dict.__delitem__
_dict_contains = dict.__contains__
_dict_setdefault = dict.setdefault
class HTTPHeaderDict(dict):
"""
    A ``dict`` like container for storing HTTP Headers.

    :param headers:
        An iterable of field-value pairs. Must not contain multiple field names
        when compared case-insensitively.

    :param kwargs:
        Additional field-value pairs to pass in to ``dict.update``.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
"""
def __init__(self, headers=None, **kwargs):
dict.__init__(self)
if headers is not None:
if isinstance(headers, HTTPHeaderDict):
self._copy_from(headers)
else:
self.extend(headers)
if kwargs:
self.extend(kwargs)
def __setitem__(self, key, val):
return _dict_setitem(self, key.lower(), (key, val))
def __getitem__(self, key):
val = _dict_getitem(self, key.lower())
return ', '.join(val[1:])
def __delitem__(self, key):
return _dict_delitem(self, key.lower())
def __contains__(self, key):
return _dict_contains(self, key.lower())
def __eq__(self, other):
if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
return False
if not isinstance(other, type(self)):
other = type(self)(other)
return dict((k1, self[k1]) for k1 in self) == dict((k2, other[k2]) for k2 in other)
def __ne__(self, other):
return not self.__eq__(other)
values = MutableMapping.values
get = MutableMapping.get
update = MutableMapping.update
if not PY3: # Python 2
iterkeys = MutableMapping.iterkeys
itervalues = MutableMapping.itervalues
__marker = object()
def pop(self, key, default=__marker):
'''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
# Using the MutableMapping function directly fails due to the private marker.
# Using ordinary dict.pop would expose the internal structures.
# So let's reinvent the wheel.
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def discard(self, key):
try:
del self[key]
except KeyError:
pass
def add(self, key, val):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
"""
key_lower = key.lower()
new_vals = key, val
# Keep the common case aka no item present as fast as possible
vals = _dict_setdefault(self, key_lower, new_vals)
if new_vals is not vals:
# new_vals was not inserted, as there was a previous one
if isinstance(vals, list):
# If already several items got inserted, we have a list
vals.append(val)
else:
# vals should be a tuple then, i.e. only one item so far
# Need to convert the tuple to list for further extension
_dict_setitem(self, key_lower, [vals[0], vals[1], val])
def extend(self, *args, **kwargs):
"""Generic import function for any type of header-like object.
Adapted version of MutableMapping.update in order to insert items
with self.add instead of self.__setitem__
"""
if len(args) > 1:
raise TypeError("extend() takes at most 1 positional "
"arguments ({} given)".format(len(args)))
other = args[0] if len(args) >= 1 else ()
if isinstance(other, HTTPHeaderDict):
for key, val in other.iteritems():
self.add(key, val)
elif isinstance(other, Mapping):
for key in other:
self.add(key, other[key])
elif hasattr(other, "keys"):
for key in other.keys():
self.add(key, other[key])
else:
for key, value in other:
self.add(key, value)
for key, value in kwargs.items():
self.add(key, value)
def getlist(self, key):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
try:
vals = _dict_getitem(self, key.lower())
except KeyError:
return []
else:
if isinstance(vals, tuple):
return [vals[1]]
else:
return vals[1:]
# Backwards compatibility for httplib
getheaders = getlist
getallmatchingheaders = getlist
iget = getlist
def __repr__(self):
return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
def _copy_from(self, other):
for key in other:
val = _dict_getitem(other, key)
if isinstance(val, list):
# Don't need to convert tuples
val = list(val)
_dict_setitem(self, key, val)
def copy(self):
clone = type(self)()
clone._copy_from(self)
return clone
def iteritems(self):
"""Iterate over all header lines, including duplicate ones."""
for key in self:
vals = _dict_getitem(self, key)
for val in vals[1:]:
yield vals[0], val
def itermerged(self):
"""Iterate over all headers, merging duplicate ones together."""
for key in self:
val = _dict_getitem(self, key)
yield val[0], ', '.join(val[1:])
def items(self):
return list(self.iteritems())
@classmethod
def from_httplib(cls, message): # Python 2
"""Read headers from a Python 2 httplib message object."""
# python2.7 does not expose a proper API for exporting multiheaders
# efficiently. This function re-reads raw lines from the message
# object and extracts the multiheaders properly.
headers = []
for line in message.headers:
if line.startswith((' ', '\t')):
key, value = headers[-1]
headers[-1] = (key, value + '\r\n' + line.rstrip())
continue
key, value = line.split(':', 1)
headers.append((key, value.strip()))
return cls(headers)
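# Illustrative sketch (not part of the original module, shown as a
# doctest-style comment): how the multi-value machinery above composes.
# getlist() returns every stored value, while itermerged() joins duplicates
# into one comma-separated header line.
#
#   >>> h = HTTPHeaderDict()
#   >>> h.add('Set-Cookie', 'a=1')
#   >>> h.add('set-cookie', 'b=2')
#   >>> h.getlist('Set-Cookie')
#   ['a=1', 'b=2']
#   >>> dict(h.itermerged())
#   {'Set-Cookie': 'a=1, b=2'}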
| apache-2.0 |
tenvick/hugula | Client/tools/site-packages/PIL/ImageDraw.py | 13 | 11655 | #
# The Python Imaging Library
# $Id: ImageDraw.py 2817 2006-10-07 15:34:03Z fredrik $
#
# drawing interface operations
#
# History:
# 1996-04-13 fl Created (experimental)
# 1996-08-07 fl Filled polygons, ellipses.
# 1996-08-13 fl Added text support
# 1998-06-28 fl Handle I and F images
# 1998-12-29 fl Added arc; use arc primitive to draw ellipses
# 1999-01-10 fl Added shape stuff (experimental)
# 1999-02-06 fl Added bitmap support
# 1999-02-11 fl Changed all primitives to take options
# 1999-02-20 fl Fixed backwards compatibility
# 2000-10-12 fl Copy on write, when necessary
# 2001-02-18 fl Use default ink for bitmap/text also in fill mode
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing
# 2002-12-11 fl Refactored low-level drawing API (work in progress)
# 2004-08-26 fl Made Draw() a factory function, added getdraw() support
# 2004-09-04 fl Added width support to line primitive
# 2004-09-10 fl Added font mode handling
# 2006-06-19 fl Added font bearing support (getmask2)
#
# Copyright (c) 1997-2006 by Secret Labs AB
# Copyright (c) 1996-2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image, ImageColor
try:
import warnings
except ImportError:
warnings = None
##
# A simple 2D drawing interface for PIL images.
# <p>
# Application code should use the <b>Draw</b> factory, instead of
# instantiating this class directly.
class ImageDraw:
##
# Create a drawing instance.
#
# @param im The image to draw in.
# @param mode Optional mode to use for color values. For RGB
# images, this argument can be RGB or RGBA (to blend the
# drawing into the image). For all other modes, this argument
# must be the same as the image mode. If omitted, the mode
# defaults to the mode of the image.
def __init__(self, im, mode=None):
im.load()
if im.readonly:
im._copy() # make it writable
blend = 0
if mode is None:
mode = im.mode
if mode != im.mode:
if mode == "RGBA" and im.mode == "RGB":
blend = 1
else:
raise ValueError("mode mismatch")
if mode == "P":
self.palette = im.palette
else:
self.palette = None
self.im = im.im
self.draw = Image.core.draw(self.im, blend)
self.mode = mode
if mode in ("I", "F"):
self.ink = self.draw.draw_ink(1, mode)
else:
self.ink = self.draw.draw_ink(-1, mode)
if mode in ("1", "P", "I", "F"):
# FIXME: fix Fill2 to properly support matte for I+F images
self.fontmode = "1"
else:
self.fontmode = "L" # aliasing is okay for other modes
self.fill = 0
self.font = None
##
# Set the default pen color.
def setink(self, ink):
# compatibility
if warnings:
warnings.warn(
"'setink' is deprecated; use keyword arguments instead",
DeprecationWarning, stacklevel=2
)
if Image.isStringType(ink):
ink = ImageColor.getcolor(ink, self.mode)
if self.palette and not Image.isNumberType(ink):
ink = self.palette.getcolor(ink)
self.ink = self.draw.draw_ink(ink, self.mode)
##
# Set the default background color.
def setfill(self, onoff):
# compatibility
if warnings:
warnings.warn(
"'setfill' is deprecated; use keyword arguments instead",
DeprecationWarning, stacklevel=2
)
self.fill = onoff
##
# Set the default font.
def setfont(self, font):
# compatibility
self.font = font
##
# Get the current default font.
def getfont(self):
if not self.font:
# FIXME: should add a font repository
import ImageFont
self.font = ImageFont.load_default()
return self.font
def _getink(self, ink, fill=None):
if ink is None and fill is None:
if self.fill:
fill = self.ink
else:
ink = self.ink
else:
if ink is not None:
if Image.isStringType(ink):
ink = ImageColor.getcolor(ink, self.mode)
if self.palette and not Image.isNumberType(ink):
ink = self.palette.getcolor(ink)
ink = self.draw.draw_ink(ink, self.mode)
if fill is not None:
if Image.isStringType(fill):
fill = ImageColor.getcolor(fill, self.mode)
if self.palette and not Image.isNumberType(fill):
fill = self.palette.getcolor(fill)
fill = self.draw.draw_ink(fill, self.mode)
return ink, fill
##
# Draw an arc.
def arc(self, xy, start, end, fill=None):
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_arc(xy, start, end, ink)
##
# Draw a bitmap.
def bitmap(self, xy, bitmap, fill=None):
bitmap.load()
ink, fill = self._getink(fill)
if ink is None:
ink = fill
if ink is not None:
self.draw.draw_bitmap(xy, bitmap.im, ink)
##
# Draw a chord.
def chord(self, xy, start, end, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_chord(xy, start, end, fill, 1)
if ink is not None:
self.draw.draw_chord(xy, start, end, ink, 0)
##
# Draw an ellipse.
def ellipse(self, xy, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_ellipse(xy, fill, 1)
if ink is not None:
self.draw.draw_ellipse(xy, ink, 0)
##
# Draw a line, or a connected sequence of line segments.
def line(self, xy, fill=None, width=0):
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_lines(xy, ink, width)
##
# (Experimental) Draw a shape.
def shape(self, shape, fill=None, outline=None):
# experimental
shape.close()
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_outline(shape, fill, 1)
if ink is not None:
self.draw.draw_outline(shape, ink, 0)
##
# Draw a pieslice.
def pieslice(self, xy, start, end, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_pieslice(xy, start, end, fill, 1)
if ink is not None:
self.draw.draw_pieslice(xy, start, end, ink, 0)
##
# Draw one or more individual pixels.
def point(self, xy, fill=None):
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_points(xy, ink)
##
# Draw a polygon.
def polygon(self, xy, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_polygon(xy, fill, 1)
if ink is not None:
self.draw.draw_polygon(xy, ink, 0)
##
# Draw a rectangle.
def rectangle(self, xy, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_rectangle(xy, fill, 1)
if ink is not None:
self.draw.draw_rectangle(xy, ink, 0)
##
# Draw text.
def text(self, xy, text, fill=None, font=None, anchor=None):
ink, fill = self._getink(fill)
if font is None:
font = self.getfont()
if ink is None:
ink = fill
if ink is not None:
try:
mask, offset = font.getmask2(text, self.fontmode)
xy = xy[0] + offset[0], xy[1] + offset[1]
except AttributeError:
try:
mask = font.getmask(text, self.fontmode)
except TypeError:
mask = font.getmask(text)
self.draw.draw_bitmap(xy, mask, ink)
##
# Get the size of a given string, in pixels.
def textsize(self, text, font=None):
if font is None:
font = self.getfont()
return font.getsize(text)
##
# A simple 2D drawing interface for PIL images.
#
# @param im The image to draw in.
# @param mode Optional mode to use for color values. For RGB
# images, this argument can be RGB or RGBA (to blend the
# drawing into the image). For all other modes, this argument
# must be the same as the image mode. If omitted, the mode
# defaults to the mode of the image.
def Draw(im, mode=None):
try:
return im.getdraw(mode)
except AttributeError:
return ImageDraw(im, mode)
# experimental access to the outline API
try:
Outline = Image.core.outline
except AttributeError:
Outline = None
##
# (Experimental) A more advanced 2D drawing interface for PIL images,
# based on the WCK interface.
#
# @param im The image to draw in.
# @param hints An optional list of hints.
# @return A (drawing context, drawing resource factory) tuple.
def getdraw(im=None, hints=None):
# FIXME: this needs more work!
# FIXME: come up with a better 'hints' scheme.
handler = None
if not hints or "nicest" in hints:
try:
import _imagingagg
handler = _imagingagg
except ImportError:
pass
if handler is None:
import ImageDraw2
handler = ImageDraw2
if im:
im = handler.Draw(im)
return im, handler
##
# (experimental) Fills a bounded region with a given color.
#
# @param image Target image.
# @param xy Seed position (a 2-item coordinate tuple).
# @param value Fill color.
# @param border Optional border value. If given, the region consists of
# pixels with a color different from the border color. If not given,
# the region consists of pixels having the same color as the seed
# pixel.
def floodfill(image, xy, value, border=None):
"Fill bounded region."
# based on an implementation by Eric S. Raymond
pixel = image.load()
x, y = xy
try:
background = pixel[x, y]
if background == value:
return # seed point already has fill color
pixel[x, y] = value
except IndexError:
return # seed point outside image
edge = [(x, y)]
if border is None:
while edge:
newedge = []
for (x, y) in edge:
for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
try:
p = pixel[s, t]
except IndexError:
pass
else:
if p == background:
pixel[s, t] = value
newedge.append((s, t))
edge = newedge
else:
while edge:
newedge = []
for (x, y) in edge:
for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
try:
p = pixel[s, t]
except IndexError:
pass
else:
if p != value and p != border:
pixel[s, t] = value
newedge.append((s, t))
edge = newedge
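if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: exercise the
    # Draw factory and a couple of primitives on a throwaway RGB image
    # (assumes the old-style PIL namespace imported above is available).
    im = Image.new("RGB", (64, 64), (255, 255, 255))
    d = Draw(im)
    d.rectangle([4, 4, 59, 59], outline=(0, 0, 0))
    d.ellipse([16, 16, 47, 47], fill=(255, 0, 0))
    # Fill the region outside the black rectangle, stopping at its outline.
    floodfill(im, (1, 1), (0, 0, 255), border=(0, 0, 0))
    im.save("imagedraw_demo.png")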
| mit |
6/GeoDJ | geodj/settings.py | 1 | 5663 | import os
# Override these in the production environment
os.environ.setdefault("APP_ENV", "development")
os.environ.setdefault("SECRET_KEY", "^uhrm48x9y=1f&+$bg=oc(#23mp0*g5k%8+si9tdz7&4_xk&lf")
if os.environ["APP_ENV"] == "development":
try:
        # Add secret ENV variables for development (e.g. API keys) to secrets.py
import secrets
os.environ.setdefault("LASTFM_API_KEY", secrets.LASTFM_API_KEY)
    except (ImportError, AttributeError):
pass
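# Illustrative sketch (hypothetical companion file, not part of this module):
# the secrets.py imported above would sit next to settings.py, stay out of
# version control, and define one plain module-level constant per secret, e.g.
#
#     LASTFM_API_KEY = "development-api-key-goes-here"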
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = os.environ['APP_ENV'] != 'production'
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {}
if os.environ["APP_ENV"] == "production":
import dj_database_url
DATABASES['default'] = dj_database_url.config()
else:
DATABASES['default'] = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'geodj_development',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = 'staticfiles'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ["SECRET_KEY"]
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'geodj.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'geodj.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'geodj',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
| mit |
Zhongqilong/mykbengineer | kbe/src/lib/python/Lib/encodings/unicode_internal.py | 827 | 1196 | """ Python 'unicode-internal' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.unicode_internal_encode
decode = codecs.unicode_internal_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.unicode_internal_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.unicode_internal_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='unicode-internal',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
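if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: getregentry()
    # simply packages the classes above into the CodecInfo object that the
    # codecs registry machinery expects.
    info = getregentry()
    print(info.name)  # -> unicode-internal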
| lgpl-3.0 |
40223108/-2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/xml/sax/handler.py | 925 | 13922 | """
This module contains the core classes of version 2.0 of SAX for Python.
This file provides only default classes with absolutely minimum
functionality, from which drivers and applications can be subclassed.
Many of these classes are empty and are included only as documentation
of the interfaces.
$Id$
"""
version = '2.0beta'
#============================================================================
#
# HANDLER INTERFACES
#
#============================================================================
# ===== ERRORHANDLER =====
class ErrorHandler:
"""Basic interface for SAX error handlers.
If you create an object that implements this interface, then
register the object with your XMLReader, the parser will call the
methods in your object to report all warnings and errors. There
are three levels of errors available: warnings, (possibly)
recoverable errors, and unrecoverable errors. All methods take a
SAXParseException as the only parameter."""
def error(self, exception):
"Handle a recoverable error."
raise exception
def fatalError(self, exception):
"Handle a non-recoverable error."
raise exception
def warning(self, exception):
"Handle a warning."
print(exception)
# ===== CONTENTHANDLER =====
class ContentHandler:
"""Interface for receiving logical document content events.
This is the main callback interface in SAX, and the one most
important to applications. The order of events in this interface
mirrors the order of the information in the document."""
def __init__(self):
self._locator = None
def setDocumentLocator(self, locator):
"""Called by the parser to give the application a locator for
locating the origin of document events.
SAX parsers are strongly encouraged (though not absolutely
        required) to supply a locator: if a parser does so, it must supply
the locator to the application by invoking this method before
invoking any of the other methods in the DocumentHandler
interface.
The locator allows the application to determine the end
position of any document-related event, even if the parser is
not reporting an error. Typically, the application will use
this information for reporting its own errors (such as
character content that does not match an application's
business rules). The information returned by the locator is
probably not sufficient for use with a search engine.
Note that the locator will return correct information only
during the invocation of the events in this interface. The
application should not attempt to use it at any other time."""
self._locator = locator
def startDocument(self):
"""Receive notification of the beginning of a document.
The SAX parser will invoke this method only once, before any
other methods in this interface or in DTDHandler (except for
setDocumentLocator)."""
def endDocument(self):
"""Receive notification of the end of a document.
The SAX parser will invoke this method only once, and it will
be the last method invoked during the parse. The parser shall
not invoke this method until it has either abandoned parsing
(because of an unrecoverable error) or reached the end of
input."""
def startPrefixMapping(self, prefix, uri):
"""Begin the scope of a prefix-URI Namespace mapping.
The information from this event is not necessary for normal
Namespace processing: the SAX XML reader will automatically
replace prefixes for element and attribute names when the
http://xml.org/sax/features/namespaces feature is true (the
default).
There are cases, however, when applications need to use
prefixes in character data or in attribute values, where they
cannot safely be expanded automatically; the
start/endPrefixMapping event supplies the information to the
application to expand prefixes in those contexts itself, if
necessary.
Note that start/endPrefixMapping events are not guaranteed to
        be properly nested relative to each other: all
startPrefixMapping events will occur before the corresponding
startElement event, and all endPrefixMapping events will occur
after the corresponding endElement event, but their order is
not guaranteed."""
def endPrefixMapping(self, prefix):
"""End the scope of a prefix-URI mapping.
See startPrefixMapping for details. This event will always
occur after the corresponding endElement event, but the order
of endPrefixMapping events is not otherwise guaranteed."""
def startElement(self, name, attrs):
"""Signals the start of an element in non-namespace mode.
The name parameter contains the raw XML 1.0 name of the
element type as a string and the attrs parameter holds an
instance of the Attributes class containing the attributes of
the element."""
def endElement(self, name):
"""Signals the end of an element in non-namespace mode.
The name parameter contains the name of the element type, just
as with the startElement event."""
def startElementNS(self, name, qname, attrs):
"""Signals the start of an element in namespace mode.
The name parameter contains the name of the element type as a
(uri, localname) tuple, the qname parameter the raw XML 1.0
name used in the source document, and the attrs parameter
holds an instance of the Attributes class containing the
attributes of the element.
The uri part of the name tuple is None for elements which have
no namespace."""
def endElementNS(self, name, qname):
"""Signals the end of an element in namespace mode.
The name parameter contains the name of the element type, just
as with the startElementNS event."""
def characters(self, content):
"""Receive notification of character data.
The Parser will call this method to report each chunk of
character data. SAX parsers may return all contiguous
character data in a single chunk, or they may split it into
several chunks; however, all of the characters in any single
event must come from the same external entity so that the
Locator provides useful information."""
def ignorableWhitespace(self, whitespace):
"""Receive notification of ignorable whitespace in element content.
Validating Parsers must use this method to report each chunk
of ignorable whitespace (see the W3C XML 1.0 recommendation,
section 2.10): non-validating parsers may also use this method
if they are capable of parsing and using content models.
SAX parsers may return all contiguous whitespace in a single
chunk, or they may split it into several chunks; however, all
of the characters in any single event must come from the same
external entity, so that the Locator provides useful
information."""
def processingInstruction(self, target, data):
"""Receive notification of a processing instruction.
The Parser will invoke this method once for each processing
instruction found: note that processing instructions may occur
before or after the main document element.
A SAX parser should never report an XML declaration (XML 1.0,
section 2.8) or a text declaration (XML 1.0, section 4.3.1)
using this method."""
def skippedEntity(self, name):
"""Receive notification of a skipped entity.
The Parser will invoke this method once for each entity
skipped. Non-validating processors may skip entities if they
have not seen the declarations (because, for example, the
entity was declared in an external DTD subset). All processors
may skip external entities, depending on the values of the
http://xml.org/sax/features/external-general-entities and the
http://xml.org/sax/features/external-parameter-entities
properties."""
# ===== DTDHandler =====
class DTDHandler:
"""Handle DTD events.
This interface specifies only those DTD events required for basic
parsing (unparsed entities and attributes)."""
def notationDecl(self, name, publicId, systemId):
"Handle a notation declaration event."
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
"Handle an unparsed entity declaration event."
# ===== ENTITYRESOLVER =====
class EntityResolver:
"""Basic interface for resolving entities. If you create an object
implementing this interface, then register the object with your
Parser, the parser will call the method in your object to
resolve all external entities. Note that DefaultHandler implements
this interface with the default behaviour."""
def resolveEntity(self, publicId, systemId):
"""Resolve the system identifier of an entity and return either
the system identifier to read from as a string, or an InputSource
to read from."""
return systemId
#============================================================================
#
# CORE FEATURES
#
#============================================================================
feature_namespaces = "http://xml.org/sax/features/namespaces"
# true: Perform Namespace processing (default).
# false: Optionally do not perform Namespace processing
# (implies namespace-prefixes).
# access: (parsing) read-only; (not parsing) read/write
feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes"
# true: Report the original prefixed names and attributes used for Namespace
# declarations.
# false: Do not report attributes used for Namespace declarations, and
# optionally do not report original prefixed names (default).
# access: (parsing) read-only; (not parsing) read/write
feature_string_interning = "http://xml.org/sax/features/string-interning"
# true: All element names, prefixes, attribute names, Namespace URIs, and
# local names are interned using the built-in intern function.
# false: Names are not necessarily interned, although they may be (default).
# access: (parsing) read-only; (not parsing) read/write
feature_validation = "http://xml.org/sax/features/validation"
# true: Report all validation errors (implies external-general-entities and
# external-parameter-entities).
# false: Do not report validation errors.
# access: (parsing) read-only; (not parsing) read/write
feature_external_ges = "http://xml.org/sax/features/external-general-entities"
# true: Include all external general (text) entities.
# false: Do not include external general entities.
# access: (parsing) read-only; (not parsing) read/write
feature_external_pes = "http://xml.org/sax/features/external-parameter-entities"
# true: Include all external parameter entities, including the external
# DTD subset.
# false: Do not include any external parameter entities, even the external
# DTD subset.
# access: (parsing) read-only; (not parsing) read/write
all_features = [feature_namespaces,
feature_namespace_prefixes,
feature_string_interning,
feature_validation,
feature_external_ges,
feature_external_pes]
#============================================================================
#
# CORE PROPERTIES
#
#============================================================================
property_lexical_handler = "http://xml.org/sax/properties/lexical-handler"
# data type: xml.sax.sax2lib.LexicalHandler
# description: An optional extension handler for lexical events like comments.
# access: read/write
property_declaration_handler = "http://xml.org/sax/properties/declaration-handler"
# data type: xml.sax.sax2lib.DeclHandler
# description: An optional extension handler for DTD-related events other
# than notations and unparsed entities.
# access: read/write
property_dom_node = "http://xml.org/sax/properties/dom-node"
# data type: org.w3c.dom.Node
# description: When parsing, the current DOM node being visited if this is
# a DOM iterator; when not parsing, the root DOM node for
# iteration.
# access: (parsing) read-only; (not parsing) read/write
property_xml_string = "http://xml.org/sax/properties/xml-string"
# data type: String
# description: The literal string of characters that was the source for
# the current event.
# access: read-only
property_encoding = "http://www.python.org/sax/properties/encoding"
# data type: String
# description: The name of the encoding to assume for input data.
# access: write: set the encoding, e.g. established by a higher-level
# protocol. May change during parsing (e.g. after
# processing a META tag)
# read: return the current encoding (possibly established through
# auto-detection.
# initial value: UTF-8
#
property_interning_dict = "http://www.python.org/sax/properties/interning-dict"
# data type: Dictionary
# description: The dictionary used to intern common strings in the document
# access: write: Request that the parser uses a specific dictionary, to
# allow interning across different documents
# read: return the current interning dictionary, or None
#
all_properties = [property_lexical_handler,
property_dom_node,
property_declaration_handler,
property_xml_string,
property_encoding,
property_interning_dict]
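if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: the ContentHandler
    # interface above is used by subclassing it, overriding only the callbacks
    # you need, and handing an instance to a parser (xml.sax is assumed here).
    import xml.sax

    class _ElementCounter(ContentHandler):
        def __init__(self):
            ContentHandler.__init__(self)
            self.elements = 0

        def startElement(self, name, attrs):
            self.elements += 1

    counter = _ElementCounter()
    xml.sax.parseString(b"<root><a/><a/></root>", counter)
    print(counter.elements)  # -> 3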
| gpl-3.0 |
userzimmermann/python-moretools | moretools/_types.py | 1 | 2808 | # python-moretools
#
# many more basic tools for python 2/3
# extending itertools, functools and operator
#
# Copyright (C) 2011-2016 Stefan Zimmermann <zimmermann.code@gmail.com>
#
# python-moretools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-moretools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with python-moretools. If not, see <http://www.gnu.org/licenses/>.
from ._common import *
from ._simpledict import SimpleDictType
from six.moves import UserString, UserList, UserDict
number_types = integer_types + (float, complex)
string_types = string_types + (UserString,)
list_types = (list, UserList)
dict_types = (dict, UserDict, SimpleDictType)
def isintclass(cls):
return issubclass(cls, int)
isinttype = isintclass
def isint(value):
return isinstance(value, int)
if PY2:
def islongclass(cls):
return issubclass(cls, long)
islongtype = islongclass
def islong(value):
return isinstance(value, long)
def isintegerclass(cls):
return issubclass(cls, integer_types)
isintegertype = isintegerclass
def isinteger(value):
return isinstance(value, integer_types)
def isfloatclass(cls):
return issubclass(cls, float)
isfloattype = isfloatclass
def isfloat(value):
return isinstance(value, float)
def iscomplexclass(cls):
return issubclass(cls, complex)
iscomplextype = iscomplexclass
def iscomplex(value):
return isinstance(value, complex)
def isnumberclass(cls):
return issubclass(cls, number_types)
isnumbertype = isnumberclass
def isnumber(value):
return isinstance(value, number_types)
def isstringclass(cls):
return issubclass(cls, string_types)
isstringtype = isstringclass
def isstring(value):
return isinstance(value, string_types)
def istupleclass(cls):
return issubclass(cls, tuple)
istupletype = istupleclass
def istuple(value):
return isinstance(value, tuple)
def islistclass(cls):
return issubclass(cls, list_types)
islisttype = islistclass
def islist(value):
return isinstance(value, list_types)
def issetclass(cls):
return issubclass(cls, set)
issettype = issetclass
def isset(value):
return isinstance(value, set)
def isdictclass(cls):
return issubclass(cls, dict_types)
isdicttype = isdictclass
def isdict(value):
return isinstance(value, dict_types)
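# Illustrative sketch (not part of the original module; shown as a
# doctest-style comment because the relative imports above keep this file
# from running standalone):
#
#   >>> from moretools._types import *
#   >>> isstring(UserString("abc"))
#   True
#   >>> isnumber(2j), isdict([])
#   (True, False)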
| gpl-3.0 |