repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
superchilli/webapp | venv/lib/python2.7/site-packages/requests/packages/charade/compat.py | 2943 | 1157 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
# Tuple of the "string-like" types of the running interpreter, used for
# the isinstance() check in wrap_ord() below.
if sys.version_info < (3, 0):
    # Python 2: both byte strings and unicode strings count as strings.
    base_str = (str, unicode)
else:
    # Python 3: bytes and str.
    base_str = (bytes, str)
def wrap_ord(a):
    """Return ord(a) for Python 2 string input, otherwise *a* unchanged.

    On Python 2, iterating a byte string yields one-character strings
    that must be converted with ord(); on Python 3 iterating bytes
    already yields integers, so the value is passed through untouched.
    """
    needs_conversion = sys.version_info < (3, 0) and isinstance(a, base_str)
    return ord(a) if needs_conversion else a
| mit |
mmuellner/vigra | docsrc/post.py | 11 | 5960 | #!/usr/bin/env python
import re
import glob
import sys
# Require exactly two command-line arguments: the directory holding the
# generated HTML files and the version number to substitute into them.
if len(sys.argv) != 3:
    print 'usage: python post.py directory versionNumber'
    sys.exit(1)
path = str(sys.argv[1])  # directory containing the doxygen HTML output
# Placeholder patterns substituted in every generated HTML page.
insertVersion = re.compile(r'VERSION_VERSION_VERSION')
insertSTLLink = re.compile(r'WWW_STL_DOCU')
# tested with doxygen 1.7.5.1
hasAnchorDetails = re.compile(r'<a (class="anchor" |)name="_?details"( id="details"|)>')
# Alternative forms of the "Detailed Description" heading as emitted by
# different doxygen versions; each is applied at most once.
detailsHeading1 = re.compile(r'''<a name="_details"></a><h2>Detailed Description</h2>
<h3>''')
detailsHeading2 = re.compile(r'<a name="_details"></a><h2>Detailed Description</h2>')
# tested with doxygen 1.7.5.1 and 1.8.2
detailsHeading3 = re.compile(r'<a name="details" id="details"></a><h2( class="groupheader"|)>Detailed Description</h2>')
# Alternative forms of the page's main heading, one per doxygen generation.
mainHeading1 = re.compile(r'''(<!-- Generated by Doxygen \d+\.\d+\.\d+ -->)
( <div class="navpath">.*
</div>
|)<div class="contents">
<h1>(.*)( Class Template Reference| Struct Template Reference| Class Reference| Struct Reference)(<br>
<small>
.*</small>
|)</h1>''')
mainHeading2 = re.compile(r'''(<!-- Generated by Doxygen \d+\.\d+\.\d+ -->)
( <div class="navpath">.*
</div>
<div class="contents">
|<div class="contents">
|)<h1>(.*)()(<br>
<small>
.*</small>
|)</h1>''')
# tested with doxygen 1.5.6
mainHeading3 = re.compile(r'''(<!-- Generated by Doxygen \d+\.\d+\.\d+ -->)
(<div class="header">
<div class="headertitle">
)<h1>(.*)</h1>(.*)()
</div>
<div class="contents">''')
# tested with doxygen 1.7.5.1 and 1.7.6.1
mainHeading4 = re.compile(r'''(<!-- Generated by Doxygen .+ -->
</div>)
(<div class="header">
<div class="headertitle">
)<div class="title">(.*)</div> </div>(.*)()
</div>(?:<!--header-->)?
<div class="contents">''')
# tested with doxygen 1.8.2
mainHeading5 = re.compile(r'''(<!-- Generated by Doxygen .+ -->
</div><!-- top -->)
(<div class="header">
<div class="headertitle">
)<div class="title">(.*)</div> </div>(.*)()
</div>(?:<!--header-->)?
<div class="contents">''')
# Replacement for the main heading; the %s slot receives either
# detailsLink (when a details section exists) or the empty string.
mainHeadingReplacement = '''\\1
<div class="contents">
<table class="main_heading">
<tr>
%s<td width="100%%">\\3\\5
</td>
<td align=right><a href="http://hci.iwr.uni-heidelberg.de/vigra/"><IMG border=0 ALT="VIGRA" SRC="documents/vigra.gif" title="VIGRA Homepage"></a></td></tr>
</table><p>
'''
# tested with doxygen 1.7.5.1
headingSummary = re.compile(r'''(<!-- Generated by Doxygen .+ -->
</div>
<div class="header">)
<div class="summary">
(?s).*?</div>''')
# tested with doxygen 1.8.2
headingSummary2 = re.compile(r'''(<!-- Generated by Doxygen .+ -->
</div><!-- top -->
<div class="header">)
<div class="summary">
(?s).*?</div>''')
# tested with doxygen 1.7.5.1
headingNavpath = re.compile(r'''(<!-- Generated by Doxygen .+ -->)
<div id="nav-path" class="navpath">(?s).*?</div>''')
# tested with doxygen 1.8.2
headingNavpath2 = re.compile(r'''(<!-- Generated by Doxygen .+ -->)
<div id="nav-path" class="navpath">
<ul>
.* </ul>
</div>''')
# Arrow image linking from the main heading down to the details section.
detailsLink = '''<td align=left>
<A HREF ="#_details" ><IMG BORDER=0 ALT="details" title="Detailed Description" SRC="documents/pfeilGross.gif"></A>
</td>
'''
# Special heading treatment for the front (index) page.
indexPageHeading = re.compile(r'''((?:<p>)?<a class="anchor" (?:name|id)="_details"></a> (?:</p>\n)?<center> </center>)<h2>(<a class="anchor" (?:name|id)="Main">(?:</a>)?)
(.*)
<center> Version''')
indexPageHeadingReplacement = '''\\1 <h2 class="details_section">\\2
\\3
<center> Version'''
# Summary-table template declaration together with the anchor of the
# corresponding member documentation block.
templateDeclaration = re.compile('''<tr><td class="memTemplParams" nowrap colspan="2">([^<]*)</td></tr>\s*
<tr><td class="memTemplItemLeft" nowrap align="right" valign="top">[^<]*</td><td class="memTemplItemRight" valign="bottom"><a class="el" href=".*#([^"]+)">''')
# %s receives the anchor id; the match is the documentation block lacking
# its template declaration line.
templateDocumentation = '''(<a class="anchor" name="%s"></a>.*
<div class="memitem">
<div class="memproto">\s*
<table class="memname">
<tr>)'''
templateDocumentationReplacement = '''\\1
<td colspan="4" class="memtemplate">%s</td></tr><tr>'''
def convertHeadings(text):
    """Restyle the Doxygen 'Detailed Description' block and the page's main
    heading, and strip the navigation-path / summary bars.

    Handles the output of several Doxygen versions, hence the cascade of
    alternative patterns; each substitution is applied at most once.
    """
    has_details = hasAnchorDetails.search(text) is not None
    if has_details:
        # Tag the details section with our own CSS class, covering the
        # three heading layouts produced by different Doxygen releases.
        text = detailsHeading1.sub(
            '<a name="_details"></a><h2 class="details_section">Detailed Description</h2>\n<h3 class="details_section">',
            text, 1)
        text = detailsHeading2.sub(
            r'<a name="_details"></a><h2 class="details_section">Detailed Description</h2>',
            text, 1)
        text = detailsHeading3.sub(
            r'<a name="_details" id="details"></a><h2 class="details_section">Detailed Description</h2>',
            text, 1)
    # Link from the heading to the details section only when one exists.
    heading = mainHeadingReplacement % (detailsLink if has_details else '')
    for pattern in (headingNavpath, headingNavpath2,
                    headingSummary, headingSummary2):
        text = pattern.sub("\\1", text, 1)
    for pattern in (mainHeading1, mainHeading2, mainHeading3,
                    mainHeading4, mainHeading5):
        text = pattern.sub(heading, text, 1)
    return text
def insertMissingTemplateDeclarations(text):
    """Copy each template declaration found in a member summary table into
    the corresponding detailed documentation block, which Doxygen
    sometimes omits."""
    for declaration, anchor in templateDeclaration.findall(text):
        pattern = templateDocumentation % anchor
        replacement = templateDocumentationReplacement % declaration
        text = re.sub(pattern, replacement, text)
    return text
def processFile(fileName):
    # Post-process one generated HTML file in place: substitute the version
    # number and STL-link placeholders, restyle the headings, and re-insert
    # missing template declarations.
    print fileName # log message
    f = open(fileName)
    text = f.read()
    f.close()
    # VERSION_VERSION_VERSION placeholder -> version given on the command line
    text = insertVersion.sub(sys.argv[2], text)
    text = insertSTLLink.sub(r'http://www.sgi.com/tech/stl/', text)
    # The index page gets extra treatment: drop the duplicated version heading
    # and restyle the main anchor.  NOTE(review): the second branch uses
    # '.*\\index.html' where a Windows path separator was probably intended;
    # '\i' in a regex just matches 'i' - confirm before relying on it.
    if re.search('.*/index.html', fileName) or re.search('.*\\index.html', fileName):
        text = re.sub(r'<h3 (align="center"|class="version")>\d+\.\d+\.\d+ </h3>', '', text)
        text = indexPageHeading.sub(indexPageHeadingReplacement, text)
    text = convertHeadings(text)
    text = insertMissingTemplateDeclarations(text)
    # write the transformed text back to the same file
    f = open(fileName, 'w+')
    f.write(text)
    f.close()
# Post-process every HTML file found in the directory given on the
# command line.
files = glob.glob(path + '/*.html') # use given path to files
#files = glob.glob(path + '/index.html')
for file in files:
    processFile(file)
| mit |
Supermem/ibis | ibis/compat.py | 6 | 1557 | # Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
import itertools
import numpy as np
import sys
import six
from six import BytesIO, StringIO, string_types as py_string
# Interpreter version flags used to select the compatibility shims below.
PY26 = sys.version_info[0] == 2 and sys.version_info[1] == 6
PY3 = (sys.version_info[0] >= 3)
PY2 = sys.version_info[0] == 2
if PY26:
    # unittest2 backports the Python 2.7 unittest API to 2.6
    import unittest2 as unittest
else:
    import unittest
if PY3:
    import pickle
    unicode_type = str
    def lzip(*x):
        # zip() is a lazy iterator on Python 3; wrap it in a list to match
        # the eager Python 2 behaviour.
        return list(zip(*x))
    zip = zip
    pickle_dump = pickle.dumps
    pickle_load = pickle.loads
    def dict_values(x):
        # dict.values() is a view on Python 3; materialize it as a list
        return list(x.values())
    from decimal import Decimal
else:
    import cPickle
    try:
        # cdecimal is a faster C implementation of decimal for Python 2
        from cdecimal import Decimal
    except ImportError:
        from decimal import Decimal
    unicode_type = unicode
    lzip = zip
    zip = itertools.izip
    from ibis.cloudpickle import dumps as pickle_dump
    pickle_load = cPickle.loads
    def dict_values(x):
        return x.values()
# Accept both native Python and NumPy integers wherever ints are expected.
integer_types = six.integer_types + (np.integer,)
| apache-2.0 |
40223209/test1 | static/Brython3.1.3-20150514-095342/Lib/_weakrefset.py | 766 | 5570 | # Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard:
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet:
    """Set holding weak references to its elements.

    An element disappears automatically once no strong reference to it
    remains.  Removals triggered by garbage collection while the set is
    being iterated are deferred through _pending_removals /
    _IterationGuard and committed when the iteration finishes.
    """
    def __init__(self, data=None):
        # self.data holds weakref.ref objects wrapping the elements
        self.data = set()
        def _remove(item, selfref=ref(self)):
            # weakref callback: defer the removal while iterating
            self = selfref()
            if self is not None:
                if self._iterating:
                    self._pending_removals.append(item)
                else:
                    self.data.discard(item)
        self._remove = _remove
        # A list of keys to be removed
        self._pending_removals = []
        self._iterating = set()
        if data is not None:
            self.update(data)
    def _commit_removals(self):
        # Flush removals that were deferred during iteration.
        l = self._pending_removals
        discard = self.data.discard
        while l:
            discard(l.pop())
    def __iter__(self):
        # Guard the whole iteration so callback removals are deferred.
        with _IterationGuard(self):
            for itemref in self.data:
                item = itemref()
                if item is not None:
                    yield item
    def __len__(self):
        return len(self.data) - len(self._pending_removals)
    def __contains__(self, item):
        try:
            wr = ref(item)
        except TypeError:
            # not weakly referenceable, so it cannot be in the set
            return False
        return wr in self.data
    def __reduce__(self):
        # Pickle as (class, strong-ref element list, instance dict).
        return (self.__class__, (list(self),),
                getattr(self, '__dict__', None))
    def add(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.add(ref(item, self._remove))
    def clear(self):
        if self._pending_removals:
            self._commit_removals()
        self.data.clear()
    def copy(self):
        return self.__class__(self)
    def pop(self):
        """Remove and return an arbitrary live element; KeyError if empty."""
        if self._pending_removals:
            self._commit_removals()
        while True:
            try:
                itemref = self.data.pop()
            except KeyError:
                raise KeyError('pop from empty WeakSet')
            item = itemref()
            # skip references whose target has already died
            if item is not None:
                return item
    def remove(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.remove(ref(item))
    def discard(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.discard(ref(item))
    def update(self, other):
        if self._pending_removals:
            self._commit_removals()
        for element in other:
            self.add(element)
    def __ior__(self, other):
        self.update(other)
        return self
    def difference(self, other):
        newset = self.copy()
        newset.difference_update(other)
        return newset
    __sub__ = difference
    def difference_update(self, other):
        self.__isub__(other)
    def __isub__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)
        return self
    def intersection(self, other):
        return self.__class__(item for item in other if item in self)
    __and__ = intersection
    def intersection_update(self, other):
        self.__iand__(other)
    def __iand__(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)
        return self
    def issubset(self, other):
        return self.data.issubset(ref(item) for item in other)
    __le__ = issubset
    def __lt__(self, other):
        return self.data < set(ref(item) for item in other)
    def issuperset(self, other):
        return self.data.issuperset(ref(item) for item in other)
    __ge__ = issuperset
    def __gt__(self, other):
        return self.data > set(ref(item) for item in other)
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.data == set(ref(item) for item in other)
    def symmetric_difference(self, other):
        newset = self.copy()
        newset.symmetric_difference_update(other)
        return newset
    __xor__ = symmetric_difference
    def symmetric_difference_update(self, other):
        self.__ixor__(other)
    def __ixor__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
        return self
    def union(self, other):
        return self.__class__(e for s in (self, other) for e in s)
    __or__ = union
    def isdisjoint(self, other):
        return len(self.intersection(other)) == 0
| agpl-3.0 |
schuza/odin-master | src/main/python/compileall.py | 251 | 5283 | """Module/script to "compile" all .py files to .pyc (or .pyo) file.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into directories.
Without arguments, if compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import sys
import py_compile
__all__ = ["compile_dir","compile_path"]
def compile_dir(dir, maxlevels=10, ddir=None,
                force=0, rx=None, quiet=0):
    """Byte-compile all modules in the given directory tree.

    Arguments (only dir is required):

    dir:       the directory to byte-compile
    maxlevels: maximum recursion level (default 10)
    ddir:      if given, purported directory name (this is the
               directory name that will show up in error messages)
    force:     if 1, force compilation, even if timestamps are up-to-date
    rx:        if given, files whose full path matches this compiled
               regex are skipped
    quiet:     if 1, be quiet during compilation

    Returns 1 when every module compiled cleanly, 0 otherwise.
    """
    if not quiet:
        print 'Listing', dir, '...'
    try:
        names = os.listdir(dir)
    except os.error:
        print "Can't list", dir
        names = []
    names.sort()
    success = 1
    for name in names:
        fullname = os.path.join(dir, name)
        if ddir is not None:
            dfile = os.path.join(ddir, name)
        else:
            dfile = None
        if rx is not None:
            mo = rx.search(fullname)
            if mo:
                continue
        if os.path.isfile(fullname):
            head, tail = name[:-3], name[-3:]
            if tail == '.py':
                # .pyc when running normally, .pyo under -O
                cfile = fullname + (__debug__ and 'c' or 'o')
                ftime = os.stat(fullname).st_mtime
                try: ctime = os.stat(cfile).st_mtime
                except os.error: ctime = 0
                # skip modules whose byte-code is already up to date
                if (ctime > ftime) and not force: continue
                if not quiet:
                    print 'Compiling', fullname, '...'
                try:
                    ok = py_compile.compile(fullname, None, dfile, True)
                except KeyboardInterrupt:
                    raise KeyboardInterrupt
                except py_compile.PyCompileError,err:
                    if quiet:
                        # in quiet mode the file name was not printed yet
                        print 'Compiling', fullname, '...'
                    print err.msg
                    success = 0
                except IOError, e:
                    print "Sorry", e
                    success = 0
                else:
                    if ok == 0:
                        success = 0
        elif maxlevels > 0 and \
             name != os.curdir and name != os.pardir and \
             os.path.isdir(fullname) and \
             not os.path.islink(fullname):
            # recurse into subdirectories, but never follow symlinks
            if not compile_dir(fullname, maxlevels - 1, dfile, force, rx, quiet):
                success = 0
    return success
def compile_path(skip_curdir=1, maxlevels=0, force=0, quiet=0):
    """Byte-compile all module on sys.path.

    Arguments (all optional):

    skip_curdir: if true, skip current directory (default true)
    maxlevels:   max recursion level (default 0)
    force:       as for compile_dir() (default 0)
    quiet:       as for compile_dir() (default 0)

    Returns 1 only when every directory compiled cleanly.
    """
    success = 1
    for dir in sys.path:
        if (not dir or dir == os.curdir) and skip_curdir:
            print 'Skipping current directory'
        else:
            success = success and compile_dir(dir, maxlevels, None,
                                              force, quiet=quiet)
    return success
def main():
    """Script main program.

    Parses command-line options and byte-compiles the given directories
    (or, without directory arguments, everything on sys.path).  Returns
    1 on success, 0 on failure.
    """
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'lfqd:x:')
    except getopt.error, msg:
        print msg
        print "usage: python compileall.py [-l] [-f] [-q] [-d destdir] " \
              "[-x regexp] [directory ...]"
        print "-l: don't recurse down"
        print "-f: force rebuild even if timestamps are up-to-date"
        print "-q: quiet operation"
        print "-d destdir: purported directory name for error messages"
        print "   if no directory arguments, -l sys.path is assumed"
        print "-x regexp: skip files matching the regular expression regexp"
        print "   the regexp is search for in the full path of the file"
        sys.exit(2)
    maxlevels = 10
    ddir = None
    force = 0
    quiet = 0
    rx = None
    for o, a in opts:
        if o == '-l': maxlevels = 0
        if o == '-d': ddir = a
        if o == '-f': force = 1
        if o == '-q': quiet = 1
        if o == '-x':
            import re
            rx = re.compile(a)
    if ddir:
        # -d only makes sense relative to a single source tree
        if len(args) != 1:
            print "-d destdir require exactly one directory argument"
            sys.exit(2)
    success = 1
    try:
        if args:
            for dir in args:
                if not compile_dir(dir, maxlevels, ddir,
                                   force, rx, quiet):
                    success = 0
        else:
            success = compile_path()
    except KeyboardInterrupt:
        print "\n[interrupt]"
        success = 0
    return success
if __name__ == '__main__':
    # Process exit status: 0 on success, 1 on failure.
    exit_status = int(not main())
    sys.exit(exit_status)
| apache-2.0 |
teobaluta/qr-linux-kernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# futex(2) operation codes and flags (see linux/futex.h)
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# Mask stripping the flag bits, leaving the bare futex command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000  # nanoseconds per second
def avg(total, n):
    """Arithmetic mean of a running *total* over *n* samples."""
    mean = total / n
    return mean
def nsecs(secs, nsecs):
    """Combine a (seconds, nanoseconds) timestamp pair into a single
    absolute nanosecond count."""
    return nsecs + secs * NSECS_PER_SEC
def nsecs_secs(nsecs):
    """Seconds represented by an absolute nanosecond count."""
    seconds = nsecs / NSECS_PER_SEC
    return seconds
def nsecs_nsecs(nsecs):
    """Sub-second remainder (in ns) of an absolute nanosecond count."""
    remainder = nsecs % NSECS_PER_SEC
    return remainder
def nsecs_str(nsecs):
    """Format an absolute nanosecond count as 'seconds.nanoseconds'.

    Bug fixes versus the original:
    - a stray trailing comma made the function return a 1-tuple instead
      of the string (it only worked by accident because "%s" % one_tuple
      unpacks the tuple);
    - the local variable shadowed the builtin ``str``;
    - integer divmod replaces float division, so very large counts do
      not lose precision.
    """
    secs, rem = divmod(nsecs, 1000000000)  # NSECS_PER_SEC
    return "%5u.%09u" % (secs, rem)
def add_stats(dict, key, value):
    """Fold *value* into per-*key* (min, max, avg, count) statistics.

    Fix: ``dict.has_key`` was removed in Python 3; the ``in`` operator
    works on both Python 2 and 3.  Note the running 'avg' is the
    original's pairwise smoothing ((avg + value) / 2), not a true mean;
    it is kept unchanged for compatibility with existing reports.
    """
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        min, max, avg, count = dict[key]
        if value < min:
            min = value
        if value > max:
            max = value
        avg = (avg + value) / 2
        dict[key] = (min, max, avg, count + 1)
def clear_term():
    """Clear the terminal: cursor home (ESC[H) then erase display (ESC[2J)."""
    home_and_wipe = "\x1b[H\x1b[2J"
    print(home_and_wipe)
# Best-effort import of the python-audit bindings, used to translate
# syscall numbers into names; a warning is printed once when unavailable.
audit_package_warned = False
try:
    import audit
    # Map uname(2) machine strings to audit machine ids.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64' : audit.MACH_IA64,
        'ppc' : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390' : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386' : audit.MACH_X86,
        'i586' : audit.MACH_X86,
        'i686' : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB only exists in newer audit releases
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    """Map a syscall number to its name via the audit library.

    Falls back to the number as a string when python-audit is not
    installed (``audit`` / ``machine_id`` are then undefined, raising
    NameError) or when the lookup fails.  Fix: the bare ``except:`` is
    narrowed to ``except Exception`` so KeyboardInterrupt and SystemExit
    propagate instead of being swallowed.
    """
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        return str(id)
def strerror(nr):
    """Symbolic errno name (e.g. 'ENOENT') for *nr*, which may be negative.

    Returns a descriptive placeholder for unknown errno values.  Fix:
    the bare ``except:`` is narrowed to KeyError, the only exception the
    table lookup is expected to raise (non-numeric input raised TypeError
    from the fallback formatting in the original too).
    """
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        return "Unknown %d errno" % nr
| gpl-2.0 |
zvolsky/hemtrans | languages/zh.py | 152 | 10080 | # coding: utf8
{
'!langcode!': 'zh-tw',
'!langname!': '中文',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"更新" 是選擇性的條件式, 格式就像 "欄位1=\'值\'". 但是 JOIN 的資料不可以使用 update 或是 delete"',
'%s %%{row} deleted': '已刪除 %s 筆',
'%s %%{row} updated': '已更新 %s 筆',
'%s selected': '%s 已選擇',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(格式類似 "zh-tw")',
'A new version of web2py is available': '新版的 web2py 已發行',
'A new version of web2py is available: %s': '新版的 web2py 已發行: %s',
'about': '關於',
'About': '關於',
'About application': '關於本應用程式',
'Access Control': 'Access Control',
'Admin is disabled because insecure channel': '管理功能(Admin)在不安全連線環境下自動關閉',
'Admin is disabled because unsecure channel': '管理功能(Admin)在不安全連線環境下自動關閉',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': '點此處進入管理介面',
'Administrator Password:': '管理員密碼:',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': '因為來自非安全通道,管理介面關閉',
'Are you sure you want to delete file "%s"?': '確定要刪除檔案"%s"?',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Are you sure you want to uninstall application "%s"': '確定要移除應用程式 "%s"',
'Are you sure you want to uninstall application "%s"?': '確定要移除應用程式 "%s"',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': '注意: 登入管理帳號需要安全連線(HTTPS)或是在本機連線(localhost).',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': '注意: 因為在測試模式不保證多執行緒安全性,也就是說不可以同時執行多個測試案例',
'ATTENTION: you cannot edit the running application!': '注意:不可編輯正在執行的應用程式!',
'Authentication': '驗證',
'Available Databases and Tables': '可提供的資料庫和資料表',
'Buy this book': 'Buy this book',
'cache': '快取記憶體',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': '不可空白',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': '無法編譯:應用程式中含有錯誤,請除錯後再試一次.',
'Change Password': '變更密碼',
'change password': '變更密碼',
'Check to delete': '打勾代表刪除',
'Check to delete:': '點選以示刪除:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': '客戶端網址(IP)',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': '控件',
'Controllers': '控件',
'Copyright': '版權所有',
'Create new application': '創建應用程式',
'Current request': '目前網路資料要求(request)',
'Current response': '目前網路資料回應(response)',
'Current session': '目前網路連線資訊(session)',
'customize me!': '請調整我!',
'data uploaded': '資料已上傳',
'Database': '資料庫',
'Database %s select': '已選擇 %s 資料庫',
'Date and Time': '日期和時間',
'db': 'db',
'DB Model': '資料庫模組',
'Delete': '刪除',
'Delete:': '刪除:',
'Demo': 'Demo',
'Deploy on Google App Engine': '配置到 Google App Engine',
'Deployment Recipes': 'Deployment Recipes',
'Description': '描述',
'DESIGN': '設計',
'design': '設計',
'Design for': '設計為了',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': '完成!',
'Download': 'Download',
'E-mail': '電子郵件',
'EDIT': '編輯',
'Edit': '編輯',
'Edit application': '編輯應用程式',
'Edit current record': '編輯當前紀錄',
'edit profile': '編輯設定檔',
'Edit Profile': '編輯設定檔',
'Edit This App': '編輯本應用程式',
'Editing file': '編輯檔案',
'Editing file "%s"': '編輯檔案"%s"',
'Email and SMS': 'Email and SMS',
'Error logs for "%(app)s"': '"%(app)s"的錯誤紀錄',
'Errors': 'Errors',
'export as csv file': '以逗號分隔檔(csv)格式匯出',
'FAQ': 'FAQ',
'First name': '名',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Functions with no doctests will result in [passed] tests.': '沒有 doctests 的函式會顯示 [passed].',
'Group ID': '群組編號',
'Groups': 'Groups',
'Hello World': '嗨! 世界',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': '匯入/匯出',
'Index': '索引',
'insert new': '插入新資料',
'insert new %s': '插入新資料 %s',
'Installed applications': '已安裝應用程式',
'Internal State': '內部狀態',
'Introduction': 'Introduction',
'Invalid action': '不合法的動作(action)',
'Invalid email': '不合法的電子郵件',
'Invalid Query': '不合法的查詢',
'invalid request': '不合法的網路要求(request)',
'Key': 'Key',
'Language files (static strings) updated': '語言檔已更新',
'Languages': '各國語言',
'Last name': '姓',
'Last saved on:': '最後儲存時間:',
'Layout': '網頁配置',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'License for': '軟體版權為',
'Live Chat': 'Live Chat',
'login': '登入',
'Login': '登入',
'Login to the Administrative Interface': '登入到管理員介面',
'logout': '登出',
'Logout': '登出',
'Lost Password': '密碼遺忘',
'Main Menu': '主選單',
'Manage Cache': 'Manage Cache',
'Menu Model': '選單模組(menu)',
'Models': '資料模組',
'Modules': '程式模組',
'My Sites': 'My Sites',
'Name': '名字',
'New Record': '新紀錄',
'new record inserted': '已插入新紀錄',
'next 100 rows': '往後 100 筆',
'NO': '否',
'No databases in this application': '這應用程式不含資料庫',
'Online examples': '點此處進入線上範例',
'or import from csv file': '或是從逗號分隔檔(CSV)匯入',
'Origin': '原文',
'Original/Translation': '原文/翻譯',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': '密碼',
"Password fields don't match": '密碼欄不匹配',
'Peeking at file': '選擇檔案',
'Plugins': 'Plugins',
'Powered by': '基於以下技術構建:',
'Preface': 'Preface',
'previous 100 rows': '往前 100 筆',
'Python': 'Python',
'Query:': '查詢:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': '紀錄',
'record does not exist': '紀錄不存在',
'Record ID': '紀錄編號',
'Record id': '紀錄編號',
'Register': '註冊',
'register': '註冊',
'Registration key': '註冊金鑰',
'Remember me (for 30 days)': '記住我(30 天)',
'Reset Password key': '重設密碼',
'Resolve Conflict file': '解決衝突檔案',
'Role': '角色',
'Rows in Table': '在資料表裏的資料',
'Rows selected': '筆資料被選擇',
'Saved file hash:': '檔案雜湊值已紀錄:',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': '狀態',
'Static files': '靜態檔案',
'Statistics': 'Statistics',
'Stylesheet': '網頁風格檔',
'submit': 'submit',
'Submit': '傳送',
'Support': 'Support',
'Sure you want to delete this object?': '確定要刪除此物件?',
'Table': '資料表',
'Table name': '資料表名稱',
'Testing application': '測試中的應用程式',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"查詢"是一個像 "db.表1.欄位1==\'值\'" 的條件式. 以"db.表1.欄位1==db.表2.欄位2"方式則相當於執行 JOIN SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'There are no controllers': '沒有控件(controllers)',
'There are no models': '沒有資料庫模組(models)',
'There are no modules': '沒有程式模組(modules)',
'There are no static files': '沒有靜態檔案',
'There are no translators, only default language is supported': '沒有翻譯檔,只支援原始語言',
'There are no views': '沒有視圖',
'This App': 'This App',
'This is the %(filename)s template': '這是%(filename)s檔案的樣板(template)',
'Ticket': '問題單',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': '時間標記',
'Twitter': 'Twitter',
'Unable to check for upgrades': '無法做升級檢查',
'Unable to download': '無法下載',
'Unable to download app': '無法下載應用程式',
'unable to parse csv file': '無法解析逗號分隔檔(csv)',
'Update:': '更新:',
'Upload existing application': '更新存在的應用程式',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '使用下列方式來組合更複雜的條件式, (...)&(...) 代表同時存在的條件, (...)|(...) 代表擇一的條件, ~(...)則代表反向條件.',
'User %(id)s Logged-in': '使用者 %(id)s 已登入',
'User %(id)s Registered': '使用者 %(id)s 已註冊',
'User ID': '使用者編號',
'Verify Password': '驗證密碼',
'Videos': 'Videos',
'View': '視圖',
'Views': '視圖',
'Welcome %s': '歡迎 %s',
'Welcome to web2py': '歡迎使用 web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'YES': '是',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
| agpl-3.0 |
rlutz/xorn | src/gaf/netlist/slib.py | 1 | 1934 | # gaf.netlist - gEDA Netlist Extraction and Generation
# Copyright (C) 1998-2010 Ales Hvezda
# Copyright (C) 1998-2010 gEDA Contributors (see ChangeLog for details)
# Copyright (C) 2013-2019 Roland Lutz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
## \namespace gaf.netlist.slib
## Source library.
#
# slib stands for source (project/schematic/hdl/model source) library
import os
## Search path.
# List of directories searched for source files; later entries take
# precedence because the search walks the list backwards.
slib = []
## Search the source library for a particular file name substring.
#
# Search the source library for a directory, starting with the last
# one, which contains a file whose filename contains the substring \a
# basename.
#
# \returns the found directory plus \a basename, or \c None if \a
# basename was not found in the source library
#
# If, for example, the directory \c path/to is in the source library
# and contains the file \c myschematic.sch, then a search for \c
# "schematic" would return \c "path/to/schematic" [sic].
def s_slib_search_single(basename):
    """Search the source library for a file name containing *basename*.

    Directories in ``slib`` are scanned from the last entry backwards;
    the first directory holding an entry whose name contains *basename*
    as a substring wins.  Note that the *requested* basename - not the
    actual matching file name - is joined onto the directory, per the
    module-level example marked "[sic]".

    Returns the joined path, or None when nothing matches.

    Cleanup: the original built ``reversed(list(enumerate(slib)))`` and
    never used the index; ``reversed(slib)`` expresses the backwards
    scan directly.
    """
    for dir_name in reversed(slib):
        for entry in os.listdir(dir_name):
            # substring comparison, per the documented library semantics
            if basename in entry:
                return os.path.join(dir_name, basename)
    return None
| gpl-2.0 |
arnoutaertgeerts/zerorpc-python | zerorpc/channel.py | 55 | 9259 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gevent.pool
import gevent.queue
import gevent.event
import gevent.local
import gevent.lock
from .exceptions import TimeoutExpired
from logging import getLogger
logger = getLogger(__name__)
class ChannelMultiplexer(object):
    """Routes events from a single zerorpc events source to channels.

    Events carrying a 'response_to' header are delivered to the queue of
    the matching active channel; all other events go to a broadcast
    queue (unless broadcasting is ignored, as on the client side).
    """
    def __init__(self, events, ignore_broadcast=False):
        self._events = events
        self._active_channels = {}
        self._channel_dispatcher_task = None
        self._broadcast_queue = None
        if events.recv_is_available and not ignore_broadcast:
            self._broadcast_queue = gevent.queue.Queue(maxsize=1)
            self._channel_dispatcher_task = gevent.spawn(
                self._channel_dispatcher)
    @property
    def recv_is_available(self):
        # True when the underlying events object can receive.
        return self._events.recv_is_available
    def __del__(self):
        self.close()
    def close(self):
        # Stop the background dispatcher greenlet, if one was started.
        if self._channel_dispatcher_task:
            self._channel_dispatcher_task.kill()
    def create_event(self, name, args, xheader=None):
        return self._events.create_event(name, args, xheader)
    def emit_event(self, event, identity=None):
        return self._events.emit_event(event, identity)
    def emit(self, name, args, xheader=None):
        return self._events.emit(name, args, xheader)
    def recv(self):
        # Channel-less receive: pull from the broadcast queue when the
        # dispatcher is running, otherwise directly from the events source.
        if self._broadcast_queue is not None:
            event = self._broadcast_queue.get()
        else:
            event = self._events.recv()
        return event
    def _channel_dispatcher(self):
        # Background greenlet: route each incoming event to the queue of
        # the channel it answers, or to the broadcast queue.
        while True:
            try:
                event = self._events.recv()
            except Exception as e:
                logger.error(
                    'zerorpc.ChannelMultiplexer, '
                    'ignoring error on recv: {0}'.format(e))
                continue
            channel_id = event.header.get('response_to', None)
            queue = None
            if channel_id is not None:
                channel = self._active_channels.get(channel_id, None)
                if channel is not None:
                    queue = channel._queue
            elif self._broadcast_queue is not None:
                queue = self._broadcast_queue
            if queue is None:
                # no destination: log and drop the event
                logger.error(
                    'zerorpc.ChannelMultiplexer, '
                    'unable to route event: {0}'
                    .format(event.__str__(ignore_args=True)))
            else:
                queue.put(event)
    def channel(self, from_event=None):
        # Lazily start the dispatcher the first time a channel is opened.
        if self._channel_dispatcher_task is None:
            self._channel_dispatcher_task = gevent.spawn(
                self._channel_dispatcher)
        return Channel(self, from_event)
    @property
    def active_channels(self):
        return self._active_channels
    @property
    def context(self):
        return self._events.context
class Channel(object):
    """One logical conversation multiplexed over a ChannelMultiplexer.

    The channel id is the ``message_id`` of the first event seen on the
    channel — either the ``from_event`` it was opened with, or the first
    event it creates.  The multiplexer uses that id to route replies back
    into this channel's depth-1 queue.
    """
    def __init__(self, multiplexer, from_event=None):
        self._multiplexer = multiplexer
        self._channel_id = None
        self._zmqid = None
        # Depth-1 queue: the dispatcher blocks until each event is consumed.
        self._queue = gevent.queue.Queue(maxsize=1)
        if from_event is None:
            return
        header = from_event.header
        self._channel_id = header['message_id']
        self._zmqid = header.get('zmqid', None)
        multiplexer._active_channels[self._channel_id] = self
        self._queue.put(from_event)
    @property
    def recv_is_available(self):
        return self._multiplexer.recv_is_available
    def __del__(self):
        self.close()
    def close(self):
        """Unregister from the multiplexer; safe to call more than once."""
        channel_id = self._channel_id
        if channel_id is None:
            return
        del self._multiplexer._active_channels[channel_id]
        self._channel_id = None
    def create_event(self, name, args, xheader=None):
        event = self._multiplexer.create_event(name, args, xheader)
        if self._channel_id is not None:
            # Replies reference the id of the event that opened the channel.
            event.header['response_to'] = self._channel_id
        else:
            # First event on this channel: adopt its message id and register
            # so the dispatcher can route replies back to us.
            self._channel_id = event.header['message_id']
            self._multiplexer._active_channels[self._channel_id] = self
        return event
    def emit(self, name, args, xheader=None):
        self._multiplexer.emit_event(
            self.create_event(name, args, xheader), self._zmqid)
    def emit_event(self, event):
        self._multiplexer.emit_event(event, self._zmqid)
    def recv(self, timeout=None):
        """Wait for the next routed event; raise TimeoutExpired on timeout."""
        try:
            return self._queue.get(timeout=timeout)
        except gevent.queue.Empty:
            raise TimeoutExpired(timeout)
    @property
    def context(self):
        return self._multiplexer.context
class BufferedChannel(object):
    """Add credit-based flow control and input buffering to a Channel.

    Each side advertises how many events it can accept via ``_zpc_more``
    control events carrying a slot count.  :meth:`emit_event` blocks while
    the remote has no open slots; :meth:`recv` buffers incoming events and
    periodically grants the remote more slots once half of the local
    reservation has been consumed.
    """
    def __init__(self, channel, inqueue_size=100):
        self._channel = channel
        # Max number of events buffered locally before it is an error.
        self._input_queue_size = inqueue_size
        # Credits: how many events the remote side will currently accept.
        # Both sides start with exactly one slot each way (bootstrap).
        self._remote_queue_open_slots = 1
        self._input_queue_reserved = 1
        # Set whenever the remote regains capacity; emit_event waits on it.
        self._remote_can_recv = gevent.event.Event()
        self._input_queue = gevent.queue.Queue()
        self._lost_remote = False
        # _verbose turns True after the first recv(); only then do we start
        # granting extra slots to the remote (see recv()).
        self._verbose = False
        self._on_close_if = None
        self._recv_task = gevent.spawn(self._recver)
    @property
    def recv_is_available(self):
        return self._channel.recv_is_available
    @property
    def on_close_if(self):
        # Optional predicate(event) -> bool; when true, the receiver loop
        # closes the channel after delivering that event.
        return self._on_close_if
    @on_close_if.setter
    def on_close_if(self, cb):
        self._on_close_if = cb
    def __del__(self):
        self.close()
    def close(self):
        """Kill the receiver greenlet and close the wrapped channel."""
        if self._recv_task is not None:
            self._recv_task.kill()
            self._recv_task = None
        if self._channel is not None:
            self._channel.close()
            self._channel = None
    def _recver(self):
        # Background greenlet: consume the raw channel, intercept _zpc_more
        # credit grants, and buffer everything else for recv().
        while True:
            event = self._channel.recv()
            if event.name == '_zpc_more':
                try:
                    self._remote_queue_open_slots += int(event.args[0])
                except Exception as e:
                    logger.error(
                        'gevent_zerorpc.BufferedChannel._recver, '
                        'exception: ' + repr(e))
                if self._remote_queue_open_slots > 0:
                    # Wake any emit_event() blocked waiting for credits.
                    self._remote_can_recv.set()
            elif self._input_queue.qsize() == self._input_queue_size:
                # The peer sent more than it was granted: protocol violation.
                raise RuntimeError(
                    'BufferedChannel, queue overflow on event:', event)
            else:
                self._input_queue.put(event)
                if self._on_close_if is not None and self._on_close_if(event):
                    # Clear _recv_task first so close() does not try to kill
                    # the greenlet we are currently running in.
                    self._recv_task = None
                    self.close()
                    return
    def create_event(self, name, args, xheader=None):
        return self._channel.create_event(name, args, xheader)
    def emit_event(self, event, block=True, timeout=None):
        """Send one event, consuming a remote credit.

        Returns False immediately when non-blocking and no credit is
        available; otherwise waits (up to ``timeout``) for a credit grant.
        """
        if self._remote_queue_open_slots == 0:
            if not block:
                return False
            self._remote_can_recv.clear()
            self._remote_can_recv.wait(timeout=timeout)
        # Consume a credit before emitting; restored below on failure.
        self._remote_queue_open_slots -= 1
        try:
            self._channel.emit_event(event)
        except:
            self._remote_queue_open_slots += 1
            raise
        return True
    def emit(self, name, args, xheader=None, block=True, timeout=None):
        event = self.create_event(name, args, xheader)
        return self.emit_event(event, block, timeout)
    def _request_data(self):
        # Grant the remote all slots we are not currently reserving.
        open_slots = self._input_queue_size - self._input_queue_reserved
        self._input_queue_reserved += open_slots
        self._channel.emit('_zpc_more', (open_slots,))
    def recv(self, timeout=None):
        """Return the next buffered event, granting credits as needed."""
        # On the very first recv() we skip granting (the bootstrap slot is
        # still in play); afterwards, top up the remote whenever our local
        # reservation drops below half capacity.
        if self._verbose:
            if self._input_queue_reserved < self._input_queue_size / 2:
                self._request_data()
        else:
            self._verbose = True
        try:
            event = self._input_queue.get(timeout=timeout)
        except gevent.queue.Empty:
            raise TimeoutExpired(timeout)
        self._input_queue_reserved -= 1
        return event
    @property
    def channel(self):
        return self._channel
    @property
    def context(self):
        return self._channel.context
| mit |
speef/linux | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to RAM based FS to speedup
# the handling, which will cut the time down to several seconds.
#
# Database lives on a RAM-backed filesystem (see note above) to keep the
# per-sample inserts fast.
con = sqlite3.connect("/dev/shm/perf.db")
# Autocommit mode: every insert is committed immediately, no transactions.
con.isolation_level = None
def trace_begin():
    """perf hook, called once before any sample: create the result tables."""
    print "In trace_begin:\n"
    #
    # Will create several tables at the start, pebs_ll is for PEBS data with
    # load latency info, while gen_events is for general event.
    #
    con.execute("""
        create table if not exists gen_events (
                name text,
                symbol text,
                comm text,
                dso text
        );""")
    con.execute("""
        create table if not exists pebs_ll (
                name text,
                symbol text,
                comm text,
                dso text,
                flags integer,
                ip integer,
                status integer,
                dse integer,
                dla integer,
                lat integer
        );""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
    """perf per-sample hook: build an event object and store it in the DB.

    param_dict is supplied by the perf scripting engine; it always carries
    raw_buf/comm/ev_name, while the "symbol" and "dso" keys are only present
    when perf managed to resolve them, so those fall back to placeholders.
    """
    raw_buf = param_dict["raw_buf"]
    comm = param_dict["comm"]
    name = param_dict["ev_name"]
    # Symbol and dso info are not always resolved.  dict.has_key() was
    # removed in Python 3; get() with a default is portable to both 2 and 3.
    dso = param_dict.get("dso", "Unknown_dso")
    symbol = param_dict.get("symbol", "Unknown_symbol")
    # Create the event object and insert it into the right table (the event
    # type is decided from the raw buffer by create_event / insert_db).
    event = create_event(name, comm, dso, symbol, raw_buf)
    insert_db(event)
def insert_db(event):
    """Insert one event object into the table matching its event type."""
    if event.ev_type == EVTYPE_GENERIC:
        con.execute("insert into gen_events values(?, ?, ?, ?)",
                (event.name, event.symbol, event.comm, event.dso))
    elif event.ev_type == EVTYPE_PEBS_LL:
        # Clear the sign bit so addresses are stored as nonnegative values
        # in sqlite's signed 64-bit integer columns.
        event.ip &= 0x7fffffffffffffff
        event.dla &= 0x7fffffffffffffff
        con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                (event.name, event.symbol, event.comm, event.dso, event.flags,
                    event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
    """perf hook, called after the last sample: report and close the DB."""
    print "In trace_end:\n"
    # We show the basic info for the 2 type of event classes
    show_general_events()
    show_pebs_ll()
    con.close()
#
# As the event number may be very big, so we can't use linear way
# to show the histogram in real number, but use a log2 algorithm.
#
def num2sym(num):
    """Return a log2-scaled histogram bar of '#' characters for a count.

    Every positive count gets at least one '#'; doubling the count adds one
    more.  int.bit_length() computes floor(log2(num)) + 1 exactly, avoiding
    the float rounding errors that int(math.log(num, 2) + 1) can hit for
    large counts near powers of two.
    """
    # Each number will have at least one '#' (counts here are always >= 1,
    # since callers bail out when a table is empty).
    return '#' * num.bit_length()
def show_general_events():
    """Print histograms of the generic events grouped by comm/symbol/dso."""
    # Check the total record number in the table
    count = con.execute("select count(*) from gen_events")
    for t in count:
        print "There is %d records in gen_events table" % t[0]
        if t[0] == 0:
            return
    print "Statistics about the general events grouped by thread/symbol/dso: \n"
    # Group by thread
    commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
         print "%16s %8d     %s" % (row[0], row[1], num2sym(row[1]))
    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
    for row in symbolq:
         print "%32s %8d     %s" % (row[0], row[1], num2sym(row[1]))
    # Group by dso
    print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
    dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
    for row in dsoq:
         print "%40s %8d     %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
    """Print histograms of PEBS load-latency events by comm/symbol/dse/lat."""
    count = con.execute("select count(*) from pebs_ll")
    for t in count:
        print "There is %d records in pebs_ll table" % t[0]
        if t[0] == 0:
            return
    print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
    # Group by thread
    commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
         print "%16s %8d     %s" % (row[0], row[1], num2sym(row[1]))
    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
    for row in symbolq:
         print "%32s %8d     %s" % (row[0], row[1], num2sym(row[1]))
    # Group by dse
    dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
    print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
    for row in dseq:
         print "%32s %8d     %s" % (row[0], row[1], num2sym(row[1]))
    # Group by latency
    latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
    print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
    for row in latq:
         print "%32s %8d     %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
    """Fallback perf hook: dump the fields of any event with no handler."""
    print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
sarantapichos/faircoop-market | openerp/addons/base/tests/test_orm.py | 149 | 18110 | from collections import defaultdict
from openerp.tools import mute_logger
from openerp.tests import common
UID = common.ADMIN_USER_ID
class TestORM(common.TransactionCase):
    """ test special behaviors of ORM CRUD functions
    TODO: use real Exceptions types instead of Exception """
    def setUp(self):
        # Build two sample partners (p1/p2) and an unprivileged user (uid2)
        # reused by the access-control tests below.
        super(TestORM, self).setUp()
        cr, uid = self.cr, self.uid
        self.partner = self.registry('res.partner')
        self.users = self.registry('res.users')
        self.p1 = self.partner.name_create(cr, uid, 'W')[0]
        self.p2 = self.partner.name_create(cr, uid, 'Y')[0]
        self.ir_rule = self.registry('ir.rule')
        # sample unprivileged user
        employee_gid = self.ref('base.group_user')
        self.uid2 = self.users.create(cr, uid, {'name': 'test user', 'login': 'test', 'groups_id': [4,employee_gid]})
    @mute_logger('openerp.models')
    def testAccessDeletedRecords(self):
        """ Verify that accessing deleted records works as expected """
        cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
        self.partner.unlink(cr, uid, [p1])
        # read() is expected to skip deleted records because our API is not
        # transactional for a sequence of search()->read() performed from the
        # client-side... a concurrent deletion could therefore cause spurious
        # exceptions even when simply opening a list view!
        # /!\ Using unprileged user to detect former side effects of ir.rules!
        self.assertEqual([{'id': p2, 'name': 'Y'}], self.partner.read(cr, uid2, [p1,p2], ['name']), "read() should skip deleted records")
        self.assertEqual([], self.partner.read(cr, uid2, [p1], ['name']), "read() should skip deleted records")
        # Deleting an already deleted record should be simply ignored
        self.assertTrue(self.partner.unlink(cr, uid, [p1]), "Re-deleting should be a no-op")
        # Updating an already deleted record should raise, even as admin
        with self.assertRaises(Exception):
            self.partner.write(cr, uid, [p1], {'name': 'foo'})
    @mute_logger('openerp.models')
    def testAccessFilteredRecords(self):
        """ Verify that accessing filtered records works as expected for non-admin user """
        cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
        partner_model = self.registry('ir.model').search(cr, uid, [('model','=','res.partner')])[0]
        # Hide p1 from everyone via a record rule.
        self.ir_rule.create(cr, uid, {'name': 'Y is invisible',
                                      'domain_force': [('id', '!=', p1)],
                                      'model_id': partner_model})
        # search as unprivileged user
        partners = self.partner.search(cr, uid2, [])
        self.assertFalse(p1 in partners, "W should not be visible...")
        self.assertTrue(p2 in partners, "... but Y should be visible")
        # read as unprivileged user
        with self.assertRaises(Exception):
            self.partner.read(cr, uid2, [p1], ['name'])
        # write as unprivileged user
        with self.assertRaises(Exception):
            self.partner.write(cr, uid2, [p1], {'name': 'foo'})
        # unlink as unprivileged user
        with self.assertRaises(Exception):
            self.partner.unlink(cr, uid2, [p1])
        # Prepare mixed case
        self.partner.unlink(cr, uid, [p2])
        # read mixed records: some deleted and some filtered
        with self.assertRaises(Exception):
            self.partner.read(cr, uid2, [p1,p2], ['name'])
        # delete mixed records: some deleted and some filtered
        with self.assertRaises(Exception):
            self.partner.unlink(cr, uid2, [p1,p2])
    def test_multi_read(self):
        """ read() with a list of ids must return a list of dicts. """
        record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
        records = self.partner.read(self.cr, UID, [record_id])
        self.assertIsInstance(records, list)
    def test_one_read(self):
        """ read() with a single scalar id must return a single dict. """
        record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
        record = self.partner.read(self.cr, UID, record_id)
        self.assertIsInstance(record, dict)
    @mute_logger('openerp.models')
    def test_search_read(self):
        """ search_read() must filter, project fields and honour order. """
        # simple search_read
        self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
        found = self.partner.search_read(self.cr, UID, [['name', '=', 'MyPartner1']], ['name'])
        self.assertEqual(len(found), 1)
        self.assertEqual(found[0]['name'], 'MyPartner1')
        self.assertTrue('id' in found[0])
        # search_read correct order
        self.partner.create(self.cr, UID, {'name': 'MyPartner2'})
        found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name")
        self.assertEqual(len(found), 2)
        self.assertEqual(found[0]['name'], 'MyPartner1')
        self.assertEqual(found[1]['name'], 'MyPartner2')
        found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name desc")
        self.assertEqual(len(found), 2)
        self.assertEqual(found[0]['name'], 'MyPartner2')
        self.assertEqual(found[1]['name'], 'MyPartner1')
        # search_read that finds nothing
        found = self.partner.search_read(self.cr, UID, [['name', '=', 'Does not exists']], ['name'])
        self.assertEqual(len(found), 0)
    def test_exists(self):
        """ exists() keeps records found by search and drops invalid ids. """
        partner = self.partner.browse(self.cr, UID, [])
        # check that records obtained from search exist
        recs = partner.search([])
        self.assertTrue(recs)
        self.assertEqual(recs.exists(), recs)
        # check that there is no record with id 0
        recs = partner.browse([0])
        self.assertFalse(recs.exists())
    def test_groupby_date(self):
        """ read_group() on a date field must group per day/month/year. """
        partners = dict(
            A='2012-11-19',
            B='2012-12-17',
            C='2012-12-31',
            D='2013-01-07',
            E='2013-01-14',
            F='2013-01-28',
            G='2013-02-11',
        )
        all_partners = []
        # Expected groupings, keyed by the date truncated to each interval.
        partners_by_day = defaultdict(set)
        partners_by_month = defaultdict(set)
        partners_by_year = defaultdict(set)
        for name, date in partners.items():
            p = self.partner.create(self.cr, UID, dict(name=name, date=date))
            all_partners.append(p)
            partners_by_day[date].add(p)
            partners_by_month[date.rsplit('-', 1)[0]].add(p)
            partners_by_year[date.split('-', 1)[0]].add(p)
        def read_group(interval, domain=None):
            # Run read_group at the given granularity and map each group to
            # the set of partner ids its __domain resolves to.
            main_domain = [('id', 'in', all_partners)]
            if domain:
                domain = ['&'] + main_domain + domain
            else:
                domain = main_domain
            rg = self.partner.read_group(self.cr, self.uid, domain, ['date'], 'date' + ':' + interval)
            result = {}
            for r in rg:
                result[r['date:' + interval]] = set(self.partner.search(self.cr, self.uid, r['__domain']))
            return result
        self.assertEqual(len(read_group('day')), len(partners_by_day))
        self.assertEqual(len(read_group('month')), len(partners_by_month))
        self.assertEqual(len(read_group('year')), len(partners_by_year))
        # Non-lazy grouping on two levels: every partner lands in its own
        # (month, day) group, so one group per record here.
        rg = self.partner.read_group(self.cr, self.uid, [('id', 'in', all_partners)],
                       ['date'], ['date:month', 'date:day'], lazy=False)
        self.assertEqual(len(rg), len(all_partners))
class TestInherits(common.TransactionCase):
    """ test the behavior of the orm for models that use _inherits;
        specifically: res.users, that inherits from res.partner
    """
    def setUp(self):
        super(TestInherits, self).setUp()
        self.partner = self.registry('res.partner')
        self.user = self.registry('res.users')
    def test_default(self):
        """ `default_get` cannot return a dictionary or a new id """
        defaults = self.user.default_get(self.cr, UID, ['partner_id'])
        if 'partner_id' in defaults:
            self.assertIsInstance(defaults['partner_id'], (bool, int, long))
    def test_create(self):
        """ creating a user should automatically create a new partner """
        partners_before = self.partner.search(self.cr, UID, [])
        foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
        foo = self.user.browse(self.cr, UID, foo_id)
        self.assertNotIn(foo.partner_id.id, partners_before)
    def test_create_with_ancestor(self):
        """ creating a user with a specific 'partner_id' should not create a new partner """
        par_id = self.partner.create(self.cr, UID, {'name': 'Foo'})
        partners_before = self.partner.search(self.cr, UID, [])
        foo_id = self.user.create(self.cr, UID, {'partner_id': par_id, 'login': 'foo', 'password': 'foo'})
        partners_after = self.partner.search(self.cr, UID, [])
        # the partner set must be unchanged: the user reused par_id
        self.assertEqual(set(partners_before), set(partners_after))
        foo = self.user.browse(self.cr, UID, foo_id)
        self.assertEqual(foo.name, 'Foo')
        self.assertEqual(foo.partner_id.id, par_id)
    @mute_logger('openerp.models')
    def test_read(self):
        """ inherited fields should be read without any indirection """
        foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
        foo_values, = self.user.read(self.cr, UID, [foo_id])
        partner_id = foo_values['partner_id'][0]
        partner_values, = self.partner.read(self.cr, UID, [partner_id])
        # reading 'name' through the user or the partner must agree
        self.assertEqual(foo_values['name'], partner_values['name'])
        foo = self.user.browse(self.cr, UID, foo_id)
        self.assertEqual(foo.name, foo.partner_id.name)
    @mute_logger('openerp.models')
    def test_copy(self):
        """ copying a user should automatically copy its partner, too """
        foo_id = self.user.create(self.cr, UID, {
            'name': 'Foo',
            'login': 'foo',
            'password': 'foo',
            'supplier': True,
        })
        foo_before, = self.user.read(self.cr, UID, [foo_id])
        del foo_before['__last_update']
        bar_id = self.user.copy(self.cr, UID, foo_id, {
            'login': 'bar',
            'password': 'bar',
        })
        foo_after, = self.user.read(self.cr, UID, [foo_id])
        del foo_after['__last_update']
        # the original record must be untouched by the copy
        self.assertEqual(foo_before, foo_after)
        foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
        self.assertEqual(bar.name, 'Foo (copy)')
        self.assertEqual(bar.login, 'bar')
        self.assertEqual(foo.supplier, bar.supplier)
        self.assertNotEqual(foo.id, bar.id)
        # a fresh partner must back the copied user
        self.assertNotEqual(foo.partner_id.id, bar.partner_id.id)
    @mute_logger('openerp.models')
    def test_copy_with_ancestor(self):
        """ copying a user with 'parent_id' in defaults should not duplicate the partner """
        foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo',
                                                 'login_date': '2016-01-01', 'signature': 'XXX'})
        par_id = self.partner.create(self.cr, UID, {'name': 'Bar'})
        foo_before, = self.user.read(self.cr, UID, [foo_id])
        del foo_before['__last_update']
        partners_before = self.partner.search(self.cr, UID, [])
        bar_id = self.user.copy(self.cr, UID, foo_id, {'partner_id': par_id, 'login': 'bar'})
        foo_after, = self.user.read(self.cr, UID, [foo_id])
        del foo_after['__last_update']
        partners_after = self.partner.search(self.cr, UID, [])
        self.assertEqual(foo_before, foo_after)
        self.assertEqual(set(partners_before), set(partners_after))
        foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
        self.assertNotEqual(foo.id, bar.id)
        self.assertEqual(bar.partner_id.id, par_id)
        self.assertEqual(bar.login, 'bar', "login is given from copy parameters")
        self.assertFalse(bar.login_date, "login_date should not be copied from original record")
        self.assertEqual(bar.name, 'Bar', "name is given from specific partner")
        self.assertEqual(bar.signature, foo.signature, "signature should be copied")
# Builders for the one2many/many2many command triplets understood by the ORM
# ((code, id, values)).  PEP 8 discourages binding lambdas to names, so these
# are plain functions; call signatures and return values are unchanged.
def CREATE(values):
    """Command 0: create a new record from *values*."""
    return (0, False, values)
def UPDATE(id, values):
    """Command 1: update the record *id* with *values*."""
    return (1, id, values)
def DELETE(id):
    """Command 2: delete the record *id* from the database."""
    return (2, id, False)
def FORGET(id):
    """Command 3: unlink the record *id* without deleting it."""
    return (3, id, False)
def LINK_TO(id):
    """Command 4: link the existing record *id*."""
    return (4, id, False)
def DELETE_ALL():
    """Command 5: remove all linked records."""
    return (5, False, False)
def REPLACE_WITH(ids):
    """Command 6: replace the current links with *ids*."""
    return (6, False, ids)
def sorted_by_id(list_of_dicts):
    """Return the dicts ordered by their 'id' entry; useful for comparisons.

    Dicts lacking an 'id' key sort with a None key, matching dict.get().
    """
    def id_of(record):
        return record.get('id')
    return sorted(list_of_dicts, key=id_of)
class TestO2MSerialization(common.TransactionCase):
    """ test the orm method 'write' on one2many fields

    Exercises resolve_2many_commands() with every command kind (CREATE,
    UPDATE, LINK_TO, DELETE, bare ids, pairs, singletons) and checks the
    resolved record dicts.
    """
    def setUp(self):
        super(TestO2MSerialization, self).setUp()
        self.partner = self.registry('res.partner')
    def test_no_command(self):
        " empty list of commands yields an empty list of records "
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', [])
        self.assertEqual(results, [])
    def test_CREATE_commands(self):
        " returns the VALUES dict as-is "
        values = [{'foo': 'bar'}, {'foo': 'baz'}, {'foo': 'baq'}]
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', map(CREATE, values))
        self.assertEqual(results, values)
    def test_LINK_TO_command(self):
        " reads the records from the database, records are returned with their ids. "
        ids = [
            self.partner.create(self.cr, UID, {'name': 'foo'}),
            self.partner.create(self.cr, UID, {'name': 'bar'}),
            self.partner.create(self.cr, UID, {'name': 'baz'})
        ]
        commands = map(LINK_TO, ids)
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', commands, ['name'])
        self.assertEqual(sorted_by_id(results), sorted_by_id([
            {'id': ids[0], 'name': 'foo'},
            {'id': ids[1], 'name': 'bar'},
            {'id': ids[2], 'name': 'baz'}
        ]))
    def test_bare_ids_command(self):
        " same as the equivalent LINK_TO commands "
        ids = [
            self.partner.create(self.cr, UID, {'name': 'foo'}),
            self.partner.create(self.cr, UID, {'name': 'bar'}),
            self.partner.create(self.cr, UID, {'name': 'baz'})
        ]
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', ids, ['name'])
        self.assertEqual(sorted_by_id(results), sorted_by_id([
            {'id': ids[0], 'name': 'foo'},
            {'id': ids[1], 'name': 'bar'},
            {'id': ids[2], 'name': 'baz'}
        ]))
    def test_UPDATE_command(self):
        " take the in-db records and merge the provided information in "
        id_foo = self.partner.create(self.cr, UID, {'name': 'foo'})
        id_bar = self.partner.create(self.cr, UID, {'name': 'bar'})
        id_baz = self.partner.create(self.cr, UID, {'name': 'baz', 'city': 'tag'})
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', [
                LINK_TO(id_foo),
                UPDATE(id_bar, {'name': 'qux', 'city': 'tagtag'}),
                UPDATE(id_baz, {'name': 'quux'})
            ], ['name', 'city'])
        # UPDATE values override the stored ones; untouched fields keep
        # their database value (or False when unset).
        self.assertEqual(sorted_by_id(results), sorted_by_id([
            {'id': id_foo, 'name': 'foo', 'city': False},
            {'id': id_bar, 'name': 'qux', 'city': 'tagtag'},
            {'id': id_baz, 'name': 'quux', 'city': 'tag'}
        ]))
    def test_DELETE_command(self):
        " deleted records are not returned at all. "
        ids = [
            self.partner.create(self.cr, UID, {'name': 'foo'}),
            self.partner.create(self.cr, UID, {'name': 'bar'}),
            self.partner.create(self.cr, UID, {'name': 'baz'})
        ]
        commands = [DELETE(ids[0]), DELETE(ids[1]), DELETE(ids[2])]
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', commands, ['name'])
        self.assertEqual(results, [])
    def test_mixed_commands(self):
        " a mix of every command kind resolves each one independently "
        ids = [
            self.partner.create(self.cr, UID, {'name': name})
            for name in ['NObar', 'baz', 'qux', 'NOquux', 'NOcorge', 'garply']
        ]
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', [
                CREATE({'name': 'foo'}),
                UPDATE(ids[0], {'name': 'bar'}),
                LINK_TO(ids[1]),
                DELETE(ids[2]),
                UPDATE(ids[3], {'name': 'quux',}),
                UPDATE(ids[4], {'name': 'corge'}),
                CREATE({'name': 'grault'}),
                LINK_TO(ids[5])
            ], ['name'])
        self.assertEqual(sorted_by_id(results), sorted_by_id([
            {'name': 'foo'},
            {'id': ids[0], 'name': 'bar'},
            {'id': ids[1], 'name': 'baz'},
            {'id': ids[3], 'name': 'quux'},
            {'id': ids[4], 'name': 'corge'},
            {'name': 'grault'},
            {'id': ids[5], 'name': 'garply'}
        ]))
    def test_LINK_TO_pairs(self):
        "LINK_TO commands can be written as pairs, instead of triplets"
        ids = [
            self.partner.create(self.cr, UID, {'name': 'foo'}),
            self.partner.create(self.cr, UID, {'name': 'bar'}),
            self.partner.create(self.cr, UID, {'name': 'baz'})
        ]
        commands = map(lambda id: (4, id), ids)
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', commands, ['name'])
        self.assertEqual(sorted_by_id(results), sorted_by_id([
            {'id': ids[0], 'name': 'foo'},
            {'id': ids[1], 'name': 'bar'},
            {'id': ids[2], 'name': 'baz'}
        ]))
    def test_singleton_commands(self):
        "DELETE_ALL can appear as a singleton"
        results = self.partner.resolve_2many_commands(
            self.cr, UID, 'child_ids', [DELETE_ALL()], ['name'])
        self.assertEqual(results, [])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tboyce021/home-assistant | tests/components/rfxtrx/conftest.py | 5 | 2291 | """Common test tools."""
from datetime import timedelta
import pytest
from homeassistant.components import rfxtrx
from homeassistant.components.rfxtrx import DOMAIN
from homeassistant.util.dt import utcnow
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_fire_time_changed
from tests.components.light.conftest import mock_light_profiles # noqa
def create_rfx_test_cfg(device="abcd", automatic_add=False, devices=None):
    """Build the rfxtrx config-entry data dict used by the tests.

    host/port are always None (serial transport); debug is always False.
    """
    config = dict.fromkeys(("host", "port"))
    config.update(
        device=device,
        automatic_add=automatic_add,
        debug=False,
        devices=devices,
    )
    return config
@pytest.fixture(autouse=True, name="rfxtrx")
async def rfxtrx_fixture(hass):
    """Mock the RFXtrx transport and expose the fake connection.

    Patching RFXtrx.Connect keeps real serial threads from being spawned;
    the yielded mock gains a `signal` coroutine that tests use to inject a
    raw packet as if it arrived from the hardware.
    """
    with patch("RFXtrx.Connect") as connect, patch("RFXtrx.DummyTransport2"):
        rfx = connect.return_value
        async def _signal_event(packet_id):
            # Decode the raw packet and feed it through the integration's
            # registered event callback, then settle the event loop.
            event = rfxtrx.get_rfx_object(packet_id)
            await hass.async_add_executor_job(
                rfx.event_callback,
                event,
            )
            await hass.async_block_till_done()
            await hass.async_block_till_done()
            return event
        rfx.signal = _signal_event
        yield rfx
@pytest.fixture(name="rfxtrx_automatic")
async def rfxtrx_automatic_fixture(hass, rfxtrx):
    """Set up the integration with automatic device addition enabled.

    Builds on the mocked `rfxtrx` fixture: creates a config entry with
    automatic_add=True and no preconfigured devices, then starts hass.
    """
    entry_data = create_rfx_test_cfg(automatic_add=True, devices={})
    mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
    mock_entry.add_to_hass(hass)
    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    await hass.async_start()
    yield rfxtrx
@pytest.fixture
async def timestep(hass):
    """Step system time forward deterministically.

    Freezes utcnow via a patch and yields a coroutine that advances the
    frozen clock by N seconds and fires the corresponding time-changed
    event so time-based listeners run.
    """
    with patch("homeassistant.core.dt_util.utcnow") as mock_utcnow:
        mock_utcnow.return_value = utcnow()
        async def delay(seconds):
            """Trigger delay in system."""
            mock_utcnow.return_value += timedelta(seconds=seconds)
            async_fire_time_changed(hass, mock_utcnow.return_value)
            await hass.async_block_till_done()
        yield delay
| apache-2.0 |
nextgis-extra/tests | lib_gdal/utilities/test_gdal_grid.py | 1 | 36399 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id: test_gdal_grid.py 32170 2015-12-13 19:59:59Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: gdal_grid testing
# Author: Even Rouault <even dot rouault @ mines-paris dot org>
#
###############################################################################
# Copyright (c) 2008-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
import os
import struct
sys.path.append( '../pymod' )
sys.path.append( '../gcore' )
from osgeo import gdal
from osgeo import ogr
import gdaltest
import test_cli_utilities
# List of output TIFF files that will be created by tests and later deleted
# in test_gdal_grid_cleanup()
outfiles = []
# Path to the gdal_grid utility executable; None when the binary is not
# found, in which case each test below returns 'skip'.
gdal_grid = test_cli_utilities.get_gdal_grid_path()
###############################################################################
#
def test_gdal_grid_1():
    """Round-trip n43.dt0 through an OGR point grid and gdal_grid nearest."""
    if gdal_grid is None:
        return 'skip'
    shape_drv = ogr.GetDriverByName('ESRI Shapefile')
    outfiles.append('tmp/n43.tif')
    # Remove any shapefile leftovers from a previous run (each extension
    # separately, ignoring missing files).
    try:
        os.remove('tmp/n43.shp')
    except:
        pass
    try:
        os.remove('tmp/n43.dbf')
    except:
        pass
    try:
        os.remove('tmp/n43.shx')
    except:
        pass
    try:
        os.remove('tmp/n43.qix')
    except:
        pass
    # Create an OGR grid from the values of n43.dt0
    ds = gdal.Open('../gdrivers/data/n43.dt0')
    geotransform = ds.GetGeoTransform()
    shape_drv = ogr.GetDriverByName('ESRI Shapefile')
    shape_ds = shape_drv.CreateDataSource( 'tmp' )
    shape_lyr = shape_ds.CreateLayer( 'n43' )
    # One 3D point per raster cell, placed at the cell center, with the
    # elevation as the Z coordinate.
    data = ds.ReadRaster(0, 0, 121, 121)
    array_val = struct.unpack('h' * 121*121, data)
    for j in range(121):
        for i in range(121):
            wkt = 'POINT(%f %f %s)' % ( geotransform[0] + (i + .5) * geotransform[1],
                                        geotransform[3] + (j + .5) * geotransform[5],
                                        array_val[j * 121 + i] )
            dst_feat = ogr.Feature( feature_def = shape_lyr.GetLayerDefn() )
            dst_feat.SetGeometry(ogr.CreateGeometryFromWkt(wkt))
            shape_lyr.CreateFeature( dst_feat )
            dst_feat.Destroy()
    shape_ds.ExecuteSQL('CREATE SPATIAL INDEX ON n43')
    shape_ds.Destroy()
    # Create a GDAL dataset from the previous generated OGR grid
    (out, err) = gdaltest.runexternal_out_and_err(gdal_grid + ' -txe -80.0041667 -78.9958333 -tye 42.9958333 44.0041667 -outsize 121 121 -ot Int16 -a nearest:radius1=0.0:radius2=0.0:angle=0.0 -co TILED=YES -co BLOCKXSIZE=256 -co BLOCKYSIZE=256 tmp/n43.shp ' + outfiles[-1])
    if not (err is None or err == '') :
        gdaltest.post_reason('got error/warning')
        print(err)
        return 'fail'
    # We should get the same values as in n43.td0
    ds2 = gdal.Open(outfiles[-1])
    if ds.GetRasterBand(1).Checksum() != ds2.GetRasterBand(1).Checksum():
        print('bad checksum : got %d, expected %d' % (ds.GetRasterBand(1).Checksum() , ds2.GetRasterBand(1).Checksum()))
        return 'fail'
    if ds2.GetRasterBand(1).GetNoDataValue() is not None:
        print('did not expect nodata value')
        return 'fail'
    ds = None
    ds2 = None
    return 'success'
###############################################################################
# Test Nearest Neighbour gridding algorithm
def test_gdal_grid_2():
    """Test the Nearest Neighbour gridding algorithm.

    Grids "data/grid.vrt" with several search-ellipse sizes and grid
    node shifts; every output must match "../gcore/data/byte.tif".
    Returns 'success', 'fail' or 'skip' per gdaltest convention.
    """
    if gdal_grid is None:
        return 'skip'

    # Open reference dataset and remember its checksum.
    ds_ref = gdal.Open('../gcore/data/byte.tif')
    checksum_ref = ds_ref.GetRasterBand(1).Checksum()
    ds_ref = None

    #################
    outfiles.append('tmp/grid_near.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        # Leftover from a previous run may not exist; that is fine.
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # Grid nodes are located exactly in raster nodes.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "gcore/data/byte.tif"
    ds = gdal.Open(outfiles[-1])
    if ds.GetRasterBand(1).Checksum() != checksum_ref:
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), checksum_ref))
        return 'fail'
    if ds.GetRasterBand(1).GetNoDataValue() != 0.0:
        print('expected a nodata value')
        return 'fail'
    ds = None

    #################
    outfiles.append('tmp/grid_near_shift.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Now the same, but shift grid nodes a bit in both horizontal and vertical
    # directions.
    gdaltest.runexternal(gdal_grid + ' -txe 440721.0 441920.0 -tye 3751321.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a nearest:radius1=0.0:radius2=0.0:angle=0.0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "gcore/data/byte.tif"
    ds = gdal.Open(outfiles[-1])
    if ds.GetRasterBand(1).Checksum() != checksum_ref:
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), checksum_ref))
        return 'fail'
    ds = None

    #################
    outfiles.append('tmp/grid_near_search3.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Now try the search ellipse larger than the raster cell.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a nearest:radius1=180.0:radius2=180.0:angle=0.0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "gcore/data/byte.tif"
    ds = gdal.Open(outfiles[-1])
    if ds.GetRasterBand(1).Checksum() != checksum_ref:
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), checksum_ref))
        return 'fail'
    ds = None

    #################
    outfiles.append('tmp/grid_near_search1.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Search ellipse smaller than the raster cell.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a nearest:radius1=20.0:radius2=20.0:angle=0.0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "gcore/data/byte.tif"
    ds = gdal.Open(outfiles[-1])
    if ds.GetRasterBand(1).Checksum() != checksum_ref:
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), checksum_ref))
        return 'fail'
    ds = None

    #################
    outfiles.append('tmp/grid_near_shift_search3.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Large search ellipse and the grid shift.
    gdaltest.runexternal(gdal_grid + ' -txe 440721.0 441920.0 -tye 3751321.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a nearest:radius1=180.0:radius2=180.0:angle=0.0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "gcore/data/byte.tif"
    ds = gdal.Open(outfiles[-1])
    if ds.GetRasterBand(1).Checksum() != checksum_ref:
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), checksum_ref))
        return 'fail'
    ds = None

    #################
    outfiles.append('tmp/grid_near_shift_search1.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Small search ellipse and the grid shift.
    gdaltest.runexternal(gdal_grid + ' -txe 440721.0 441920.0 -tye 3751321.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a nearest:radius1=20.0:radius2=20.0:angle=0.0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "gcore/data/byte.tif"
    ds = gdal.Open(outfiles[-1])
    if ds.GetRasterBand(1).Checksum() != checksum_ref:
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), checksum_ref))
        return 'fail'
    ds = None

    return 'success'
###############################################################################
# Test Inverse Distance to a Power gridding algorithm
def test_gdal_grid_3():
    """Test the Inverse Distance to a Power gridding algorithm.

    Exercises the generic, SSE and AVX code paths, GDAL_NUM_THREADS
    set to 1 and 2, and a shifted circular window with a minimum-point
    constraint.  Each output raster is compared against a reference
    under "ref_data/".
    """
    if gdal_grid is None:
        return 'skip'

    #################
    # Test generic implementation (no AVX, no SSE)
    outfiles.append('tmp/grid_invdist_generic.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        # Leftover from a previous run may not exist; that is fine.
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    print('Step 1: Disabling AVX/SSE optimized versions...')
    (out, err) = gdaltest.runexternal_out_and_err(gdal_grid + ' --debug on --config GDAL_USE_AVX NO --config GDAL_USE_SSE NO -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a invdist:power=2.0:smoothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])
    # Report how many worker threads the utility said it used (parsed
    # from the "... N threads" debug output).
    pos = err.find(' threads')
    if pos >= 0:
        pos_blank = err[0:pos-1].rfind(' ')
        if pos_blank >= 0:
            print('Step 1: %s threads used' % err[pos_blank+1:pos])

    # We should get the same values as in "ref_data/gdal_invdist.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_invdist.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 1:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    #################
    # Potentially test optimized SSE implementation
    outfiles.append('tmp/grid_invdist_sse.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    print('Step 2: Trying SSE optimized version...')
    (out, err) = gdaltest.runexternal_out_and_err(gdal_grid + ' --debug on --config GDAL_USE_AVX NO -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a invdist:power=2.0:smoothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])
    if err.find('SSE') >= 0:
        print('...SSE optimized version used')
    else:
        print('...SSE optimized version NOT used')

    # We should get the same values as in "ref_data/gdal_invdist.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_invdist.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 1:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    #################
    # Potentially test optimized AVX implementation
    outfiles.append('tmp/grid_invdist_avx.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    print('Step 3: Trying AVX optimized version...')
    (out, err) = gdaltest.runexternal_out_and_err(gdal_grid + ' --debug on -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a invdist:power=2.0:smoothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])
    if err.find('AVX') >= 0:
        print('...AVX optimized version used')
    else:
        print('...AVX optimized version NOT used')

    # We should get the same values as in "ref_data/gdal_invdist.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_invdist.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 1:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    #################
    # Test GDAL_NUM_THREADS config option to 1
    outfiles.append('tmp/grid_invdist_1thread.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    gdaltest.runexternal(gdal_grid + ' --config GDAL_NUM_THREADS 1 -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a invdist:power=2.0:smoothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/gdal_invdist.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_invdist.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 1:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    #################
    # Test GDAL_NUM_THREADS config option to 2
    outfiles.append('tmp/grid_invdist_2threads.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    gdaltest.runexternal(gdal_grid + ' --config GDAL_NUM_THREADS 2 -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a invdist:power=2.0:smoothing=0.0:radius1=0.0:radius2=0.0:angle=0.0:max_points=0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/gdal_invdist.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_invdist.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 1:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    #################
    outfiles.append('tmp/grid_invdist_90_90_8p.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # Circular window, shifted, test min points and NODATA setting.
    gdaltest.runexternal(gdal_grid + ' -txe 440721.0 441920.0 -tye 3751321.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a invdist:power=2.0:radius1=90.0:radius2=90.0:angle=0.0:max_points=0:min_points=8:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_invdist_90_90_8p.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_invdist_90_90_8p.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 1:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    return 'success'
###############################################################################
# Test Moving Average gridding algorithm
def test_gdal_grid_4():
    """Test the Moving Average gridding algorithm.

    Grids "data/grid.vrt" with several window shapes (whole dataset,
    circular, rotated elliptical, shifted circular with min_points) and
    compares each output against its reference under "ref_data/".

    Fix: the original set ``ds_ref = None`` *before* the failure branch
    that re-runs ``compare_ds(ds, ds_ref, verbose=1)``, so the verbose
    diagnostic would have crashed on a None dataset.  The reference is
    now released only after the comparison branch.
    """
    if gdal_grid is None:
        return 'skip'

    #################
    outfiles.append('tmp/grid_average.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        # Leftover from a previous run may not exist; that is fine.
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # We are using all the points from input dataset to average, so
    # the result is a raster filled with the same value in each node.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a average:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_average.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_average.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 1:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    #################
    outfiles.append('tmp/grid_average_190_190.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # This time using a circular window.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a average:radius1=190.0:radius2=190.0:angle=0.0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_average_190_190.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_average_190_190.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 1:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    #################
    outfiles.append('tmp/grid_average_300_100_40.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # Elliptical window, rotated.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a average:radius1=300.0:radius2=100.0:angle=40.0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_average_300_100_40.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_average_300_100_40.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 1:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    #################
    outfiles.append('tmp/grid_average_90_90_8p.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # Circular window, shifted, test min points and NODATA setting.
    gdaltest.runexternal(gdal_grid + ' -txe 440721.0 441920.0 -tye 3751321.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a average:radius1=90.0:radius2=90.0:angle=0.0:min_points=8:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_average_90_90_8p.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_average_90_90_8p.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 1:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    return 'success'
###############################################################################
# Test Minimum data metric
def test_gdal_grid_5():
    """Test the Minimum data metric.

    Fix: the failure branch referenced the non-existent attribute
    ``ds_ref.GetRasterBand(1).checksum_ref``, so a checksum mismatch
    raised AttributeError instead of printing the expected value.
    It now calls ``Checksum()`` on the reference band.
    """
    if gdal_grid is None:
        return 'skip'

    #################
    outfiles.append('tmp/grid_minimum.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        # Leftover from a previous run may not exist; that is fine.
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # Search the whole dataset for minimum.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a minimum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_minimum.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_minimum.tif')
    if ds.GetRasterBand(1).Checksum() != ds_ref.GetRasterBand(1).Checksum():
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), ds_ref.GetRasterBand(1).Checksum()))
        return 'fail'
    ds_ref = None
    ds = None

    #################
    outfiles.append('tmp/grid_minimum_400_100_120.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # Elliptical window, rotated.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a minimum:radius1=400.0:radius2=100.0:angle=120.0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_minimum_400_100_120.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_minimum_400_100_120.tif')
    if ds.GetRasterBand(1).Checksum() != ds_ref.GetRasterBand(1).Checksum():
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), ds_ref.GetRasterBand(1).Checksum()))
        return 'fail'
    ds_ref = None
    ds = None

    return 'success'
###############################################################################
# Test Maximum data metric
def test_gdal_grid_6():
    """Test the Maximum data metric.

    Fix: the failure branch referenced the non-existent attribute
    ``checksum_ref`` on the reference band (AttributeError on a real
    mismatch); it now calls ``Checksum()``.
    """
    if gdal_grid is None:
        return 'skip'

    #################
    outfiles.append('tmp/grid_maximum.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        # Leftover from a previous run may not exist; that is fine.
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # Search the whole dataset for maximum.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a maximum:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_maximum.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_maximum.tif')
    if ds.GetRasterBand(1).Checksum() != ds_ref.GetRasterBand(1).Checksum():
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), ds_ref.GetRasterBand(1).Checksum()))
        return 'fail'
    ds_ref = None
    ds = None

    #################
    outfiles.append('tmp/grid_maximum_100_100.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # Circular window.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a maximum:radius1=100.0:radius2=100.0:angle=0.0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_maximum_100_100.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_maximum_100_100.tif')
    if ds.GetRasterBand(1).Checksum() != ds_ref.GetRasterBand(1).Checksum():
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), ds_ref.GetRasterBand(1).Checksum()))
        return 'fail'
    ds_ref = None
    ds = None

    return 'success'
###############################################################################
# Test Range data metric
def test_gdal_grid_7():
    """Test the Range data metric.

    Fix: the failure branch referenced the non-existent attribute
    ``checksum_ref`` on the reference band (AttributeError on a real
    mismatch); it now calls ``Checksum()``.
    """
    if gdal_grid is None:
        return 'skip'

    #################
    outfiles.append('tmp/grid_range.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        # Leftover from a previous run may not exist; that is fine.
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # Search the whole dataset.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a range:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_range.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_range.tif')
    if ds.GetRasterBand(1).Checksum() != ds_ref.GetRasterBand(1).Checksum():
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), ds_ref.GetRasterBand(1).Checksum()))
        return 'fail'
    ds_ref = None
    ds = None

    #################
    outfiles.append('tmp/grid_range_90_90_8p.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # Circular window, fill node with NODATA value if less than required
    # points found.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a range:radius1=90.0:radius2=90.0:angle=0.0:min_points=8:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_range_90_90_8p.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_range_90_90_8p.tif')
    if ds.GetRasterBand(1).Checksum() != ds_ref.GetRasterBand(1).Checksum():
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), ds_ref.GetRasterBand(1).Checksum()))
        return 'fail'
    ds_ref = None
    ds = None

    return 'success'
###############################################################################
# Test Count data metric
def test_gdal_grid_8():
    """Test the Count data metric.

    Fix: the failure branch referenced the non-existent attribute
    ``checksum_ref`` on the reference band (AttributeError on a real
    mismatch); it now calls ``Checksum()``.
    """
    if gdal_grid is None:
        return 'skip'

    #################
    outfiles.append('tmp/grid_count_70_70.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        # Leftover from a previous run may not exist; that is fine.
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a count:radius1=70.0:radius2=70.0:angle=0.0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_count_70_70.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_count_70_70.tif')
    if ds.GetRasterBand(1).Checksum() != ds_ref.GetRasterBand(1).Checksum():
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), ds_ref.GetRasterBand(1).Checksum()))
        return 'fail'
    ds_ref = None
    ds = None

    #################
    outfiles.append('tmp/grid_count_300_300.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Byte -l grid -a count:radius1=300.0:radius2=300.0:angle=0.0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_count_300_300.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_count_300_300.tif')
    if ds.GetRasterBand(1).Checksum() != ds_ref.GetRasterBand(1).Checksum():
        gdaltest.post_reason('bad checksum')
        print('bad checksum : got %d, expected %d' % \
              (ds.GetRasterBand(1).Checksum(), ds_ref.GetRasterBand(1).Checksum()))
        return 'fail'
    ds_ref = None
    ds = None

    return 'success'
###############################################################################
# Test Average Distance data metric
def test_gdal_grid_9():
    """Test the Average Distance data metric.

    Fix: the original set ``ds_ref = None`` *before* the failure branch
    that re-runs ``compare_ds(ds, ds_ref, verbose=1)``, so the verbose
    diagnostic would have crashed on a None dataset.  The reference is
    now released only after the comparison branch.
    """
    if gdal_grid is None:
        return 'skip'

    #################
    outfiles.append('tmp/grid_avdist.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        # Leftover from a previous run may not exist; that is fine.
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # We are using all the points from input dataset to average, so
    # the result is a raster filled with the same value in each node.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a average_distance:radius1=0.0:radius2=0.0:angle=0.0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_avdist.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_avdist.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 1:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    #################
    outfiles.append('tmp/grid_avdist_150_150.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # This time using a circular window of radius 150.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a average_distance:radius1=150.0:radius2=150.0:angle=0.0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_avdist_150_150.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_avdist_150_150.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 1:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    return 'success'
###############################################################################
# Test Average Distance Between Points data metric
def test_gdal_grid_10():
    """Test the Average Distance Between Points data metric.

    Fix: the original set ``ds_ref = None`` *before* the failure branch
    that re-runs ``compare_ds(ds, ds_ref, verbose=1)``, so the verbose
    diagnostic would have crashed on a None dataset.  The reference is
    now released only after the comparison branch.
    """
    if gdal_grid is None:
        return 'skip'

    #################
    outfiles.append('tmp/grid_avdist_150_50_-15.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        # Leftover from a previous run may not exist; that is fine.
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # Elliptical window, rotated by -15 degrees.
    gdaltest.runexternal(gdal_grid + ' -txe 440720.0 441920.0 -tye 3751320.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a average_distance_pts:radius1=150.0:radius2=50.0:angle=-15.0:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_avdist_150_50_-15.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_avdist_150_50_-15.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 1:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    return 'success'
###############################################################################
# Test linear
def test_gdal_grid_11():
    """Test the linear gridding algorithm against the n43 DTED tile."""
    if gdal_grid is None:
        return 'skip'

    outfiles.append('tmp/n43_linear.tif')

    # Create a GDAL dataset from the previous generated OGR grid
    (out, err) = gdaltest.runexternal_out_and_err(gdal_grid + ' -txe -80.0041667 -78.9958333 -tye 42.9958333 44.0041667 -outsize 121 121 -ot Int16 -l n43 -a linear -co TILED=YES -co BLOCKXSIZE=256 -co BLOCKYSIZE=256 tmp/n43.shp ' + outfiles[-1])
    # Any stderr output from the utility is treated as a failure.
    got_warning = not (err is None or err == '')
    if got_warning:
        gdaltest.post_reason('got error/warning')
        print(err)
        return 'fail'

    # We should get the same values as in n43.td0
    ref_ds = gdal.Open('../gdrivers/data/n43.dt0')
    out_ds = gdal.Open(outfiles[-1])
    ref_cs = ref_ds.GetRasterBand(1).Checksum()
    out_cs = out_ds.GetRasterBand(1).Checksum()
    if ref_cs != out_cs:
        print('bad checksum : got %d, expected %d' % (ref_cs, out_cs))
        return 'fail'

    ref_ds = None
    out_ds = None

    return 'success'
###############################################################################
# Test Inverse Distance to a Power with Nearest Neighbor gridding algorithm
def test_gdal_grid_12():
    """Test the Inverse Distance to a Power with Nearest Neighbor algorithm.

    Covers the generic path, a min_points constraint, and a
    max_points/power variation, each compared against a reference
    raster under "ref_data/".
    """
    if gdal_grid is None:
        return 'skip'

    #################
    # Test generic implementation (no AVX, no SSE)
    outfiles.append('tmp/grid_invdistnn_generic.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        # Leftover from a previous run may not exist; that is fine.
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    (out, err) = gdaltest.runexternal_out_and_err(gdal_grid + ' -txe 440721.0 441920.0 -tye 3751321.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a invdistnn:power=2.0:radius=1.0:max_points=12:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/gdal_invdistnn.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_invdistnn.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 0.00001:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    #################
    outfiles.append('tmp/grid_invdistnn_250_8minp.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    # Circular window, shifted, test min points and NODATA setting.
    gdaltest.runexternal(gdal_grid + ' -txe 440721.0 441920.0 -tye 3751321.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a invdistnn:power=2.0:radius=250.0:max_points=12:min_points=8:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/grid_invdistnn_250_8minp.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_invdistnn_250_8minp.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 0.00001:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    #################
    # Test generic implementation with max_points and radius specified
    outfiles.append('tmp/grid_invdistnn_250_10maxp_3pow.tif')
    try:
        os.remove(outfiles[-1])
    except OSError:
        pass

    # Create a GDAL dataset from the values of "grid.csv".
    gdaltest.runexternal(gdal_grid + ' -txe 440721.0 441920.0 -tye 3751321.0 3750120.0 -outsize 20 20 -ot Float64 -l grid -a invdistnn:power=3.0:radius=250.0:max_points=10:min_points=0:nodata=0.0 data/grid.vrt ' + outfiles[-1])

    # We should get the same values as in "ref_data/gdal_invdistnn_250_10maxp_3pow.tif"
    ds = gdal.Open(outfiles[-1])
    ds_ref = gdal.Open('ref_data/grid_invdistnn_250_10maxp_3pow.tif')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, verbose = 0)
    if maxdiff > 0.00001:
        gdaltest.compare_ds(ds, ds_ref, verbose = 1)
        gdaltest.post_reason('Image too different from the reference')
        return 'fail'
    ds_ref = None
    ds = None

    return 'success'
###############################################################################
# Cleanup
def test_gdal_grid_cleanup():
    """Remove every dataset created by the preceding tests."""
    # Drop the temporary point shapefile first.
    shape_driver = ogr.GetDriverByName('ESRI Shapefile')
    shape_driver.DeleteDataSource('tmp/n43.shp')
    # Then delete each GeoTIFF the tests registered in outfiles.
    tiff_driver = gdal.GetDriverByName('GTiff')
    for path in outfiles:
        tiff_driver.Delete(path)

    return 'success'
# Registry of all tests in this module, executed in order by
# gdaltest.run_tests() in the __main__ block below.
gdaltest_list = [
    test_gdal_grid_1,
    test_gdal_grid_2,
    test_gdal_grid_3,
    test_gdal_grid_4,
    test_gdal_grid_5,
    test_gdal_grid_6,
    test_gdal_grid_7,
    test_gdal_grid_8,
    test_gdal_grid_9,
    test_gdal_grid_10,
    test_gdal_grid_11,
    test_gdal_grid_12,
    test_gdal_grid_cleanup
    ]
# Standalone invocation: run every registered test and print a summary.
if __name__ == '__main__':

    gdaltest.setup_run( 'test_gdal_grid' )

    gdaltest.run_tests( gdaltest_list )

    gdaltest.summarize()
| gpl-2.0 |
gunicorn/gunicorn | examples/websocket/websocket.py | 7 | 15711 |
import collections
import errno
import re
from hashlib import md5, sha1
import base64
from base64 import b64encode, b64decode
import socket
import struct
import logging
from socket import error as SocketError
import eventlet
from gunicorn.workers.async import ALREADY_HANDLED
from eventlet import pools
logger = logging.getLogger(__name__)
WS_KEY = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
class WebSocketWSGI(object):
    """WSGI application wrapper that upgrades an HTTP request to a
    WebSocket connection and hands the resulting :class:`WebSocket`
    to the configured handler.

    Builds a HyBi-style handshake reply when the client sent a
    ``Sec-WebSocket-Key`` header, and a legacy (pre-HyBi) reply
    otherwise.  NOTE: this module uses Python 2 syntax (see the
    ``except socket.error, e`` clause below).
    """
    def __init__(self, handler):
        # handler: callable invoked as handler(ws) once the handshake
        # has been sent.
        self.handler = handler

    def verify_client(self, ws):
        # Subclass hook for rejecting a client; the default accepts all.
        pass

    def _get_key_value(self, key_value):
        """Decode a Hixie-76 style Sec-WebSocket-Key header value.

        Returns the key number (all digits in the header concatenated)
        divided by the number of spaces in the header, or None when the
        header is missing or the number is not divisible by the space
        count (malformed per the Hixie-76 draft).
        """
        if not key_value:
            return
        # Concatenate every digit in the header to form the key number.
        key_number = int(re.sub("\\D", "", key_value))
        # subn()[1] is the number of substitutions, i.e. the space count.
        spaces = re.subn(" ", "", key_value)[1]

        if key_number % spaces != 0:
            return
        # NOTE: Python 2 integer division here (file is Python 2).
        part = key_number / spaces
        return part

    def __call__(self, environ, start_response):
        # Reject anything that is not a WebSocket upgrade request.
        # NOTE(review): environ.get('HTTP_CONNECTION') can be None for a
        # plain HTTP request, which would raise AttributeError on
        # .find() — presumably upstream always sets it; verify.
        if not (environ.get('HTTP_CONNECTION').find('Upgrade') != -1 and
                environ['HTTP_UPGRADE'].lower() == 'websocket'):
            # need to check a few more things here for true compliance
            start_response('400 Bad Request', [('Connection','close')])
            return []

        # Raw client socket exposed by the gunicorn async worker.
        sock = environ['gunicorn.socket']

        version = environ.get('HTTP_SEC_WEBSOCKET_VERSION')

        ws = WebSocket(sock, environ, version)

        handshake_reply = ("HTTP/1.1 101 Switching Protocols\r\n"
                           "Upgrade: websocket\r\n"
                           "Connection: Upgrade\r\n")

        key = environ.get('HTTP_SEC_WEBSOCKET_KEY')
        if key:
            # HyBi handshake: the key must decode to exactly 16 bytes.
            ws_key = base64.b64decode(key)
            if len(ws_key) != 16:
                start_response('400 Bad Request', [('Connection','close')])
                return []

            # Echo back only subprotocols we also support; `protocols`
            # is empty here, so none are ever selected.
            protocols = []
            subprotocols = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL')
            ws_protocols = []
            if subprotocols:
                for s in subprotocols.split(','):
                    s = s.strip()
                    if s in protocols:
                        ws_protocols.append(s)
            if ws_protocols:
                handshake_reply += 'Sec-WebSocket-Protocol: %s\r\n' % ', '.join(ws_protocols)

            # Same negotiation for extensions (none supported either).
            exts = []
            extensions = environ.get('HTTP_SEC_WEBSOCKET_EXTENSIONS')
            ws_extensions = []
            if extensions:
                for ext in extensions.split(','):
                    ext = ext.strip()
                    if ext in exts:
                        ws_extensions.append(ext)
            if ws_extensions:
                handshake_reply += 'Sec-WebSocket-Extensions: %s\r\n' % ', '.join(ws_extensions)

            # Accept token: base64(SHA-1(client key + fixed GUID)).
            handshake_reply += (
                   "Sec-WebSocket-Origin: %s\r\n"
                   "Sec-WebSocket-Location: ws://%s%s\r\n"
                   "Sec-WebSocket-Version: %s\r\n"
                   "Sec-WebSocket-Accept: %s\r\n\r\n"
                    % (
                    environ.get('HTTP_ORIGIN'),
                    environ.get('HTTP_HOST'),
                    ws.path,
                    version,
                    base64.b64encode(sha1(key + WS_KEY).digest())
                ))

        else:
            # Legacy handshake without a Sec-WebSocket-Key header.
            handshake_reply += (
                       "WebSocket-Origin: %s\r\n"
                       "WebSocket-Location: ws://%s%s\r\n\r\n" % (
                        environ.get('HTTP_ORIGIN'),
                        environ.get('HTTP_HOST'),
                        ws.path))

        sock.sendall(handshake_reply)

        try:
            self.handler(ws)
        except socket.error, e:
            # A broken pipe just means the client went away; anything
            # else is re-raised.
            if e[0] != errno.EPIPE:
                raise
        # use this undocumented feature of grainbows to ensure that it
        # doesn't barf on the fact that we didn't call start_response
        return ALREADY_HANDLED
class WebSocket(object):
"""A websocket object that handles the details of
serialization/deserialization to the socket.
The primary way to interact with a :class:`WebSocket` object is to
call :meth:`send` and :meth:`wait` in order to pass messages back
and forth with the browser. Also available are the following
properties:
path
The path value of the request. This is the same as the WSGI PATH_INFO variable, but more convenient.
protocol
The value of the Websocket-Protocol header.
origin
The value of the 'Origin' header.
environ
The full WSGI environment for this request.
"""
    def __init__(self, sock, environ, version=76):
        """
        :param sock: The eventlet socket
        :type sock: :class:`eventlet.greenio.GreenSocket`
        :param environ: The wsgi environment
        :param version: The WebSocket spec version to follow (default is 76)
        """
        self.socket = sock
        # Value of the Origin request header, if the client sent one.
        self.origin = environ.get('HTTP_ORIGIN')
        self.protocol = environ.get('HTTP_WEBSOCKET_PROTOCOL')
        self.path = environ.get('PATH_INFO')
        self.environ = environ
        self.version = version
        # Set once a close frame has been seen or the peer disconnects.
        self.websocket_closed = False
        # Raw bytes received from the socket but not yet parsed.
        self._buf = ""
        # Complete decoded messages waiting to be consumed.
        self._msgs = collections.deque()
        # Single-token pool serializing concurrent sends on the socket.
        self._sendlock = pools.TokenPool(1)
@staticmethod
def encode_hybi(buf, opcode, base64=False):
""" Encode a HyBi style WebSocket frame.
Optional opcode:
0x0 - continuation
0x1 - text frame (base64 encode buf)
0x2 - binary frame (use raw buf)
0x8 - connection close
0x9 - ping
0xA - pong
"""
if base64:
buf = b64encode(buf)
b1 = 0x80 | (opcode & 0x0f) # FIN + opcode
payload_len = len(buf)
if payload_len <= 125:
header = struct.pack('>BB', b1, payload_len)
elif payload_len > 125 and payload_len < 65536:
header = struct.pack('>BBH', b1, 126, payload_len)
elif payload_len >= 65536:
header = struct.pack('>BBQ', b1, 127, payload_len)
#print("Encoded: %s" % repr(header + buf))
return header + buf, len(header), 0
@staticmethod
def decode_hybi(buf, base64=False):
""" Decode HyBi style WebSocket packets.
Returns:
{'fin' : 0_or_1,
'opcode' : number,
'mask' : 32_bit_number,
'hlen' : header_bytes_number,
'length' : payload_bytes_number,
'payload' : decoded_buffer,
'left' : bytes_left_number,
'close_code' : number,
'close_reason' : string}
"""
f = {'fin' : 0,
'opcode' : 0,
'mask' : 0,
'hlen' : 2,
'length' : 0,
'payload' : None,
'left' : 0,
'close_code' : None,
'close_reason' : None}
blen = len(buf)
f['left'] = blen
if blen < f['hlen']:
return f # Incomplete frame header
b1, b2 = struct.unpack_from(">BB", buf)
f['opcode'] = b1 & 0x0f
f['fin'] = (b1 & 0x80) >> 7
has_mask = (b2 & 0x80) >> 7
f['length'] = b2 & 0x7f
if f['length'] == 126:
f['hlen'] = 4
if blen < f['hlen']:
return f # Incomplete frame header
(f['length'],) = struct.unpack_from('>xxH', buf)
elif f['length'] == 127:
f['hlen'] = 10
if blen < f['hlen']:
return f # Incomplete frame header
(f['length'],) = struct.unpack_from('>xxQ', buf)
full_len = f['hlen'] + has_mask * 4 + f['length']
if blen < full_len: # Incomplete frame
return f # Incomplete frame header
# Number of bytes that are part of the next frame(s)
f['left'] = blen - full_len
# Process 1 frame
if has_mask:
# unmask payload
f['mask'] = buf[f['hlen']:f['hlen']+4]
b = c = ''
if f['length'] >= 4:
data = struct.unpack('<I', buf[f['hlen']:f['hlen']+4])[0]
of1 = f['hlen']+4
b = ''
for i in xrange(0, int(f['length']/4)):
mask = struct.unpack('<I', buf[of1+4*i:of1+4*(i+1)])[0]
b += struct.pack('I', data ^ mask)
if f['length'] % 4:
l = f['length'] % 4
of1 = f['hlen']
of2 = full_len - l
c = ''
for i in range(0, l):
mask = struct.unpack('B', buf[of1 + i])[0]
data = struct.unpack('B', buf[of2 + i])[0]
c += chr(data ^ mask)
f['payload'] = b + c
else:
print("Unmasked frame: %s" % repr(buf))
f['payload'] = buf[(f['hlen'] + has_mask * 4):full_len]
if base64 and f['opcode'] in [1, 2]:
try:
f['payload'] = b64decode(f['payload'])
except:
print("Exception while b64decoding buffer: %s" %
repr(buf))
raise
if f['opcode'] == 0x08:
if f['length'] >= 2:
f['close_code'] = struct.unpack_from(">H", f['payload'])
if f['length'] > 3:
f['close_reason'] = f['payload'][2:]
return f
@staticmethod
def _pack_message(message):
"""Pack the message inside ``00`` and ``FF``
As per the dataframing section (5.3) for the websocket spec
"""
if isinstance(message, unicode):
message = message.encode('utf-8')
elif not isinstance(message, str):
message = str(message)
packed = "\x00%s\xFF" % message
return packed
def _parse_messages(self):
""" Parses for messages in the buffer *buf*. It is assumed that
the buffer contains the start character for a message, but that it
may contain only part of the rest of the message.
Returns an array of messages, and the buffer remainder that
didn't contain any full messages."""
msgs = []
end_idx = 0
buf = self._buf
while buf:
if self.version in ['7', '8', '13']:
frame = self.decode_hybi(buf, base64=False)
#print("Received buf: %s, frame: %s" % (repr(buf), frame))
if frame['payload'] == None:
break
else:
if frame['opcode'] == 0x8: # connection close
self.websocket_closed = True
break
#elif frame['opcode'] == 0x1:
else:
msgs.append(frame['payload']);
#msgs.append(frame['payload'].decode('utf-8', 'replace'));
#buf = buf[-frame['left']:]
if frame['left']:
buf = buf[-frame['left']:]
else:
buf = ''
else:
frame_type = ord(buf[0])
if frame_type == 0:
# Normal message.
end_idx = buf.find("\xFF")
if end_idx == -1: #pragma NO COVER
break
msgs.append(buf[1:end_idx].decode('utf-8', 'replace'))
buf = buf[end_idx+1:]
elif frame_type == 255:
# Closing handshake.
assert ord(buf[1]) == 0, "Unexpected closing handshake: %r" % buf
self.websocket_closed = True
break
else:
raise ValueError("Don't understand how to parse this type of message: %r" % buf)
self._buf = buf
return msgs
def send(self, message):
"""Send a message to the browser.
*message* should be convertable to a string; unicode objects should be
encodable as utf-8. Raises socket.error with errno of 32
(broken pipe) if the socket has already been closed by the client."""
if self.version in ['7', '8', '13']:
packed, lenhead, lentail = self.encode_hybi(message, opcode=0x01, base64=False)
else:
packed = self._pack_message(message)
# if two greenthreads are trying to send at the same time
# on the same socket, sendlock prevents interleaving and corruption
#self._sendlock.acquire()
t = self._sendlock.get()
try:
self.socket.sendall(packed)
finally:
self._sendlock.put(t)
def wait(self):
"""Waits for and deserializes messages.
Returns a single message; the oldest not yet processed. If the client
has already closed the connection, returns None. This is different
from normal socket behavior because the empty string is a valid
websocket message."""
while not self._msgs:
# Websocket might be closed already.
if self.websocket_closed:
return None
# no parsed messages, must mean buf needs more data
delta = self.socket.recv(8096)
if delta == '':
return None
self._buf += delta
msgs = self._parse_messages()
self._msgs.extend(msgs)
return self._msgs.popleft()
def _send_closing_frame(self, ignore_send_errors=False):
"""Sends the closing frame to the client, if required."""
if self.version in ['7', '8', '13'] and not self.websocket_closed:
msg = ''
#if code != None:
# msg = struct.pack(">H%ds" % (len(reason)), code)
buf, h, t = self.encode_hybi(msg, opcode=0x08, base64=False)
self.socket.sendall(buf)
self.websocket_closed = True
elif self.version == 76 and not self.websocket_closed:
try:
self.socket.sendall("\xff\x00")
except SocketError:
# Sometimes, like when the remote side cuts off the connection,
# we don't care about this.
if not ignore_send_errors: #pragma NO COVER
raise
self.websocket_closed = True
def close(self):
"""Forcibly close the websocket; generally it is preferable to
return from the handler method."""
self._send_closing_frame()
self.socket.shutdown(True)
self.socket.close()
# demo app
import os
import random
def handle(ws):
    """Demo websocket handler; dispatches on the request path.

    ``/echo`` mirrors every received message back to the client;
    ``/data`` streams 10000 numbered random samples, one every 0.1 s.
    """
    if ws.path == '/echo':
        # wait() returns None once the client disconnects, which ends
        # the iter(callable, sentinel) loop.
        for message in iter(ws.wait, None):
            ws.send(message)
    elif ws.path == '/data':
        for sample_idx in xrange(10000):
            line = "0 %s %s\n" % (sample_idx, random.random())
            ws.send(line)
            eventlet.sleep(0.1)
wsapp = WebSocketWSGI(handle)
def app(environ, start_response):
    """Serve the demo HTML page at the root path; delegate every other
    path to the websocket WSGI application."""
    if environ['PATH_INFO'] in ('/', ""):
        page_path = os.path.join(os.path.dirname(__file__), 'websocket.html')
        # The page template is %-interpolated with the WSGI environ.
        data = open(page_path).read() % environ
        headers = [('Content-Type', 'text/html'),
                   ('Content-Length', len(data))]
        start_response('200 OK', headers)
        return [data]
    return wsapp(environ, start_response)
| mit |
phaller0513/aima-python | submissions/aardvark/puzzles.py | 20 | 1982 | import search
from math import(cos, pi)
# A sample map problem
# Edge weights are distances; UndirectedGraph makes links symmetric.
# The commented-out dicts are earlier experiments kept for reference.
sumner_map = search.UndirectedGraph(dict(
#    Portland=dict(Mitchellville=7, Fairfield=17, Cottontown=18),
#    Cottontown=dict(Portland=18),
#    Fairfield=dict(Mitchellville=21, Portland=17),
#    Mitchellville=dict(Portland=7, Fairfield=21),
#    A=dict(D=70, C=80, B=140),
#    D=dict(A=70,C=100,Z=200),
#    C=dict(A=80,D=100,E=80,B=70),
#    B=dict(A=140,C=70,E=90,Z=130),
#    E=dict(C=80,B=90,Z=60),
#    Z=dict(D=200,E=60,B=130),
#    A=dict(B=70,C=80,E=100),
#    B=dict(A=70),
#    C=dict(A=80,E=100,D=60),
#    E=dict(A=100,C=100,Z=150),
#    D=dict(C=60,Z=90),
#    Z=dict(D=90,E=150),
    A=dict(Z=75,S=140,T=118),
    Z=dict(O=71,A=75),
    S=dict(O=151,R=80,F=99),
    T=dict(A=118,L=111),
    O=dict(Z=71,S=151),
    L=dict(T=111,M=70),
    M=dict(L=70,D=75),
    D=dict(M=75,C=120),
    R=dict(S=80,C=146,P=97),
    C=dict(R=146,P=138,D=120),
    F=dict(S=99,B=211),
    P=dict(R=97,C=138,B=101),
    # NOTE(review): B links to G=90 but G has no entry of its own --
    # presumably UndirectedGraph mirrors the link; verify.
    B=dict(G=90,P=101,F=211),
    ))
#sumner_puzzle = search.GraphProblem('Cottontown', 'Mitchellville', sumner_map)
sumner_puzzle = search.GraphProblem('A', 'B', sumner_map)
sumner_puzzle.label = 'Sumner Map'
sumner_puzzle.description = '''
An abbreviated map of Sumner County, TN.
This map is unique, to the best of my knowledge.
'''
# A trivial Problem definition
class LightSwitch(search.Problem):
    """A trivial two-state search problem: flip a switch until it is 'on'."""
    def actions(self, state):
        # Both moves are always available, regardless of state.
        return ['up', 'down']
    def result(self, state, action):
        # 'up' turns the light on; any other action turns it off.
        return 'on' if action == 'up' else 'off'
    def goal_test(self, state):
        return state == 'on'
    def h(self, node):
        # Heuristic: 0 at the goal, otherwise exactly one flip away.
        return 0 if self.goal_test(node.state) else 1
#swiss_puzzle = search.GraphProblem('A', 'Z', sumner_map)
# Start in the 'off' state so a search is actually required.
switch_puzzle = LightSwitch('off')
switch_puzzle.label = 'Light Switch'
# Puzzles picked up by the course test harness.
myPuzzles = [
    # swiss_puzzle,
    sumner_puzzle,
    switch_puzzle,
] | mit |
credativUK/OCB | addons/membership/__init__.py | 441 | 1101 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import membership
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pubudu538/mesos-artifacts | common/scripts/get-host-ip.py | 3 | 1422 | #!/usr/bin/python
# ------------------------------------------------------------------------
#
# Copyright 2016 WSO2, Inc. (http://wso2.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
# ------------------------------------------------------------------------
import sys, json;
def main():
if len(sys.argv) != 2:
print "Invalid Arguments: ", sys.argv
sys.exit(1)
app_id = str.strip(sys.argv[1])
try:
data = json.load(sys.stdin)
except ValueError, e:
print e
sys.exit(1)
if (data is None or 'tasks' not in data):
print "Invalid json input"
sys.exit(1)
if (len(data['tasks']) == 0):
print "Error! No tasks found for", app_id
sys.exit(1)
if ('host' not in data['tasks'][0]):
print "Host is not found for", app_id, " task"
sys.exit(1)
print data['tasks'][0]['host']
if __name__ == "__main__":
main()
| apache-2.0 |
chaffra/sympy | sympy/physics/quantum/tests/test_grover.py | 45 | 3563 | from sympy import sqrt, Matrix
from sympy.physics.quantum.represent import represent
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.qubit import IntQubit
from sympy.physics.quantum.grover import (apply_grover, superposition_basis,
OracleGate, grover_iteration, WGate)
def return_one_on_two(qubits):
    """Oracle predicate: true only for the basis state |2>."""
    target = IntQubit(2, qubits.nqubits)
    return qubits == target
def return_one_on_one(qubits):
    """Oracle predicate: true only for the basis state |1>."""
    target = IntQubit(1, qubits.nqubits)
    return qubits == target
def test_superposition_basis():
    """superposition_basis(n) must equal the equal-weight sum of all 2**n
    integer basis states."""
    nbits = 2
    first_half_state = IntQubit(0, nbits)/2 + IntQubit(1, nbits)/2
    second_half_state = IntQubit(2, nbits)/2 + IntQubit(3, nbits)/2
    assert first_half_state + second_half_state == superposition_basis(nbits)
    nbits = 3
    # Each of the 8 basis states carries amplitude 1/sqrt(8).
    firstq = (1/sqrt(8))*IntQubit(0, nbits) + (1/sqrt(8))*IntQubit(1, nbits)
    secondq = (1/sqrt(8))*IntQubit(2, nbits) + (1/sqrt(8))*IntQubit(3, nbits)
    thirdq = (1/sqrt(8))*IntQubit(4, nbits) + (1/sqrt(8))*IntQubit(5, nbits)
    fourthq = (1/sqrt(8))*IntQubit(6, nbits) + (1/sqrt(8))*IntQubit(7, nbits)
    assert firstq + secondq + thirdq + fourthq == superposition_basis(nbits)
def test_OracleGate():
    """The oracle must flip the sign of exactly the marked basis state and
    leave all other states unchanged."""
    v = OracleGate(1, lambda qubits: qubits == IntQubit(0))
    assert qapply(v*IntQubit(0)) == -IntQubit(0)
    assert qapply(v*IntQubit(1)) == IntQubit(1)
    nbits = 2
    v = OracleGate(2, return_one_on_two)
    assert qapply(v*IntQubit(0, nbits)) == IntQubit(0, nbits)
    assert qapply(v*IntQubit(1, nbits)) == IntQubit(1, nbits)
    assert qapply(v*IntQubit(2, nbits)) == -IntQubit(2, nbits)
    assert qapply(v*IntQubit(3, nbits)) == IntQubit(3, nbits)
    # Due to a bug of IntQubit, this first assertion is buggy
    # assert represent(OracleGate(1, lambda qubits: qubits == IntQubit(0)), nqubits=1) == \
    #     Matrix([[-1/sqrt(2), 0], [0, 1/sqrt(2)]])
    # Matrix form: identity with -1 on the marked state's diagonal entry.
    assert represent(v, nqubits=2) == Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
def test_WGate():
    """W (inversion about the mean) must fix the uniform superposition and
    map a basis state to 2/sqrt(2**n) * basis_sum - state."""
    nqubits = 2
    basis_states = superposition_basis(nqubits)
    assert qapply(WGate(nqubits)*basis_states) == basis_states
    expected = ((2/sqrt(pow(2, nqubits)))*basis_states) - IntQubit(1, nqubits)
    assert qapply(WGate(nqubits)*IntQubit(1, nqubits)) == expected
def test_grover_iteration_1():
    """For n=2 a single Grover iteration already yields the marked state."""
    numqubits = 2
    basis_states = superposition_basis(numqubits)
    v = OracleGate(numqubits, return_one_on_one)
    expected = IntQubit(1, numqubits)
    assert qapply(grover_iteration(basis_states, v)) == expected
def test_grover_iteration_2():
    """For n=4, three Grover iterations maximise the amplitude of the
    marked state |2>."""
    numqubits = 4
    basis_states = superposition_basis(numqubits)
    v = OracleGate(numqubits, return_one_on_two)
    # After (pi/4)sqrt(pow(2, n)), IntQubit(2) should have highest prob
    # In this case, after around pi times (3 or 4)
    iterated = grover_iteration(basis_states, v)
    iterated = qapply(iterated)
    iterated = grover_iteration(iterated, v)
    iterated = qapply(iterated)
    iterated = grover_iteration(iterated, v)
    iterated = qapply(iterated)
    # In this case, probability was highest after 3 iterations
    # Probability of Qubit('0010') was 251/256 (3) vs 781/1024 (4)
    # Ask about measurement
    expected = (-13*basis_states)/64 + 264*IntQubit(2, numqubits)/256
    assert qapply(expected) == iterated
def test_grover():
    """apply_grover must reproduce the hand-iterated results above."""
    nqubits = 2
    assert apply_grover(return_one_on_one, nqubits) == IntQubit(1, nqubits)
    nqubits = 4
    basis_states = superposition_basis(nqubits)
    # Same amplitudes as after 3 iterations in test_grover_iteration_2.
    expected = (-13*basis_states)/64 + 264*IntQubit(2, nqubits)/256
    assert apply_grover(return_one_on_two, 4) == qapply(expected)
| bsd-3-clause |
smalls257/VRvisu | Library/External.LCA_RESTRICTED/Languages/CPython/27/Lib/test/test_select.py | 39 | 1851 | from test import test_support
import unittest
import select
import os
import sys
@unittest.skipIf(sys.platform[:3] in ('win', 'os2', 'riscos'),
                 "can't easily test on this system")
class SelectTestCase(unittest.TestCase):
    # Object with no fileno() at all -- must be rejected by select().
    class Nope:
        pass
    # Object whose fileno() returns a non-integer -- must also be rejected.
    class Almost:
        def fileno(self):
            return 'fileno'
    def test_error_conditions(self):
        self.assertRaises(TypeError, select.select, 1, 2, 3)
        self.assertRaises(TypeError, select.select, [self.Nope()], [], [])
        self.assertRaises(TypeError, select.select, [self.Almost()], [], [])
        self.assertRaises(TypeError, select.select, [], [], [], "not a number")
    def test_returned_list_identity(self):
        # See issue #8329
        # The three returned lists must be distinct objects even when empty.
        r, w, x = select.select([], [], [], 1)
        self.assertIsNot(r, w)
        self.assertIsNot(r, x)
        self.assertIsNot(w, x)
    def test_select(self):
        # Spawn a shell that emits one line per second, then poll it with a
        # range of timeouts until EOF is observed.
        cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
        p = os.popen(cmd, 'r')
        for tout in (0, 1, 2, 4, 8, 16) + (None,)*10:
            if test_support.verbose:
                print 'timeout =', tout
            rfd, wfd, xfd = select.select([p], [], [], tout)
            if (rfd, wfd, xfd) == ([], [], []):
                continue
            if (rfd, wfd, xfd) == ([p], [], []):
                line = p.readline()
                if test_support.verbose:
                    print repr(line)
                if not line:
                    if test_support.verbose:
                        print 'EOF'
                    break
                continue
            self.fail('Unexpected return values from select():', rfd, wfd, xfd)
        p.close()
def test_main():
    # Standard CPython regrtest entry point; reap_children avoids leaking
    # the shell spawned by test_select.
    test_support.run_unittest(SelectTestCase)
    test_support.reap_children()
if __name__ == "__main__":
    test_main()
| gpl-3.0 |
mdietrichc2c/stock-logistics-workflow | __unported__/stock_picking_show_returns/__openerp__.py | 5 | 1540 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2013 Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Show returns on stock pickings",
"version": "1.0",
"author": "Serv. Tecnol. Avanzados - Pedro M. Baeza",
"category": "Warehouse Management",
"description": """
Show in pickings one tab that contains returns made for that picking.
""",
"website": "www.serviciosbaeza.com",
"license": "AGPL-3",
"depends": [
"stock",
],
"demo": [],
"data": [
'stock_picking_view.xml',
],
"installable": True,
"active": False,
}
| agpl-3.0 |
luyishisi/tensorflow | 4.Object_Detection/object_detection/utils/metrics.py | 17 | 5390 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for computing metrics like precision, recall, CorLoc and etc."""
from __future__ import division
import numpy as np
from six import moves
def compute_precision_recall(scores, labels, num_gt):
  """Compute precision and recall.

  Detections are ranked by descending score; cumulative precision and recall
  are then computed over that ranking.

  Args:
    scores: A float numpy array representing detection score
    labels: A boolean numpy array representing true/false positive labels
    num_gt: Number of ground truth instances

  Raises:
    ValueError: if the input is not of the correct format

  Returns:
    precision: Fraction of positive instances over detected ones. This value is
      None if no ground truth labels are present.
    recall: Fraction of detected positive instance over all positive instances.
      This value is None if no ground truth labels are present.
  """
  # Fix: np.bool was a deprecated alias of the builtin bool and was removed
  # in NumPy 1.24, which made this check crash; comparing the dtype against
  # the builtin is equivalent on every NumPy version.
  if not isinstance(
      labels, np.ndarray) or labels.dtype != bool or len(labels.shape) != 1:
    raise ValueError("labels must be single dimension bool numpy array")

  if not isinstance(
      scores, np.ndarray) or len(scores.shape) != 1:
    raise ValueError("scores must be single dimension numpy array")

  if num_gt < np.sum(labels):
    raise ValueError("Number of true positives must be smaller than num_gt.")

  if len(scores) != len(labels):
    raise ValueError("scores and labels must be of the same size.")

  if num_gt == 0:
    return None, None

  # Rank detections by decreasing confidence.
  sorted_indices = np.argsort(scores)[::-1]
  true_positive_labels = labels[sorted_indices].astype(int)
  false_positive_labels = 1 - true_positive_labels
  cum_true_positives = np.cumsum(true_positive_labels)
  cum_false_positives = np.cumsum(false_positive_labels)
  precision = cum_true_positives.astype(float) / (
      cum_true_positives + cum_false_positives)
  recall = cum_true_positives.astype(float) / num_gt
  return precision, recall
def compute_average_precision(precision, recall):
  """Compute Average Precision according to the definition in VOCdevkit.

  Precision is modified to ensure that it does not decrease as recall
  decrease.

  Args:
    precision: A float [N, 1] numpy array of precisions
    recall: A float [N, 1] numpy array of recalls

  Raises:
    ValueError: if the input is not of the correct format

  Returns:
    average_precison: The area under the precision recall curve. NaN if
      precision and recall are None.
  """
  if precision is None:
    if recall is not None:
      raise ValueError("If precision is None, recall must also be None")
    # np.NAN was removed in NumPy 2.0; np.nan is the stable spelling.
    return np.nan

  if not isinstance(precision, np.ndarray) or not isinstance(recall,
                                                             np.ndarray):
    raise ValueError("precision and recall must be numpy array")
  # Fix: np.float was a deprecated alias of the builtin float, removed in
  # NumPy 1.24; np.float64 is the dtype it always resolved to.
  if precision.dtype != np.float64 or recall.dtype != np.float64:
    raise ValueError("input must be float numpy array.")
  if len(precision) != len(recall):
    raise ValueError("precision and recall must be of the same size.")
  if not precision.size:
    return 0.0
  if np.amin(precision) < 0 or np.amax(precision) > 1:
    raise ValueError("Precision must be in the range of [0, 1].")
  if np.amin(recall) < 0 or np.amax(recall) > 1:
    raise ValueError("recall must be in the range of [0, 1].")
  # builtin range replaces six.moves.range -- identical here and removes
  # the only six dependency of this function.
  if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
    raise ValueError("recall must be a non-decreasing array")

  # Sentinel points at recall 0 and 1 so the curve is integrated end-to-end.
  recall = np.concatenate([[0], recall, [1]])
  precision = np.concatenate([[0], precision, [0]])

  # Preprocess precision to be a non-decreasing array
  for i in range(len(precision) - 2, -1, -1):
    precision[i] = np.maximum(precision[i], precision[i + 1])

  # Sum the rectangles between points where recall actually changes.
  indices = np.where(recall[1:] != recall[:-1])[0] + 1
  average_precision = np.sum(
      (recall[indices] - recall[indices - 1]) * precision[indices])
  return average_precision
def compute_cor_loc(num_gt_imgs_per_class,
                    num_images_correctly_detected_per_class):
  """Compute CorLoc according to the definition in the following paper.

  https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf

  Returns nans if there are no ground truth images for a class.

  Args:
    num_gt_imgs_per_class: 1D array, representing number of images containing
        at least one object instance of a particular class
    num_images_correctly_detected_per_class: 1D array, representing number of
        images that are correctly detected at least one object instance of a
        particular class

  Returns:
    corloc_per_class: A float numpy array represents the corloc score of each
      class
  """
  # Fix: np.where evaluates both branches eagerly, so classes with zero
  # ground-truth images used to emit divide-by-zero / invalid-value
  # RuntimeWarnings before being replaced with NaN.  errstate silences
  # exactly those warnings; the returned values are unchanged.
  with np.errstate(divide='ignore', invalid='ignore'):
    return np.where(
        num_gt_imgs_per_class == 0,
        np.nan,
        num_images_correctly_detected_per_class / num_gt_imgs_per_class)
| bsd-2-clause |
ericlink/adms-server | playframework-dist/1.1-src/python/Lib/idlelib/configSectionNameDialog.py | 9 | 3817 | """
Dialog that allows user to specify a new config file section name.
Used to get new highlight theme and keybinding set names.
"""
from Tkinter import *
import tkMessageBox
class GetCfgSectionNameDialog(Toplevel):
    """Modal dialog that asks the user for a new config-file section name.

    The entered name is validated (non-empty after stripping whitespace,
    at most 30 characters, not already in usedNames).  After the dialog
    closes, the accepted name is in self.result ('' if cancelled).
    """
    def __init__(self, parent, title, message, usedNames):
        """
        message - string, informational message to display
        usedNames - list, list of names already in use for validity check
        """
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        self.resizable(height=FALSE, width=FALSE)
        self.title(title)
        self.transient(parent)
        self.grab_set()  # make the dialog modal
        self.protocol("WM_DELETE_WINDOW", self.Cancel)
        self.parent = parent
        self.message = message
        self.usedNames = usedNames
        self.result = ''
        self.CreateWidgets()
        self.withdraw()  # hide while setting geometry
        self.update_idletasks()
        # needs to be done here so that the winfo_reqwidth is valid
        self.messageInfo.config(width=self.frameMain.winfo_reqwidth())
        # centre dialog over parent
        self.geometry("+%d+%d" %
            ((parent.winfo_rootx() + ((parent.winfo_width()/2)
                - (self.winfo_reqwidth()/2)),
              parent.winfo_rooty() + ((parent.winfo_height()/2)
                - (self.winfo_reqheight()/2)))))
        self.deiconify()  # geometry set, unhide
        self.wait_window()
    def CreateWidgets(self):
        """Build the message, the entry field and the Ok/Cancel buttons."""
        self.name = StringVar(self)
        self.fontSize = StringVar(self)
        self.frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
        self.frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
        self.messageInfo = Message(self.frameMain, anchor=W, justify=LEFT,
                padx=5, pady=5, text=self.message)
        entryName = Entry(self.frameMain, textvariable=self.name, width=30)
        entryName.focus_set()
        self.messageInfo.pack(padx=5, pady=5)
        entryName.pack(padx=5, pady=5)
        frameButtons = Frame(self)
        frameButtons.pack(side=BOTTOM, fill=X)
        self.buttonOk = Button(frameButtons, text='Ok',
                width=8, command=self.Ok)
        self.buttonOk.grid(row=0, column=0, padx=5, pady=5)
        self.buttonCancel = Button(frameButtons, text='Cancel',
                width=8, command=self.Cancel)
        self.buttonCancel.grid(row=0, column=1, padx=5, pady=5)
    def NameOk(self):
        """Return 1 if the entered name is a sensible, unused ConfigParser
        section name, else show an error box and return 0."""
        nameOk = 1
        # Bug fix: the stripped value must be kept.  Previously the result
        # of name.strip() was discarded, so a name made only of whitespace
        # slipped past the "no name specified" check below.
        name = self.name.get().strip()
        if not name:  # no name specified
            tkMessageBox.showerror(title='Name Error',
                    message='No name specified.', parent=self)
            nameOk = 0
        elif len(name) > 30:  # name too long
            tkMessageBox.showerror(title='Name Error',
                    message='Name too long. It should be no more than ' +
                    '30 characters.', parent=self)
            nameOk = 0
        elif name in self.usedNames:
            tkMessageBox.showerror(title='Name Error',
                    message='This name is already in use.', parent=self)
            nameOk = 0
        return nameOk
    def Ok(self, event=None):
        if self.NameOk():
            self.result = self.name.get().strip()
            self.destroy()
    def Cancel(self, event=None):
        self.result = ''
        self.destroy()
if __name__ == '__main__':
#test the dialog
root=Tk()
def run():
keySeq=''
dlg=GetCfgSectionNameDialog(root,'Get Name',
'The information here should need to be word wrapped. Test.')
print dlg.result
Button(root,text='Dialog',command=run).pack()
root.mainloop()
| mit |
kingmotley/SickRage | sickrage/providers/nzb/NZBProvider.py | 11 | 1677 | # coding=utf-8
# This file is part of SickRage.
#
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
from sickbeard import logger
from sickbeard.classes import NZBSearchResult
from sickrage.helper.common import try_int
from sickrage.providers.GenericProvider import GenericProvider
class NZBProvider(GenericProvider):
    """Common base class for Usenet (NZB) search providers."""

    def __init__(self, name):
        GenericProvider.__init__(self, name)
        self.provider_type = GenericProvider.NZB

    def is_active(self):
        """A provider is active when NZB searching is enabled globally and
        the provider itself is enabled."""
        nzbs_enabled = bool(sickbeard.USE_NZBS)
        return nzbs_enabled and self.is_enabled()

    def _get_result(self, episodes):
        """Wrap found episodes in an NZB-specific search result."""
        return NZBSearchResult(episodes)

    def _get_size(self, item):
        """Return the release size (bytes) from a provider item, or -1 when
        it is missing or malformed."""
        try:
            size = item.get('links')[1].get('length', -1)
        except (AttributeError, IndexError, TypeError):
            size = -1

        if not size:
            logger.log(u'The size was not found in the provider response', logger.DEBUG)

        return try_int(size, -1)

    def _get_storage_dir(self):
        """Downloads go to the globally configured NZB directory."""
        return sickbeard.NZB_DIR
| gpl-3.0 |
rephorm/libnotify-mozilla | indicator.py | 1 | 3854 | #!/usr/bin/env python
import gtk, glib, sys, fcntl, os
import gobject
import indicate
class MessagingServer:
  # Wraps a libindicate server plus its per-name indicator entries and
  # pinned "action" entries shown in the messaging menu.
  def __init__(self, subtype, desktop):
    # subtype: indicator category suffix, e.g. 'email' -> "message.email".
    # desktop: path of the .desktop file identifying the application.
    self.indicators = {}
    self.actions = {}
    self.user_cb = None
    self.server_cb = None
    self.desktop = desktop
    self.srv = indicate.indicate_server_ref_default()
    type = "message.%s" % subtype
    self.srv.set_type(type)
    self.srv.set_desktop_file(desktop)
    self.srv.show()
  def show_indicator(self, name, count, draw_attention=True):
    # update existing indicator, or create new one
    try:
      ind = self.indicators[name]
    except KeyError:
      print "NEW"
      ind = indicate.Indicator()
      self.indicators[name] = ind
    # Properties are (re)set on every call so an existing entry picks up
    # the new count / attention state.
    ind.set_property('name', name)
    ind.set_property('count', str(count))
    ind.set_property('draw-attention', 'true' if draw_attention else 'false')
    ind.connect('user-display', self.cb_user_display)
    # hide and reshow actions to keep them at top of list
    for a in self.actions.values():
      a.hide()
    ind.show()
    for a in self.actions.values():
      a.show()
    return ind
  def hide_indicator(self, name):
    # Remove the named entry; complain (but don't raise) if unknown.
    try:
      self.indicators[name].hide()
      del(self.indicators[name])
    except KeyError:
      print "ERROR: No indicator named '%s' to hide" % name
  def add_action(self, name, cb):
    # Add a permanent menu entry (subtype 'menu') invoking cb on click.
    ind = indicate.Indicator()
    self.actions[name] = ind
    ind.set_property('name', name)
    ind.set_property('subtype', 'menu')
    ind.connect('user-display', cb)
    ind.show()
    return ind
  def set_user_cb(self, cb):
    # cb(name, count) -- called when the user activates an indicator.
    self.user_cb = cb
  def set_server_cb(self, cb):
    # cb(server) -- called when the user activates the server row itself.
    self.server_cb = cb
  def cb_server_display(self, srv, id):
    print "SERVER DISPLAY"
    if (self.server_cb):
      self.server_cb(self)
  def cb_user_display(self, ind, id):
    print "USER DISPLAY"
    if (self.user_cb):
      self.user_cb(ind.get_property('name'), ind.get_property('count'))
    # An activated indicator is considered consumed and is hidden.
    ind.hide()
def set_nonblock(fd, nonblock):
    """Enable or disable O_NONBLOCK on file descriptor *fd*."""
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    new_flags = flags | os.O_NONBLOCK if nonblock else flags & ~os.O_NONBLOCK
    fcntl.fcntl(fd, fcntl.F_SETFL, new_flags)
def user_display(name, count):
    """Open the mail client when the user activates an indicator entry."""
    cmd = "thunderbird -mail&"
    os.system(cmd)
def server_display(srv):
    """Open the mail client when the user activates the server row."""
    cmd = "thunderbird -mail&"
    os.system(cmd)
def io_cb(f, condition, srv):
commands = {
'show': [srv.show_indicator, 2],
'hide': [srv.hide_indicator, 1],
'exit': [exit, 0]
}
if condition == glib.IO_IN:
data = f.read().strip()
args = data.strip().split("::")
cmd = args.pop(0)
try:
fn, numargs = commands[cmd]
except KeyError:
print "ERROR: command '%s' not known" % cmd
return True
if numargs != len(args):
print "ERROR: '%s' command takes %d arguments but were %d given" % (cmd,
numargs, len(args))
return True
print "CMD: %s" % cmd
if fn:
fn(*args)
else:
print "ERROR: I/O Error"
exit()
return True
if __name__ == "__main__":
def action_compose(indicator, id):
os.system("thunderbird -compose&")
def action_addressbook(indicator, id):
os.system("thunderbird -addressbook&")
def timeout(srv):
srv.add_action("Contacts", action_addressbook)
srv.add_action("Compose New Message", action_compose)
srv = MessagingServer('email', '/usr/share/applications/thunderbird.desktop')
srv.set_user_cb(user_display)
srv.set_server_cb(server_display)
fifopath = sys.argv[1]
#fifopath = "/tmp/thunderbird-indicator-fifo"
if not os.path.exists(fifopath):
os.mkfifo(fifopath)
if len(sys.argv) > 2 and sys.argv[2] == 'mkfifoonly':
exit()
fdr = os.open(fifopath, os.O_RDONLY | os.O_NONBLOCK)
fdw = os.open(fifopath, os.O_WRONLY | os.O_NONBLOCK)
f = os.fdopen(fdr)
glib.io_add_watch(f, glib.IO_IN | glib.IO_ERR, io_cb, srv)
gobject.timeout_add_seconds(0, timeout, srv)
gtk.main()
| gpl-3.0 |
ppanczyk/ansible | lib/ansible/module_utils/junos.py | 8 | 13183 | #
# (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import collections
from contextlib import contextmanager
from copy import deepcopy
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.netconf import send_request, children
from ansible.module_utils.netconf import discard_changes, validate
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
try:
from lxml.etree import Element, SubElement, fromstring, tostring
HAS_LXML = True
except ImportError:
from xml.etree.ElementTree import Element, SubElement, fromstring, tostring
HAS_LXML = False
# Actions accepted by the <load-configuration> RPC for text/xml payloads.
ACTIONS = frozenset(['merge', 'override', 'replace', 'update', 'set'])
# Subset of actions that are valid when the payload format is JSON.
JSON_ACTIONS = frozenset(['merge', 'override', 'update'])
# Payload formats accepted when loading configuration.
FORMATS = frozenset(['xml', 'text', 'json'])
# Formats accepted when retrieving configuration ('set' is output-only).
CONFIG_FORMATS = frozenset(['xml', 'text', 'json', 'set'])
# Connection options nested under the 'provider' argument; credentials fall
# back to the standard ANSIBLE_NET_* environment variables.
junos_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'timeout': dict(type='int'),
    'transport': dict()
}
# Base argument spec shared by all junos modules.
junos_argument_spec = {
    'provider': dict(type='dict', options=junos_provider_spec),
}
# Deprecated top-level duplicates of the provider options, slated for
# removal in Ansible 2.9; kept for backwards compatibility.
junos_top_spec = {
    'host': dict(removed_in_version=2.9),
    'port': dict(removed_in_version=2.9, type='int'),
    'username': dict(removed_in_version=2.9),
    'password': dict(removed_in_version=2.9, no_log=True),
    'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
    'timeout': dict(removed_in_version=2.9, type='int'),
    'transport': dict(removed_in_version=2.9)
}
junos_argument_spec.update(junos_top_spec)
def get_provider_argspec():
    """Return the argument spec for the nested 'provider' option dict."""
    return junos_provider_spec
def check_args(module, warnings):
    """Hook for argument validation; no junos-specific checks are needed."""
    pass
def _validate_rollback_id(module, value):
try:
if not 0 <= int(value) <= 49:
raise ValueError
except ValueError:
module.fail_json(msg='rollback must be between 0 and 49')
def load_configuration(module, candidate=None, action='merge', rollback=None, format='xml'):
    """Build and send a NETCONF <load-configuration> RPC.

    Exactly one of *candidate* (config payload: string or XML element) or
    *rollback* (rollback id 0-49) must be given.  *action* and *format*
    are cross-validated against the ACTIONS/JSON_ACTIONS/FORMATS tables
    above.  Returns the reply from send_request().
    """
    if all((candidate is None, rollback is None)):
        module.fail_json(msg='one of candidate or rollback must be specified')
    elif all((candidate is not None, rollback is not None)):
        module.fail_json(msg='candidate and rollback are mutually exclusive')
    if format not in FORMATS:
        module.fail_json(msg='invalid format specified')
    if format == 'json' and action not in JSON_ACTIONS:
        module.fail_json(msg='invalid action for format json')
    elif format in ('text', 'xml') and action not in ACTIONS:
        # NOTE(review): message reads oddly; likely meant
        # "invalid action for format %s" -- confirm before changing,
        # callers/tests may match on the exact text.
        module.fail_json(msg='invalid action format %s' % format)
    if action == 'set' and not format == 'text':
        module.fail_json(msg='format must be text when action is set')
    if rollback is not None:
        _validate_rollback_id(module, rollback)
        xattrs = {'rollback': str(rollback)}
    else:
        xattrs = {'action': action, 'format': format}
    obj = Element('load-configuration', xattrs)
    if candidate is not None:
        # RPC child element name depends on the payload format
        lookup = {'xml': 'configuration', 'text': 'configuration-text',
                  'set': 'configuration-set', 'json': 'configuration-json'}
        if action == 'set':
            cfg = SubElement(obj, 'configuration-set')
        else:
            cfg = SubElement(obj, lookup[format])
        if isinstance(candidate, string_types):
            if format == 'xml':
                # xml payloads are parsed and attached as a subtree
                cfg.append(fromstring(candidate))
            else:
                cfg.text = to_text(candidate, encoding='latin-1')
        else:
            # candidate is already an XML element
            cfg.append(candidate)
    return send_request(module, obj)
def get_configuration(module, compare=False, format='xml', rollback='0'):
    """Fetch the device configuration via a <get-configuration> RPC.

    With compare=True the request asks for a diff against the given
    *rollback* id instead of the plain configuration.
    """
    if format not in CONFIG_FORMATS:
        module.fail_json(msg='invalid config format specified')
    attrib = {'format': format}
    if compare:
        _validate_rollback_id(module, rollback)
        attrib['compare'] = 'rollback'
        attrib['rollback'] = str(rollback)
    return send_request(module, Element('get-configuration', attrib))
def commit_configuration(module, confirm=False, check=False, comment=None, confirm_timeout=None):
    """Send a <commit-configuration> RPC built from the given options.

    confirm/check add empty flag elements; comment and confirm_timeout
    add text-bearing child elements when set.
    """
    rpc = Element('commit-configuration')
    for enabled, tag in ((confirm, 'confirmed'), (check, 'check')):
        if enabled:
            SubElement(rpc, tag)
    for value, tag in ((comment, 'log'), (confirm_timeout, 'confirm-timeout')):
        if value:
            SubElement(rpc, tag).text = str(value)
    return send_request(module, rpc)
def command(module, command, format='text', rpc_only=False):
    """Run an operational-mode CLI command through the <command> RPC.

    With rpc_only=True the CLI is asked to render the equivalent XML RPC
    (via '| display xml rpc'), which always uses text output.
    """
    if rpc_only:
        command = command + ' | display xml rpc'
        attrib = {'format': 'text'}
    else:
        attrib = {'format': format}
    return send_request(module, Element('command', attrib, text=command))
def lock_configuration(x):
    """Acquire the exclusive configuration lock on the device."""
    return send_request(x, Element('lock-configuration'))
def unlock_configuration(x):
    """Release the exclusive configuration lock on the device."""
    return send_request(x, Element('unlock-configuration'))
@contextmanager
def locked_config(module):
    """Context manager holding the configuration lock for the enclosed block.

    The lock is always released, even if the body raises.
    """
    try:
        lock_configuration(module)
        yield
    finally:
        unlock_configuration(module)
def get_diff(module):
    """Return the textual config diff against rollback 0, or None.

    None is returned when the device replied with warnings (a list reply)
    or when the reply carries no <configuration-output> node.
    """
    reply = get_configuration(module, compare=True, format='text')
    if isinstance(reply, list):
        # a list reply means the device returned warnings; diff is empty
        return None
    node = reply.find('.//configuration-output')
    if node is None:
        return None
    return to_text(node.text, encoding='latin-1').strip()
def load_config(module, candidate, warnings, action='merge', format='xml'):
    """Load *candidate* onto the device, validate, and return the diff.

    A list candidate is joined into one newline-separated payload.  Any
    warnings returned by the device are appended to *warnings*.  Returns
    None when there is no candidate or no diff.
    """
    if not candidate:
        return None
    payload = '\n'.join(candidate) if isinstance(candidate, list) else candidate
    reply = load_configuration(module, payload, action=action, format=format)
    if isinstance(reply, list):
        warnings.extend(reply)
    validate(module)
    return get_diff(module)
def get_param(module, key):
    """Look up *key* in the module params, falling back to the provider dict.

    Returns None when the key is set nowhere (or is falsy at top level and
    no provider dict is present).
    """
    params = module.params
    if params.get(key):
        return params[key]
    provider = params.get('provider')
    if provider:
        return provider.get(key)
    return None
def map_params_to_obj(module, param_to_xpath_map, param=None):
    """Translate module parameters into an xpath -> metadata-list mapping.

    Each entry of *param_to_xpath_map* maps a parameter name either to a
    plain xpath string or to a dict carrying an 'xpath' key plus metadata
    flags ('tag_only', 'leaf_only', 'value_req', 'is_key', ...).  For every
    present parameter, one metadata dict per value is appended under its
    xpath, always including a 'value' key, e.g.::

        {'name': [{'value': 'ge-0/0/1'}],
         'disable': [{'value': True, 'tag_only': True}]}

    :param module: AnsibleModule whose params are used when *param* is falsy
    :param param_to_xpath_map: parameter-name -> xpath (or metadata dict)
    :param param: optional explicit params dict overriding module.params
    :return: collections.OrderedDict keyed by xpath
    """
    param = param or module.params
    obj = collections.OrderedDict()
    for key, attribute in param_to_xpath_map.items():
        if key not in param:
            continue
        values = param[key]
        if not isinstance(values, (list, tuple)):
            values = [values]
        if isinstance(attribute, dict):
            xpath = attribute.get('xpath')
            template = attribute
        else:
            xpath = attribute
            template = None
        entries = obj.setdefault(xpath, [])
        for val in values:
            if template is None:
                entries.append({'value': val})
            else:
                # copy the metadata, dropping the structural 'xpath' key
                meta = deepcopy(template)
                meta.pop('xpath')
                meta['value'] = val
                entries.append(meta)
    return obj
def map_obj_to_ele(module, want, top, value_map=None, param=None):
    """Render the xpath->metadata mapping from map_params_to_obj() into an
    lxml element tree rooted at the hierarchy given by *top* (a '/'-joined
    path).  The 'state' and 'active' module params control whether nodes
    are created/replaced or marked for deletion and (in)activation.

    Returns the first child of the synthetic <root> element, i.e. the
    top-most element of the generated configuration subtree.
    """
    if not HAS_LXML:
        module.fail_json(msg='lxml is not installed.')
    if not param:
        param = module.params
    root = Element('root')
    # materialise the container hierarchy named by *top*
    top_ele = top.split('/')
    ele = SubElement(root, top_ele[0])
    if len(top_ele) > 1:
        for item in top_ele[1:-1]:
            ele = SubElement(ele, item)
    container = ele
    state = param.get('state')
    active = param.get('active')
    if active:
        oper = 'active'
    else:
        oper = 'inactive'
    # build xml subtree under the last component of *top*
    if container.tag != top_ele[-1]:
        node = SubElement(container, top_ele[-1])
    else:
        node = container
    for fxpath, attributes in want.items():
        for attr in attributes:
            tag_only = attr.get('tag_only', False)
            leaf_only = attr.get('leaf_only', False)
            value_req = attr.get('value_req', False)
            is_key = attr.get('is_key', False)
            parent_attrib = attr.get('parent_attrib', True)
            value = attr.get('value')
            field_top = attr.get('top')
            # operation 'delete' is added as element attribute
            # only if it is key or leaf only node
            if state == 'absent' and not (is_key or leaf_only):
                continue
            # convert param value to device specific value
            if value_map and fxpath in value_map:
                value = value_map[fxpath].get(value)
            if (value is not None) or tag_only or leaf_only:
                ele = node
                if field_top:
                    # a per-field sub-container, deeper than *top*; e.g.
                    #   top       = 'system/syslog/file'
                    #   field_top = 'system/syslog/file/contents'
                    # produces <file><name>test</name><contents/></file>
                    ele_list = root.xpath(top + '/' + field_top)
                    if not len(ele_list):
                        # create each missing level of the field hierarchy
                        fields = field_top.split('/')
                        ele = node
                        for item in fields:
                            inner_ele = root.xpath(top + '/' + item)
                            if len(inner_ele):
                                ele = inner_ele[0]
                            else:
                                ele = SubElement(ele, item)
                    else:
                        ele = ele_list[0]
                if value is not None and not isinstance(value, bool):
                    value = to_text(value, errors='surrogate_then_replace')
                if fxpath:
                    tags = fxpath.split('/')
                    for item in tags:
                        ele = SubElement(ele, item)
                if tag_only:
                    if state == 'present':
                        if not value:
                            # if value of tag_only node is false, delete the node
                            ele.set('delete', 'delete')
                elif leaf_only:
                    if state == 'present':
                        ele.set(oper, oper)
                        ele.text = value
                    else:
                        ele.set('delete', 'delete')
                        # Add value of leaf node if required while deleting.
                        # in some cases if value is present while deleting, it
                        # can result in error, hence the check
                        if value_req:
                            ele.text = value
                        if is_key:
                            # deleting a key deletes the whole parent entry
                            par = ele.getparent()
                            par.set('delete', 'delete')
                else:
                    ele.text = value
                    par = ele.getparent()
                    if parent_attrib:
                        if state == 'present':
                            # set replace attribute at parent node
                            if not par.attrib.get('replace'):
                                par.set('replace', 'replace')
                            # set active/inactive at parent node
                            if not par.attrib.get(oper):
                                par.set(oper, oper)
                        else:
                            par.set('delete', 'delete')
    return root.getchildren()[0]
def to_param_list(module):
    """Normalize module params to a list of per-item parameter dicts.

    When the 'aggregate' option is set it provides the items (a single
    dict is wrapped in a list); otherwise the module params themselves
    form the one and only item.
    """
    aggregate = module.params.get('aggregate')
    if not aggregate:
        return [module.params]
    return [aggregate] if isinstance(aggregate, dict) else aggregate
| gpl-3.0 |
jimcunderwood/MissionPlanner | Lib/site-packages/numpy/f2py/tests/test_return_complex.py | 59 | 4480 | from numpy.testing import *
from numpy import array
import util
class TestReturnComplex(util.F2PyTest):
    # Base class holding the shared checks for complex-valued f2py wrappers;
    # the F77/F90 subclasses below supply the Fortran source to compile.
    def check_function(self, t):
        # *t* is an f2py-wrapped routine; its __doc__ begins with its name.
        tname = t.__doc__.split()[0]
        # single-precision variants (complex / complex*8) only carry about
        # 7 significant digits, so allow a small absolute error for them
        if tname in ['t0','t8','s0','s8']:
            err = 1e-5
        else:
            err = 0.0
        # scalar inputs of various Python types must round-trip
        assert abs(t(234j)-234.0j)<=err
        assert abs(t(234.6)-234.6)<=err
        assert abs(t(234l)-234.0)<=err
        assert abs(t(234.6+3j)-(234.6+3j))<=err
        #assert abs(t('234')-234.)<=err
        #assert abs(t('234.6')-234.6)<=err
        assert abs(t(-234)+234.)<=err
        # sequence and array inputs are accepted if they hold one element
        assert abs(t([234])-234.)<=err
        assert abs(t((234,))-234.)<=err
        assert abs(t(array(234))-234.)<=err
        assert abs(t(array(23+4j,'F'))-(23+4j))<=err
        assert abs(t(array([234]))-234.)<=err
        assert abs(t(array([[234]]))-234.)<=err
        # 'b' is signed int8, so 234 wraps around to -22
        assert abs(t(array([234],'b'))+22.)<=err
        assert abs(t(array([234],'h'))-234.)<=err
        assert abs(t(array([234],'i'))-234.)<=err
        assert abs(t(array([234],'l'))-234.)<=err
        assert abs(t(array([234],'q'))-234.)<=err
        assert abs(t(array([234],'f'))-234.)<=err
        assert abs(t(array([234],'d'))-234.)<=err
        assert abs(t(array([234+3j],'F'))-(234+3j))<=err
        assert abs(t(array([234],'D'))-234.)<=err
        # invalid argument types/shapes must raise
        #assert_raises(TypeError, t, array([234], 'a1'))
        assert_raises(TypeError, t, 'abc')
        assert_raises(IndexError, t, [])
        assert_raises(IndexError, t, ())
        assert_raises(TypeError, t, t)
        assert_raises(TypeError, t, {})
        # huge integers either overflow or map to complex infinity
        try:
            r = t(10l**400)
            assert `r` in ['(inf+0j)','(Infinity+0j)'],`r`
        except OverflowError:
            pass
class TestF77ReturnComplex(TestReturnComplex):
code = """
function t0(value)
complex value
complex t0
t0 = value
end
function t8(value)
complex*8 value
complex*8 t8
t8 = value
end
function t16(value)
complex*16 value
complex*16 t16
t16 = value
end
function td(value)
double complex value
double complex td
td = value
end
subroutine s0(t0,value)
complex value
complex t0
cf2py intent(out) t0
t0 = value
end
subroutine s8(t8,value)
complex*8 value
complex*8 t8
cf2py intent(out) t8
t8 = value
end
subroutine s16(t16,value)
complex*16 value
complex*16 t16
cf2py intent(out) t16
t16 = value
end
subroutine sd(td,value)
double complex value
double complex td
cf2py intent(out) td
td = value
end
"""
    @dec.slow
    def test_all(self):
        # run the shared checks against every Fortran-77 routine in `code`
        for name in "t0,t8,t16,td,s0,s8,s16,sd".split(","):
            self.check_function(getattr(self.module, name))
class TestF90ReturnComplex(TestReturnComplex):
suffix = ".f90"
code = """
module f90_return_complex
contains
function t0(value)
complex :: value
complex :: t0
t0 = value
end function t0
function t8(value)
complex(kind=4) :: value
complex(kind=4) :: t8
t8 = value
end function t8
function t16(value)
complex(kind=8) :: value
complex(kind=8) :: t16
t16 = value
end function t16
function td(value)
double complex :: value
double complex :: td
td = value
end function td
subroutine s0(t0,value)
complex :: value
complex :: t0
!f2py intent(out) t0
t0 = value
end subroutine s0
subroutine s8(t8,value)
complex(kind=4) :: value
complex(kind=4) :: t8
!f2py intent(out) t8
t8 = value
end subroutine s8
subroutine s16(t16,value)
complex(kind=8) :: value
complex(kind=8) :: t16
!f2py intent(out) t16
t16 = value
end subroutine s16
subroutine sd(td,value)
double complex :: value
double complex :: td
!f2py intent(out) td
td = value
end subroutine sd
end module f90_return_complex
"""
    @dec.slow
    def test_all(self):
        # F90 routines live inside the f90_return_complex module namespace
        for name in "t0,t8,t16,td,s0,s8,s16,sd".split(","):
            self.check_function(getattr(self.module.f90_return_complex, name))
if __name__ == "__main__":
    # allow running this test file standalone via nose
    import nose
    nose.runmodule()
| gpl-3.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pywin32-219/com/win32com/makegw/makegwenum.py | 46 | 9750 | """Utility file for generating PyIEnum support.
This is almost a 'template' file. It simplay contains almost full
C++ source code for PyIEnum* support, and the Python code simply
substitutes the appropriate interface name.
This module is notmally not used directly - the @makegw@ module
automatically calls this.
"""
#
# INTERNAL FUNCTIONS
#
#
import string
def is_interface_enum(enumtype):
    """Return True when ``IEnum<enumtype>`` enumerates COM interfaces.

    Interface enums use mixed-case names (e.g. ``ConnectionPoints``)
    while enums of plain structures use all-caps names (e.g. ``STATSTG``);
    the heuristic tests whether the first and third characters are both
    upper-case ASCII letters.
    """
    # BUG FIX: string.uppercase is Python-2-only *and* locale dependent;
    # string.ascii_uppercase is the stable, portable equivalent.
    return not (enumtype[0] in string.ascii_uppercase and
                enumtype[2] in string.ascii_uppercase)
def _write_enumifc_cpp(f, interface):
    """Write the C++ *interface* (client-side) implementation of a
    PyIEnumXXX wrapper to file object *f*.

    ``interface.name`` is expected to look like ``IEnumXXX``; the leading
    ``IEnum`` (5 chars) is stripped to obtain the enum type name.  The two
    per-type code fragments (element converter and result-array declaration)
    are chosen depending on whether the enum yields COM interfaces or plain
    structures, then spliced into the large template via ``% locals()``.
    """
    enumtype = interface.name[5:]
    if is_interface_enum(enumtype):
        # Assume an interface: e.g. "ConnectionPoints" -> "IConnectionPoint"
        enum_interface = "I" + enumtype[:-1]
        converter = "PyObject *ob = PyCom_PyObjectFromIUnknown(rgVar[i], IID_%(enum_interface)s, FALSE);" % locals()
        arraydeclare = "%(enum_interface)s **rgVar = new %(enum_interface)s *[celt];" % locals()
    else:
        # Enum of a simple structure
        converter = "PyObject *ob = PyCom_PyObjectFrom%(enumtype)s(&rgVar[i]);" % locals()
        arraydeclare = "%(enumtype)s *rgVar = new %(enumtype)s[celt];" % locals()
    f.write(\
'''
// ---------------------------------------------------
//
// Interface Implementation
PyIEnum%(enumtype)s::PyIEnum%(enumtype)s(IUnknown *pdisp):
	PyIUnknown(pdisp)
{
	ob_type = &type;
}
PyIEnum%(enumtype)s::~PyIEnum%(enumtype)s()
{
}
/* static */ IEnum%(enumtype)s *PyIEnum%(enumtype)s::GetI(PyObject *self)
{
	return (IEnum%(enumtype)s *)PyIUnknown::GetI(self);
}
// @pymethod object|PyIEnum%(enumtype)s|Next|Retrieves a specified number of items in the enumeration sequence.
PyObject *PyIEnum%(enumtype)s::Next(PyObject *self, PyObject *args)
{
	long celt = 1;
	// @pyparm int|num|1|Number of items to retrieve.
	if ( !PyArg_ParseTuple(args, "|l:Next", &celt) )
		return NULL;
	IEnum%(enumtype)s *pIE%(enumtype)s = GetI(self);
	if ( pIE%(enumtype)s == NULL )
		return NULL;
	%(arraydeclare)s
	if ( rgVar == NULL ) {
		PyErr_SetString(PyExc_MemoryError, "allocating result %(enumtype)ss");
		return NULL;
	}
	int i;
/*	for ( i = celt; i--; )
		// *** possibly init each structure element???
*/
	ULONG celtFetched = 0;
	PY_INTERFACE_PRECALL;
	HRESULT hr = pIE%(enumtype)s->Next(celt, rgVar, &celtFetched);
	PY_INTERFACE_POSTCALL;
	if (  HRESULT_CODE(hr) != ERROR_NO_MORE_ITEMS && FAILED(hr) )
	{
		delete [] rgVar;
		return PyCom_BuildPyException(hr,pIE%(enumtype)s, IID_IE%(enumtype)s);
	}
	PyObject *result = PyTuple_New(celtFetched);
	if ( result != NULL )
	{
		for ( i = celtFetched; i--; )
		{
			%(converter)s
			if ( ob == NULL )
			{
				Py_DECREF(result);
				result = NULL;
				break;
			}
			PyTuple_SET_ITEM(result, i, ob);
		}
	}
/*	for ( i = celtFetched; i--; )
		// *** possibly cleanup each structure element???
*/
	delete [] rgVar;
	return result;
}
// @pymethod |PyIEnum%(enumtype)s|Skip|Skips over the next specified elementes.
PyObject *PyIEnum%(enumtype)s::Skip(PyObject *self, PyObject *args)
{
	long celt;
	if ( !PyArg_ParseTuple(args, "l:Skip", &celt) )
		return NULL;
	IEnum%(enumtype)s *pIE%(enumtype)s = GetI(self);
	if ( pIE%(enumtype)s == NULL )
		return NULL;
	PY_INTERFACE_PRECALL;
	HRESULT hr = pIE%(enumtype)s->Skip(celt);
	PY_INTERFACE_POSTCALL;
	if ( FAILED(hr) )
		return PyCom_BuildPyException(hr, pIE%(enumtype)s, IID_IE%(enumtype)s);
	Py_INCREF(Py_None);
	return Py_None;
}
// @pymethod |PyIEnum%(enumtype)s|Reset|Resets the enumeration sequence to the beginning.
PyObject *PyIEnum%(enumtype)s::Reset(PyObject *self, PyObject *args)
{
	if ( !PyArg_ParseTuple(args, ":Reset") )
		return NULL;
	IEnum%(enumtype)s *pIE%(enumtype)s = GetI(self);
	if ( pIE%(enumtype)s == NULL )
		return NULL;
	PY_INTERFACE_PRECALL;
	HRESULT hr = pIE%(enumtype)s->Reset();
	PY_INTERFACE_POSTCALL;
	if ( FAILED(hr) )
		return PyCom_BuildPyException(hr, pIE%(enumtype)s, IID_IE%(enumtype)s);
	Py_INCREF(Py_None);
	return Py_None;
}
// @pymethod <o PyIEnum%(enumtype)s>|PyIEnum%(enumtype)s|Clone|Creates another enumerator that contains the same enumeration state as the current one
PyObject *PyIEnum%(enumtype)s::Clone(PyObject *self, PyObject *args)
{
	if ( !PyArg_ParseTuple(args, ":Clone") )
		return NULL;
	IEnum%(enumtype)s *pIE%(enumtype)s = GetI(self);
	if ( pIE%(enumtype)s == NULL )
		return NULL;
	IEnum%(enumtype)s *pClone;
	PY_INTERFACE_PRECALL;
	HRESULT hr = pIE%(enumtype)s->Clone(&pClone);
	PY_INTERFACE_POSTCALL;
	if ( FAILED(hr) )
		return PyCom_BuildPyException(hr, pIE%(enumtype)s, IID_IE%(enumtype)s);
	return PyCom_PyObjectFromIUnknown(pClone, IID_IEnum%(enumtype)s, FALSE);
}
// @object PyIEnum%(enumtype)s|A Python interface to IEnum%(enumtype)s
static struct PyMethodDef PyIEnum%(enumtype)s_methods[] =
{
	{ "Next", PyIEnum%(enumtype)s::Next, 1 },    // @pymeth Next|Retrieves a specified number of items in the enumeration sequence.
	{ "Skip", PyIEnum%(enumtype)s::Skip, 1 },	// @pymeth Skip|Skips over the next specified elementes.
	{ "Reset", PyIEnum%(enumtype)s::Reset, 1 },	// @pymeth Reset|Resets the enumeration sequence to the beginning.
	{ "Clone", PyIEnum%(enumtype)s::Clone, 1 },	// @pymeth Clone|Creates another enumerator that contains the same enumeration state as the current one.
	{ NULL }
};
PyComEnumTypeObject PyIEnum%(enumtype)s::type("PyIEnum%(enumtype)s",
		&PyIUnknown::type,
		sizeof(PyIEnum%(enumtype)s),
		PyIEnum%(enumtype)s_methods,
		GET_PYCOM_CTOR(PyIEnum%(enumtype)s));
''' % locals() )
def _write_enumgw_cpp(f, interface):
    """Write the C++ *gateway* (server-side) implementation of a
    PyGEnumXXX class to file object *f*.

    Mirrors _write_enumifc_cpp: the enum type name is taken from
    ``interface.name[5:]`` and the per-type fragments (Python-object ->
    native converter and the Next() output-array declaration) are chosen
    by is_interface_enum() before being spliced into the template.
    """
    enumtype = interface.name[5:]
    if is_interface_enum(enumtype):
        # Assume an interface.
        enum_interface = "I" + enumtype[:-1]
        converter = "if ( !PyCom_InterfaceFromPyObject(ob, IID_%(enum_interface)s, (void **)&rgVar[i], FALSE) )" % locals()
        argdeclare="%(enum_interface)s __RPC_FAR * __RPC_FAR *rgVar" % locals()
    else:
        argdeclare="%(enumtype)s __RPC_FAR *rgVar" % locals()
        converter="if ( !PyCom_PyObjectAs%(enumtype)s(ob, &rgVar[i]) )" % locals()
    f.write(
'''
// ---------------------------------------------------
//
// Gateway Implementation
// Std delegation
STDMETHODIMP_(ULONG) PyGEnum%(enumtype)s::AddRef(void) {return PyGatewayBase::AddRef();}
STDMETHODIMP_(ULONG) PyGEnum%(enumtype)s::Release(void) {return PyGatewayBase::Release();}
STDMETHODIMP PyGEnum%(enumtype)s::QueryInterface(REFIID iid, void ** obj) {return PyGatewayBase::QueryInterface(iid, obj);}
STDMETHODIMP PyGEnum%(enumtype)s::GetTypeInfoCount(UINT FAR* pctInfo) {return PyGatewayBase::GetTypeInfoCount(pctInfo);}
STDMETHODIMP PyGEnum%(enumtype)s::GetTypeInfo(UINT itinfo, LCID lcid, ITypeInfo FAR* FAR* pptInfo) {return PyGatewayBase::GetTypeInfo(itinfo, lcid, pptInfo);}
STDMETHODIMP PyGEnum%(enumtype)s::GetIDsOfNames(REFIID refiid, OLECHAR FAR* FAR* rgszNames, UINT cNames, LCID lcid, DISPID FAR* rgdispid) {return PyGatewayBase::GetIDsOfNames( refiid, rgszNames, cNames, lcid, rgdispid);}
STDMETHODIMP PyGEnum%(enumtype)s::Invoke(DISPID dispid, REFIID riid, LCID lcid, WORD wFlags, DISPPARAMS FAR* params, VARIANT FAR* pVarResult, EXCEPINFO FAR* pexcepinfo, UINT FAR* puArgErr) {return PyGatewayBase::Invoke( dispid, riid, lcid, wFlags, params, pVarResult, pexcepinfo, puArgErr);}
STDMETHODIMP PyGEnum%(enumtype)s::Next( 
            /* [in] */ ULONG celt,
            /* [length_is][size_is][out] */ %(argdeclare)s,
            /* [out] */ ULONG __RPC_FAR *pCeltFetched)
{
	PY_GATEWAY_METHOD;
	PyObject *result;
	HRESULT hr = InvokeViaPolicy("Next", &result, "i", celt);
	if ( FAILED(hr) )
		return hr;
	if ( !PySequence_Check(result) )
		goto error;
	int len;
	len = PyObject_Length(result);
	if ( len == -1 )
		goto error;
	if ( len > (int)celt)
		len = celt;
	if ( pCeltFetched )
		*pCeltFetched = len;
	int i;
	for ( i = 0; i < len; ++i )
	{
		PyObject *ob = PySequence_GetItem(result, i);
		if ( ob == NULL )
			goto error;
		%(converter)s
		{
			Py_DECREF(result);
			return PyCom_SetCOMErrorFromPyException(IID_IEnum%(enumtype)s);
		}
	}
	Py_DECREF(result);
	return len < (int)celt ? S_FALSE : S_OK;
error:
	PyErr_Clear();	// just in case
	Py_DECREF(result);
	return PyCom_SetCOMErrorFromSimple(E_FAIL, IID_IEnum%(enumtype)s, "Next() did not return a sequence of objects");
}
STDMETHODIMP PyGEnum%(enumtype)s::Skip( 
            /* [in] */ ULONG celt)
{
	PY_GATEWAY_METHOD;
	return InvokeViaPolicy("Skip", NULL, "i", celt);
}
STDMETHODIMP PyGEnum%(enumtype)s::Reset(void)
{
	PY_GATEWAY_METHOD;
	return InvokeViaPolicy("Reset");
}
STDMETHODIMP PyGEnum%(enumtype)s::Clone( 
            /* [out] */ IEnum%(enumtype)s __RPC_FAR *__RPC_FAR *ppEnum)
{
	PY_GATEWAY_METHOD;
	PyObject * result;
	HRESULT hr = InvokeViaPolicy("Clone", &result);
	if ( FAILED(hr) )
		return hr;
	/*
	** Make sure we have the right kind of object: we should have some kind
	** of IUnknown subclass wrapped into a PyIUnknown instance.
	*/
	if ( !PyIBase::is_object(result, &PyIUnknown::type) )
	{
		/* the wrong kind of object was returned to us */
		Py_DECREF(result);
		return PyCom_SetCOMErrorFromSimple(E_FAIL, IID_IEnum%(enumtype)s);
	}
	/*
	** Get the IUnknown out of the thing. note that the Python ob maintains
	** a reference, so we don't have to explicitly AddRef() here.
	*/
	IUnknown *punk = ((PyIUnknown *)result)->m_obj;
	if ( !punk )
	{
		/* damn. the object was released. */
		Py_DECREF(result);
		return PyCom_SetCOMErrorFromSimple(E_FAIL, IID_IEnum%(enumtype)s);
	}
	/*
	** Get the interface we want. note it is returned with a refcount.
	** This QI is actually going to instantiate a PyGEnum%(enumtype)s.
	*/
	hr = punk->QueryInterface(IID_IEnum%(enumtype)s, (LPVOID *)ppEnum);
	/* done with the result; this DECREF is also for <punk> */
	Py_DECREF(result);
	return PyCom_SetCOMErrorFromSimple(hr, IID_IEnum%(enumtype)s, "Python could not convert the result from Next() into the required COM interface");
}
''' % locals())
| mit |
YongMan/Xen-4.3.1 | tools/xm-test/tests/restore/01_restore_basic_pos.py | 42 | 2022 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Dan Smith <danms@us.ibm.com>
# Save a domain and attempt to restore it
#
# Since we don't want to depend on the fact that save/01_basic_pos.py
# ran successfully, we try to save the domain here again
import time
from XmTestLib import *
# HVM domains cannot be restored by this test; skip early.
if ENABLE_HVM_SUPPORT:
    SKIP("Restore currently not supported for HVM domains")
domain = XmTestDomain()
# Boot the test domain and grab its console.
try:
    console = domain.start()
except DomainError, e:
    if verbose:
        print "Failed to create test domain because:"
        print e.extra
    FAIL(str(e))
# Make sure the domain isn't DOA: set a shell variable we can check
# after the restore to prove state survived.
try:
    console.runCmd("foo=bar")
except ConsoleError, e:
    FAIL(str(e))
domain.closeConsole()
# Save it out
try:
    s, o = traceCommand("xm save %s /tmp/test.state" % domain.getName(),
                        timeout=30)
except TimeoutError, e:
    FAIL(str(e))
if s != 0:
    FAIL("save command exited %i != 0" % s)
# FIXME: Give the system some time to update the internal state
traceCommand("xm list")
# Make sure it's gone: a saved domain must no longer be running
if isDomainRunning(domain.getName()):
    FAIL("Domain still running after save!")
# Let things settle
time.sleep(2)
# Restore it in
status, output = traceCommand("xm restore /tmp/test.state",
                              timeout=30)

# BUG FIX: this previously tested `s`, the exit status of the earlier
# `xm save` command, so a failing restore was never detected.
if status != 0:
    FAIL("restore command exited %i != 0" % status)
# Make sure it's running again after the restore
if not isDomainRunning(domain.getName()):
    FAIL("Restore didn't result in a running %s domain!" % domain.getName())
# Make sure it's alive and that pre-save shell state survived
try:
    newConsole = domain.getConsole()
    # Enable debug dumping because this generates a Oops on x86_64
    newConsole.debugMe = True
    newConsole.sendInput("ls")
    # $foo was set to "bar" before the save; it must still be set
    run = newConsole.runCmd("echo xx$foo")
    if not re.search("bar", run["output"]):
        FAIL("Restored domain has been reset")
except ConsoleError, e:
    FAIL("Restored domain is dead (%s)" % str(e))
domain.closeConsole()
# This only works because the domain
# still has the same name
domain.stop()
| gpl-2.0 |
thomasrstorey/recipesfordisaster | actions/reheat.py | 2 | 4903 | '''
add.py
Takes a list of input ingredient names. Imports each if not already present.
If already present, duplicates and rotates the ingredient.
Thomas Storey
2016
'''
import sys
import argparse
import bpy
import numpy as np
import os
import bmesh
from math import *
from mathutils import *
import random
def getObjectsBySubstring(objname):
    """Return every scene object whose name contains *objname*."""
    return [obj for obj in bpy.data.objects if objname in obj.name]
def deleteObject(obj):
    """Unlink *obj* from the scene and remove it from blend data."""
    bpy.context.scene.objects.unlink(obj)
    obj.user_clear()
    bpy.data.objects.remove(obj)
def getObject(objdir, objname):
    """Return the object named *objname*, importing objdir/objname.obj first
    if it is not already present in the blend data."""
    if (bpy.data.objects.get(objname) == None):
        objpath = os.path.join(objdir, objname+".obj")
        bpy.ops.import_scene.obj(filepath=objpath,
        axis_forward='Y',axis_up='Z')
    return bpy.data.objects[objname]
def setOriginToGeometry(scn, obj):
    """Move *obj*'s origin to its geometry center, leaving it deselected."""
    obj.select = True;
    scn.objects.active = obj
    bpy.ops.object.origin_set(type="ORIGIN_GEOMETRY")
    obj.select = False;
def joinObjects(scn, objs, name):
    """Join *objs* into a single mesh named *name* and return it.

    The first object in the list becomes the join target; its object and
    mesh data are renamed afterwards.
    """
    bpy.ops.object.select_all(action='DESELECT')
    for obj in objs:
        obj.select = True
    activeobj = objs[0]
    scn.objects.active = activeobj
    bpy.ops.object.join()
    activeobj.name = name
    activeobj.data.name = name
    return activeobj
def bakeObject(scn, obj):
    """Give *obj* a 'fried' look: tint its material and add noisy displacement.

    Adds a generated image texture ("fry"/"fry_img") soft-light blended into
    the object's first material, plus a Displace modifier driven by a random
    color-clouds texture ("batter").
    """
    bpy.ops.object.select_all(action='DESELECT')
    obj.select = True
    scn.objects.active = obj
    mat = obj.material_slots[0].material
    bpy.ops.texture.new()
    # the freshly created texture is always named "Texture"; rename it
    tex = bpy.data.textures["Texture"]
    tex.name = "fry"
    tex_slot = mat.texture_slots.add()
    tex_slot.texture = tex
    bpy.ops.image.new()
    # likewise the new image is named "Untitled" by default
    img = bpy.data.images["Untitled"]
    img.name = "fry_img"
    tex.image = img
    # flat reddish tint, soft-light blended onto the base material
    img.generated_color = (1.0, 0.3, 0.3, 1.00)
    tex_slot.blend_type = 'SOFT_LIGHT'
    bpy.ops.object.modifier_add(type="DISPLACE")
    mod = obj.modifiers["Displace"]
    mod.direction = "RGB_TO_XYZ"
    mod.strength = 0.2
    batterTex = bpy.data.textures.new(name="batter",type="CLOUDS")
    batterTex.type_recast()
    mod.texture = batterTex
    batterTex.cloud_type = "COLOR"
    # randomized noise scale so repeated bakes don't look identical
    batterTex.noise_scale = 0.1 + random.random()*0.2
    batterTex.noise_depth = 2
    batterTex.intensity = 1.0
    batterTex.contrast = 0.4
    batterTex.saturation = 1.4
    # The block below is an unfinished attempt to bake the result into a
    # texture image; kept for reference.
    # bpy.ops.texture.new()
    # baked_tex = bpy.data.textures["Texture"]
    # baked_tex.name = "baked"
    # baked_tex_slot = mat.texture_slots.create(2)
    # baked_tex_slot.texture = baked_tex
    # bpy.ops.image.new()
    # baked_img = bpy.data.images["Untitled"]
    # baked_img.name = "baked_img"
    # mat.active_texture_index = 2
    # mat.active_texture = baked_tex
    #
    # bpy.ops.object.mode_set(mode="EDIT")
    # bpy.data.scenes["Scene"].render.bake_type = "TEXTURE"
    # for area in bpy.context.screen.areas :
    #     if area.type == 'IMAGE_EDITOR' :
    #         area.spaces.active.image = baked_img
    # bpy.ops.object.bake_image()
    # mat.texture_slots[0].texture.image = baked_img
def removeMod(scn, obj):
    """Apply all modifiers on *obj* by converting it to a plain mesh."""
    bpy.ops.object.select_all(action='DESELECT')
    obj.select = True
    scn.objects.active = obj
    bpy.ops.object.convert(target='MESH')
    obj.select = False
def execute(inputs, output):
    """Import/fetch each ingredient in *inputs*, apply the bake effect,
    then save the .blend file.

    :param inputs: list of object names (objs/<name>.obj on disk)
    :param output: path of the .blend to save to, or None to save in place
    """
    ctx = bpy.context
    scn = ctx.scene
    cwd = os.getcwd()
    objdir = os.path.join(cwd, 'objs')
    for objname in inputs:
        # import file, or get it if it's already here
        obj = getObject(objdir, objname)
        obj.location = Vector([0,0,0])
        bakeObject(scn, obj)
        removeMod(scn, obj)
    # save out .blend (save-as when an explicit output path was given)
    if not output == None:
        bpy.ops.wm.save_as_mainfile(filepath=output,
        check_existing=False,relative_remap=True)
    else:
        bpy.ops.wm.save_mainfile(check_existing=False,relative_remap=True)
def main():
    """Parse the Blender-forwarded CLI arguments and run execute()."""
    argv = sys.argv
    # Blender passes script arguments after a literal "--"; everything
    # before it belongs to Blender itself.
    if "--" not in argv:
        argv = []
    else:
        argv = argv[argv.index("--") + 1:]

    usage_text =\
    "Usage: blender -b [.blend file] --python " + __file__ + " -- [options]"

    parser = argparse.ArgumentParser(description=usage_text)

    parser.add_argument("-i", "--input", dest="input", type=str, required=True,
    help="Comma delimited list of .objs to import. Exclude the file extension.")
    parser.add_argument("-o", "--output", dest="output", type=str, required=False,
    help="Name of blend file to save to, if not the same as the one being opened.")

    # NOTE: with required=True, parse_args() exits with a usage error when
    # argv is empty, so no separate empty-argv check is needed here (the
    # old `if not argv:` branch was unreachable dead code).
    args = parser.parse_args(argv)

    # Guard against an explicitly empty value such as `-i ""` -- argparse's
    # required=True only guarantees the flag was supplied.
    if not args.input:
        print("input argument not given. aborting.")
        parser.print_help()
        return

    if not args.output:
        output = None
    else:
        output = args.output + ".blend"

    inputs = args.input.split(",")
    execute(inputs, output)
    # NOTE(review): message says "battered" although this action reheats;
    # left unchanged in case other tooling greps for it -- confirm.
    print("battered " + ", ".join(inputs))

if __name__ == "__main__":
    main()
hxxft/lynx-native | Core/third_party/jsoncpp/scons-tools/substinfile.py | 3 | 3511 | # Copyright 2010 The JsonCpp Authors
# Distributed under MIT license, or public domain if desired and
# recognized in your jurisdiction.
# See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
import re
from SCons.Script import * # the usual scons stuff you get in a SConscript
import collections
def generate(env):
    """
    Add builders and construction variables for the
    SubstInFile tool.

    Adds the SubstInFile builder, which substitutes the keys->values of
    SUBST_DICT from the source to the target.
    The values of SUBST_DICT first have any construction variables expanded
    (its keys are not expanded).
    If a value of SUBST_DICT is a python callable function, it is called and
    the result is expanded as the value.
    If there's more than one source and more than one target, each target gets
    substituted from the corresponding source.
    """
    def do_subst_in_file(targetfile, sourcefile, dict):
        """Replace all instances of the keys of dict with their values.
        For example, if dict is {'%VERSION%': '1.2345', '%BASE%': 'MyProg'},
        then all instances of %VERSION% in the file will be replaced with
        1.2345 etc.
        """
        # NOTE(review): the file is read in binary mode while re.sub() is
        # fed the (str) SUBST_DICT patterns; on Python 3 mixing bytes and
        # str raises TypeError -- confirm target interpreter before
        # changing the open() modes.
        try:
            f = open(sourcefile, 'rb')
            contents = f.read()
            f.close()
        except EnvironmentError:
            # narrowed from a bare `except:` -- only I/O failures should be
            # reported as unreadable; programming errors must propagate
            raise SCons.Errors.UserError("Can't read source file %s" % sourcefile)
        for (k, v) in list(dict.items()):
            contents = re.sub(k, v, contents)
        try:
            f = open(targetfile, 'wb')
            f.write(contents)
            f.close()
        except EnvironmentError:
            raise SCons.Errors.UserError("Can't write target file %s" % targetfile)
        return 0  # success

    def subst_in_file(target, source, env):
        """Builder action: substitute SUBST_DICT into each target/source pair."""
        if 'SUBST_DICT' not in env:
            raise SCons.Errors.UserError("SubstInFile requires SUBST_DICT to be set.")
        d = dict(env['SUBST_DICT'])  # copy it
        for (k, v) in list(d.items()):
            # BUG FIX: collections.Callable was removed in Python 3.10;
            # the callable() builtin works on every supported version.
            if callable(v):
                d[k] = env.subst(v()).replace('\\', '\\\\')
            elif SCons.Util.is_String(v):
                d[k] = env.subst(v).replace('\\', '\\\\')
            else:
                raise SCons.Errors.UserError("SubstInFile: key %s: %s must be a string or callable" % (k, repr(v)))
        # BUG FIX: the original returned from inside this loop, so only the
        # first target/source pair was ever substituted, contradicting the
        # documented multi-target behaviour.
        for (t, s) in zip(target, source):
            do_subst_in_file(str(t), str(s), d)
        return 0

    def subst_in_file_string(target, source, env):
        """This is what gets printed on the console."""
        return '\n'.join(['Substituting vars from %s into %s' % (str(s), str(t))
                          for (t, s) in zip(target, source)])

    def subst_emitter(target, source, env):
        """Add dependency from substituted SUBST_DICT to target.
        Returns original target, source tuple unchanged.
        """
        d = env['SUBST_DICT'].copy()  # copy it
        for (k, v) in list(d.items()):
            # BUG FIX: callable() instead of the removed collections.Callable
            if callable(v):
                d[k] = env.subst(v())
            elif SCons.Util.is_String(v):
                d[k] = env.subst(v)
        Depends(target, SCons.Node.Python.Value(d))
        return target, source

    ## env.Append(TOOLS = 'substinfile')   # this should be automaticaly done by Scons ?!?
    subst_action = SCons.Action.Action(subst_in_file, subst_in_file_string)
    env['BUILDERS']['SubstInFile'] = Builder(action=subst_action, emitter=subst_emitter)
def exists(env):
    """Tool availability check: substitution needs nothing external,
    so this tool is always available."""
    return True
| mit |
luhanhan/horizon | horizon/test/customization/cust_test1.py | 67 | 1146 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import horizon
from horizon import base
# Rename "cats" to "wildcats", ignore if panel doesn't exist
try:
cats = horizon.get_dashboard("cats")
cats.name = "WildCats"
except base.NotRegistered:
cats = None
# Disable tigers panel, ignore if panel doesn't exist
if cats:
try:
tigers = cats.get_panel("tigers")
cats.unregister(tigers.__class__)
except base.NotRegistered:
pass
# Remove dogs dashboard, ignore if dashboard doesn't exist
try:
dogs = horizon.get_dashboard("dogs")
horizon.unregister(dogs.__class__)
except base.NotRegistered:
pass
| apache-2.0 |
shawnwanderson/cmput404-project | venv/lib/python2.7/site-packages/django/core/checks/security/sessions.py | 477 | 2595 | from django.conf import settings
from .. import Tags, Warning, register
def add_session_cookie_message(message):
    """Append the shared secure-cookie rationale to *message*."""
    rationale = (
        " Using a secure-only session cookie makes it more difficult for "
        "network traffic sniffers to hijack user sessions."
    )
    return message + rationale
# Deployment warnings raised when SESSION_COOKIE_SECURE is off while
# sessions are in use: W010 for the sessions contrib app, W011 for the
# session middleware, W012 as the generic message when both apply.
W010 = Warning(
    add_session_cookie_message(
        "You have 'django.contrib.sessions' in your INSTALLED_APPS, "
        "but you have not set SESSION_COOKIE_SECURE to True."
    ),
    id='security.W010',
)

W011 = Warning(
    add_session_cookie_message(
        "You have 'django.contrib.sessions.middleware.SessionMiddleware' "
        "in your MIDDLEWARE_CLASSES, but you have not set "
        "SESSION_COOKIE_SECURE to True."
    ),
    id='security.W011',
)

W012 = Warning(
    add_session_cookie_message("SESSION_COOKIE_SECURE is not set to True."),
    id='security.W012',
)
def add_httponly_message(message):
    """Append the shared HttpOnly-cookie rationale to *message*."""
    rationale = (
        " Using an HttpOnly session cookie makes it more difficult for "
        "cross-site scripting attacks to hijack user sessions."
    )
    return message + rationale
# Deployment warnings raised when SESSION_COOKIE_HTTPONLY is off while
# sessions are in use: W013 for the sessions contrib app, W014 for the
# session middleware, W015 as the generic message when both apply.
W013 = Warning(
    add_httponly_message(
        "You have 'django.contrib.sessions' in your INSTALLED_APPS, "
        "but you have not set SESSION_COOKIE_HTTPONLY to True.",
    ),
    id='security.W013',
)

W014 = Warning(
    add_httponly_message(
        "You have 'django.contrib.sessions.middleware.SessionMiddleware' "
        "in your MIDDLEWARE_CLASSES, but you have not set "
        "SESSION_COOKIE_HTTPONLY to True."
    ),
    id='security.W014',
)

W015 = Warning(
    add_httponly_message("SESSION_COOKIE_HTTPONLY is not set to True."),
    id='security.W015',
)
@register(Tags.security, deploy=True)
def check_session_cookie_secure(app_configs, **kwargs):
    """Deploy check: warn when SESSION_COOKIE_SECURE is off while sessions
    are enabled through the contrib app and/or the session middleware."""
    if settings.SESSION_COOKIE_SECURE:
        return []
    warnings = []
    if _session_app():
        warnings.append(W010)
    if _session_middleware():
        warnings.append(W011)
    # Both mechanisms enabled: collapse into the single generic warning.
    if len(warnings) > 1:
        return [W012]
    return warnings
@register(Tags.security, deploy=True)
def check_session_cookie_httponly(app_configs, **kwargs):
    """Deploy check: warn when SESSION_COOKIE_HTTPONLY is off while sessions
    are enabled through the contrib app and/or the session middleware."""
    if settings.SESSION_COOKIE_HTTPONLY:
        return []
    warnings = []
    if _session_app():
        warnings.append(W013)
    if _session_middleware():
        warnings.append(W014)
    # Both mechanisms enabled: collapse into the single generic warning.
    if len(warnings) > 1:
        return [W015]
    return warnings
def _session_middleware():
    """True when Django's session middleware is configured."""
    middleware = "django.contrib.sessions.middleware.SessionMiddleware"
    return middleware in settings.MIDDLEWARE_CLASSES
def _session_app():
    """True when the sessions contrib app is installed."""
    app_name = "django.contrib.sessions"
    return app_name in settings.INSTALLED_APPS
| gpl-3.0 |
joelthompson/ansible-modules-core | network/openswitch/ops_command.py | 16 | 4786 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: ops_command
version_added: "2.1"
author: "Peter sprygada (@privateip)"
short_description: Run arbitrary commands on OpenSwitch devices.
description:
- Sends arbitrary commands to an OpenSwitch node and returns the results
read from the device. The M(ops_command) module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: openswitch
options:
commands:
description:
- List of commands to send to the remote ops device over the
configured provider. The resulting output from the command
is returned. If the I(waitfor) argument is provided, the
module is not returned until the condition is satisfied or
the number of retires as expired.
required: true
waitfor:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for a each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
required: false
default: null
retries:
description:
- Specifies the number of retries a command should by tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
waitfor conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
- ops_command:
commands:
- show version
register: output
- ops_command:
commands:
- show version
waitfor:
- "result[0] contains OpenSwitch"
- ops_command:
commands:
- show version
- show interfaces
"""
RETURN = """
stdout:
description: the set of responses from the commands
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: the conditionals that failed
retured: failed
type: list
sample: ['...', '...']
"""
import time
def to_lines(stdout):
for item in stdout:
if isinstance(item, basestring):
item = str(item).split('\n')
yield item
def main():
    """Module entry point: run the requested commands, poll until every
    waitfor conditional is satisfied (or retries are exhausted) and exit
    via module.exit_json with the collected stdout.
    """
    spec = dict(
        commands=dict(type='list'),
        waitfor=dict(type='list'),
        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int'),
        transport=dict(default='cli', choices=['cli'])
    )

    module = get_module(argument_spec=spec,
                        supports_check_mode=True)

    commands = module.params['commands']
    retries = module.params['retries']
    interval = module.params['interval']

    try:
        queue = set()
        for entry in (module.params['waitfor'] or list()):
            queue.add(Conditional(entry))
    except AttributeError as exc:
        # 'except ... as' works on Python 2.6+ and 3.x; the old comma form
        # was a SyntaxError on Python 3.  str(exc) replaces the Python 2
        # only exc.message attribute with the equivalent text.
        module.fail_json(msg=str(exc))

    result = dict(changed=False)

    while retries > 0:
        response = module.execute(commands)
        result['stdout'] = response

        # Drop every conditional that the latest response satisfies.
        for item in list(queue):
            if item(response):
                queue.remove(item)

        if not queue:
            break

        time.sleep(interval)
        retries -= 1
    else:
        # Loop exhausted without satisfying all conditionals.
        failed_conditions = [item.raw for item in queue]
        module.fail_json(msg='timeout waiting for value', failed_conditions=failed_conditions)

    result['stdout_lines'] = list(to_lines(result['stdout']))
    return module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.shell import *
from ansible.module_utils.netcfg import *
from ansible.module_utils.openswitch import *
# Standard module entry-point guard.
if __name__ == '__main__':
    main()
| gpl-3.0 |
maxdeliso/elevatorSim | Lib/test/test_traceback.py | 34 | 13164 | """Test cases for traceback module"""
from _testcapi import traceback_print, exception_print
from io import StringIO
import sys
import unittest
import re
from test.support import run_unittest, Error, captured_output
from test.support import TESTFN, unlink
import traceback
class SyntaxTracebackCases(unittest.TestCase):
    """Formatting of SyntaxError/IndentationError: caret placement,
    missing-caret cases, and source-file-encoding handling."""
    # For now, a very minimal set of tests. I want to be sure that
    # formatting of SyntaxErrors works based on changes for 2.1.

    def get_exception_format(self, func, exc):
        # Invoke func (which must raise exc) and return the list of lines
        # produced by traceback.format_exception_only for it.
        try:
            func()
        except exc as value:
            return traceback.format_exception_only(exc, value)
        else:
            raise ValueError("call did not raise exception")

    def syntax_error_with_caret(self):
        compile("def fact(x):\n\treturn x!\n", "?", "exec")

    def syntax_error_with_caret_2(self):
        compile("1 +\n", "?", "exec")

    def syntax_error_bad_indentation(self):
        compile("def spam():\n  print(1)\n print(2)", "?", "exec")

    def test_caret(self):
        err = self.get_exception_format(self.syntax_error_with_caret,
                                        SyntaxError)
        self.assertEqual(len(err), 4)
        self.assertTrue(err[1].strip() == "return x!")
        self.assertIn("^", err[2]) # third line has caret
        self.assertEqual(err[1].find("!"), err[2].find("^")) # in the right place

        err = self.get_exception_format(self.syntax_error_with_caret_2,
                                        SyntaxError)
        self.assertIn("^", err[2]) # third line has caret
        self.assertTrue(err[2].count('\n') == 1) # and no additional newline
        self.assertTrue(err[1].find("+") == err[2].find("^")) # in the right place

    def test_nocaret(self):
        # A SyntaxError built without an offset gets no caret line.
        exc = SyntaxError("error", ("x.py", 23, None, "bad syntax"))
        err = traceback.format_exception_only(SyntaxError, exc)
        self.assertEqual(len(err), 3)
        self.assertEqual(err[1].strip(), "bad syntax")

    def test_bad_indentation(self):
        err = self.get_exception_format(self.syntax_error_bad_indentation,
                                        IndentationError)
        self.assertEqual(len(err), 4)
        self.assertEqual(err[1].strip(), "print(2)")
        self.assertIn("^", err[2])
        self.assertEqual(err[1].find(")"), err[2].find("^"))

    def test_base_exception(self):
        # Test that exceptions derived from BaseException are formatted right
        e = KeyboardInterrupt()
        lst = traceback.format_exception_only(e.__class__, e)
        self.assertEqual(lst, ['KeyboardInterrupt\n'])

    def test_format_exception_only_bad__str__(self):
        # A broken __str__ must not propagate; the formatter falls back to
        # an '<unprintable ...>' placeholder instead.
        class X(Exception):
            def __str__(self):
                1/0
        err = traceback.format_exception_only(X, X())
        self.assertEqual(len(err), 1)
        str_value = '<unprintable %s object>' % X.__name__
        if X.__module__ in ('__main__', 'builtins'):
            str_name = X.__name__
        else:
            str_name = '.'.join([X.__module__, X.__name__])
        self.assertEqual(err[0], "%s: %s\n" % (str_name, str_value))

    def test_without_exception(self):
        err = traceback.format_exception_only(None, None)
        self.assertEqual(err, ['None\n'])

    def test_encoded_file(self):
        # Test that tracebacks are correctly printed for encoded source files:
        # - correct line number (Issue2384)
        # - respect file encoding (Issue3975)
        import tempfile, sys, subprocess, os

        # The spawned subprocess has its stdout redirected to a PIPE, and its
        # encoding may be different from the current interpreter, on Windows
        # at least.
        process = subprocess.Popen([sys.executable, "-c",
                                    "import sys; print(sys.stdout.encoding)"],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        stdout, stderr = process.communicate()
        output_encoding = str(stdout, 'ascii').splitlines()[0]

        def do_test(firstlines, message, charset, lineno):
            # Raise the message in a subprocess, and catch the output
            try:
                output = open(TESTFN, "w", encoding=charset)
                output.write("""{0}if 1:
                import traceback;
                raise RuntimeError('{1}')
                """.format(firstlines, message))
                output.close()
                process = subprocess.Popen([sys.executable, TESTFN],
                                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                stdout, stderr = process.communicate()
                stdout = stdout.decode(output_encoding).splitlines()
            finally:
                unlink(TESTFN)

            # The source lines are encoded with the 'backslashreplace' handler
            encoded_message = message.encode(output_encoding,
                                             'backslashreplace')
            # and we just decoded them with the output_encoding.
            message_ascii = encoded_message.decode(output_encoding)

            err_line = "raise RuntimeError('{0}')".format(message_ascii)
            err_msg = "RuntimeError: {0}".format(message_ascii)

            self.assertIn(("line %s" % lineno), stdout[1],
                          "Invalid line number: {0!r} instead of {1}".format(
                              stdout[1], lineno))
            self.assertTrue(stdout[2].endswith(err_line),
                            "Invalid traceback line: {0!r} instead of {1!r}".format(
                                stdout[2], err_line))
            self.assertTrue(stdout[3] == err_msg,
                            "Invalid error message: {0!r} instead of {1!r}".format(
                                stdout[3], err_msg))

        do_test("", "foo", "ascii", 3)
        for charset in ("ascii", "iso-8859-1", "utf-8", "GBK"):
            if charset == "ascii":
                text = "foo"
            elif charset == "GBK":
                text = "\u4E02\u5100"
            else:
                text = "h\xe9 ho"
            do_test("# coding: {0}\n".format(charset),
                    text, charset, 4)
            do_test("#!shebang\n# coding: {0}\n".format(charset),
                    text, charset, 5)
class TracebackFormatTests(unittest.TestCase):
    """Compare traceback.format_tb output with _testcapi.traceback_print."""

    def test_traceback_format(self):
        try:
            raise KeyError('blah')
        except KeyError:
            type_, value, tb = sys.exc_info()
            traceback_fmt = 'Traceback (most recent call last):\n' + \
                            ''.join(traceback.format_tb(tb))
            # traceback_print (from _testcapi) writes the C-level rendering
            # of the same traceback into file_.
            file_ = StringIO()
            traceback_print(tb, file_)
            python_fmt = file_.getvalue()
        else:
            raise Error("unable to create test traceback string")

        # Make sure that Python and the traceback module format the same thing
        self.assertEqual(traceback_fmt, python_fmt)

        # Make sure that the traceback is properly indented.
        tb_lines = python_fmt.splitlines()
        self.assertEqual(len(tb_lines), 3)
        banner, location, source_line = tb_lines
        self.assertTrue(banner.startswith('Traceback'))
        self.assertTrue(location.startswith(' File'))
        self.assertTrue(source_line.startswith(' raise'))
# Banner lines the interpreter prints between chained tracebacks.
cause_message = (
    "\nThe above exception was the direct cause "
    "of the following exception:\n\n")

context_message = (
    "\nDuring handling of the above exception, "
    "another exception occurred:\n\n")

# Regex that splits a full report at either chaining banner; the capture
# group keeps the banner itself in the split result.
_banner_patterns = (re.escape(cause_message), re.escape(context_message))
boundaries = re.compile('(%s|%s)' % _banner_patterns)
class BaseExceptionReportingTests:
    """Mixin exercising exception-report formatting.  Concrete subclasses
    supply get_report() (traceback module vs. interpreter C code)."""

    def get_exception(self, exception_or_callable):
        # Accept either a ready-made exception instance or a callable that
        # raises one; return the instance (with __traceback__ populated).
        if isinstance(exception_or_callable, Exception):
            return exception_or_callable
        try:
            exception_or_callable()
        except Exception as e:
            return e

    def zero_div(self):
        1/0 # In zero_div

    def check_zero_div(self, msg):
        # The last three lines of a ZeroDivisionError report: location,
        # source line, exception line.
        lines = msg.splitlines()
        self.assertTrue(lines[-3].startswith(' File'))
        self.assertIn('1/0 # In zero_div', lines[-2])
        self.assertTrue(lines[-1].startswith('ZeroDivisionError'), lines[-1])

    def test_simple(self):
        try:
            1/0 # Marker
        except ZeroDivisionError as _:
            e = _
        lines = self.get_report(e).splitlines()
        self.assertEqual(len(lines), 4)
        self.assertTrue(lines[0].startswith('Traceback'))
        self.assertTrue(lines[1].startswith(' File'))
        self.assertIn('1/0 # Marker', lines[2])
        self.assertTrue(lines[3].startswith('ZeroDivisionError'))

    def test_cause(self):
        def inner_raise():
            try:
                self.zero_div()
            except ZeroDivisionError as e:
                raise KeyError from e
        def outer_raise():
            inner_raise() # Marker
        blocks = boundaries.split(self.get_report(outer_raise))
        self.assertEqual(len(blocks), 3)
        self.assertEqual(blocks[1], cause_message)
        self.check_zero_div(blocks[0])
        self.assertIn('inner_raise() # Marker', blocks[2])

    def test_context(self):
        def inner_raise():
            try:
                self.zero_div()
            except ZeroDivisionError:
                raise KeyError
        def outer_raise():
            inner_raise() # Marker
        blocks = boundaries.split(self.get_report(outer_raise))
        self.assertEqual(len(blocks), 3)
        self.assertEqual(blocks[1], context_message)
        self.check_zero_div(blocks[0])
        self.assertIn('inner_raise() # Marker', blocks[2])

    def test_context_suppression(self):
        # 'raise ... from None' must suppress the implicit context chain.
        try:
            try:
                raise Exception
            except:
                raise ZeroDivisionError from None
        except ZeroDivisionError as _:
            e = _
        lines = self.get_report(e).splitlines()
        self.assertEqual(len(lines), 4)
        self.assertTrue(lines[0].startswith('Traceback'))
        self.assertTrue(lines[1].startswith(' File'))
        self.assertIn('ZeroDivisionError from None', lines[2])
        self.assertTrue(lines[3].startswith('ZeroDivisionError'))

    def test_cause_and_context(self):
        # When both a cause and a context are set, only the cause should be
        # displayed and the context should be muted.
        def inner_raise():
            try:
                self.zero_div()
            except ZeroDivisionError as _e:
                e = _e
            try:
                xyzzy
            except NameError:
                raise KeyError from e
        def outer_raise():
            inner_raise() # Marker
        blocks = boundaries.split(self.get_report(outer_raise))
        self.assertEqual(len(blocks), 3)
        self.assertEqual(blocks[1], cause_message)
        self.check_zero_div(blocks[0])
        self.assertIn('inner_raise() # Marker', blocks[2])

    def test_cause_recursive(self):
        def inner_raise():
            try:
                try:
                    self.zero_div()
                except ZeroDivisionError as e:
                    z = e
                    raise KeyError from e
            except KeyError as e:
                raise z from e
        def outer_raise():
            inner_raise() # Marker
        blocks = boundaries.split(self.get_report(outer_raise))
        self.assertEqual(len(blocks), 3)
        self.assertEqual(blocks[1], cause_message)
        # The first block is the KeyError raised from the ZeroDivisionError
        self.assertIn('raise KeyError from e', blocks[0])
        self.assertNotIn('1/0', blocks[0])
        # The second block (apart from the boundary) is the ZeroDivisionError
        # re-raised from the KeyError
        self.assertIn('inner_raise() # Marker', blocks[2])
        self.check_zero_div(blocks[2])

    def test_syntax_error_offset_at_eol(self):
        # See #10186.
        def e():
            raise SyntaxError('', ('', 0, 5, 'hello'))
        msg = self.get_report(e).splitlines()
        self.assertEqual(msg[-2], " ^")
        def e():
            exec("x = 5 | 4 |")
        msg = self.get_report(e).splitlines()
        self.assertEqual(msg[-2], ' ^')
class PyExcReportingTests(BaseExceptionReportingTests, unittest.TestCase):
    #
    # This checks reporting through the 'traceback' module, with both
    # format_exception() and print_exception().
    #
    def get_report(self, e):
        e = self.get_exception(e)
        s = ''.join(
            traceback.format_exception(type(e), e, e.__traceback__))
        with captured_output("stderr") as sio:
            traceback.print_exception(type(e), e, e.__traceback__)
        # print_exception() must emit exactly what format_exception() built.
        self.assertEqual(sio.getvalue(), s)
        return s
class CExcReportingTests(BaseExceptionReportingTests, unittest.TestCase):
    #
    # This checks built-in reporting by the interpreter.
    #
    def get_report(self, e):
        e = self.get_exception(e)
        # exception_print (_testcapi) goes through the interpreter's
        # C-level display path, writing the report to stderr.
        with captured_output("stderr") as s:
            exception_print(e)
        return s.getvalue()
# Entry point used by regrtest / direct invocation.
def test_main():
    run_unittest(__name__)

if __name__ == "__main__":
    test_main()
| bsd-2-clause |
dliessi/frescobaldi | frescobaldi_app/viewers/documents.py | 1 | 4042 | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Code to load and manage PDF documents to view.
"""
import os
from PyQt5.QtCore import QSettings
try:
import popplerqt5
except ImportError:
popplerqt5 = None
import app
import plugin
import resultfiles
import signals
import pagedview
# This signal gets emitted when a finished Job has created new PDF
# document(s); it is called with (document, job) -- see _on_job_finished.
documentUpdated = signals.Signal() # Document
@app.jobFinished.connect
def _on_job_finished(document, job):
    """Rescan the document's result files when a job finishes and announce
    newly created PDF documents via the module-level signal."""
    refreshed = group(document).update()
    if refreshed:
        documentUpdated(document, job)
def group(document):
    """Returns a DocumentGroup instance for the given text document.

    Presumably one instance per document, managed by
    plugin.DocumentPlugin.instance -- confirm in the plugin module.
    """
    return DocumentGroup.instance(document)
class DocumentGroup(plugin.DocumentPlugin):
    """Represents a group of PDF documents, created by the text document it belongs to.
    Multiple MusicView instances can use this group, they can store the positions
    of the Documents in the viewer themselves via a weak-key dictionary on the Document
    instances returned by documents(). On update() these Document instances will be reused.
    The global documentUpdated(Document) signal will be emitted when the global
    app.jobFinished() signal causes a reload of documents in a group.
    """
    def __init__(self, document):
        # None means "never scanned yet"; documents() triggers the first scan.
        self._documents = None
        # Rescan whenever the text document is (re)loaded.  The -100 is
        # presumably a connection priority -- confirm in the signals module.
        document.loaded.connect(self.update, -100)

    def documents(self):
        """Returns the list of PDF Document objects created by our text document."""
        # If the list is asked for the very first time, update
        if self._documents is None:
            self._documents = []
            self.update()
        # Return a copy so callers cannot mutate our cached list.
        return self._documents[:]

    def update(self, newer=None):
        """Queries the resultfiles of this text document for PDF files and loads them.

        Returns True if new documents were loaded.

        If newer is True, only PDF files newer than the source document are returned.
        If newer is False, all PDF files are returned.
        If newer is None (default), the setting from the configuration is used.
        """
        if newer is None:
            newer = QSettings().value("musicview/newer_files_only", True, bool)
        results = resultfiles.results(self.document())
        files = results.files(".pdf", newer)
        if files:
            # reuse the older Document objects, they will probably be displaying
            # (about) the same documents, and so the viewer will remember their position.
            d = {}
            if self._documents:
                for doc in self._documents:
                    if doc.filename() in files:
                        d[doc.filename()] = doc
            documents = []
            for filename in files:
                doc = d.get(filename)
                if doc:
                    doc.invalidate()
                elif popplerqt5:
                    doc = pagedview.loadPdf(filename)
                    doc.ispresent = os.path.isfile(filename)
                else:
                    # popplerqt5 is unavailable: skip files we cannot load.
                    continue
                doc.updated = newer or results.is_newer(filename)
                documents.append(doc)
            self._documents = documents
            return True
        # NOTE(review): implicitly returns None (falsy) when no PDF files
        # are found, which callers treat as "nothing new".
| gpl-2.0 |
jar349/silverviewmc | XLS Perm Parser/PermParser/xlrd/formula.py | 77 | 94301 | # -*- coding: cp1252 -*-
##
# Module for parsing/evaluating Microsoft Excel formulas.
#
# <p>Copyright © 2005-2012 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under
# a BSD-style licence.</p>
##
# No part of the content of this file was derived from the works of David Giffin.
from __future__ import print_function
import copy
from struct import unpack
from .timemachine import *
from .biffh import unpack_unicode_update_pos, unpack_string_update_pos, \
XLRDError, hex_char_dump, error_text_from_code, BaseObject
# Public API of this module.
__all__ = [
    'oBOOL', 'oERR', 'oNUM', 'oREF', 'oREL', 'oSTRG', 'oUNK',
    'decompile_formula',
    'dump_formula',
    'evaluate_name_formula',
    'okind_dict',
    'rangename3d', 'rangename3drel', 'cellname', 'cellnameabs', 'colname',
    'FMLA_TYPE_CELL',
    'FMLA_TYPE_SHARED',
    'FMLA_TYPE_ARRAY',
    'FMLA_TYPE_COND_FMT',
    'FMLA_TYPE_DATA_VAL',
    'FMLA_TYPE_NAME',
    ]
# Formula-type bit flags: each kind of formula record gets its own bit so
# token legality can be expressed as a mask (see _TOKEN_NOT_ALLOWED below).
FMLA_TYPE_CELL = 1
FMLA_TYPE_SHARED = 2
FMLA_TYPE_ARRAY = 4
FMLA_TYPE_COND_FMT = 8
FMLA_TYPE_DATA_VAL = 16
FMLA_TYPE_NAME = 32
# Union of all six flags above (1|2|4|8|16|32).
ALL_FMLA_TYPES = 63

# Descriptive name for each individual flag.
FMLA_TYPEDESCR_MAP = {
    1 : 'CELL',
    2 : 'SHARED',
    4 : 'ARRAY',
    8 : 'COND-FMT',
    16: 'DATA-VAL',
    32: 'NAME',
}
# Maps a formula token opcode to the bitmask of formula types in which that
# token may NOT appear.  Bound directly to the dict's .get method, so a
# lookup of an unlisted opcode cheaply returns None (allowed everywhere).
_TOKEN_NOT_ALLOWED = {
    0x01: ALL_FMLA_TYPES - FMLA_TYPE_CELL, # tExp
    0x02: ALL_FMLA_TYPES - FMLA_TYPE_CELL, # tTbl
    0x0F: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tIsect
    0x10: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tUnion/List
    0x11: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tRange
    0x20: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tArray
    0x23: FMLA_TYPE_SHARED, # tName
    0x39: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tNameX
    0x3A: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tRef3d
    0x3B: FMLA_TYPE_SHARED + FMLA_TYPE_COND_FMT + FMLA_TYPE_DATA_VAL, # tArea3d
    0x2C: FMLA_TYPE_CELL + FMLA_TYPE_ARRAY, # tRefN
    0x2D: FMLA_TYPE_CELL + FMLA_TYPE_ARRAY, # tAreaN
    # plus weird stuff like tMem*
    }.get
# Operand-kind codes attached to evaluated/decompiled formula operands;
# okind_dict below gives the symbolic name for each code.
oBOOL = 3
oERR = 4
oMSNG = 5 # tMissArg
oNUM = 2
oREF = -1
oREL = -2
oSTRG = 1
oUNK = 0

okind_dict = {
    -2: "oREL",
    -1: "oREF",
    0 : "oUNK",
    1 : "oSTRG",
    2 : "oNUM",
    3 : "oBOOL",
    4 : "oERR",
    5 : "oMSNG",
}

# Argument separator used when rendering formula text.
listsep = ',' #### probably should depend on locale
# sztabN[opcode] -> the number of bytes to consume.
# -1 means variable
# -2 means this opcode not implemented in this version.
# Which N to use? Depends on biff_version; see szdict.
sztab0 = [-2, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -2, -1, 8, 4, 2, 2, 3, 9, 8, 2, 3, 8, 4, 7, 5, 5, 5, 2, 4, 7, 4, 7, 2, 2, -2, -2, -2, -2, -2, -2, -2, -2, 3, -2, -2, -2, -2, -2, -2, -2]
sztab1 = [-2, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -2, -1, 11, 5, 2, 2, 3, 9, 9, 2, 3, 11, 4, 7, 7, 7, 7, 3, 4, 7, 4, 7, 3, 3, -2, -2, -2, -2, -2, -2, -2, -2, 3, -2, -2, -2, -2, -2, -2, -2]
sztab2 = [-2, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -2, -1, 11, 5, 2, 2, 3, 9, 9, 3, 4, 11, 4, 7, 7, 7, 7, 3, 4, 7, 4, 7, 3, 3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2]
sztab3 = [-2, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -2, -1, -2, -2, 2, 2, 3, 9, 9, 3, 4, 15, 4, 7, 7, 7, 7, 3, 4, 7, 4, 7, 3, 3, -2, -2, -2, -2, -2, -2, -2, -2, -2, 25, 18, 21, 18, 21, -2, -2]
sztab4 = [-2, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -2, -2, 2, 2, 3, 9, 9, 3, 4, 5, 5, 9, 7, 7, 7, 3, 5, 9, 5, 9, 3, 3, -2, -2, -2, -2, -2, -2, -2, -2, -2, 7, 7, 11, 7, 11, -2, -2]

# Maps biff_version to the appropriate size table above.
szdict = {
    20 : sztab0,
    21 : sztab0,
    30 : sztab1,
    40 : sztab2,
    45 : sztab2,
    50 : sztab3,
    70 : sztab3,
    80 : sztab4,
}

# For debugging purposes ... the name for each opcode
# (without the prefix "t" used on OOo docs)
onames = ['Unk00', 'Exp', 'Tbl', 'Add', 'Sub', 'Mul', 'Div', 'Power', 'Concat', 'LT', 'LE', 'EQ', 'GE', 'GT', 'NE', 'Isect', 'List', 'Range', 'Uplus', 'Uminus', 'Percent', 'Paren', 'MissArg', 'Str', 'Extended', 'Attr', 'Sheet', 'EndSheet', 'Err', 'Bool', 'Int', 'Num', 'Array', 'Func', 'FuncVar', 'Name', 'Ref', 'Area', 'MemArea', 'MemErr', 'MemNoMem', 'MemFunc', 'RefErr', 'AreaErr', 'RefN', 'AreaN', 'MemAreaN', 'MemNoMemN', '', '', '', '', '', '', '', '', 'FuncCE', 'NameX', 'Ref3d', 'Area3d', 'RefErr3d', 'AreaErr3d', '', '']
func_defs = {
# index: (name, min#args, max#args, flags, #known_args, return_type, kargs)
0 : ('COUNT', 0, 30, 0x04, 1, 'V', 'R'),
1 : ('IF', 2, 3, 0x04, 3, 'V', 'VRR'),
2 : ('ISNA', 1, 1, 0x02, 1, 'V', 'V'),
3 : ('ISERROR', 1, 1, 0x02, 1, 'V', 'V'),
4 : ('SUM', 0, 30, 0x04, 1, 'V', 'R'),
5 : ('AVERAGE', 1, 30, 0x04, 1, 'V', 'R'),
6 : ('MIN', 1, 30, 0x04, 1, 'V', 'R'),
7 : ('MAX', 1, 30, 0x04, 1, 'V', 'R'),
8 : ('ROW', 0, 1, 0x04, 1, 'V', 'R'),
9 : ('COLUMN', 0, 1, 0x04, 1, 'V', 'R'),
10 : ('NA', 0, 0, 0x02, 0, 'V', ''),
11 : ('NPV', 2, 30, 0x04, 2, 'V', 'VR'),
12 : ('STDEV', 1, 30, 0x04, 1, 'V', 'R'),
13 : ('DOLLAR', 1, 2, 0x04, 1, 'V', 'V'),
14 : ('FIXED', 2, 3, 0x04, 3, 'V', 'VVV'),
15 : ('SIN', 1, 1, 0x02, 1, 'V', 'V'),
16 : ('COS', 1, 1, 0x02, 1, 'V', 'V'),
17 : ('TAN', 1, 1, 0x02, 1, 'V', 'V'),
18 : ('ATAN', 1, 1, 0x02, 1, 'V', 'V'),
19 : ('PI', 0, 0, 0x02, 0, 'V', ''),
20 : ('SQRT', 1, 1, 0x02, 1, 'V', 'V'),
21 : ('EXP', 1, 1, 0x02, 1, 'V', 'V'),
22 : ('LN', 1, 1, 0x02, 1, 'V', 'V'),
23 : ('LOG10', 1, 1, 0x02, 1, 'V', 'V'),
24 : ('ABS', 1, 1, 0x02, 1, 'V', 'V'),
25 : ('INT', 1, 1, 0x02, 1, 'V', 'V'),
26 : ('SIGN', 1, 1, 0x02, 1, 'V', 'V'),
27 : ('ROUND', 2, 2, 0x02, 2, 'V', 'VV'),
28 : ('LOOKUP', 2, 3, 0x04, 2, 'V', 'VR'),
29 : ('INDEX', 2, 4, 0x0c, 4, 'R', 'RVVV'),
30 : ('REPT', 2, 2, 0x02, 2, 'V', 'VV'),
31 : ('MID', 3, 3, 0x02, 3, 'V', 'VVV'),
32 : ('LEN', 1, 1, 0x02, 1, 'V', 'V'),
33 : ('VALUE', 1, 1, 0x02, 1, 'V', 'V'),
34 : ('TRUE', 0, 0, 0x02, 0, 'V', ''),
35 : ('FALSE', 0, 0, 0x02, 0, 'V', ''),
36 : ('AND', 1, 30, 0x04, 1, 'V', 'R'),
37 : ('OR', 1, 30, 0x04, 1, 'V', 'R'),
38 : ('NOT', 1, 1, 0x02, 1, 'V', 'V'),
39 : ('MOD', 2, 2, 0x02, 2, 'V', 'VV'),
40 : ('DCOUNT', 3, 3, 0x02, 3, 'V', 'RRR'),
41 : ('DSUM', 3, 3, 0x02, 3, 'V', 'RRR'),
42 : ('DAVERAGE', 3, 3, 0x02, 3, 'V', 'RRR'),
43 : ('DMIN', 3, 3, 0x02, 3, 'V', 'RRR'),
44 : ('DMAX', 3, 3, 0x02, 3, 'V', 'RRR'),
45 : ('DSTDEV', 3, 3, 0x02, 3, 'V', 'RRR'),
46 : ('VAR', 1, 30, 0x04, 1, 'V', 'R'),
47 : ('DVAR', 3, 3, 0x02, 3, 'V', 'RRR'),
48 : ('TEXT', 2, 2, 0x02, 2, 'V', 'VV'),
49 : ('LINEST', 1, 4, 0x04, 4, 'A', 'RRVV'),
50 : ('TREND', 1, 4, 0x04, 4, 'A', 'RRRV'),
51 : ('LOGEST', 1, 4, 0x04, 4, 'A', 'RRVV'),
52 : ('GROWTH', 1, 4, 0x04, 4, 'A', 'RRRV'),
56 : ('PV', 3, 5, 0x04, 5, 'V', 'VVVVV'),
57 : ('FV', 3, 5, 0x04, 5, 'V', 'VVVVV'),
58 : ('NPER', 3, 5, 0x04, 5, 'V', 'VVVVV'),
59 : ('PMT', 3, 5, 0x04, 5, 'V', 'VVVVV'),
60 : ('RATE', 3, 6, 0x04, 6, 'V', 'VVVVVV'),
61 : ('MIRR', 3, 3, 0x02, 3, 'V', 'RVV'),
62 : ('IRR', 1, 2, 0x04, 2, 'V', 'RV'),
63 : ('RAND', 0, 0, 0x0a, 0, 'V', ''),
64 : ('MATCH', 2, 3, 0x04, 3, 'V', 'VRR'),
65 : ('DATE', 3, 3, 0x02, 3, 'V', 'VVV'),
66 : ('TIME', 3, 3, 0x02, 3, 'V', 'VVV'),
67 : ('DAY', 1, 1, 0x02, 1, 'V', 'V'),
68 : ('MONTH', 1, 1, 0x02, 1, 'V', 'V'),
69 : ('YEAR', 1, 1, 0x02, 1, 'V', 'V'),
70 : ('WEEKDAY', 1, 2, 0x04, 2, 'V', 'VV'),
71 : ('HOUR', 1, 1, 0x02, 1, 'V', 'V'),
72 : ('MINUTE', 1, 1, 0x02, 1, 'V', 'V'),
73 : ('SECOND', 1, 1, 0x02, 1, 'V', 'V'),
74 : ('NOW', 0, 0, 0x0a, 0, 'V', ''),
75 : ('AREAS', 1, 1, 0x02, 1, 'V', 'R'),
76 : ('ROWS', 1, 1, 0x02, 1, 'V', 'R'),
77 : ('COLUMNS', 1, 1, 0x02, 1, 'V', 'R'),
78 : ('OFFSET', 3, 5, 0x04, 5, 'R', 'RVVVV'),
82 : ('SEARCH', 2, 3, 0x04, 3, 'V', 'VVV'),
83 : ('TRANSPOSE', 1, 1, 0x02, 1, 'A', 'A'),
86 : ('TYPE', 1, 1, 0x02, 1, 'V', 'V'),
92 : ('SERIESSUM', 4, 4, 0x02, 4, 'V', 'VVVA'),
97 : ('ATAN2', 2, 2, 0x02, 2, 'V', 'VV'),
98 : ('ASIN', 1, 1, 0x02, 1, 'V', 'V'),
99 : ('ACOS', 1, 1, 0x02, 1, 'V', 'V'),
100: ('CHOOSE', 2, 30, 0x04, 2, 'V', 'VR'),
101: ('HLOOKUP', 3, 4, 0x04, 4, 'V', 'VRRV'),
102: ('VLOOKUP', 3, 4, 0x04, 4, 'V', 'VRRV'),
105: ('ISREF', 1, 1, 0x02, 1, 'V', 'R'),
109: ('LOG', 1, 2, 0x04, 2, 'V', 'VV'),
111: ('CHAR', 1, 1, 0x02, 1, 'V', 'V'),
112: ('LOWER', 1, 1, 0x02, 1, 'V', 'V'),
113: ('UPPER', 1, 1, 0x02, 1, 'V', 'V'),
114: ('PROPER', 1, 1, 0x02, 1, 'V', 'V'),
115: ('LEFT', 1, 2, 0x04, 2, 'V', 'VV'),
116: ('RIGHT', 1, 2, 0x04, 2, 'V', 'VV'),
117: ('EXACT', 2, 2, 0x02, 2, 'V', 'VV'),
118: ('TRIM', 1, 1, 0x02, 1, 'V', 'V'),
119: ('REPLACE', 4, 4, 0x02, 4, 'V', 'VVVV'),
120: ('SUBSTITUTE', 3, 4, 0x04, 4, 'V', 'VVVV'),
121: ('CODE', 1, 1, 0x02, 1, 'V', 'V'),
124: ('FIND', 2, 3, 0x04, 3, 'V', 'VVV'),
125: ('CELL', 1, 2, 0x0c, 2, 'V', 'VR'),
126: ('ISERR', 1, 1, 0x02, 1, 'V', 'V'),
127: ('ISTEXT', 1, 1, 0x02, 1, 'V', 'V'),
128: ('ISNUMBER', 1, 1, 0x02, 1, 'V', 'V'),
129: ('ISBLANK', 1, 1, 0x02, 1, 'V', 'V'),
130: ('T', 1, 1, 0x02, 1, 'V', 'R'),
131: ('N', 1, 1, 0x02, 1, 'V', 'R'),
140: ('DATEVALUE', 1, 1, 0x02, 1, 'V', 'V'),
141: ('TIMEVALUE', 1, 1, 0x02, 1, 'V', 'V'),
142: ('SLN', 3, 3, 0x02, 3, 'V', 'VVV'),
143: ('SYD', 4, 4, 0x02, 4, 'V', 'VVVV'),
144: ('DDB', 4, 5, 0x04, 5, 'V', 'VVVVV'),
148: ('INDIRECT', 1, 2, 0x0c, 2, 'R', 'VV'),
162: ('CLEAN', 1, 1, 0x02, 1, 'V', 'V'),
163: ('MDETERM', 1, 1, 0x02, 1, 'V', 'A'),
164: ('MINVERSE', 1, 1, 0x02, 1, 'A', 'A'),
165: ('MMULT', 2, 2, 0x02, 2, 'A', 'AA'),
167: ('IPMT', 4, 6, 0x04, 6, 'V', 'VVVVVV'),
168: ('PPMT', 4, 6, 0x04, 6, 'V', 'VVVVVV'),
169: ('COUNTA', 0, 30, 0x04, 1, 'V', 'R'),
183: ('PRODUCT', 0, 30, 0x04, 1, 'V', 'R'),
184: ('FACT', 1, 1, 0x02, 1, 'V', 'V'),
189: ('DPRODUCT', 3, 3, 0x02, 3, 'V', 'RRR'),
190: ('ISNONTEXT', 1, 1, 0x02, 1, 'V', 'V'),
193: ('STDEVP', 1, 30, 0x04, 1, 'V', 'R'),
194: ('VARP', 1, 30, 0x04, 1, 'V', 'R'),
195: ('DSTDEVP', 3, 3, 0x02, 3, 'V', 'RRR'),
196: ('DVARP', 3, 3, 0x02, 3, 'V', 'RRR'),
197: ('TRUNC', 1, 2, 0x04, 2, 'V', 'VV'),
198: ('ISLOGICAL', 1, 1, 0x02, 1, 'V', 'V'),
199: ('DCOUNTA', 3, 3, 0x02, 3, 'V', 'RRR'),
204: ('USDOLLAR', 1, 2, 0x04, 2, 'V', 'VV'),
205: ('FINDB', 2, 3, 0x04, 3, 'V', 'VVV'),
206: ('SEARCHB', 2, 3, 0x04, 3, 'V', 'VVV'),
207: ('REPLACEB', 4, 4, 0x02, 4, 'V', 'VVVV'),
208: ('LEFTB', 1, 2, 0x04, 2, 'V', 'VV'),
209: ('RIGHTB', 1, 2, 0x04, 2, 'V', 'VV'),
210: ('MIDB', 3, 3, 0x02, 3, 'V', 'VVV'),
211: ('LENB', 1, 1, 0x02, 1, 'V', 'V'),
212: ('ROUNDUP', 2, 2, 0x02, 2, 'V', 'VV'),
213: ('ROUNDDOWN', 2, 2, 0x02, 2, 'V', 'VV'),
214: ('ASC', 1, 1, 0x02, 1, 'V', 'V'),
215: ('DBCS', 1, 1, 0x02, 1, 'V', 'V'),
216: ('RANK', 2, 3, 0x04, 3, 'V', 'VRV'),
219: ('ADDRESS', 2, 5, 0x04, 5, 'V', 'VVVVV'),
220: ('DAYS360', 2, 3, 0x04, 3, 'V', 'VVV'),
221: ('TODAY', 0, 0, 0x0a, 0, 'V', ''),
222: ('VDB', 5, 7, 0x04, 7, 'V', 'VVVVVVV'),
227: ('MEDIAN', 1, 30, 0x04, 1, 'V', 'R'),
228: ('SUMPRODUCT', 1, 30, 0x04, 1, 'V', 'A'),
229: ('SINH', 1, 1, 0x02, 1, 'V', 'V'),
230: ('COSH', 1, 1, 0x02, 1, 'V', 'V'),
231: ('TANH', 1, 1, 0x02, 1, 'V', 'V'),
232: ('ASINH', 1, 1, 0x02, 1, 'V', 'V'),
233: ('ACOSH', 1, 1, 0x02, 1, 'V', 'V'),
234: ('ATANH', 1, 1, 0x02, 1, 'V', 'V'),
235: ('DGET', 3, 3, 0x02, 3, 'V', 'RRR'),
244: ('INFO', 1, 1, 0x02, 1, 'V', 'V'),
247: ('DB', 4, 5, 0x04, 5, 'V', 'VVVVV'),
252: ('FREQUENCY', 2, 2, 0x02, 2, 'A', 'RR'),
261: ('ERROR.TYPE', 1, 1, 0x02, 1, 'V', 'V'),
269: ('AVEDEV', 1, 30, 0x04, 1, 'V', 'R'),
270: ('BETADIST', 3, 5, 0x04, 1, 'V', 'V'),
271: ('GAMMALN', 1, 1, 0x02, 1, 'V', 'V'),
272: ('BETAINV', 3, 5, 0x04, 1, 'V', 'V'),
273: ('BINOMDIST', 4, 4, 0x02, 4, 'V', 'VVVV'),
274: ('CHIDIST', 2, 2, 0x02, 2, 'V', 'VV'),
275: ('CHIINV', 2, 2, 0x02, 2, 'V', 'VV'),
276: ('COMBIN', 2, 2, 0x02, 2, 'V', 'VV'),
277: ('CONFIDENCE', 3, 3, 0x02, 3, 'V', 'VVV'),
278: ('CRITBINOM', 3, 3, 0x02, 3, 'V', 'VVV'),
279: ('EVEN', 1, 1, 0x02, 1, 'V', 'V'),
280: ('EXPONDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
281: ('FDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
282: ('FINV', 3, 3, 0x02, 3, 'V', 'VVV'),
283: ('FISHER', 1, 1, 0x02, 1, 'V', 'V'),
284: ('FISHERINV', 1, 1, 0x02, 1, 'V', 'V'),
285: ('FLOOR', 2, 2, 0x02, 2, 'V', 'VV'),
286: ('GAMMADIST', 4, 4, 0x02, 4, 'V', 'VVVV'),
287: ('GAMMAINV', 3, 3, 0x02, 3, 'V', 'VVV'),
288: ('CEILING', 2, 2, 0x02, 2, 'V', 'VV'),
289: ('HYPGEOMDIST', 4, 4, 0x02, 4, 'V', 'VVVV'),
290: ('LOGNORMDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
291: ('LOGINV', 3, 3, 0x02, 3, 'V', 'VVV'),
292: ('NEGBINOMDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
293: ('NORMDIST', 4, 4, 0x02, 4, 'V', 'VVVV'),
294: ('NORMSDIST', 1, 1, 0x02, 1, 'V', 'V'),
295: ('NORMINV', 3, 3, 0x02, 3, 'V', 'VVV'),
296: ('NORMSINV', 1, 1, 0x02, 1, 'V', 'V'),
297: ('STANDARDIZE', 3, 3, 0x02, 3, 'V', 'VVV'),
298: ('ODD', 1, 1, 0x02, 1, 'V', 'V'),
299: ('PERMUT', 2, 2, 0x02, 2, 'V', 'VV'),
300: ('POISSON', 3, 3, 0x02, 3, 'V', 'VVV'),
301: ('TDIST', 3, 3, 0x02, 3, 'V', 'VVV'),
302: ('WEIBULL', 4, 4, 0x02, 4, 'V', 'VVVV'),
303: ('SUMXMY2', 2, 2, 0x02, 2, 'V', 'AA'),
304: ('SUMX2MY2', 2, 2, 0x02, 2, 'V', 'AA'),
305: ('SUMX2PY2', 2, 2, 0x02, 2, 'V', 'AA'),
306: ('CHITEST', 2, 2, 0x02, 2, 'V', 'AA'),
307: ('CORREL', 2, 2, 0x02, 2, 'V', 'AA'),
308: ('COVAR', 2, 2, 0x02, 2, 'V', 'AA'),
309: ('FORECAST', 3, 3, 0x02, 3, 'V', 'VAA'),
310: ('FTEST', 2, 2, 0x02, 2, 'V', 'AA'),
311: ('INTERCEPT', 2, 2, 0x02, 2, 'V', 'AA'),
312: ('PEARSON', 2, 2, 0x02, 2, 'V', 'AA'),
313: ('RSQ', 2, 2, 0x02, 2, 'V', 'AA'),
314: ('STEYX', 2, 2, 0x02, 2, 'V', 'AA'),
315: ('SLOPE', 2, 2, 0x02, 2, 'V', 'AA'),
316: ('TTEST', 4, 4, 0x02, 4, 'V', 'AAVV'),
317: ('PROB', 3, 4, 0x04, 3, 'V', 'AAV'),
318: ('DEVSQ', 1, 30, 0x04, 1, 'V', 'R'),
319: ('GEOMEAN', 1, 30, 0x04, 1, 'V', 'R'),
320: ('HARMEAN', 1, 30, 0x04, 1, 'V', 'R'),
321: ('SUMSQ', 0, 30, 0x04, 1, 'V', 'R'),
322: ('KURT', 1, 30, 0x04, 1, 'V', 'R'),
323: ('SKEW', 1, 30, 0x04, 1, 'V', 'R'),
324: ('ZTEST', 2, 3, 0x04, 2, 'V', 'RV'),
325: ('LARGE', 2, 2, 0x02, 2, 'V', 'RV'),
326: ('SMALL', 2, 2, 0x02, 2, 'V', 'RV'),
327: ('QUARTILE', 2, 2, 0x02, 2, 'V', 'RV'),
328: ('PERCENTILE', 2, 2, 0x02, 2, 'V', 'RV'),
329: ('PERCENTRANK', 2, 3, 0x04, 2, 'V', 'RV'),
330: ('MODE', 1, 30, 0x04, 1, 'V', 'A'),
331: ('TRIMMEAN', 2, 2, 0x02, 2, 'V', 'RV'),
332: ('TINV', 2, 2, 0x02, 2, 'V', 'VV'),
336: ('CONCATENATE', 0, 30, 0x04, 1, 'V', 'V'),
337: ('POWER', 2, 2, 0x02, 2, 'V', 'VV'),
342: ('RADIANS', 1, 1, 0x02, 1, 'V', 'V'),
343: ('DEGREES', 1, 1, 0x02, 1, 'V', 'V'),
344: ('SUBTOTAL', 2, 30, 0x04, 2, 'V', 'VR'),
345: ('SUMIF', 2, 3, 0x04, 3, 'V', 'RVR'),
346: ('COUNTIF', 2, 2, 0x02, 2, 'V', 'RV'),
347: ('COUNTBLANK', 1, 1, 0x02, 1, 'V', 'R'),
350: ('ISPMT', 4, 4, 0x02, 4, 'V', 'VVVV'),
351: ('DATEDIF', 3, 3, 0x02, 3, 'V', 'VVV'),
352: ('DATESTRING', 1, 1, 0x02, 1, 'V', 'V'),
353: ('NUMBERSTRING', 2, 2, 0x02, 2, 'V', 'VV'),
354: ('ROMAN', 1, 2, 0x04, 2, 'V', 'VV'),
358: ('GETPIVOTDATA', 2, 2, 0x02, 2, 'V', 'RV'),
359: ('HYPERLINK', 1, 2, 0x04, 2, 'V', 'VV'),
360: ('PHONETIC', 1, 1, 0x02, 1, 'V', 'V'),
361: ('AVERAGEA', 1, 30, 0x04, 1, 'V', 'R'),
362: ('MAXA', 1, 30, 0x04, 1, 'V', 'R'),
363: ('MINA', 1, 30, 0x04, 1, 'V', 'R'),
364: ('STDEVPA', 1, 30, 0x04, 1, 'V', 'R'),
365: ('VARPA', 1, 30, 0x04, 1, 'V', 'R'),
366: ('STDEVA', 1, 30, 0x04, 1, 'V', 'R'),
367: ('VARA', 1, 30, 0x04, 1, 'V', 'R'),
368: ('BAHTTEXT', 1, 1, 0x02, 1, 'V', 'V'),
369: ('THAIDAYOFWEEK', 1, 1, 0x02, 1, 'V', 'V'),
370: ('THAIDIGIT', 1, 1, 0x02, 1, 'V', 'V'),
371: ('THAIMONTHOFYEAR', 1, 1, 0x02, 1, 'V', 'V'),
372: ('THAINUMSOUND', 1, 1, 0x02, 1, 'V', 'V'),
373: ('THAINUMSTRING', 1, 1, 0x02, 1, 'V', 'V'),
374: ('THAISTRINGLENGTH', 1, 1, 0x02, 1, 'V', 'V'),
375: ('ISTHAIDIGIT', 1, 1, 0x02, 1, 'V', 'V'),
376: ('ROUNDBAHTDOWN', 1, 1, 0x02, 1, 'V', 'V'),
377: ('ROUNDBAHTUP', 1, 1, 0x02, 1, 'V', 'V'),
378: ('THAIYEAR', 1, 1, 0x02, 1, 'V', 'V'),
379: ('RTD', 2, 5, 0x04, 1, 'V', 'V'),
}
# Names of the sub-operations encoded in a tAttr formula token
# (keyed by the tAttr sub-opcode byte).
tAttrNames = {
    0x00: "Skip??", # seen in SAMPLES.XLS which shipped with Excel 5.0
    0x01: "Volatile",
    0x02: "If",
    0x04: "Choose",
    0x08: "Skip",
    0x10: "Sum",
    0x20: "Assign",
    0x40: "Space",
    0x41: "SpaceVolatile",
    }
# Ptg opcodes that push an error operand onto the evaluation stack.
error_opcodes = set([0x07, 0x08, 0x0A, 0x0B, 0x1C, 0x1D, 0x2F])
# Coordinate-wise combiners for the range (:) and intersection (space)
# operators; positions correspond to Ref3D.coords
# (shtxlo, shtxhi, rowxlo, rowxhi, colxlo, colxhi).
tRangeFuncs = (min, max, min, max, min, max)
tIsectFuncs = (max, min, max, min, max, min)
def do_box_funcs(box_funcs, boxa, boxb):
    """Combine two boxes coordinate by coordinate.

    box_funcs is a 6-tuple of callables (e.g. tRangeFuncs or tIsectFuncs),
    applied pairwise to the .coords tuples of boxa and boxb; the combined
    6-tuple of coordinates is returned.
    """
    combined = []
    for combine, ca, cb in zip(box_funcs, boxa.coords, boxb.coords):
        combined.append(combine(ca, cb))
    return tuple(combined)
def adjust_cell_addr_biff8(rowval, colval, reldelta, browx=None, bcolx=None):
    """Split a BIFF8 (rowval, colval) pair into (rowx, colx, row_rel, col_rel).

    In BIFF8 the two relativity flags live in the top bits of the column
    word; the column number proper is its low byte.  When reldelta is
    true, relative components are signed offsets and get sign-extended;
    otherwise they are absolute and the base cell (browx, bcolx) is
    subtracted from them.
    """
    row_rel = (colval >> 15) & 1
    col_rel = (colval >> 14) & 1
    rowx, colx = rowval, colval & 0xff
    if reldelta:
        # Sign-extend the 16-bit row / 8-bit column offsets.
        if row_rel and rowx & 0x8000:
            rowx -= 0x10000
        if col_rel and colx & 0x80:
            colx -= 0x100
    else:
        # Convert absolute coordinates into offsets from the base cell.
        if row_rel:
            rowx -= browx
        if col_rel:
            colx -= bcolx
    return rowx, colx, row_rel, col_rel
def adjust_cell_addr_biff_le7(
        rowval, colval, reldelta, browx=None, bcolx=None):
    """Split a BIFF <= 7 (rowval, colval) pair into (rowx, colx, row_rel, col_rel).

    Pre-BIFF8 layout: the two relativity flags occupy the top bits of the
    *row* word, and the row number proper is its low 14 bits.  When
    reldelta is true, relative components are signed offsets and get
    sign-extended; otherwise the base cell (browx, bcolx) is subtracted.
    """
    row_rel = (rowval >> 15) & 1
    col_rel = (rowval >> 14) & 1
    rowx, colx = rowval & 0x3fff, colval
    if reldelta:
        # Sign-extend the 14-bit row / 8-bit column offsets.
        if row_rel and rowx & 0x2000:
            rowx -= 0x4000
        if col_rel and colx & 0x80:
            colx -= 0x100
    else:
        # Convert absolute coordinates into offsets from the base cell.
        if row_rel:
            rowx -= browx
        if col_rel:
            colx -= bcolx
    return rowx, colx, row_rel, col_rel
def get_cell_addr(data, pos, bv, reldelta, browx=None, bcolx=None):
    """Decode a single cell reference starting at data[pos].

    Returns (rowx, colx, row_rel, col_rel).  BIFF8 stores row and column
    as two 16-bit words; earlier versions store a 16-bit row followed by
    an 8-bit column.
    """
    if bv >= 80:
        rowval, colval = unpack("<HH", data[pos:pos+4])
        return adjust_cell_addr_biff8(rowval, colval, reldelta, browx, bcolx)
    rowval, colval = unpack("<HB", data[pos:pos+3])
    return adjust_cell_addr_biff_le7(rowval, colval, reldelta, browx, bcolx)
def get_cell_range_addr(data, pos, bv, reldelta, browx=None, bcolx=None):
    """Decode a cell-range (area) reference starting at data[pos].

    Returns a pair of (rowx, colx, row_rel, col_rel) tuples for the two
    corners of the range.  BIFF8 stores row1, row2, col1, col2 as four
    16-bit words; earlier versions store two 16-bit rows followed by two
    8-bit columns.
    """
    if bv >= 80:
        r1, r2, c1, c2 = unpack("<HHHH", data[pos:pos+8])
        adjust = adjust_cell_addr_biff8
    else:
        r1, r2, c1, c2 = unpack("<HHBB", data[pos:pos+6])
        adjust = adjust_cell_addr_biff_le7
    first = adjust(r1, c1, reldelta, browx, bcolx)
    second = adjust(r2, c2, reldelta, browx, bcolx)
    return first, second
def get_externsheet_local_range(bk, refx, blah=0):
    """Map an EXTERNSHEET index to a pair of xlrd sheet indexes (BIFF8).

    Returns (shx1, shx2): the inclusive sheet range the reference covers,
    or a pair of negative sentinel codes:
      (-1, -1)     internal reference, unspecified sheet
      (-2, -2)     internal reference, deleted sheet(s)
      (-3, -3)     internal reference to a macro sheet
      (-4, -4)     external workbook reference
      (-5, -5)     add-in function reference
      (-101, -101) refx out of range (corrupt file?)
      (-102, -102) sheet indexes out of range (corrupt file?)
    `blah` enables debug logging to bk.logfile.
    """
    try:
        info = bk._externsheet_info[refx]
    except IndexError:
        print("!!! get_externsheet_local_range: refx=%d, not in range(%d)" \
            % (refx, len(bk._externsheet_info)), file=bk.logfile)
        return (-101, -101)
    ref_recordx, ref_first_sheetx, ref_last_sheetx = info
    if ref_recordx == bk._supbook_addins_inx:
        if blah:
            print("/// get_externsheet_local_range(refx=%d) -> addins %r" % (refx, info), file=bk.logfile)
        # Add-in references always carry the 0xFFFE sentinel sheet indexes.
        assert ref_first_sheetx == 0xFFFE == ref_last_sheetx
        return (-5, -5)
    if ref_recordx != bk._supbook_locals_inx:
        if blah:
            print("/// get_externsheet_local_range(refx=%d) -> external %r" % (refx, info), file=bk.logfile)
        return (-4, -4) # external reference
    if ref_first_sheetx == 0xFFFE == ref_last_sheetx:
        if blah:
            print("/// get_externsheet_local_range(refx=%d) -> unspecified sheet %r" % (refx, info), file=bk.logfile)
        return (-1, -1) # internal reference, any sheet
    if ref_first_sheetx == 0xFFFF == ref_last_sheetx:
        if blah:
            print("/// get_externsheet_local_range(refx=%d) -> deleted sheet(s)" % (refx, ), file=bk.logfile)
        return (-2, -2) # internal reference, deleted sheet(s)
    nsheets = len(bk._all_sheets_map)
    if not(0 <= ref_first_sheetx <= ref_last_sheetx < nsheets):
        if blah:
            print("/// get_externsheet_local_range(refx=%d) -> %r" % (refx, info), file=bk.logfile)
            print("--- first/last sheet not in range(%d)" % nsheets, file=bk.logfile)
        return (-102, -102) # stuffed up somewhere :-(
    xlrd_sheetx1 = bk._all_sheets_map[ref_first_sheetx]
    xlrd_sheetx2 = bk._all_sheets_map[ref_last_sheetx]
    # A negative entry in _all_sheets_map marks a sheet xlrd doesn't expose
    # (e.g. a macro sheet).
    if not(0 <= xlrd_sheetx1 <= xlrd_sheetx2):
        return (-3, -3) # internal reference, but to a macro sheet
    return xlrd_sheetx1, xlrd_sheetx2
def get_externsheet_local_range_b57(
        bk, raw_extshtx, ref_first_sheetx, ref_last_sheetx, blah=0):
    """BIFF5/7 analogue of get_externsheet_local_range.

    Maps the raw EXTERNSHEET index and sheet range from the record to a
    pair of xlrd sheet indexes, or a pair of negative sentinel codes:
      (-2, -2)     internal reference, deleted sheet(s)
      (-3, -3)     internal reference to a macro sheet
      (-4, -4)     external workbook reference
      (-103, -103) sheet indexes out of range (corrupt file?)
    `blah` enables debug logging to bk.logfile.
    """
    if raw_extshtx > 0:
        if blah:
            print("/// get_externsheet_local_range_b57(raw_extshtx=%d) -> external" % raw_extshtx, file=bk.logfile)
        return (-4, -4) # external reference
    if ref_first_sheetx == -1 and ref_last_sheetx == -1:
        return (-2, -2) # internal reference, deleted sheet(s)
    nsheets = len(bk._all_sheets_map)
    if not(0 <= ref_first_sheetx <= ref_last_sheetx < nsheets):
        if blah:
            print("/// get_externsheet_local_range_b57(%d, %d, %d) -> ???" \
                % (raw_extshtx, ref_first_sheetx, ref_last_sheetx), file=bk.logfile)
            print("--- first/last sheet not in range(%d)" % nsheets, file=bk.logfile)
        return (-103, -103) # stuffed up somewhere :-(
    xlrd_sheetx1 = bk._all_sheets_map[ref_first_sheetx]
    xlrd_sheetx2 = bk._all_sheets_map[ref_last_sheetx]
    # A negative entry in _all_sheets_map marks a sheet xlrd doesn't expose.
    if not(0 <= xlrd_sheetx1 <= xlrd_sheetx2):
        return (-3, -3) # internal reference, but to a macro sheet
    return xlrd_sheetx1, xlrd_sheetx2
class FormulaError(Exception):
    """Raised when a formula contains an unexpected, malformed or
    unsupported token."""
    pass
##
# Used in evaluating formulas.
# The following table describes the kinds and how their values
# are represented.</p>
#
# <table border="1" cellpadding="7">
# <tr>
# <th>Kind symbol</th>
# <th>Kind number</th>
# <th>Value representation</th>
# </tr>
# <tr>
# <td>oBOOL</td>
# <td align="center">3</td>
# <td>integer: 0 => False; 1 => True</td>
# </tr>
# <tr>
# <td>oERR</td>
# <td align="center">4</td>
# <td>None, or an int error code (same as XL_CELL_ERROR in the Cell class).
# </td>
# </tr>
# <tr>
# <td>oMSNG</td>
# <td align="center">5</td>
# <td>Used by Excel as a placeholder for a missing (not supplied) function
# argument. Should *not* appear as a final formula result. Value is None.</td>
# </tr>
# <tr>
# <td>oNUM</td>
# <td align="center">2</td>
# <td>A float. Note that there is no way of distinguishing dates.</td>
# </tr>
# <tr>
# <td>oREF</td>
# <td align="center">-1</td>
# <td>The value is either None or a non-empty list of
# absolute Ref3D instances.<br>
# </td>
# </tr>
# <tr>
# <td>oREL</td>
# <td align="center">-2</td>
# <td>The value is None or a non-empty list of
# fully or partially relative Ref3D instances.
# </td>
# </tr>
# <tr>
# <td>oSTRG</td>
# <td align="center">1</td>
# <td>A Unicode string.</td>
# </tr>
# <tr>
# <td>oUNK</td>
# <td align="center">0</td>
# <td>The kind is unknown or ambiguous. The value is None</td>
# </tr>
# </table>
#<p></p>
class Operand(object):
    """A (partially) evaluated operand on the formula stack: a kind/value
    pair plus the reconstituted formula text and an operator-precedence
    rank used when rebuilding that text.  The class attributes below act
    as defaults for instances that leave them unset."""
    ##
    # None means that the actual value of the operand is a variable
    # (depends on cell data), not a constant.
    value = None
    ##
    # oUNK means that the kind of operand is not known unambiguously.
    kind = oUNK
    ##
    # The reconstituted text of the original formula. Function names will be
    # in English irrespective of the original language, which doesn't seem
    # to be recorded anywhere. The separator is ",", not ";" or whatever else
    # might be more appropriate for the end-user's locale; patches welcome.
    text = '?'
    def __init__(self, akind=None, avalue=None, arank=0, atext='?'):
        # None arguments leave the class-level defaults in force.
        if akind is not None:
            self.kind = akind
        if avalue is not None:
            self.value = avalue
        self.rank = arank
        # rank is an internal gizmo (operator precedence);
        # it's used in reconstructing formula text.
        self.text = atext
    def __repr__(self):
        kind_text = okind_dict.get(self.kind, "?Unknown kind?")
        return "Operand(kind=%s, value=%r, text=%r)" \
            % (kind_text, self.value, self.text)
##
# <p>Represents an absolute or relative 3-dimensional reference to a box
# of one or more cells.<br />
# -- New in version 0.6.0
# </p>
#
# <p>The <i>coords</i> attribute is a tuple of the form:<br />
# (shtxlo, shtxhi, rowxlo, rowxhi, colxlo, colxhi)<br />
# where 0 <= thingxlo <= thingx < thingxhi.<br />
# Note that it is quite possible to have thingx > nthings; for example
# Print_Titles could have colxhi == 256 and/or rowxhi == 65536
# irrespective of how many columns/rows are actually used in the worksheet.
# The caller will need to decide how to handle this situation.
# Keyword: IndexError :-)
# </p>
#
# <p>The components of the coords attribute are also available as individual
# attributes: shtxlo, shtxhi, rowxlo, rowxhi, colxlo, and colxhi.</p>
#
# <p>The <i>relflags</i> attribute is a 6-tuple of flags which indicate whether
# the corresponding (sheet|row|col)(lo|hi) is relative (1) or absolute (0).<br>
# Note that there is necessarily no information available as to what cell(s)
# the reference could possibly be relative to. The caller must decide what if
# any use to make of oREL operands. Note also that a partially relative
# reference may well be a typo.
# For example, define name A1Z10 as $a$1:$z10 (missing $ after z)
# while the cursor is on cell Sheet3!A27.<br>
# The resulting Ref3D instance will have coords = (2, 3, 0, -16, 0, 26)
# and relflags = (0, 0, 0, 1, 0, 0).<br>
# So far, only one possibility of a sheet-relative component in
# a reference has been noticed: a 2D reference located in the "current sheet".
# <br /> This will appear as coords = (0, 1, ...) and relflags = (1, 1, ...).
class Ref3D(tuple):
    """An absolute or relative 3-dimensional reference to a box of one or
    more cells.

    Constructed from a tuple of 6 coordinates
    (shtxlo, shtxhi, rowxlo, rowxhi, colxlo, colxhi), optionally followed
    by 6 relativity flags (1 = relative, 0 = absolute).  When the flags
    are omitted, the reference is fully absolute.  Both groups are exposed
    as the .coords and .relflags tuples and as individual attributes.
    """
    def __init__(self, atuple):
        self.coords = atuple[0:6]
        # An empty slice (no flags supplied) defaults to all-absolute.
        self.relflags = atuple[6:12] or (0, 0, 0, 0, 0, 0)
        (self.shtxlo, self.shtxhi,
         self.rowxlo, self.rowxhi,
         self.colxlo, self.colxhi) = self.coords
    def __repr__(self):
        if not self.relflags or self.relflags == (0, 0, 0, 0, 0, 0):
            return "Ref3D(coords=%r)" % (self.coords, )
        return "Ref3D(coords=%r, relflags=%r)" \
            % (self.coords, self.relflags)
# Ptg opcodes for the binary operators; used as keys into binop_rules.
tAdd = 0x03
tSub = 0x04
tMul = 0x05
tDiv = 0x06
tPower = 0x07
tConcat = 0x08
# Comparison operators occupy the contiguous range 0x09..0x0E.
tLT, tLE, tEQ, tGE, tGT, tNE = range(0x09, 0x0F)
import operator as opr
def nop(x):
    """Identity conversion: return *x* unchanged."""
    return x

# Small named wrappers for the Python operators; used as the `func` slot
# in binop_rules (a lambda would do, but names aid debugging).
def _opr_pow(x, y):
    return x ** y

def _opr_lt(x, y):
    return x < y

def _opr_le(x, y):
    return x <= y

def _opr_eq(x, y):
    return x == y

def _opr_ge(x, y):
    return x >= y

def _opr_gt(x, y):
    return x > y

def _opr_ne(x, y):
    return x != y
def num2strg(num):
    """Attempt to emulate Excel's default conversion
    from number to string: integral floats lose their ".0" suffix.
    """
    text = str(num)
    return text[:-2] if text.endswith(".0") else text
# Per-operand-kind conversion functions applied before a binary operator
# is evaluated; an operand kind missing from the dict means the operator
# cannot be constant-folded for that operand.
_arith_argdict = {oNUM: nop, oSTRG: float}
_cmp_argdict = {oNUM: nop, oSTRG: nop}
# Seems no conversions done on relops; in Excel, "1" > 9 produces TRUE.
_strg_argdict = {oNUM:num2strg, oSTRG:nop}
# opcode -> (argument converters, result kind, evaluator, precedence rank,
#            symbol used when reconstructing formula text)
binop_rules = {
    tAdd:   (_arith_argdict, oNUM, opr.add,  30, '+'),
    tSub:   (_arith_argdict, oNUM, opr.sub,  30, '-'),
    tMul:   (_arith_argdict, oNUM, opr.mul,  40, '*'),
    tDiv:   (_arith_argdict, oNUM, opr.truediv,  40, '/'),
    tPower: (_arith_argdict, oNUM, _opr_pow, 50, '^',),
    tConcat:(_strg_argdict, oSTRG, opr.add,  20, '&'),
    tLT:    (_cmp_argdict, oBOOL, _opr_lt,   10, '<'),
    tLE:    (_cmp_argdict, oBOOL, _opr_le,   10, '<='),
    tEQ:    (_cmp_argdict, oBOOL, _opr_eq,   10, '='),
    tGE:    (_cmp_argdict, oBOOL, _opr_ge,   10, '>='),
    tGT:    (_cmp_argdict, oBOOL, _opr_gt,   10, '>'),
    tNE:    (_cmp_argdict, oBOOL, _opr_ne,   10, '<>'),
    }
# opcode -> (evaluator, precedence rank, prefix symbol, suffix symbol)
unop_rules = {
    0x13: (lambda x: -x,        70, '-', ''), # unary minus
    0x12: (lambda x: x,         70, '+', ''), # unary plus
    0x14: (lambda x: x / 100.0, 60, '',  '%'),# percent
    }
# Precedence ranks for leaf operands and function calls (higher binds
# tighter; they never need parenthesising).
LEAF_RANK = 90
FUNC_RANK = 90
# Recursion-depth thresholds for NAME-formula evaluation: ALARM turns on
# debug output, PANIC aborts with XLRDError.
STACK_ALARM_LEVEL = 5
STACK_PANIC_LEVEL = 10
def evaluate_name_formula(bk, nobj, namex, blah=0, level=0):
    """Evaluate the formula of a defined NAME record.

    Walks the BIFF token (Ptg) stream in nobj.raw_formula, maintaining an
    operand stack, and stores the outcome on nobj: .stack, .result,
    .any_rel, .any_err, .any_external and .evaluated.  May recurse (via
    tName / tNameX tokens) into other NAME formulas.

    bk:    Book object; supplies biff_version, name_obj_list, logfile etc.
    nobj:  the Name object whose formula is to be evaluated.
    namex: index of nobj in bk.name_obj_list (used to detect
           self-referential names).
    blah:  debug-output flag; forced on when recursion gets deep.
    level: current recursion depth; exceeding STACK_PANIC_LEVEL raises
           XLRDError.

    Fixes relative to the previous revision:
    - tRange error handling now sets res.kind = oERR instead of rebinding
      res to the bare int constant oERR (which pushed an int, not an
      Operand, onto the stack); this matches the tIsect/tList handlers.
    - removed a duplicated assert in the tFuncVar handler.
    """
    if level > STACK_ALARM_LEVEL:
        blah = 1
    data = nobj.raw_formula
    fmlalen = nobj.basic_formula_len
    bv = bk.biff_version
    reldelta = 1 # All defined name formulas use "Method B" [OOo docs]
    if blah:
        print("::: evaluate_name_formula %r %r %d %d %r level=%d" \
            % (namex, nobj.name, fmlalen, bv, data, level), file=bk.logfile)
        hex_char_dump(data, 0, fmlalen, fout=bk.logfile)
    if level > STACK_PANIC_LEVEL:
        raise XLRDError("Excessive indirect references in NAME formula")
    sztab = szdict[bv]
    pos = 0
    stack = []
    any_rel = 0
    any_err = 0
    any_external = 0
    unk_opnd = Operand(oUNK, None)
    error_opnd = Operand(oERR, None)
    spush = stack.append

    def do_binop(opcd, stk):
        # Pop two operands, apply the binary operator described by
        # binop_rules[opcd], and push the result.  Constant-folds when
        # both operand values are known and convertible.
        assert len(stk) >= 2
        bop = stk.pop()
        aop = stk.pop()
        argdict, result_kind, func, rank, sym = binop_rules[opcd]
        otext = ''.join([
            '('[:aop.rank < rank],
            aop.text,
            ')'[:aop.rank < rank],
            sym,
            '('[:bop.rank < rank],
            bop.text,
            ')'[:bop.rank < rank],
            ])
        resop = Operand(result_kind, None, rank, otext)
        try:
            bconv = argdict[bop.kind]
            aconv = argdict[aop.kind]
        except KeyError:
            stk.append(resop)
            return
        if bop.value is None or aop.value is None:
            stk.append(resop)
            return
        bval = bconv(bop.value)
        aval = aconv(aop.value)
        result = func(aval, bval)
        if result_kind == oBOOL:
            result = 1 if result else 0
        resop.value = result
        stk.append(resop)

    def do_unaryop(opcode, result_kind, stk):
        # Pop one operand, apply the unary operator, push the result.
        assert len(stk) >= 1
        aop = stk.pop()
        val = aop.value
        func, rank, sym1, sym2 = unop_rules[opcode]
        otext = ''.join([
            sym1,
            '('[:aop.rank < rank],
            aop.text,
            ')'[:aop.rank < rank],
            sym2,
            ])
        if val is not None:
            val = func(val)
        stk.append(Operand(result_kind, val, rank, otext))

    def not_in_name_formula(op_arg, oname_arg):
        # Tokens that cannot legally appear in a NAME formula.
        msg = "ERROR *** Token 0x%02x (%s) found in NAME formula" \
            % (op_arg, oname_arg)
        raise FormulaError(msg)

    if fmlalen == 0:
        stack = [unk_opnd]

    while 0 <= pos < fmlalen:
        op = BYTES_ORD(data[pos])
        opcode = op & 0x1f
        optype = (op & 0x60) >> 5
        if optype:
            opx = opcode + 32
        else:
            opx = opcode
        oname = onames[opx] # + [" RVA"][optype]
        sz = sztab[opx]
        if blah:
            print("Pos:%d Op:0x%02x Name:t%s Sz:%d opcode:%02xh optype:%02xh" \
                % (pos, op, oname, sz, opcode, optype), file=bk.logfile)
            print("Stack =", stack, file=bk.logfile)
        if sz == -2:
            msg = 'ERROR *** Unexpected token 0x%02x ("%s"); biff_version=%d' \
                % (op, oname, bv)
            raise FormulaError(msg)
        if not optype:
            # Base tokens (operators, literals, control tokens).
            if 0x00 <= opcode <= 0x02: # unk_opnd, tExp, tTbl
                not_in_name_formula(op, oname)
            elif 0x03 <= opcode <= 0x0E:
                # Add, Sub, Mul, Div, Power
                # tConcat
                # tLT, ..., tNE
                do_binop(opcode, stack)
            elif opcode == 0x0F: # tIsect
                if blah: print("tIsect pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                sym = ' '
                rank = 80 ########## check #######
                otext = ''.join([
                    '('[:aop.rank < rank],
                    aop.text,
                    ')'[:aop.rank < rank],
                    sym,
                    '('[:bop.rank < rank],
                    bop.text,
                    ')'[:bop.rank < rank],
                    ])
                res = Operand(oREF)
                res.text = otext
                if bop.kind == oERR or aop.kind == oERR:
                    res.kind = oERR
                elif bop.kind == oUNK or aop.kind == oUNK:
                    # This can happen with undefined
                    # (go search in the current sheet) labels.
                    # For example =Bob Sales
                    # Each label gets a NAME record with an empty formula (!)
                    # Evaluation of the tName token classifies it as oUNK
                    # res.kind = oREF
                    pass
                elif bop.kind == oREF == aop.kind:
                    if aop.value is not None and bop.value is not None:
                        assert len(aop.value) == 1
                        assert len(bop.value) == 1
                        coords = do_box_funcs(
                            tIsectFuncs, aop.value[0], bop.value[0])
                        res.value = [Ref3D(coords)]
                elif bop.kind == oREL == aop.kind:
                    res.kind = oREL
                    if aop.value is not None and bop.value is not None:
                        assert len(aop.value) == 1
                        assert len(bop.value) == 1
                        coords = do_box_funcs(
                            tIsectFuncs, aop.value[0], bop.value[0])
                        relfa = aop.value[0].relflags
                        relfb = bop.value[0].relflags
                        if relfa == relfb:
                            res.value = [Ref3D(coords + relfa)]
                else:
                    pass
                spush(res)
                if blah: print("tIsect post", stack, file=bk.logfile)
            elif opcode == 0x10: # tList
                if blah: print("tList pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                sym = ','
                rank = 80 ########## check #######
                otext = ''.join([
                    '('[:aop.rank < rank],
                    aop.text,
                    ')'[:aop.rank < rank],
                    sym,
                    '('[:bop.rank < rank],
                    bop.text,
                    ')'[:bop.rank < rank],
                    ])
                res = Operand(oREF, None, rank, otext)
                if bop.kind == oERR or aop.kind == oERR:
                    res.kind = oERR
                elif bop.kind in (oREF, oREL) and aop.kind in (oREF, oREL):
                    res.kind = oREF
                    if aop.kind == oREL or bop.kind == oREL:
                        res.kind = oREL
                    if aop.value is not None and bop.value is not None:
                        assert len(aop.value) >= 1
                        assert len(bop.value) == 1
                        res.value = aop.value + bop.value
                else:
                    pass
                spush(res)
                if blah: print("tList post", stack, file=bk.logfile)
            elif opcode == 0x11: # tRange
                if blah: print("tRange pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                sym = ':'
                rank = 80 ########## check #######
                otext = ''.join([
                    '('[:aop.rank < rank],
                    aop.text,
                    ')'[:aop.rank < rank],
                    sym,
                    '('[:bop.rank < rank],
                    bop.text,
                    ')'[:bop.rank < rank],
                    ])
                res = Operand(oREF, None, rank, otext)
                if bop.kind == oERR or aop.kind == oERR:
                    # BUGFIX: was `res = oERR`, which pushed the bare int
                    # constant instead of an Operand; keep the Operand and
                    # just mark its kind, as the tIsect/tList handlers do.
                    res.kind = oERR
                elif bop.kind == oREF == aop.kind:
                    if aop.value is not None and bop.value is not None:
                        assert len(aop.value) == 1
                        assert len(bop.value) == 1
                        coords = do_box_funcs(
                            tRangeFuncs, aop.value[0], bop.value[0])
                        res.value = [Ref3D(coords)]
                elif bop.kind == oREL == aop.kind:
                    res.kind = oREL
                    if aop.value is not None and bop.value is not None:
                        assert len(aop.value) == 1
                        assert len(bop.value) == 1
                        coords = do_box_funcs(
                            tRangeFuncs, aop.value[0], bop.value[0])
                        relfa = aop.value[0].relflags
                        relfb = bop.value[0].relflags
                        if relfa == relfb:
                            res.value = [Ref3D(coords + relfa)]
                else:
                    pass
                spush(res)
                if blah: print("tRange post", stack, file=bk.logfile)
            elif 0x12 <= opcode <= 0x14: # tUplus, tUminus, tPercent
                do_unaryop(opcode, oNUM, stack)
            elif opcode == 0x15: # tParen
                # source cosmetics
                pass
            elif opcode == 0x16: # tMissArg
                spush(Operand(oMSNG, None, LEAF_RANK, ''))
            elif opcode == 0x17: # tStr
                if bv <= 70:
                    strg, newpos = unpack_string_update_pos(
                        data, pos+1, bk.encoding, lenlen=1)
                else:
                    strg, newpos = unpack_unicode_update_pos(
                        data, pos+1, lenlen=1)
                sz = newpos - pos
                if blah: print("   sz=%d strg=%r" % (sz, strg), file=bk.logfile)
                text = '"' + strg.replace('"', '""') + '"'
                spush(Operand(oSTRG, strg, LEAF_RANK, text))
            elif opcode == 0x18: # tExtended
                # new with BIFF 8
                assert bv >= 80
                # not in OOo docs
                raise FormulaError("tExtended token not implemented")
            elif opcode == 0x19: # tAttr
                subop, nc = unpack("<BH", data[pos+1:pos+4])
                subname = tAttrNames.get(subop, "??Unknown??")
                if subop == 0x04: # Choose
                    sz = nc * 2 + 6
                elif subop == 0x10: # Sum (single arg)
                    sz = 4
                    if blah: print("tAttrSum", stack, file=bk.logfile)
                    assert len(stack) >= 1
                    aop = stack[-1]
                    otext = 'SUM(%s)' % aop.text
                    stack[-1] = Operand(oNUM, None, FUNC_RANK, otext)
                else:
                    sz = 4
                if blah:
                    print("   subop=%02xh subname=t%s sz=%d nc=%02xh" \
                        % (subop, subname, sz, nc), file=bk.logfile)
            elif 0x1A <= opcode <= 0x1B: # tSheet, tEndSheet
                assert bv < 50
                raise FormulaError("tSheet & tEndsheet tokens not implemented")
            elif 0x1C <= opcode <= 0x1F: # tErr, tBool, tInt, tNum
                inx = opcode - 0x1C
                nb = [1, 1, 2, 8][inx]
                kind = [oERR, oBOOL, oNUM, oNUM][inx]
                value, = unpack("<" + "BBHd"[inx], data[pos+1:pos+1+nb])
                if inx == 2: # tInt
                    value = float(value)
                    text = str(value)
                elif inx == 3: # tNum
                    text = str(value)
                elif inx == 1: # tBool
                    text = ('FALSE', 'TRUE')[value]
                else:
                    text = '"' + error_text_from_code[value] + '"'
                spush(Operand(kind, value, LEAF_RANK, text))
            else:
                raise FormulaError("Unhandled opcode: 0x%02x" % opcode)
            if sz <= 0:
                raise FormulaError("Size not set for opcode 0x%02x" % opcode)
            pos += sz
            continue
        # Classified tokens (operands: references, functions, names, ...).
        if opcode == 0x00: # tArray
            spush(unk_opnd)
        elif opcode == 0x01: # tFunc
            nb = 1 + int(bv >= 40)
            funcx = unpack("<" + " BH"[nb], data[pos+1:pos+1+nb])[0]
            func_attrs = func_defs.get(funcx, None)
            if not func_attrs:
                print("*** formula/tFunc unknown FuncID:%d" \
                      % funcx, file=bk.logfile)
                spush(unk_opnd)
            else:
                func_name, nargs = func_attrs[:2]
                if blah:
                    print("    FuncID=%d name=%s nargs=%d" \
                          % (funcx, func_name, nargs), file=bk.logfile)
                assert len(stack) >= nargs
                if nargs:
                    argtext = listsep.join([arg.text for arg in stack[-nargs:]])
                    otext = "%s(%s)" % (func_name, argtext)
                    del stack[-nargs:]
                else:
                    otext = func_name + "()"
                res = Operand(oUNK, None, FUNC_RANK, otext)
                spush(res)
        elif opcode == 0x02: #tFuncVar
            nb = 1 + int(bv >= 40)
            nargs, funcx = unpack("<B" + " BH"[nb], data[pos+1:pos+2+nb])
            prompt, nargs = divmod(nargs, 128)
            macro, funcx = divmod(funcx, 32768)
            if blah:
                print("   FuncID=%d nargs=%d macro=%d prompt=%d" \
                      % (funcx, nargs, macro, prompt), file=bk.logfile)
            func_attrs = func_defs.get(funcx, None)
            if not func_attrs:
                print("*** formula/tFuncVar unknown FuncID:%d" \
                      % funcx, file=bk.logfile)
                spush(unk_opnd)
            else:
                func_name, minargs, maxargs = func_attrs[:3]
                if blah:
                    print("    name: %r, min~max args: %d~%d" \
                        % (func_name, minargs, maxargs), file=bk.logfile)
                assert minargs <= nargs <= maxargs
                # (duplicate assert removed here)
                assert len(stack) >= nargs
                argtext = listsep.join([arg.text for arg in stack[-nargs:]])
                otext = "%s(%s)" % (func_name, argtext)
                res = Operand(oUNK, None, FUNC_RANK, otext)
                if funcx == 1: # IF
                    # Constant-fold IF when the condition is a known 0/1.
                    testarg = stack[-nargs]
                    if testarg.kind not in (oNUM, oBOOL):
                        if blah and testarg.kind != oUNK:
                            print("IF testarg kind?", file=bk.logfile)
                    elif testarg.value not in (0, 1):
                        if blah and testarg.value is not None:
                            print("IF testarg value?", file=bk.logfile)
                    else:
                        if nargs == 2 and not testarg.value:
                            # IF(FALSE, tv) => FALSE
                            res.kind, res.value = oBOOL, 0
                        else:
                            respos = -nargs + 2 - int(testarg.value)
                            chosen = stack[respos]
                            if chosen.kind == oMSNG:
                                res.kind, res.value = oNUM, 0
                            else:
                                res.kind, res.value = chosen.kind, chosen.value
                        if blah:
                            print("$$$$$$ IF => constant", file=bk.logfile)
                elif funcx == 100: # CHOOSE
                    # Constant-fold CHOOSE when the index is a known number.
                    testarg = stack[-nargs]
                    if testarg.kind == oNUM:
                        if 1 <= testarg.value < nargs:
                            chosen = stack[-nargs + int(testarg.value)]
                            if chosen.kind == oMSNG:
                                res.kind, res.value = oNUM, 0
                            else:
                                res.kind, res.value = chosen.kind, chosen.value
                del stack[-nargs:]
                spush(res)
        elif opcode == 0x03: #tName
            tgtnamex = unpack("<H", data[pos+1:pos+3])[0] - 1
            # Only change with BIFF version is number of trailing UNUSED bytes!
            if blah: print("   tgtnamex=%d" % tgtnamex, file=bk.logfile)
            tgtobj = bk.name_obj_list[tgtnamex]
            if not tgtobj.evaluated:
                ### recursive ###
                evaluate_name_formula(bk, tgtobj, tgtnamex, blah, level+1)
            if tgtobj.macro or tgtobj.binary \
            or tgtobj.any_err:
                if blah:
                    tgtobj.dump(
                        bk.logfile,
                        header="!!! tgtobj has problems!!!",
                        footer="----------- --------",
                        )
                res = Operand(oUNK, None)
                any_err = any_err or tgtobj.macro or tgtobj.binary or tgtobj.any_err
                any_rel = any_rel or tgtobj.any_rel
            else:
                assert len(tgtobj.stack) == 1
                res = copy.deepcopy(tgtobj.stack[0])
            res.rank = LEAF_RANK
            if tgtobj.scope == -1:
                res.text = tgtobj.name
            else:
                res.text = "%s!%s" \
                           % (bk._sheet_names[tgtobj.scope], tgtobj.name)
            if blah:
                print("    tName: setting text to", repr(res.text), file=bk.logfile)
            spush(res)
        elif opcode == 0x04: # tRef
            # not_in_name_formula(op, oname)
            res = get_cell_addr(data, pos+1, bv, reldelta)
            if blah: print("  ", res, file=bk.logfile)
            rowx, colx, row_rel, col_rel = res
            shx1 = shx2 = 0 ####### N.B. relative to the CURRENT SHEET
            any_rel = 1
            coords = (shx1, shx2+1, rowx, rowx+1, colx, colx+1)
            if blah: print("   ", coords, file=bk.logfile)
            res = Operand(oUNK, None)
            if optype == 1:
                relflags = (1, 1, row_rel, row_rel, col_rel, col_rel)
                res = Operand(oREL, [Ref3D(coords + relflags)])
            spush(res)
        elif opcode == 0x05: # tArea
            # not_in_name_formula(op, oname)
            res1, res2 = get_cell_range_addr(data, pos+1, bv, reldelta)
            if blah: print("  ", res1, res2, file=bk.logfile)
            rowx1, colx1, row_rel1, col_rel1 = res1
            rowx2, colx2, row_rel2, col_rel2 = res2
            shx1 = shx2 = 0 ####### N.B. relative to the CURRENT SHEET
            any_rel = 1
            coords = (shx1, shx2+1, rowx1, rowx2+1, colx1, colx2+1)
            if blah: print("   ", coords, file=bk.logfile)
            res = Operand(oUNK, None)
            if optype == 1:
                relflags = (1, 1, row_rel1, row_rel2, col_rel1, col_rel2)
                res = Operand(oREL, [Ref3D(coords + relflags)])
            spush(res)
        elif opcode == 0x06: # tMemArea
            not_in_name_formula(op, oname)
        elif opcode == 0x09: # tMemFunc
            nb = unpack("<H", data[pos+1:pos+3])[0]
            if blah: print("  %d bytes of cell ref formula" % nb, file=bk.logfile)
            # no effect on stack
        elif opcode == 0x0C: #tRefN
            not_in_name_formula(op, oname)
            # res = get_cell_addr(data, pos+1, bv, reldelta=1)
            # # note *ALL* tRefN usage has signed offset for relative addresses
            # any_rel = 1
            # if blah: print >> bk.logfile, "   ", res
            # spush(res)
        elif opcode == 0x0D: #tAreaN
            not_in_name_formula(op, oname)
            # res = get_cell_range_addr(data, pos+1, bv, reldelta=1)
            # # note *ALL* tAreaN usage has signed offset for relative addresses
            # any_rel = 1
            # if blah: print >> bk.logfile, "   ", res
        elif opcode == 0x1A: # tRef3d
            if bv >= 80:
                res = get_cell_addr(data, pos+3, bv, reldelta)
                refx = unpack("<H", data[pos+1:pos+3])[0]
                shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
            else:
                res = get_cell_addr(data, pos+15, bv, reldelta)
                raw_extshtx, raw_shx1, raw_shx2 = \
                             unpack("<hxxxxxxxxhh", data[pos+1:pos+15])
                if blah:
                    print("tRef3d", raw_extshtx, raw_shx1, raw_shx2, file=bk.logfile)
                shx1, shx2 = get_externsheet_local_range_b57(
                                bk, raw_extshtx, raw_shx1, raw_shx2, blah)
            rowx, colx, row_rel, col_rel = res
            is_rel = row_rel or col_rel
            any_rel = any_rel or is_rel
            coords = (shx1, shx2+1, rowx, rowx+1, colx, colx+1)
            any_err |= shx1 < -1
            if blah: print("   ", coords, file=bk.logfile)
            res = Operand(oUNK, None)
            if is_rel:
                relflags = (0, 0, row_rel, row_rel, col_rel, col_rel)
                ref3d = Ref3D(coords + relflags)
                res.kind = oREL
                res.text = rangename3drel(bk, ref3d, r1c1=1)
            else:
                ref3d = Ref3D(coords)
                res.kind = oREF
                res.text = rangename3d(bk, ref3d)
            res.rank = LEAF_RANK
            if optype == 1:
                res.value = [ref3d]
            spush(res)
        elif opcode == 0x1B: # tArea3d
            if bv >= 80:
                res1, res2 = get_cell_range_addr(data, pos+3, bv, reldelta)
                refx = unpack("<H", data[pos+1:pos+3])[0]
                shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
            else:
                res1, res2 = get_cell_range_addr(data, pos+15, bv, reldelta)
                raw_extshtx, raw_shx1, raw_shx2 = \
                             unpack("<hxxxxxxxxhh", data[pos+1:pos+15])
                if blah:
                    print("tArea3d", raw_extshtx, raw_shx1, raw_shx2, file=bk.logfile)
                shx1, shx2 = get_externsheet_local_range_b57(
                                bk, raw_extshtx, raw_shx1, raw_shx2, blah)
            any_err |= shx1 < -1
            rowx1, colx1, row_rel1, col_rel1 = res1
            rowx2, colx2, row_rel2, col_rel2 = res2
            is_rel = row_rel1 or col_rel1 or row_rel2 or col_rel2
            any_rel = any_rel or is_rel
            coords = (shx1, shx2+1, rowx1, rowx2+1, colx1, colx2+1)
            if blah: print("   ", coords, file=bk.logfile)
            res = Operand(oUNK, None)
            if is_rel:
                relflags = (0, 0, row_rel1, row_rel2, col_rel1, col_rel2)
                ref3d = Ref3D(coords + relflags)
                res.kind = oREL
                res.text = rangename3drel(bk, ref3d, r1c1=1)
            else:
                ref3d = Ref3D(coords)
                res.kind = oREF
                res.text = rangename3d(bk, ref3d)
            res.rank = LEAF_RANK
            if optype == 1:
                res.value = [ref3d]
            spush(res)
        elif opcode == 0x19: # tNameX
            dodgy = 0
            res = Operand(oUNK, None)
            if bv >= 80:
                refx, tgtnamex = unpack("<HH", data[pos+1:pos+5])
                tgtnamex -= 1
                origrefx = refx
            else:
                refx, tgtnamex = unpack("<hxxxxxxxxH", data[pos+1:pos+13])
                tgtnamex -= 1
                origrefx = refx
                if refx > 0:
                    refx -= 1
                elif refx < 0:
                    refx = -refx - 1
                else:
                    dodgy = 1
            if blah:
                print("   origrefx=%d refx=%d tgtnamex=%d dodgy=%d" \
                    % (origrefx, refx, tgtnamex, dodgy), file=bk.logfile)
            if tgtnamex == namex:
                if blah: print("!!!! Self-referential !!!!", file=bk.logfile)
                dodgy = any_err = 1
            if not dodgy:
                if bv >= 80:
                    shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
                elif origrefx > 0:
                    shx1, shx2 = (-4, -4) # external ref
                else:
                    exty = bk._externsheet_type_b57[refx]
                    if exty == 4: # non-specific sheet in own doc't
                        shx1, shx2 = (-1, -1) # internal, any sheet
                    else:
                        shx1, shx2 = (-666, -666)
            if dodgy or shx1 < -1:
                otext = "<<Name #%d in external(?) file #%d>>" \
                        % (tgtnamex, origrefx)
                res = Operand(oUNK, None, LEAF_RANK, otext)
            else:
                tgtobj = bk.name_obj_list[tgtnamex]
                if not tgtobj.evaluated:
                    ### recursive ###
                    evaluate_name_formula(bk, tgtobj, tgtnamex, blah, level+1)
                if tgtobj.macro or tgtobj.binary \
                or tgtobj.any_err:
                    if blah:
                        tgtobj.dump(
                            bk.logfile,
                            header="!!! bad tgtobj !!!",
                            footer="------------------",
                            )
                    res = Operand(oUNK, None)
                    any_err = any_err or tgtobj.macro or tgtobj.binary or tgtobj.any_err
                    any_rel = any_rel or tgtobj.any_rel
                else:
                    assert len(tgtobj.stack) == 1
                    res = copy.deepcopy(tgtobj.stack[0])
                res.rank = LEAF_RANK
                if tgtobj.scope == -1:
                    res.text = tgtobj.name
                else:
                    res.text = "%s!%s" \
                               % (bk._sheet_names[tgtobj.scope], tgtobj.name)
                if blah:
                    print("    tNameX: setting text to", repr(res.text), file=bk.logfile)
            spush(res)
        elif opcode in error_opcodes:
            any_err = 1
            spush(error_opnd)
        else:
            if blah:
                print("FORMULA: /// Not handled yet: t" + oname, file=bk.logfile)
            any_err = 1
        if sz <= 0:
            raise FormulaError("Fatal: token size is not positive")
        pos += sz
    any_rel = not not any_rel
    if blah:
        fprintf(bk.logfile, "End of formula. level=%d any_rel=%d any_err=%d stack=%r\n",
            level, not not any_rel, any_err, stack)
        if len(stack) >= 2:
            print("*** Stack has unprocessed args", file=bk.logfile)
        print(file=bk.logfile)
    nobj.stack = stack
    if len(stack) != 1:
        nobj.result = None
    else:
        nobj.result = stack[0]
    nobj.any_rel = any_rel
    nobj.any_err = any_err
    nobj.any_external = any_external
    nobj.evaluated = 1
#### under construction #############################################################################
def decompile_formula(bk, fmla, fmlalen,
    fmlatype=None, browx=None, bcolx=None,
    blah=0, level=0, r1c1=0):
    """Decode the RPN token stream of a BIFF formula into Excel-style
    formula text (e.g. "SUM(Sheet1!$A$1:$B$2)").

    bk: Book object; supplies biff_version, logfile, and the name /
        externsheet tables used to resolve tName/tNameX/tRef3d tokens.
    fmla: raw formula bytes; fmlalen: number of bytes to decode.
    fmlatype: one of the FMLA_TYPE_* constants; restricts which tokens
        are legal and (for shared/name/cond-fmt/data-val formulas)
        selects signed-delta relative addressing.
    browx, bcolx: base cell for rendering relative references in A1
        notation; when absent, relative parts come out in R1C1 form.
    blah: verbosity flag; level: recursion-depth guard; r1c1: force
        R1C1-style output.
    Returns the formula text, or None if the token stream did not
    reduce to exactly one operand.
    """
    if level > STACK_ALARM_LEVEL:
        blah = 1
    reldelta = fmlatype in (FMLA_TYPE_SHARED, FMLA_TYPE_NAME, FMLA_TYPE_COND_FMT, FMLA_TYPE_DATA_VAL)
    data = fmla
    bv = bk.biff_version
    if blah:
        print("::: decompile_formula len=%d fmlatype=%r browx=%r bcolx=%r reldelta=%d %r level=%d" \
            % (fmlalen, fmlatype, browx, bcolx, reldelta, data, level), file=bk.logfile)
        hex_char_dump(data, 0, fmlalen, fout=bk.logfile)
    if level > STACK_PANIC_LEVEL:
        raise XLRDError("Excessive indirect references in formula")
    sztab = szdict[bv]
    pos = 0
    stack = []
    any_rel = 0
    any_err = 0
    any_external = 0
    unk_opnd = Operand(oUNK, None)
    error_opnd = Operand(oERR, None)
    spush = stack.append
    def do_binop(opcd, stk):
        # Pop two operands, emit "a <op> b" with parens where operand
        # rank is lower than the operator's rank.
        assert len(stk) >= 2
        bop = stk.pop()
        aop = stk.pop()
        argdict, result_kind, func, rank, sym = binop_rules[opcd]
        # '('[:flag] is '(' when flag is true, '' otherwise -- a cheap
        # conditional parenthesis.
        otext = ''.join([
            '('[:aop.rank < rank],
            aop.text,
            ')'[:aop.rank < rank],
            sym,
            '('[:bop.rank < rank],
            bop.text,
            ')'[:bop.rank < rank],
            ])
        resop = Operand(result_kind, None, rank, otext)
        stk.append(resop)
    def do_unaryop(opcode, result_kind, stk):
        assert len(stk) >= 1
        aop = stk.pop()
        func, rank, sym1, sym2 = unop_rules[opcode]
        otext = ''.join([
            sym1,
            '('[:aop.rank < rank],
            aop.text,
            ')'[:aop.rank < rank],
            sym2,
            ])
        stk.append(Operand(result_kind, None, rank, otext))
    def unexpected_opcode(op_arg, oname_arg):
        msg = "ERROR *** Unexpected token 0x%02x (%s) found in formula type %s" \
            % (op_arg, oname_arg, FMLA_TYPEDESCR_MAP[fmlatype])
        print(msg, file=bk.logfile)
        # raise FormulaError(msg)
    if fmlalen == 0:
        stack = [unk_opnd]
    while 0 <= pos < fmlalen:
        op = BYTES_ORD(data[pos])
        opcode = op & 0x1f
        optype = (op & 0x60) >> 5
        if optype:
            opx = opcode + 32
        else:
            opx = opcode
        oname = onames[opx] # + [" RVA"][optype]
        sz = sztab[opx]
        if blah:
            print("Pos:%d Op:0x%02x opname:t%s Sz:%d opcode:%02xh optype:%02xh" \
                % (pos, op, oname, sz, opcode, optype), file=bk.logfile)
            print("Stack =", stack, file=bk.logfile)
        if sz == -2:
            msg = 'ERROR *** Unexpected token 0x%02x ("%s"); biff_version=%d' \
                % (op, oname, bv)
            raise FormulaError(msg)
        if _TOKEN_NOT_ALLOWED(opx, 0) & fmlatype:
            unexpected_opcode(op, oname)
        if not optype:
            if opcode <= 0x01: # tExp
                if bv >= 30:
                    fmt = '<x2H'
                else:
                    fmt = '<xHB'
                assert pos == 0 and fmlalen == sz and not stack
                rowx, colx = unpack(fmt, data)
                text = "SHARED FMLA at rowx=%d colx=%d" % (rowx, colx)
                spush(Operand(oUNK, None, LEAF_RANK, text))
                if not fmlatype & (FMLA_TYPE_CELL | FMLA_TYPE_ARRAY):
                    unexpected_opcode(op, oname)
            elif 0x03 <= opcode <= 0x0E:
                # Add, Sub, Mul, Div, Power
                # tConcat
                # tLT, ..., tNE
                do_binop(opcode, stack)
            elif opcode == 0x0F: # tIsect
                if blah: print("tIsect pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                sym = ' '
                rank = 80 ########## check #######
                otext = ''.join([
                    '('[:aop.rank < rank],
                    aop.text,
                    ')'[:aop.rank < rank],
                    sym,
                    '('[:bop.rank < rank],
                    bop.text,
                    ')'[:bop.rank < rank],
                    ])
                res = Operand(oREF)
                res.text = otext
                if bop.kind == oERR or aop.kind == oERR:
                    res.kind = oERR
                elif bop.kind == oUNK or aop.kind == oUNK:
                    # This can happen with undefined
                    # (go search in the current sheet) labels.
                    # For example =Bob Sales
                    # Each label gets a NAME record with an empty formula (!)
                    # Evaluation of the tName token classifies it as oUNK
                    # res.kind = oREF
                    pass
                elif bop.kind == oREF == aop.kind:
                    pass
                elif bop.kind == oREL == aop.kind:
                    res.kind = oREL
                else:
                    pass
                spush(res)
                if blah: print("tIsect post", stack, file=bk.logfile)
            elif opcode == 0x10: # tList
                if blah: print("tList pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                sym = ','
                rank = 80 ########## check #######
                otext = ''.join([
                    '('[:aop.rank < rank],
                    aop.text,
                    ')'[:aop.rank < rank],
                    sym,
                    '('[:bop.rank < rank],
                    bop.text,
                    ')'[:bop.rank < rank],
                    ])
                res = Operand(oREF, None, rank, otext)
                if bop.kind == oERR or aop.kind == oERR:
                    res.kind = oERR
                elif bop.kind in (oREF, oREL) and aop.kind in (oREF, oREL):
                    res.kind = oREF
                    if aop.kind == oREL or bop.kind == oREL:
                        res.kind = oREL
                else:
                    pass
                spush(res)
                if blah: print("tList post", stack, file=bk.logfile)
            elif opcode == 0x11: # tRange
                if blah: print("tRange pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                sym = ':'
                rank = 80 ########## check #######
                otext = ''.join([
                    '('[:aop.rank < rank],
                    aop.text,
                    ')'[:aop.rank < rank],
                    sym,
                    '('[:bop.rank < rank],
                    bop.text,
                    ')'[:bop.rank < rank],
                    ])
                res = Operand(oREF, None, rank, otext)
                if bop.kind == oERR or aop.kind == oERR:
                    # bug fix: was `res = oERR`, which replaced the Operand
                    # with the int constant and broke later .text access
                    # (compare the tIsect branch above).
                    res.kind = oERR
                elif bop.kind == oREF == aop.kind:
                    pass
                else:
                    pass
                spush(res)
                if blah: print("tRange post", stack, file=bk.logfile)
            elif 0x12 <= opcode <= 0x14: # tUplus, tUminus, tPercent
                do_unaryop(opcode, oNUM, stack)
            elif opcode == 0x15: # tParen
                # source cosmetics
                pass
            elif opcode == 0x16: # tMissArg
                spush(Operand(oMSNG, None, LEAF_RANK, ''))
            elif opcode == 0x17: # tStr
                if bv <= 70:
                    strg, newpos = unpack_string_update_pos(
                                        data, pos+1, bk.encoding, lenlen=1)
                else:
                    strg, newpos = unpack_unicode_update_pos(
                                        data, pos+1, lenlen=1)
                sz = newpos - pos
                if blah: print("   sz=%d strg=%r" % (sz, strg), file=bk.logfile)
                text = '"' + strg.replace('"', '""') + '"'
                spush(Operand(oSTRG, None, LEAF_RANK, text))
            elif opcode == 0x18: # tExtended
                # new with BIFF 8
                assert bv >= 80
                # not in OOo docs, don't even know how to determine its length
                raise FormulaError("tExtended token not implemented")
            elif opcode == 0x19: # tAttr
                subop, nc = unpack("<BH", data[pos+1:pos+4])
                subname = tAttrNames.get(subop, "??Unknown??")
                if subop == 0x04: # Choose
                    sz = nc * 2 + 6
                elif subop == 0x10: # Sum (single arg)
                    sz = 4
                    if blah: print("tAttrSum", stack, file=bk.logfile)
                    assert len(stack) >= 1
                    aop = stack[-1]
                    otext = 'SUM(%s)' % aop.text
                    stack[-1] = Operand(oNUM, None, FUNC_RANK, otext)
                else:
                    sz = 4
                if blah:
                    print("   subop=%02xh subname=t%s sz=%d nc=%02xh" \
                        % (subop, subname, sz, nc), file=bk.logfile)
            elif 0x1A <= opcode <= 0x1B: # tSheet, tEndSheet
                assert bv < 50
                raise FormulaError("tSheet & tEndsheet tokens not implemented")
            elif 0x1C <= opcode <= 0x1F: # tErr, tBool, tInt, tNum
                inx = opcode - 0x1C
                nb = [1, 1, 2, 8][inx]
                kind = [oERR, oBOOL, oNUM, oNUM][inx]
                value, = unpack("<" + "BBHd"[inx], data[pos+1:pos+1+nb])
                if inx == 2: # tInt
                    value = float(value)
                    text = str(value)
                elif inx == 3: # tNum
                    text = str(value)
                elif inx == 1: # tBool
                    text = ('FALSE', 'TRUE')[value]
                else:
                    text = '"' + error_text_from_code[value] + '"'
                spush(Operand(kind, None, LEAF_RANK, text))
            else:
                raise FormulaError("Unhandled opcode: 0x%02x" % opcode)
            if sz <= 0:
                raise FormulaError("Size not set for opcode 0x%02x" % opcode)
            pos += sz
            continue
        if opcode == 0x00: # tArray
            spush(unk_opnd)
        elif opcode == 0x01: # tFunc
            nb = 1 + int(bv >= 40)
            funcx = unpack("<" + " BH"[nb], data[pos+1:pos+1+nb])[0]
            func_attrs = func_defs.get(funcx, None)
            if not func_attrs:
                print("*** formula/tFunc unknown FuncID:%d" % funcx, file=bk.logfile)
                spush(unk_opnd)
            else:
                func_name, nargs = func_attrs[:2]
                if blah:
                    print("    FuncID=%d name=%s nargs=%d" \
                          % (funcx, func_name, nargs), file=bk.logfile)
                assert len(stack) >= nargs
                if nargs:
                    argtext = listsep.join([arg.text for arg in stack[-nargs:]])
                    otext = "%s(%s)" % (func_name, argtext)
                    del stack[-nargs:]
                else:
                    otext = func_name + "()"
                res = Operand(oUNK, None, FUNC_RANK, otext)
                spush(res)
        elif opcode == 0x02: #tFuncVar
            nb = 1 + int(bv >= 40)
            nargs, funcx = unpack("<B" + " BH"[nb], data[pos+1:pos+2+nb])
            prompt, nargs = divmod(nargs, 128)
            macro, funcx = divmod(funcx, 32768)
            if blah:
                print("   FuncID=%d nargs=%d macro=%d prompt=%d" \
                      % (funcx, nargs, macro, prompt), file=bk.logfile)
            #### TODO #### if funcx == 255: # call add-in function
            if funcx == 255:
                func_attrs = ("CALL_ADDIN", 1, 30)
            else:
                func_attrs = func_defs.get(funcx, None)
            if not func_attrs:
                print("*** formula/tFuncVar unknown FuncID:%d" \
                      % funcx, file=bk.logfile)
                spush(unk_opnd)
            else:
                func_name, minargs, maxargs = func_attrs[:3]
                if blah:
                    print("   name: %r, min~max args: %d~%d" \
                        % (func_name, minargs, maxargs), file=bk.logfile)
                assert minargs <= nargs <= maxargs
                # (duplicate assert removed here)
                assert len(stack) >= nargs
                argtext = listsep.join([arg.text for arg in stack[-nargs:]])
                otext = "%s(%s)" % (func_name, argtext)
                res = Operand(oUNK, None, FUNC_RANK, otext)
                del stack[-nargs:]
                spush(res)
        elif opcode == 0x03: #tName
            tgtnamex = unpack("<H", data[pos+1:pos+3])[0] - 1
            # Only change with BIFF version is number of trailing UNUSED bytes!
            if blah: print("   tgtnamex=%d" % tgtnamex, file=bk.logfile)
            tgtobj = bk.name_obj_list[tgtnamex]
            if tgtobj.scope == -1:
                otext = tgtobj.name
            else:
                otext = "%s!%s" % (bk._sheet_names[tgtobj.scope], tgtobj.name)
            if blah:
                print("    tName: setting text to", repr(otext), file=bk.logfile)
            res = Operand(oUNK, None, LEAF_RANK, otext)
            spush(res)
        elif opcode == 0x04: # tRef
            res = get_cell_addr(data, pos+1, bv, reldelta, browx, bcolx)
            if blah: print("  ", res, file=bk.logfile)
            rowx, colx, row_rel, col_rel = res
            is_rel = row_rel or col_rel
            if is_rel:
                okind = oREL
            else:
                okind = oREF
            otext = cellnamerel(rowx, colx, row_rel, col_rel, browx, bcolx, r1c1)
            res = Operand(okind, None, LEAF_RANK, otext)
            spush(res)
        elif opcode == 0x05: # tArea
            res1, res2 = get_cell_range_addr(
                            data, pos+1, bv, reldelta, browx, bcolx)
            if blah: print("  ", res1, res2, file=bk.logfile)
            rowx1, colx1, row_rel1, col_rel1 = res1
            rowx2, colx2, row_rel2, col_rel2 = res2
            coords = (rowx1, rowx2+1, colx1, colx2+1)
            relflags = (row_rel1, row_rel2, col_rel1, col_rel2)
            if sum(relflags): # relative
                okind = oREL
            else:
                okind = oREF
            if blah: print("   ", coords, relflags, file=bk.logfile)
            otext = rangename2drel(coords, relflags, browx, bcolx, r1c1)
            res = Operand(okind, None, LEAF_RANK, otext)
            spush(res)
        elif opcode == 0x06: # tMemArea
            not_in_name_formula(op, oname)
        elif opcode == 0x09: # tMemFunc
            nb = unpack("<H", data[pos+1:pos+3])[0]
            if blah: print("  %d bytes of cell ref formula" % nb, file=bk.logfile)
            # no effect on stack
        elif opcode == 0x0C: #tRefN
            res = get_cell_addr(data, pos+1, bv, reldelta, browx, bcolx)
            # note *ALL* tRefN usage has signed offset for relative addresses
            any_rel = 1
            if blah: print("   ", res, file=bk.logfile)
            rowx, colx, row_rel, col_rel = res
            is_rel = row_rel or col_rel
            if is_rel:
                okind = oREL
            else:
                okind = oREF
            otext = cellnamerel(rowx, colx, row_rel, col_rel, browx, bcolx, r1c1)
            res = Operand(okind, None, LEAF_RANK, otext)
            spush(res)
        elif opcode == 0x0D: #tAreaN
            # res = get_cell_range_addr(data, pos+1, bv, reldelta, browx, bcolx)
            # # note *ALL* tAreaN usage has signed offset for relative addresses
            # any_rel = 1
            # if blah: print >> bk.logfile, "   ", res
            res1, res2 = get_cell_range_addr(
                            data, pos+1, bv, reldelta, browx, bcolx)
            if blah: print("  ", res1, res2, file=bk.logfile)
            rowx1, colx1, row_rel1, col_rel1 = res1
            rowx2, colx2, row_rel2, col_rel2 = res2
            coords = (rowx1, rowx2+1, colx1, colx2+1)
            relflags = (row_rel1, row_rel2, col_rel1, col_rel2)
            if sum(relflags): # relative
                okind = oREL
            else:
                okind = oREF
            if blah: print("   ", coords, relflags, file=bk.logfile)
            otext = rangename2drel(coords, relflags, browx, bcolx, r1c1)
            res = Operand(okind, None, LEAF_RANK, otext)
            spush(res)
        elif opcode == 0x1A: # tRef3d
            if bv >= 80:
                res = get_cell_addr(data, pos+3, bv, reldelta, browx, bcolx)
                refx = unpack("<H", data[pos+1:pos+3])[0]
                shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
            else:
                res = get_cell_addr(data, pos+15, bv, reldelta, browx, bcolx)
                raw_extshtx, raw_shx1, raw_shx2 = \
                             unpack("<hxxxxxxxxhh", data[pos+1:pos+15])
                if blah:
                    print("tRef3d", raw_extshtx, raw_shx1, raw_shx2, file=bk.logfile)
                shx1, shx2 = get_externsheet_local_range_b57(
                                bk, raw_extshtx, raw_shx1, raw_shx2, blah)
            rowx, colx, row_rel, col_rel = res
            is_rel = row_rel or col_rel
            any_rel = any_rel or is_rel
            coords = (shx1, shx2+1, rowx, rowx+1, colx, colx+1)
            any_err |= shx1 < -1
            if blah: print("   ", coords, file=bk.logfile)
            res = Operand(oUNK, None)
            if is_rel:
                relflags = (0, 0, row_rel, row_rel, col_rel, col_rel)
                ref3d = Ref3D(coords + relflags)
                res.kind = oREL
                res.text = rangename3drel(bk, ref3d, browx, bcolx, r1c1)
            else:
                ref3d = Ref3D(coords)
                res.kind = oREF
                res.text = rangename3d(bk, ref3d)
            res.rank = LEAF_RANK
            res.value = None
            spush(res)
        elif opcode == 0x1B: # tArea3d
            if bv >= 80:
                res1, res2 = get_cell_range_addr(data, pos+3, bv, reldelta)
                refx = unpack("<H", data[pos+1:pos+3])[0]
                shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
            else:
                res1, res2 = get_cell_range_addr(data, pos+15, bv, reldelta)
                raw_extshtx, raw_shx1, raw_shx2 = \
                             unpack("<hxxxxxxxxhh", data[pos+1:pos+15])
                if blah:
                    print("tArea3d", raw_extshtx, raw_shx1, raw_shx2, file=bk.logfile)
                shx1, shx2 = get_externsheet_local_range_b57(
                                bk, raw_extshtx, raw_shx1, raw_shx2, blah)
            any_err |= shx1 < -1
            rowx1, colx1, row_rel1, col_rel1 = res1
            rowx2, colx2, row_rel2, col_rel2 = res2
            is_rel = row_rel1 or col_rel1 or row_rel2 or col_rel2
            any_rel = any_rel or is_rel
            coords = (shx1, shx2+1, rowx1, rowx2+1, colx1, colx2+1)
            if blah: print("   ", coords, file=bk.logfile)
            res = Operand(oUNK, None)
            if is_rel:
                relflags = (0, 0, row_rel1, row_rel2, col_rel1, col_rel2)
                ref3d = Ref3D(coords + relflags)
                res.kind = oREL
                res.text = rangename3drel(bk, ref3d, browx, bcolx, r1c1)
            else:
                ref3d = Ref3D(coords)
                res.kind = oREF
                res.text = rangename3d(bk, ref3d)
            res.rank = LEAF_RANK
            spush(res)
        elif opcode == 0x19: # tNameX
            dodgy = 0
            res = Operand(oUNK, None)
            if bv >= 80:
                refx, tgtnamex = unpack("<HH", data[pos+1:pos+5])
                tgtnamex -= 1
                origrefx = refx
            else:
                refx, tgtnamex = unpack("<hxxxxxxxxH", data[pos+1:pos+13])
                tgtnamex -= 1
                origrefx = refx
                if refx > 0:
                    refx -= 1
                elif refx < 0:
                    refx = -refx - 1
                else:
                    dodgy = 1
            if blah:
                print("   origrefx=%d refx=%d tgtnamex=%d dodgy=%d" \
                    % (origrefx, refx, tgtnamex, dodgy), file=bk.logfile)
            # if tgtnamex == namex:
            #     if blah: print >> bk.logfile, "!!!! Self-referential !!!!"
            #     dodgy = any_err = 1
            # bug fix: ensure shx1/shx2 are bound even when dodgy (refx == 0),
            # otherwise the `shx1 == -5` test below raised NameError.
            shx1, shx2 = (-666, -666)
            if not dodgy:
                if bv >= 80:
                    shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
                elif origrefx > 0:
                    shx1, shx2 = (-4, -4) # external ref
                else:
                    exty = bk._externsheet_type_b57[refx]
                    if exty == 4: # non-specific sheet in own doc't
                        shx1, shx2 = (-1, -1) # internal, any sheet
                    else:
                        shx1, shx2 = (-666, -666)
            okind = oUNK
            ovalue = None
            if shx1 == -5: # addin func name
                okind = oSTRG
                ovalue = bk.addin_func_names[tgtnamex]
                otext = '"' + ovalue.replace('"', '""') + '"'
            elif dodgy or shx1 < -1:
                otext = "<<Name #%d in external(?) file #%d>>" \
                        % (tgtnamex, origrefx)
            else:
                tgtobj = bk.name_obj_list[tgtnamex]
                if tgtobj.scope == -1:
                    otext = tgtobj.name
                else:
                    otext = "%s!%s" \
                            % (bk._sheet_names[tgtobj.scope], tgtobj.name)
            if blah:
                # bug fix: log the text actually being set (otext); the old
                # code printed res.text of the placeholder Operand created
                # above, i.e. stale text (compare evaluate_name_formula,
                # which prints after the assignment).
                print("    tNameX: setting text to", repr(otext), file=bk.logfile)
            res = Operand(okind, ovalue, LEAF_RANK, otext)
            spush(res)
        elif opcode in error_opcodes:
            any_err = 1
            spush(error_opnd)
        else:
            if blah:
                print("FORMULA: /// Not handled yet: t" + oname, file=bk.logfile)
            any_err = 1
        if sz <= 0:
            raise FormulaError("Fatal: token size is not positive")
        pos += sz
    any_rel = not not any_rel
    if blah:
        print("End of formula. level=%d any_rel=%d any_err=%d stack=%r" % \
            (level, not not any_rel, any_err, stack), file=bk.logfile)
        if len(stack) >= 2:
            print("*** Stack has unprocessed args", file=bk.logfile)
        print(file=bk.logfile)
    if len(stack) != 1:
        result = None
    else:
        result = stack[0].text
    return result
#### under deconstruction ###
def dump_formula(bk, data, fmlalen, bv, reldelta, blah=0, isname=0):
    """Debug helper: walk a formula's token stream and print a description
    of each token to bk.logfile.

    Nothing is returned; this is purely for diagnostic output.  Only
    BIFF8 is supported (see the assert below).
    NOTE(review): the isname argument is currently unused -- TODO confirm
    whether it was meant to affect reldelta handling.
    """
    if blah:
        print("dump_formula", fmlalen, bv, len(data), file=bk.logfile)
        hex_char_dump(data, 0, fmlalen, fout=bk.logfile)
    assert bv >= 80 #### this function needs updating ####
    sztab = szdict[bv]
    pos = 0
    stack = []
    any_rel = 0
    any_err = 0
    spush = stack.append
    while 0 <= pos < fmlalen:
        # Token byte: low 5 bits are the opcode, bits 5-6 the operand class.
        op = BYTES_ORD(data[pos])
        opcode = op & 0x1f
        optype = (op & 0x60) >> 5
        if optype:
            opx = opcode + 32
        else:
            opx = opcode
        oname = onames[opx] # + [" RVA"][optype]
        sz = sztab[opx]
        if blah:
            print("Pos:%d Op:0x%02x Name:t%s Sz:%d opcode:%02xh optype:%02xh" \
                % (pos, op, oname, sz, opcode, optype), file=bk.logfile)
        if not optype:
            # Control / operator tokens (operand class 0).
            if 0x01 <= opcode <= 0x02: # tExp, tTbl
                # reference to a shared formula or table record
                rowx, colx = unpack("<HH", data[pos+1:pos+5])
                if blah: print("  ", (rowx, colx), file=bk.logfile)
            elif opcode == 0x10: # tList
                if blah: print("tList pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                # Coordinate-box lists are simply concatenated.
                spush(aop + bop)
                if blah: print("tlist post", stack, file=bk.logfile)
            elif opcode == 0x11: # tRange
                if blah: print("tRange pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                assert len(aop) == 1
                assert len(bop) == 1
                result = do_box_funcs(tRangeFuncs, aop[0], bop[0])
                spush(result)
                if blah: print("tRange post", stack, file=bk.logfile)
            elif opcode == 0x0F: # tIsect
                if blah: print("tIsect pre", stack, file=bk.logfile)
                assert len(stack) >= 2
                bop = stack.pop()
                aop = stack.pop()
                assert len(aop) == 1
                assert len(bop) == 1
                result = do_box_funcs(tIsectFuncs, aop[0], bop[0])
                spush(result)
                if blah: print("tIsect post", stack, file=bk.logfile)
            elif opcode == 0x19: # tAttr
                subop, nc = unpack("<BH", data[pos+1:pos+4])
                subname = tAttrNames.get(subop, "??Unknown??")
                if subop == 0x04: # Choose
                    # variable size: a jump table of nc 2-byte entries
                    sz = nc * 2 + 6
                else:
                    sz = 4
                if blah: print("   subop=%02xh subname=t%s sz=%d nc=%02xh" % (subop, subname, sz, nc), file=bk.logfile)
            elif opcode == 0x17: # tStr
                if bv <= 70:
                    nc = BYTES_ORD(data[pos+1])
                    strg = data[pos+2:pos+2+nc] # left in 8-bit encoding
                    sz = nc + 2
                else:
                    strg, newpos = unpack_unicode_update_pos(data, pos+1, lenlen=1)
                    sz = newpos - pos
                if blah: print("   sz=%d strg=%r" % (sz, strg), file=bk.logfile)
            else:
                if sz <= 0:
                    print("**** Dud size; exiting ****", file=bk.logfile)
                    return
            pos += sz
            continue
        # Operand tokens (operand class 1/2/3).
        if opcode == 0x00: # tArray
            pass
        elif opcode == 0x01: # tFunc
            nb = 1 + int(bv >= 40)
            # NOTE: funcx stays a 1-tuple here; "%d" % (x,) still formats it.
            funcx = unpack("<" + "   BH"[nb], data[pos+1:pos+1+nb])
            if blah: print("   FuncID=%d" % funcx, file=bk.logfile)
        elif opcode == 0x02: #tFuncVar
            nb = 1 + int(bv >= 40)
            nargs, funcx = unpack("<B" + "   BH"[nb], data[pos+1:pos+2+nb])
            prompt, nargs = divmod(nargs, 128)
            macro, funcx = divmod(funcx, 32768)
            if blah: print("   FuncID=%d nargs=%d macro=%d prompt=%d" % (funcx, nargs, macro, prompt), file=bk.logfile)
        elif opcode == 0x03: #tName
            namex = unpack("<H", data[pos+1:pos+3])
            # Only change with BIFF version is the number of trailing UNUSED bytes!!!
            if blah: print("   namex=%d" % namex, file=bk.logfile)
        elif opcode == 0x04: # tRef
            res = get_cell_addr(data, pos+1, bv, reldelta)
            if blah: print("  ", res, file=bk.logfile)
        elif opcode == 0x05: # tArea
            res = get_cell_range_addr(data, pos+1, bv, reldelta)
            if blah: print("  ", res, file=bk.logfile)
        elif opcode == 0x09: # tMemFunc
            nb = unpack("<H", data[pos+1:pos+3])[0]
            if blah: print("  %d bytes of cell ref formula" % nb, file=bk.logfile)
        elif opcode == 0x0C: #tRefN
            res = get_cell_addr(data, pos+1, bv, reldelta=1)
            # note *ALL* tRefN usage has signed offset for relative addresses
            any_rel = 1
            if blah: print("   ", res, file=bk.logfile)
        elif opcode == 0x0D: #tAreaN
            res = get_cell_range_addr(data, pos+1, bv, reldelta=1)
            # note *ALL* tAreaN usage has signed offset for relative addresses
            any_rel = 1
            if blah: print("   ", res, file=bk.logfile)
        elif opcode == 0x1A: # tRef3d
            refx = unpack("<H", data[pos+1:pos+3])[0]
            res = get_cell_addr(data, pos+3, bv, reldelta)
            if blah: print("  ", refx, res, file=bk.logfile)
            rowx, colx, row_rel, col_rel = res
            any_rel = any_rel or row_rel or col_rel
            shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
            any_err |= shx1 < -1
            coords = (shx1, shx2+1, rowx, rowx+1, colx, colx+1)
            if blah: print("   ", coords, file=bk.logfile)
            if optype == 1: spush([coords])
        elif opcode == 0x1B: # tArea3d
            refx = unpack("<H", data[pos+1:pos+3])[0]
            res1, res2 = get_cell_range_addr(data, pos+3, bv, reldelta)
            if blah: print("  ", refx, res1, res2, file=bk.logfile)
            rowx1, colx1, row_rel1, col_rel1 = res1
            rowx2, colx2, row_rel2, col_rel2 = res2
            any_rel = any_rel or row_rel1 or col_rel1 or row_rel2 or col_rel2
            shx1, shx2 = get_externsheet_local_range(bk, refx, blah)
            any_err |= shx1 < -1
            coords = (shx1, shx2+1, rowx1, rowx2+1, colx1, colx2+1)
            if blah: print("   ", coords, file=bk.logfile)
            if optype == 1: spush([coords])
        elif opcode == 0x19: # tNameX
            refx, namex = unpack("<HH", data[pos+1:pos+5])
            if blah: print("   refx=%d namex=%d" % (refx, namex), file=bk.logfile)
        elif opcode in error_opcodes:
            any_err = 1
        else:
            if blah: print("FORMULA: /// Not handled yet: t" + oname, file=bk.logfile)
            any_err = 1
        if sz <= 0:
            print("**** Dud size; exiting ****", file=bk.logfile)
            return
        pos += sz
    if blah:
        print("End of formula. any_rel=%d any_err=%d stack=%r" % \
            (not not any_rel, any_err, stack), file=bk.logfile)
        if len(stack) >= 2:
            print("*** Stack has unprocessed args", file=bk.logfile)
# === Some helper functions for displaying cell references ===
# I'm aware of only one possibility of a sheet-relative component in
# a reference: a 2D reference located in the "current sheet".
# xlrd stores this internally with bounds of (0, 1, ...) and
# relative flags of (1, 1, ...). These functions display the
# sheet component as empty, just like Excel etc.
def rownamerel(rowx, rowxrel, browx=None, r1c1=0):
    """Render a (possibly relative) row component in A1 or R1C1 style."""
    # Without a base row an A1-style relative row cannot be computed,
    # so fall back to R1C1 notation.
    if browx is None:
        r1c1 = True
    if not rowxrel:
        return ("R%d" if r1c1 else "$%d") % (rowx + 1)
    if r1c1:
        return "R[%d]" % rowx if rowx else "R"
    return str((browx + rowx) % 65536 + 1)
def colnamerel(colx, colxrel, bcolx=None, r1c1=0):
    """Render a (possibly relative) column component in A1 or R1C1 style."""
    # Without a base column an A1-style relative column cannot be computed,
    # so fall back to R1C1 notation.
    if bcolx is None:
        r1c1 = True
    if not colxrel:
        return "C%d" % (colx + 1) if r1c1 else "$" + colname(colx)
    if r1c1:
        return "C[%d]" % colx if colx else "C"
    return colname((bcolx + colx) % 256)
##
# Utility function: (5, 7) => 'H6'
def cellname(rowx, colx):
    """ (5, 7) => 'H6' """
    return colname(colx) + str(rowx + 1)
##
# Utility function: (5, 7) => '$H$6'
def cellnameabs(rowx, colx, r1c1=0):
    """ (5, 7) => '$H$6', or 'R6C8' in R1C1 mode """
    if r1c1:
        return "R" + str(rowx + 1) + "C" + str(colx + 1)
    return "$%s$%d" % (colname(colx), rowx + 1)
def cellnamerel(rowx, colx, rowxrel, colxrel, browx=None, bcolx=None, r1c1=0):
    """Render a cell reference whose row/column may each be relative."""
    # Fully absolute references delegate straight to cellnameabs.
    if not (rowxrel or colxrel):
        return cellnameabs(rowx, colx, r1c1)
    # A relative component without its base coordinate forces the whole
    # cell into R1C1 mode.
    if (rowxrel and browx is None) or (colxrel and bcolx is None):
        r1c1 = True
    rpart = rownamerel(rowx, rowxrel, browx, r1c1)
    cpart = colnamerel(colx, colxrel, bcolx, r1c1)
    # R1C1 puts the row first; A1 puts the column first.
    return rpart + cpart if r1c1 else cpart + rpart
##
# Utility function: 7 => 'H', 27 => 'AB'
def colname(colx):
    """ 7 => 'H', 27 => 'AB', 702 => 'AAA' """
    # Generalized from the original two-letter limit (the old code raised
    # IndexError for colx >= 702) to any non-negative column index, using
    # bijective base-26 numbering.  Results for 0..701 are unchanged.
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    name = ""
    while True:
        colx, rem = divmod(colx, 26)
        name = alphabet[rem] + name
        if not colx:
            return name
        colx -= 1
def rangename2d(rlo, rhi, clo, chi, r1c1=0):
    """ (5, 20, 7, 10) => '$H$6:$J$20' (or the R1C1 equivalent).

    Bounds are half-open: rhi and chi are one past the last row/column.
    A single-cell range collapses to a single cell name.
    """
    # Bug fix: the original did `if r1c1: return`, silently yielding None
    # in R1C1 mode; cellnameabs already handles both notations, so use the
    # same logic for both.  (The in-file caller rangename3d passes r1c1=0
    # and is unaffected.)
    if rhi == rlo+1 and chi == clo+1:
        return cellnameabs(rlo, clo, r1c1)
    return "%s:%s" % (
        cellnameabs(rlo, clo, r1c1), cellnameabs(rhi-1, chi-1, r1c1))
def rangename2drel(rlo_rhi_clo_chi, rlorel_rhirel_clorel_chirel, browx=None, bcolx=None, r1c1=0):
    """Render a 2-D range whose corners may have relative components."""
    rlo, rhi, clo, chi = rlo_rhi_clo_chi
    rlorel, rhirel, clorel, chirel = rlorel_rhirel_clorel_chirel
    # Any relative row/column component without its base coordinate forces
    # R1C1 notation for the whole range.
    if browx is None and (rlorel or rhirel):
        r1c1 = True
    if bcolx is None and (clorel or chirel):
        r1c1 = True
    start = cellnamerel(rlo, clo, rlorel, clorel, browx, bcolx, r1c1)
    end = cellnamerel(rhi-1, chi-1, rhirel, chirel, browx, bcolx, r1c1)
    return start + ":" + end
##
# Utility function:
# <br /> Ref3D((1, 4, 5, 20, 7, 10)) => 'Sheet2:Sheet3!$H$6:$J$20'
def rangename3d(book, ref3d):
    """ Ref3D(1, 4, 5, 20, 7, 10) => 'Sheet2:Sheet3!$H$6:$J$20'
    (assuming Excel's default sheetnames) """
    shx1, shx2, rlo, rhi, clo, chi = ref3d.coords
    return "%s!%s" % (
        sheetrange(book, shx1, shx2),
        rangename2d(rlo, rhi, clo, chi))
##
# Utility function:
# <br /> Ref3D(coords=(0, 1, -32, -22, -13, 13), relflags=(0, 0, 1, 1, 1, 1))
# R1C1 mode => 'Sheet1!R[-32]C[-13]:R[-23]C[12]'
# A1 mode => depends on base cell (browx, bcolx)
def rangename3drel(book, ref3d, browx=None, bcolx=None, r1c1=0):
    """Render a 3-D reference whose sheet and/or cell parts may be relative."""
    coords = ref3d.coords
    relflags = ref3d.relflags
    sheet_part = sheetrangerel(book, coords[:2], relflags[:2])
    range_part = rangename2drel(coords[2:6], relflags[2:6], browx, bcolx, r1c1)
    # An empty sheet part means "current sheet": omit the '!' separator.
    if sheet_part:
        return "%s!%s" % (sheet_part, range_part)
    return range_part
def quotedsheetname(shnames, shx):
    """Return the sheet name for index shx, quoted Excel-style if needed.

    Negative indexes denote xlrd's internal pseudo-sheet markers.
    """
    if shx < 0:
        shname = {
            -1: "?internal; any sheet?",
            -2: "internal; deleted sheet",
            -3: "internal; macro sheet",
            -4: "<<external>>",
            }.get(shx, "?error %d?" % shx)
    else:
        shname = shnames[shx]
    # Excel doubles embedded apostrophes inside a quoted name.
    if "'" in shname:
        return "'" + shname.replace("'", "''") + "'"
    if " " in shname:
        return "'" + shname + "'"
    return shname
def sheetrange(book, slo, shi):
    """Render the half-open sheet span [slo, shi) as 'Sheet1' or 'Sheet1:Sheet3'."""
    names = book.sheet_names()
    first = quotedsheetname(names, slo)
    last_index = shi - 1
    if slo == last_index:
        return first
    return first + ":" + quotedsheetname(names, last_index)
def sheetrangerel(book, srange, srangerel):
    """Render a sheet span that may be relative.

    Only the "current sheet" pseudo-range (0, 1) may be flagged relative;
    it is rendered as an empty string (sheet component omitted).
    """
    slo, shi = srange
    slorel, shirel = srangerel
    if slorel or shirel:
        assert (slo == 0 == shi-1) and slorel and shirel
        return ""
    return sheetrange(book, slo, shi)
# ==============================================================
| apache-2.0 |
40223247/2015cdb_0622 | static/Brython3.1.1-20150328-091302/Lib/threading.py | 730 | 45641 | """Thread module emulating a subset of Java's threading model."""
import sys as _sys
import _thread
from time import sleep as _sleep
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
from traceback import format_exc as _format_exc
from _weakrefset import WeakSet
# Note regarding PEP 8 compliant names
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. Those original names are not in any imminent danger of
# being deprecated (even for Py3k),so this module provides them as an
# alias for the PEP 8 compliant names
# Note that using the new PEP 8 compliant names facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
__all__ = ['active_count', 'Condition', 'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread', 'Barrier',
'Timer', 'ThreadError', 'setprofile', 'settrace', 'local', 'stack_size']
# Rename some stuff so "from threading import *" is safe
_start_new_thread = _thread.start_new_thread
_allocate_lock = _thread.allocate_lock
get_ident = _thread.get_ident
ThreadError = _thread.error
try:
_CRLock = _thread.RLock
except AttributeError:
_CRLock = None
TIMEOUT_MAX = _thread.TIMEOUT_MAX
del _thread
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
    """Install *func* as the profile hook for threads created via this module.

    Before each new thread's run() method executes, the stored hook is
    handed to sys.setprofile() for that thread.
    """
    global _profile_hook
    _profile_hook = func
def settrace(func):
    """Install *func* as the trace hook for threads created via this module.

    Before each new thread's run() method executes, the stored hook is
    handed to sys.settrace() for that thread.
    """
    global _trace_hook
    _trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
    """Factory function that returns a new reentrant lock.

    A reentrant lock must be released by the thread that acquired it. Once a
    thread has acquired a reentrant lock, the same thread may acquire it again
    without blocking; the thread must release it once for each time it has
    acquired it.
    """
    # Prefer the C implementation; fall back to the pure-Python _RLock
    # when _thread does not provide one.
    factory = _PyRLock if _CRLock is None else _CRLock
    return factory(*args, **kwargs)
class _RLock:
    """This class implements reentrant lock objects.
    A reentrant lock must be released by the thread that acquired it. Once a
    thread has acquired a reentrant lock, the same thread may acquire it
    again without blocking; the thread must release it once for each time it
    has acquired it.
    """
    def __init__(self):
        # The underlying non-reentrant lock primitive.
        self._block = _allocate_lock()
        # Ident of the owning thread, or None when the lock is unowned.
        self._owner = None
        # Recursion level; 0 means unowned.
        self._count = 0
    def __repr__(self):
        owner = self._owner
        # _active (module-level) presumably maps thread idents to Thread
        # objects; fall back to the raw ident if there is no entry.
        try:
            owner = _active[owner].name
        except KeyError:
            pass
        return "<%s owner=%r count=%d>" % (
            self.__class__.__name__, owner, self._count)
    def acquire(self, blocking=True, timeout=-1):
        """Acquire a lock, blocking or non-blocking.
        When invoked without arguments: if this thread already owns the lock,
        increment the recursion level by one, and return immediately. Otherwise,
        if another thread owns the lock, block until the lock is unlocked. Once
        the lock is unlocked (not owned by any thread), then grab ownership, set
        the recursion level to one, and return. If more than one thread is
        blocked waiting until the lock is unlocked, only one at a time will be
        able to grab ownership of the lock. There is no return value in this
        case.
        When invoked with the blocking argument set to true, do the same thing
        as when called without arguments, and return true.
        When invoked with the blocking argument set to false, do not block. If a
        call without an argument would block, return false immediately;
        otherwise, do the same thing as when called without arguments, and
        return true.
        When invoked with the floating-point timeout argument set to a positive
        value, block for at most the number of seconds specified by timeout
        and as long as the lock cannot be acquired. Return true if the lock has
        been acquired, false if the timeout has elapsed.
        """
        me = get_ident()
        # Fast path: re-entrant acquisition by the owner just bumps the count.
        if self._owner == me:
            self._count = self._count + 1
            return 1
        rc = self._block.acquire(blocking, timeout)
        if rc:
            self._owner = me
            self._count = 1
        return rc
    # Context-manager protocol: entering the `with` block acquires the lock.
    __enter__ = acquire
    def release(self):
        """Release a lock, decrementing the recursion level.
        If after the decrement it is zero, reset the lock to unlocked (not owned
        by any thread), and if any other threads are blocked waiting for the
        lock to become unlocked, allow exactly one of them to proceed. If after
        the decrement the recursion level is still nonzero, the lock remains
        locked and owned by the calling thread.
        Only call this method when the calling thread owns the lock. A
        RuntimeError is raised if this method is called when the lock is
        unlocked.
        There is no return value.
        """
        if self._owner != get_ident():
            raise RuntimeError("cannot release un-acquired lock")
        self._count = count = self._count - 1
        # Only the outermost release actually frees the underlying lock.
        if not count:
            self._owner = None
            self._block.release()
    def __exit__(self, t, v, tb):
        self.release()
    # Internal methods used by condition variables
    def _acquire_restore(self, state):
        # Re-acquire after a Condition.wait and restore (count, owner).
        self._block.acquire()
        self._count, self._owner = state
    def _release_save(self):
        # Fully release the lock (regardless of recursion level) and return
        # the saved (count, owner) state for a later _acquire_restore.
        if self._count == 0:
            raise RuntimeError("cannot release un-acquired lock")
        count = self._count
        self._count = 0
        owner = self._owner
        self._owner = None
        self._block.release()
        return (count, owner)
    def _is_owned(self):
        return self._owner == get_ident()
_PyRLock = _RLock
class Condition:
    """Class that implements a condition variable.
    A condition variable allows one or more threads to wait until they are
    notified by another thread.
    If the lock argument is given and not None, it must be a Lock or RLock
    object, and it is used as the underlying lock. Otherwise, a new RLock object
    is created and used as the underlying lock.
    """
    def __init__(self, lock=None):
        if lock is None:
            lock = RLock()
        self._lock = lock
        # Export the lock's acquire() and release() methods
        self.acquire = lock.acquire
        self.release = lock.release
        # If the lock defines _release_save() and/or _acquire_restore(),
        # these override the default implementations (which just call
        # release() and acquire() on the lock). Ditto for _is_owned().
        try:
            self._release_save = lock._release_save
        except AttributeError:
            pass
        try:
            self._acquire_restore = lock._acquire_restore
        except AttributeError:
            pass
        try:
            self._is_owned = lock._is_owned
        except AttributeError:
            pass
        # One freshly-allocated lock per waiting thread; each waiter blocks on
        # its own entry until notify()/notify_all() releases it.
        self._waiters = []
    def __enter__(self):
        return self._lock.__enter__()
    def __exit__(self, *args):
        return self._lock.__exit__(*args)
    def __repr__(self):
        return "<Condition(%s, %d)>" % (self._lock, len(self._waiters))
    def _release_save(self):
        self._lock.release() # No state to save
    def _acquire_restore(self, x):
        self._lock.acquire() # Ignore saved state
    def _is_owned(self):
        # Return True if lock is owned by current_thread.
        # This method is called only if self._lock doesn't have _is_owned().
        if self._lock.acquire(0):
            self._lock.release()
            return False
        else:
            return True
    def wait(self, timeout=None):
        """Wait until notified or until a timeout occurs.
        If the calling thread has not acquired the lock when this method is
        called, a RuntimeError is raised.
        This method releases the underlying lock, and then blocks until it is
        awakened by a notify() or notify_all() call for the same condition
        variable in another thread, or until the optional timeout occurs. Once
        awakened or timed out, it re-acquires the lock and returns.
        When the timeout argument is present and not None, it should be a
        floating point number specifying a timeout for the operation in seconds
        (or fractions thereof).
        When the underlying lock is an RLock, it is not released using its
        release() method, since this may not actually unlock the lock when it
        was acquired multiple times recursively. Instead, an internal interface
        of the RLock class is used, which really unlocks it even when it has
        been recursively acquired several times. Another internal interface is
        then used to restore the recursion level when the lock is reacquired.
        """
        if not self._is_owned():
            raise RuntimeError("cannot wait on un-acquired lock")
        waiter = _allocate_lock()
        waiter.acquire()
        self._waiters.append(waiter)
        saved_state = self._release_save()
        try:    # restore state no matter what (e.g., KeyboardInterrupt)
            if timeout is None:
                waiter.acquire()
                gotit = True
            else:
                if timeout > 0:
                    gotit = waiter.acquire(True, timeout)
                else:
                    gotit = waiter.acquire(False)
                if not gotit:
                    # Timed out: deregister ourselves, unless a concurrent
                    # notify() already removed (and released) our waiter.
                    try:
                        self._waiters.remove(waiter)
                    except ValueError:
                        pass
            return gotit
        finally:
            self._acquire_restore(saved_state)
    def wait_for(self, predicate, timeout=None):
        """Wait until a condition evaluates to True.
        predicate should be a callable which result will be interpreted as a
        boolean value. A timeout may be provided giving the maximum time to
        wait.  Returns the last value of predicate().
        """
        endtime = None
        waittime = timeout
        result = predicate()
        while not result:
            if waittime is not None:
                if endtime is None:
                    endtime = _time() + waittime
                else:
                    waittime = endtime - _time()
                    if waittime <= 0:
                        break
            self.wait(waittime)
            result = predicate()
        return result
    def notify(self, n=1):
        """Wake up one or more threads waiting on this condition, if any.
        If the calling thread has not acquired the lock when this method is
        called, a RuntimeError is raised.
        This method wakes up at most n of the threads waiting for the condition
        variable; it is a no-op if no threads are waiting.
        """
        if not self._is_owned():
            raise RuntimeError("cannot notify on un-acquired lock")
        __waiters = self._waiters
        waiters = __waiters[:n]
        if not waiters:
            return
        for waiter in waiters:
            waiter.release()
            # The waiter may have timed out and removed itself concurrently.
            try:
                __waiters.remove(waiter)
            except ValueError:
                pass
    def notify_all(self):
        """Wake up all threads waiting on this condition.
        If the calling thread has not acquired the lock when this method
        is called, a RuntimeError is raised.
        """
        self.notify(len(self._waiters))
    # Backward-compatible camelCase alias.
    notifyAll = notify_all
class Semaphore:
    """This class implements semaphore objects.
    Semaphores manage a counter representing the number of release() calls minus
    the number of acquire() calls, plus an initial value. The acquire() method
    blocks if necessary until it can return without making the counter
    negative. If not given, value defaults to 1.
    """
    # After Tim Peters' semaphore class, but not quite the same (no maximum)
    def __init__(self, value=1):
        if value < 0:
            raise ValueError("semaphore initial value must be >= 0")
        self._cond = Condition(Lock())
        self._value = value
    def acquire(self, blocking=True, timeout=None):
        """Acquire a semaphore, decrementing the internal counter by one.
        When invoked without arguments: if the internal counter is larger than
        zero on entry, decrement it by one and return immediately. If it is zero
        on entry, block, waiting until some other thread has called release() to
        make it larger than zero. This is done with proper interlocking so that
        if multiple acquire() calls are blocked, release() will wake exactly one
        of them up. The implementation may pick one at random, so the order in
        which blocked threads are awakened should not be relied on. There is no
        return value in this case.
        When invoked with blocking set to true, do the same thing as when called
        without arguments, and return true.
        When invoked with blocking set to false, do not block. If a call without
        an argument would block, return false immediately; otherwise, do the
        same thing as when called without arguments, and return true.
        When invoked with a timeout other than None, it will block for at
        most timeout seconds. If acquire does not complete successfully in
        that interval, return false. Return true otherwise.
        """
        if not blocking and timeout is not None:
            raise ValueError("can't specify timeout for non-blocking acquire")
        rc = False
        endtime = None
        with self._cond:
            while self._value == 0:
                if not blocking:
                    break
                if timeout is not None:
                    if endtime is None:
                        endtime = _time() + timeout
                    else:
                        timeout = endtime - _time()
                        if timeout <= 0:
                            break
                self._cond.wait(timeout)
            # while/else: only runs when the loop condition became false
            # (counter > 0), i.e. not after a non-blocking/timeout break.
            else:
                self._value = self._value - 1
                rc = True
        return rc
    __enter__ = acquire
    def release(self):
        """Release a semaphore, incrementing the internal counter by one.
        When the counter is zero on entry and another thread is waiting for it
        to become larger than zero again, wake up that thread.
        """
        with self._cond:
            self._value = self._value + 1
            self._cond.notify()
    def __exit__(self, t, v, tb):
        self.release()
class BoundedSemaphore(Semaphore):
    """Implements a bounded semaphore.
    A bounded semaphore checks to make sure its current value doesn't exceed its
    initial value. If it does, ValueError is raised. In most situations
    semaphores are used to guard resources with limited capacity.
    If the semaphore is released too many times it's a sign of a bug. If not
    given, value defaults to 1.
    Like regular semaphores, bounded semaphores manage a counter representing
    the number of release() calls minus the number of acquire() calls, plus an
    initial value. The acquire() method blocks if necessary until it can return
    without making the counter negative. If not given, value defaults to 1.
    """
    def __init__(self, value=1):
        Semaphore.__init__(self, value)
        # Remember the ceiling so release() can detect over-release.
        self._initial_value = value
    def release(self):
        """Release a semaphore, incrementing the internal counter by one.
        When the counter is zero on entry and another thread is waiting for it
        to become larger than zero again, wake up that thread.
        If the number of releases exceeds the number of acquires,
        raise a ValueError.
        """
        with self._cond:
            if self._value >= self._initial_value:
                raise ValueError("Semaphore released too many times")
            self._value += 1
            self._cond.notify()
class Event:
    """An event object: a boolean flag that threads can wait on.

    The internal flag starts out false.  set() makes it true and wakes every
    waiting thread, clear() resets it to false, and wait() blocks until the
    flag is true (or an optional timeout elapses).
    """
    # After Tim Peters' event class (without is_posted())
    def __init__(self):
        self._cond = Condition(Lock())
        self._flag = False

    def _reset_internal_locks(self):
        # private! called by Thread._reset_internal_locks by _after_fork()
        self._cond.__init__()

    def is_set(self):
        """Return true if and only if the internal flag is true."""
        return self._flag

    # Backward-compatible camelCase alias.
    isSet = is_set

    def set(self):
        """Set the internal flag to true and wake all waiters.

        Threads that call wait() once the flag is true will not block at all.
        """
        with self._cond:
            self._flag = True
            self._cond.notify_all()

    def clear(self):
        """Reset the internal flag to false.

        Subsequently, threads calling wait() will block until set() is called
        to set the internal flag to true again.
        """
        with self._cond:
            self._flag = False

    def wait(self, timeout=None):
        """Block until the internal flag is true.

        If the flag is already true, return immediately.  Otherwise block
        until another thread calls set(), or until *timeout* seconds (a float
        is accepted) have elapsed when timeout is not None.

        Return the internal flag on exit: always True unless a timeout was
        given and the operation timed out.
        """
        with self._cond:
            if self._flag:
                return True
            return self._cond.wait(timeout)
# A barrier class. Inspired in part by the pthread_barrier_* api and
# the CyclicBarrier class from Java. See
# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and
# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/
# CyclicBarrier.html
# for information.
# We maintain two main states, 'filling' and 'draining' enabling the barrier
# to be cyclic. Threads are not allowed into it until it has fully drained
# since the previous cycle. In addition, a 'resetting' state exists which is
# similar to 'draining' except that threads leave with a BrokenBarrierError,
# and a 'broken' state in which all threads get the exception.
class Barrier:
    """Implements a Barrier.
    Useful for synchronizing a fixed number of threads at known synchronization
    points. Threads block on 'wait()' and are simultaneously awoken once they
    have all made that call.
    """
    def __init__(self, parties, action=None, timeout=None):
        """Create a barrier, initialised to 'parties' threads.
        'action' is a callable which, when supplied, will be called by one of
        the threads after they have all entered the barrier and just prior to
        releasing them all. If a 'timeout' is provided, it is used as the
        default for all subsequent 'wait()' calls.
        """
        self._cond = Condition(Lock())
        self._action = action
        self._timeout = timeout
        self._parties = parties
        self._state = 0 # 0 filling, 1 draining, -1 resetting, -2 broken
        self._count = 0
    def wait(self, timeout=None):
        """Wait for the barrier.
        When the specified number of threads have started waiting, they are all
        simultaneously awoken. If an 'action' was provided for the barrier, one
        of the threads will have executed that callback prior to returning.
        Returns an individual index number from 0 to 'parties-1'.
        """
        if timeout is None:
            timeout = self._timeout
        with self._cond:
            self._enter() # Block while the barrier drains.
            index = self._count
            self._count += 1
            try:
                if index + 1 == self._parties:
                    # We release the barrier
                    self._release()
                else:
                    # We wait until someone releases us
                    self._wait(timeout)
                return index
            finally:
                self._count -= 1
                # Wake up any threads waiting for barrier to drain.
                self._exit()
    # Block until the barrier is ready for us, or raise an exception
    # if it is broken.
    def _enter(self):
        while self._state in (-1, 1):
            # It is draining or resetting, wait until done
            self._cond.wait()
        # see if the barrier is in a broken state
        if self._state < 0:
            raise BrokenBarrierError
        assert self._state == 0
    # Optionally run the 'action' and release the threads waiting
    # in the barrier.
    def _release(self):
        try:
            if self._action:
                self._action()
            # enter draining state
            self._state = 1
            self._cond.notify_all()
        except:
            # an exception during the _action handler. Break and reraise
            self._break()
            raise
    # Wait in the barrier until we are released. Raise an exception
    # if the barrier is reset or broken.
    def _wait(self, timeout):
        if not self._cond.wait_for(lambda : self._state != 0, timeout):
            # timed out. Break the barrier
            self._break()
            raise BrokenBarrierError
        if self._state < 0:
            raise BrokenBarrierError
        assert self._state == 1
    # If we are the last thread to exit the barrier, signal any threads
    # waiting for the barrier to drain.
    def _exit(self):
        if self._count == 0:
            if self._state in (-1, 1):
                # resetting or draining
                self._state = 0
                self._cond.notify_all()
    def reset(self):
        """Reset the barrier to the initial state.
        Any threads currently waiting will get the BrokenBarrier exception
        raised.
        """
        with self._cond:
            if self._count > 0:
                if self._state == 0:
                    # reset the barrier, waking up threads
                    self._state = -1
                elif self._state == -2:
                    # was broken, set it to reset state
                    # which clears when the last thread exits
                    self._state = -1
            else:
                self._state = 0
            self._cond.notify_all()
    def abort(self):
        """Place the barrier into a 'broken' state.
        Useful in case of error. Any currently waiting threads and threads
        attempting to 'wait()' will have BrokenBarrierError raised.
        """
        with self._cond:
            self._break()
    def _break(self):
        # An internal error was detected. The barrier is set to
        # a broken state and all parties awakened.
        self._state = -2
        self._cond.notify_all()
    @property
    def parties(self):
        """Return the number of threads required to trip the barrier."""
        return self._parties
    @property
    def n_waiting(self):
        """Return the number of threads currently waiting at the barrier."""
        # We don't need synchronization here since this is an ephemeral result
        # anyway. It returns the correct value in the steady state.
        if self._state == 0:
            return self._count
        return 0
    @property
    def broken(self):
        """Return True if the barrier is in a broken state."""
        return self._state == -2
# exception raised by the Barrier class
class BrokenBarrierError(RuntimeError):
    """Raised when a Barrier is broken: reset or aborted while threads are
    waiting, or when a wait() times out (which breaks it for everyone)."""
    pass
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()  # protects _active and _limbo
_active = {}    # maps thread id to Thread object
_limbo = {}     # Thread objects whose start() is in progress, keyed by self
# For debug and leak testing
_dangling = WeakSet()
# Main class for threads
class Thread:
    """A class that represents a thread of control.
    This class can be safely subclassed in a limited fashion. There are two ways
    to specify the activity: by passing a callable object to the constructor, or
    by overriding the run() method in a subclass.
    """
    # Single underscore (not name-mangled): all uses below are spelled
    # self._initialized, so a mangled __initialized class attribute would
    # never be found as a fallback before __init__ runs.
    _initialized = False
    # Need to store a reference to sys.exc_info for printing
    # out exceptions when a thread tries to use a global var. during interp.
    # shutdown and thus raises an exception about trying to perform some
    # operation on/with a NoneType.
    # Single underscore: _bootstrap_inner() calls self._exc_info(), which a
    # name-mangled __exc_info attribute would not satisfy (AttributeError).
    _exc_info = _sys.exc_info
    # Keep sys.exc_clear too to clear the exception just before
    # allowing .join() to return.
    #XXX _exc_clear = _sys.exc_clear
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, *, daemon=None):
        """This constructor should always be called with keyword arguments. Arguments are:
        *group* should be None; reserved for future extension when a ThreadGroup
        class is implemented.
        *target* is the callable object to be invoked by the run()
        method. Defaults to None, meaning nothing is called.
        *name* is the thread name. By default, a unique name is constructed of
        the form "Thread-N" where N is a small decimal number.
        *args* is the argument tuple for the target invocation. Defaults to ().
        *kwargs* is a dictionary of keyword arguments for the target
        invocation. Defaults to {}.
        If a subclass overrides the constructor, it must make sure to invoke
        the base class constructor (Thread.__init__()) before doing anything
        else to the thread.
        """
        assert group is None, "group argument must be None for now"
        if kwargs is None:
            kwargs = {}
        self._target = target
        self._name = str(name or _newname())
        self._args = args
        self._kwargs = kwargs
        if daemon is not None:
            self._daemonic = daemon
        else:
            # Daemon status is inherited from the creating thread.
            self._daemonic = current_thread().daemon
        self._ident = None
        self._started = Event()
        self._stopped = False
        self._block = Condition(Lock())
        self._initialized = True
        # sys.stderr is not stored in the class like
        # sys.exc_info since it can be changed between instances
        self._stderr = _sys.stderr
        _dangling.add(self)
    def _reset_internal_locks(self):
        # private!  Called by _after_fork() to reset our internal locks as
        # they may be in an invalid state leading to a deadlock or crash.
        if hasattr(self, '_block'):  # DummyThread deletes _block
            self._block.__init__()
        self._started._reset_internal_locks()
    def __repr__(self):
        assert self._initialized, "Thread.__init__() was not called"
        status = "initial"
        if self._started.is_set():
            status = "started"
        if self._stopped:
            status = "stopped"
        if self._daemonic:
            status += " daemon"
        if self._ident is not None:
            status += " %s" % self._ident
        return "<%s(%s, %s)>" % (self.__class__.__name__, self._name, status)
    def start(self):
        """Start the thread's activity.
        It must be called at most once per thread object. It arranges for the
        object's run() method to be invoked in a separate thread of control.
        This method will raise a RuntimeError if called more than once on the
        same thread object.
        """
        if not self._initialized:
            raise RuntimeError("thread.__init__() not called")
        if self._started.is_set():
            raise RuntimeError("threads can only be started once")
        with _active_limbo_lock:
            _limbo[self] = self
        try:
            _start_new_thread(self._bootstrap, ())
        except Exception:
            with _active_limbo_lock:
                del _limbo[self]
            raise
        # Block until the new thread has registered itself in _active.
        self._started.wait()
    def run(self):
        """Method representing the thread's activity.
        You may override this method in a subclass. The standard run() method
        invokes the callable object passed to the object's constructor as the
        target argument, if any, with sequential and keyword arguments taken
        from the args and kwargs arguments, respectively.
        """
        try:
            if self._target:
                self._target(*self._args, **self._kwargs)
        finally:
            # Avoid a refcycle if the thread is running a function with
            # an argument that has a member that points to the thread.
            del self._target, self._args, self._kwargs
    def _bootstrap(self):
        # Wrapper around the real bootstrap code that ignores
        # exceptions during interpreter cleanup. Those typically
        # happen when a daemon thread wakes up at an unfortunate
        # moment, finds the world around it destroyed, and raises some
        # random exception *** while trying to report the exception in
        # _bootstrap_inner() below ***. Those random exceptions
        # don't help anybody, and they confuse users, so we suppress
        # them. We suppress them only when it appears that the world
        # indeed has already been destroyed, so that exceptions in
        # _bootstrap_inner() during normal business hours are properly
        # reported. Also, we only suppress them for daemonic threads;
        # if a non-daemonic encounters this, something else is wrong.
        try:
            self._bootstrap_inner()
        except:
            if self._daemonic and _sys is None:
                return
            raise
    def _set_ident(self):
        # Record the OS-level thread identifier for this Thread object.
        self._ident = get_ident()
    def _bootstrap_inner(self):
        try:
            self._set_ident()
            self._started.set()
            with _active_limbo_lock:
                _active[self._ident] = self
                del _limbo[self]
            if _trace_hook:
                _sys.settrace(_trace_hook)
            if _profile_hook:
                _sys.setprofile(_profile_hook)
            try:
                self.run()
            except SystemExit:
                pass
            except:
                # If sys.stderr is no more (most likely from interpreter
                # shutdown) use self._stderr. Otherwise still use sys (as in
                # _sys) in case sys.stderr was redefined since the creation of
                # self.
                if _sys:
                    _sys.stderr.write("Exception in thread %s:\n%s\n" %
                                      (self.name, _format_exc()))
                else:
                    # Do the best job possible w/o a huge amt. of code to
                    # approximate a traceback (code ideas from
                    # Lib/traceback.py)
                    exc_type, exc_value, exc_tb = self._exc_info()
                    try:
                        print((
                            "Exception in thread " + self.name +
                            " (most likely raised during interpreter shutdown):"), file=self._stderr)
                        print((
                            "Traceback (most recent call last):"), file=self._stderr)
                        while exc_tb:
                            print((
                                '  File "%s", line %s, in %s' %
                                (exc_tb.tb_frame.f_code.co_filename,
                                 exc_tb.tb_lineno,
                                 exc_tb.tb_frame.f_code.co_name)), file=self._stderr)
                            exc_tb = exc_tb.tb_next
                        print(("%s: %s" % (exc_type, exc_value)), file=self._stderr)
                    # Make sure that exc_tb gets deleted since it is a memory
                    # hog; deleting everything else is just for thoroughness
                    finally:
                        del exc_type, exc_value, exc_tb
            finally:
                # Prevent a race in
                # test_threading.test_no_refcycle_through_target when
                # the exception keeps the target alive past when we
                # assert that it's dead.
                #XXX self._exc_clear()
                pass
        finally:
            with _active_limbo_lock:
                self._stop()
                try:
                    # We don't call self._delete() because it also
                    # grabs _active_limbo_lock.
                    del _active[get_ident()]
                except:
                    pass
    def _stop(self):
        # Mark the thread as stopped and wake any join()ing threads.
        self._block.acquire()
        self._stopped = True
        self._block.notify_all()
        self._block.release()
    def _delete(self):
        "Remove current thread from the dict of currently running threads."
        # Notes about running with _dummy_thread:
        #
        # Must take care to not raise an exception if _dummy_thread is being
        # used (and thus this module is being used as an instance of
        # dummy_threading). _dummy_thread.get_ident() always returns -1 since
        # there is only one thread if _dummy_thread is being used. Thus
        # len(_active) is always <= 1 here, and any Thread instance created
        # overwrites the (if any) thread currently registered in _active.
        #
        # An instance of _MainThread is always created by 'threading'. This
        # gets overwritten the instant an instance of Thread is created; both
        # threads return -1 from _dummy_thread.get_ident() and thus have the
        # same key in the dict. So when the _MainThread instance created by
        # 'threading' tries to clean itself up when atexit calls this method
        # it gets a KeyError if another Thread instance was created.
        #
        # This all means that KeyError from trying to delete something from
        # _active if dummy_threading is being used is a red herring. But
        # since it isn't if dummy_threading is *not* being used then don't
        # hide the exception.
        try:
            with _active_limbo_lock:
                del _active[get_ident()]
                # There must not be any python code between the previous line
                # and after the lock is released. Otherwise a tracing function
                # could try to acquire the lock again in the same thread, (in
                # current_thread()), and would block.
        except KeyError:
            if 'dummy_threading' not in _sys.modules:
                raise
    def join(self, timeout=None):
        """Wait until the thread terminates.
        This blocks the calling thread until the thread whose join() method is
        called terminates -- either normally or through an unhandled exception
        or until the optional timeout occurs.
        When the timeout argument is present and not None, it should be a
        floating point number specifying a timeout for the operation in seconds
        (or fractions thereof). As join() always returns None, you must call
        isAlive() after join() to decide whether a timeout happened -- if the
        thread is still alive, the join() call timed out.
        When the timeout argument is not present or None, the operation will
        block until the thread terminates.
        A thread can be join()ed many times.
        join() raises a RuntimeError if an attempt is made to join the current
        thread as that would cause a deadlock. It is also an error to join() a
        thread before it has been started and attempts to do so raises the same
        exception.
        """
        if not self._initialized:
            raise RuntimeError("Thread.__init__() not called")
        if not self._started.is_set():
            raise RuntimeError("cannot join thread before it is started")
        if self is current_thread():
            raise RuntimeError("cannot join current thread")
        self._block.acquire()
        try:
            if timeout is None:
                while not self._stopped:
                    self._block.wait()
            else:
                deadline = _time() + timeout
                while not self._stopped:
                    delay = deadline - _time()
                    if delay <= 0:
                        break
                    self._block.wait(delay)
        finally:
            self._block.release()
    @property
    def name(self):
        """A string used for identification purposes only.
        It has no semantics. Multiple threads may be given the same name. The
        initial name is set by the constructor.
        """
        assert self._initialized, "Thread.__init__() not called"
        return self._name
    @name.setter
    def name(self, name):
        assert self._initialized, "Thread.__init__() not called"
        self._name = str(name)
    @property
    def ident(self):
        """Thread identifier of this thread or None if it has not been started.
        This is a nonzero integer. See the thread.get_ident() function. Thread
        identifiers may be recycled when a thread exits and another thread is
        created. The identifier is available even after the thread has exited.
        """
        assert self._initialized, "Thread.__init__() not called"
        return self._ident
    def is_alive(self):
        """Return whether the thread is alive.
        This method returns True just before the run() method starts until just
        after the run() method terminates. The module function enumerate()
        returns a list of all alive threads.
        """
        assert self._initialized, "Thread.__init__() not called"
        return self._started.is_set() and not self._stopped
    isAlive = is_alive
    @property
    def daemon(self):
        """A boolean value indicating whether this thread is a daemon thread.
        This must be set before start() is called, otherwise RuntimeError is
        raised. Its initial value is inherited from the creating thread; the
        main thread is not a daemon thread and therefore all threads created in
        the main thread default to daemon = False.
        The entire Python program exits when no alive non-daemon threads are
        left.
        """
        assert self._initialized, "Thread.__init__() not called"
        return self._daemonic
    @daemon.setter
    def daemon(self, daemonic):
        if not self._initialized:
            raise RuntimeError("Thread.__init__() not called")
        if self._started.is_set():
            raise RuntimeError("cannot set daemon status of active thread")
        self._daemonic = daemonic
    def isDaemon(self):
        return self.daemon
    def setDaemon(self, daemonic):
        self.daemon = daemonic
    def getName(self):
        return self.name
    def setName(self, name):
        self.name = name
# The timer class was contributed by Itamar Shtull-Trauring
class Timer(Thread):
    """Call a function after a specified number of seconds:
    t = Timer(30.0, f, args=None, kwargs=None)
    t.start()
    t.cancel() # stop the timer's action if it's still waiting
    """
    def __init__(self, interval, function, args=None, kwargs=None):
        Thread.__init__(self)
        self.interval = interval
        self.function = function
        # Normalize missing argument containers without sharing mutable
        # defaults between instances.
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        self.args = args
        self.kwargs = kwargs
        self.finished = Event()
    def cancel(self):
        """Stop the timer if it hasn't finished yet."""
        self.finished.set()
    def run(self):
        # Sleep via the event so cancel() can interrupt the wait early.
        self.finished.wait(self.interval)
        if not self.finished.is_set():
            self.function(*self.args, **self.kwargs)
        self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
    """Thread object representing the interpreter's main thread.
    Created once at module import time and garbage collected through an
    exit handler (its _exitfunc is exposed as threading._shutdown)."""
    def __init__(self):
        Thread.__init__(self, name="MainThread", daemon=False)
        self._started.set()
        self._set_ident()
        with _active_limbo_lock:
            _active[self._ident] = self
    def _exitfunc(self):
        # Interpreter shutdown: mark the main thread stopped, then wait for
        # every remaining non-daemon thread before final cleanup.
        self._stop()
        t = _pickSomeNonDaemonThread()
        while t:
            t.join()
            t = _pickSomeNonDaemonThread()
        self._delete()
def _pickSomeNonDaemonThread():
    """Return some live non-daemon thread, or None when only daemons remain."""
    return next((t for t in enumerate() if not t.daemon and t.is_alive()), None)
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conform previous semantics).
class _DummyThread(Thread):
    """Placeholder for threads not started through this module.
    Returned by current_thread() for foreign threads; immortal (never removed
    from _active) and marked daemonic so interpreter exit won't wait on it."""
    def __init__(self):
        Thread.__init__(self, name=_newname("Dummy-%d"), daemon=True)
        # Thread._block consumes an OS-level locking primitive, which
        # can never be used by a _DummyThread. Since a _DummyThread
        # instance is immortal, that's bad, so release this resource.
        del self._block
        self._started.set()
        self._set_ident()
        with _active_limbo_lock:
            _active[self._ident] = self
    def _stop(self):
        # Nothing to stop: there is no _block and no join() support.
        pass
    def join(self, timeout=None):
        assert False, "cannot join a dummy thread"
# Global API functions
def current_thread():
    """Return the current Thread object, corresponding to the caller's thread of control.
    If the caller's thread of control was not created through the threading
    module, a dummy thread object with limited functionality is returned.
    """
    try:
        return _active[get_ident()]
    except KeyError:
        # Thread was started outside this module (e.g. via _thread directly):
        # hand back a freshly registered _DummyThread placeholder.
        return _DummyThread()
# Backward-compatible camelCase alias.
currentThread = current_thread
def active_count():
    """Return the number of Thread objects currently alive.
    The returned count is equal to the length of the list returned by
    enumerate().
    """
    with _active_limbo_lock:
        # Running threads plus threads whose start() is still in progress.
        return len(_active) + len(_limbo)
# Backward-compatible camelCase alias.
activeCount = active_count
def _enumerate():
    # Same as enumerate(), but without the lock. Internal use only.
    # Callers must hold _active_limbo_lock for a consistent snapshot.
    return list(_active.values()) + list(_limbo.values())
def enumerate():
    """Return a list of all Thread objects currently alive.
    The list includes daemonic threads, dummy thread objects created by
    current_thread(), and the main thread. It excludes terminated threads and
    threads that have not yet been started.
    """
    # Delegate to _enumerate() for the actual snapshot so the two functions
    # cannot drift apart; the lock makes the combined snapshot consistent.
    with _active_limbo_lock:
        return _enumerate()
from _thread import stack_size
# Create the main thread object (registering it in _active as a side effect)
# and expose its _exitfunc for the interpreter (Py_Main) to call at
# shutdown as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from _thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
    # This function is called by Python/ceval.c:PyEval_ReInitThreads which
    # is called from PyOS_AfterFork. Here we cleanup threading module state
    # that should not exist after a fork.
    # Reset _active_limbo_lock, in case we forked while the lock was held
    # by another (non-forked) thread. http://bugs.python.org/issue874900
    global _active_limbo_lock
    _active_limbo_lock = _allocate_lock()
    # fork() only copied the current thread; clear references to others.
    new_active = {}
    current = current_thread()
    with _active_limbo_lock:
        for thread in _enumerate():
            # Any lock/condition variable may be currently locked or in an
            # invalid state, so we reinitialize them.
            thread._reset_internal_locks()
            if thread is current:
                # There is only one active thread. We reset the ident to
                # its new value since it can have changed.
                ident = get_ident()
                thread._ident = ident
                new_active[ident] = thread
            else:
                # All the others are already stopped.
                thread._stop()
        _limbo.clear()
        _active.clear()
        _active.update(new_active)
        # After a fork only the forking thread survives.
        assert len(_active) == 1
| gpl-3.0 |
gunchleoc/django | tests/introspection/tests.py | 108 | 8561 | from __future__ import unicode_literals
from unittest import skipUnless
from django.db import connection
from django.db.utils import DatabaseError
from django.test import TransactionTestCase, mock, skipUnlessDBFeature
from .models import Article, Reporter
class IntrospectionTests(TransactionTestCase):
available_apps = ['introspection']
    def test_table_names(self):
        # table_names() must return a sorted list containing the backing
        # tables of both of this app's models.
        tl = connection.introspection.table_names()
        self.assertEqual(tl, sorted(tl))
        self.assertIn(Reporter._meta.db_table, tl,
                      "'%s' isn't in table_list()." % Reporter._meta.db_table)
        self.assertIn(Article._meta.db_table, tl,
                      "'%s' isn't in table_list()." % Article._meta.db_table)
    def test_django_table_names(self):
        # A table created outside the ORM must not appear in
        # django_table_names(), which lists only model-backed tables.
        with connection.cursor() as cursor:
            cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);')
            tl = connection.introspection.django_table_names()
            cursor.execute("DROP TABLE django_ixn_test_table;")
            self.assertNotIn('django_ixn_test_table', tl,
                             "django_table_names() returned a non-Django table")
def test_django_table_names_retval_type(self):
# Table name is a list #15216
tl = connection.introspection.django_table_names(only_existing=True)
self.assertIs(type(tl), list)
tl = connection.introspection.django_table_names(only_existing=False)
self.assertIs(type(tl), list)
    def test_table_names_with_views(self):
        # Database views are reported only when include_views=True.
        with connection.cursor() as cursor:
            try:
                cursor.execute(
                    'CREATE VIEW introspection_article_view AS SELECT headline '
                    'from introspection_article;')
            except DatabaseError as e:
                # Some test users may lack CREATE VIEW rights; fail loudly
                # rather than silently skipping the assertions below.
                if 'insufficient privileges' in str(e):
                    self.fail("The test user has no CREATE VIEW privileges")
                else:
                    raise
        self.assertIn('introspection_article_view',
                      connection.introspection.table_names(include_views=True))
        self.assertNotIn('introspection_article_view',
                         connection.introspection.table_names())
def test_installed_models(self):
tables = [Article._meta.db_table, Reporter._meta.db_table]
models = connection.introspection.installed_models(tables)
self.assertEqual(models, {Article, Reporter})
    def test_sequence_list(self):
        # The Reporter model's auto-increment pk must be reported by
        # sequence_list() as a (table, column) entry.
        sequences = connection.introspection.sequence_list()
        expected = {'table': Reporter._meta.db_table, 'column': 'id'}
        self.assertIn(expected, sequences,
                      'Reporter sequence not found in sequence_list()')
    def test_get_table_description_names(self):
        # Column names in get_table_description() must match the model's
        # field columns, in order.
        with connection.cursor() as cursor:
            desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        self.assertEqual([r[0] for r in desc],
                         [f.column for f in Reporter._meta.fields])
def test_get_table_description_types(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual(
[datatype(r[1], r) for r in desc],
['AutoField' if connection.features.can_introspect_autofield else 'IntegerField',
'CharField', 'CharField', 'CharField',
'BigIntegerField' if connection.features.can_introspect_big_integer_field else 'IntegerField',
'BinaryField' if connection.features.can_introspect_binary_field else 'TextField',
'SmallIntegerField' if connection.features.can_introspect_small_integer_field else 'IntegerField']
)
# The following test fails on Oracle due to #17202 (can't correctly
# inspect the length of character columns).
@skipUnlessDBFeature('can_introspect_max_length')
def test_get_table_description_col_lengths(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual(
[r[3] for r in desc if datatype(r[1], r) == 'CharField'],
[30, 30, 254]
)
@skipUnlessDBFeature('can_introspect_null')
def test_get_table_description_nullable(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
nullable_by_backend = connection.features.interprets_empty_strings_as_nulls
self.assertEqual(
[r[6] for r in desc],
[False, nullable_by_backend, nullable_by_backend, nullable_by_backend, True, True, False]
)
# Regression test for #9991 - 'real' types in postgres
@skipUnlessDBFeature('has_real_datatype')
def test_postgresql_real_type(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);")
desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table')
cursor.execute('DROP TABLE django_ixn_real_test_table;')
self.assertEqual(datatype(desc[0][1], desc[0]), 'FloatField')
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_get_relations(self):
with connection.cursor() as cursor:
relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
# That's {field_name: (field_name_other_table, other_table)}
expected_relations = {
'reporter_id': ('id', Reporter._meta.db_table),
'response_to_id': ('id', Article._meta.db_table),
}
self.assertEqual(relations, expected_relations)
# Removing a field shouldn't disturb get_relations (#17785)
body = Article._meta.get_field('body')
with connection.schema_editor() as editor:
editor.remove_field(Article, body)
with connection.cursor() as cursor:
relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
with connection.schema_editor() as editor:
editor.add_field(Article, body)
self.assertEqual(relations, expected_relations)
@skipUnless(connection.vendor == 'sqlite', "This is an sqlite-specific issue")
def test_get_relations_alt_format(self):
"""With SQLite, foreign keys can be added with different syntaxes."""
with connection.cursor() as cursor:
cursor.fetchone = mock.Mock(
return_value=[
"CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY(art_id) REFERENCES {}(id));".format(
Article._meta.db_table
)
]
)
relations = connection.introspection.get_relations(cursor, 'mocked_table')
self.assertEqual(relations, {'art_id': ('id', Article._meta.db_table)})
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_get_key_columns(self):
with connection.cursor() as cursor:
key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table)
self.assertEqual(
set(key_columns),
{('reporter_id', Reporter._meta.db_table, 'id'),
('response_to_id', Article._meta.db_table, 'id')})
def test_get_primary_key_column(self):
with connection.cursor() as cursor:
primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table)
self.assertEqual(primary_key_column, 'id')
def test_get_indexes(self):
with connection.cursor() as cursor:
indexes = connection.introspection.get_indexes(cursor, Article._meta.db_table)
self.assertEqual(indexes['reporter_id'], {'unique': False, 'primary_key': False})
def test_get_indexes_multicol(self):
"""
Test that multicolumn indexes are not included in the introspection
results.
"""
with connection.cursor() as cursor:
indexes = connection.introspection.get_indexes(cursor, Reporter._meta.db_table)
self.assertNotIn('first_name', indexes)
self.assertIn('id', indexes)
def datatype(dbtype, description):
    """Helper to convert a data type into a string."""
    field_type = connection.introspection.get_field_type(dbtype, description)
    # get_field_type() may return a tuple of (name, extra params); keep only
    # the field-type name itself.
    if type(field_type) is tuple:
        return field_type[0]
    return field_type
| bsd-3-clause |
praneethkumarpidugu/matchmaking | lib/python2.7/site-packages/django/utils/lru_cache.py | 270 | 7647 | try:
from functools import lru_cache
except ImportError:
# backport of Python's 3.3 lru_cache, written by Raymond Hettinger and
# licensed under MIT license, from:
# <http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/>
# Should be removed when Django only supports Python 3.2 and above.
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark = (object(),),
fasttypes = {int, str, frozenset, type(None)},
sorted=sorted, tuple=tuple, type=type, len=len):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
def decorating_function(user_function):
cache = dict()
stats = [0, 0] # make statistics updateable non-locally
HITS, MISSES = 0, 1 # names for the stats fields
make_key = _make_key
cache_get = cache.get # bound method to lookup key or return None
_len = len # localize the global len() function
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
nonlocal_root = [root] # make updateable non-locally
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
if maxsize == 0:
def wrapper(*args, **kwds):
# no caching, just do a statistics update after a successful call
result = user_function(*args, **kwds)
stats[MISSES] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(key, root) # root used here as a unique not-found sentinel
if result is not root:
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
stats[MISSES] += 1
return result
else:
def wrapper(*args, **kwds):
# size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed) if kwds or typed else args
with lock:
link = cache_get(key)
if link is not None:
# record recent use of the key by moving it to the front of the list
root, = nonlocal_root
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
with lock:
root, = nonlocal_root
if key in cache:
# getting here means that this same key was added to the
# cache while the lock was released. since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif _len(cache) >= maxsize:
# use the old root to store the new key and result
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# empty the oldest link and make it the new root
root = nonlocal_root[0] = oldroot[NEXT]
oldkey = root[KEY]
oldvalue = root[RESULT]
root[KEY] = root[RESULT] = None
# now update the cache dictionary for the new links
del cache[oldkey]
cache[key] = oldroot
else:
# put result in a new link at the front of the list
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
stats[MISSES] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root = nonlocal_root[0]
root[:] = [root, root, None, None]
stats[:] = [0, 0]
wrapper.__wrapped__ = user_function
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
| mit |
superdesk/Live-Blog | documentor/libraries/docutils-0.9.1-py3.2/docutils/io.py | 2 | 17894 | # $Id: io.py 7440 2012-06-13 14:14:12Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
I/O classes provide a uniform API for low-level input and output. Subclasses
will exist for a variety of input/output mechanisms.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import codecs
from docutils import TransformSpec
from docutils._compat import b
from docutils.error_reporting import locale_encoding, ErrorString, ErrorOutput
class InputError(IOError):
    """I/O error while opening a source (raised by FileInput)."""
    pass
class OutputError(IOError):
    """I/O error while opening a destination (raised by FileOutput)."""
    pass
def check_encoding(stream, encoding):
    """Test whether the encoding of `stream` matches `encoding`.

    Returns

    :None: if `encoding` or `stream.encoding` are not a valid encoding
           argument (e.g. ``None``) or `stream.encoding` is missing.
    :True: if the encoding argument resolves to the same value as `encoding`,
    :False: if the encodings differ.
    """
    try:
        stream_codec = codecs.lookup(stream.encoding)
        wanted_codec = codecs.lookup(encoding)
    except (LookupError, AttributeError, TypeError):
        # Unknown encoding name, or no `encoding` attribute on the stream:
        # no comparison is possible.
        return None
    return stream_codec == wanted_codec
class Input(TransformSpec):

    """
    Abstract base class for input wrappers.
    """

    component_type = 'input'

    default_source_path = None

    def __init__(self, source=None, source_path=None, encoding=None,
                 error_handler='strict'):
        self.encoding = encoding
        """Text encoding for the input source."""

        self.error_handler = error_handler
        """Text decoding error handler."""

        self.source = source
        """The source of input data."""

        self.source_path = source_path
        """A text reference to the source."""

        if not source_path:
            self.source_path = self.default_source_path

        self.successful_encoding = None
        """The encoding that successfully decoded the source data."""

    def __repr__(self):
        return '%s: source=%r, source_path=%r' % (self.__class__, self.source,
                                                  self.source_path)

    def read(self):
        # Concrete subclasses must implement the actual data retrieval.
        raise NotImplementedError

    def decode(self, data):
        """
        Decode a string, `data`, heuristically.
        Raise UnicodeError if unsuccessful.

        The client application should call ``locale.setlocale`` at the
        beginning of processing::

            locale.setlocale(locale.LC_ALL, '')
        """
        if self.encoding and self.encoding.lower() == 'unicode':
            assert isinstance(data, str), (
                'input encoding is "unicode" '
                'but input is not a unicode object')
        if isinstance(data, str):
            # Accept unicode even if self.encoding != 'unicode'.
            return data
        if self.encoding:
            # We believe the user/application when the encoding is
            # explicitly given.
            encodings = [self.encoding]
        else:
            data_encoding = self.determine_encoding_from_data(data)
            if data_encoding:
                # If the data declares its encoding (explicitly or via a BOM),
                # we believe it.
                encodings = [data_encoding]
            else:
                # Apply heuristics only if no encoding is explicitly given and
                # no BOM found. Start with UTF-8, because that only matches
                # data that *IS* UTF-8:
                encodings = ['utf-8', 'latin-1']
                if locale_encoding:
                    encodings.insert(1, locale_encoding)
        # Try each candidate in order; first successful decode wins.
        for enc in encodings:
            try:
                decoded = str(data, enc, self.error_handler)
                self.successful_encoding = enc
                # Return decoded, removing BOMs.
                return decoded.replace('\ufeff', '')
            except (UnicodeError, LookupError) as err:
                error = err # in Python 3, the <exception instance> is
                            # local to the except clause
        raise UnicodeError(
            'Unable to decode input data. Tried the following encodings: '
            '%s.\n(%s)' % (', '.join([repr(enc) for enc in encodings]),
                           ErrorString(error)))

    coding_slug = re.compile(b("coding[:=]\s*([-\w.]+)"))
    """Encoding declaration pattern."""

    byte_order_marks = ((codecs.BOM_UTF8, 'utf-8'), # 'utf-8-sig' new in v2.5
                        (codecs.BOM_UTF16_BE, 'utf-16-be'),
                        (codecs.BOM_UTF16_LE, 'utf-16-le'),)
    """Sequence of (start_bytes, encoding) tuples for encoding detection.
    The first bytes of input data are checked against the start_bytes strings.
    A match indicates the given encoding."""

    def determine_encoding_from_data(self, data):
        """
        Try to determine the encoding of `data` by looking *in* `data`.
        Check for a byte order mark (BOM) or an encoding declaration.
        """
        # check for a byte order mark:
        for start_bytes, encoding in self.byte_order_marks:
            if data.startswith(start_bytes):
                return encoding
        # check for an encoding declaration pattern in first 2 lines of file:
        for line in data.splitlines()[:2]:
            match = self.coding_slug.search(line)
            if match:
                return match.group(1).decode('ascii')
        return None
class Output(TransformSpec):

    """
    Abstract base class for output wrappers.
    """

    component_type = 'output'

    default_destination_path = None

    def __init__(self, destination=None, destination_path=None,
                 encoding=None, error_handler='strict'):
        self.encoding = encoding
        """Text encoding for the output destination."""

        self.error_handler = error_handler or 'strict'
        """Text encoding error handler."""

        self.destination = destination
        """The destination for output data."""

        self.destination_path = destination_path
        """A text reference to the destination."""

        if not destination_path:
            self.destination_path = self.default_destination_path

    def __repr__(self):
        return ('%s: destination=%r, destination_path=%r'
                % (self.__class__, self.destination, self.destination_path))

    def write(self, data):
        """`data` is a Unicode string, to be encoded by `self.encode`."""
        # Concrete subclasses must implement the actual writing.
        raise NotImplementedError

    def encode(self, data):
        # With the pseudo-encoding "unicode", pass unicode strings through
        # unchanged (and insist that the caller really gave us one).
        if self.encoding and self.encoding.lower() == 'unicode':
            assert isinstance(data, str), (
                'the encoding given is "unicode" but the output is not '
                'a Unicode string')
            return data
        if not isinstance(data, str):
            # Non-unicode (e.g. binary) output.
            return data
        else:
            return data.encode(self.encoding, self.error_handler)
class FileInput(Input):

    """
    Input for single, simple file-like objects.
    """
    def __init__(self, source=None, source_path=None,
                 encoding=None, error_handler='strict',
                 autoclose=True, handle_io_errors=True, mode='rU'):
        """
        :Parameters:
            - `source`: either a file-like object (which is read directly), or
              `None` (which implies `sys.stdin` if no `source_path` given).
            - `source_path`: a path to a file, which is opened and then read.
            - `encoding`: the expected text encoding of the input file.
            - `error_handler`: the encoding error handler to use.
            - `autoclose`: close automatically after read (except when
              `sys.stdin` is the source).
            - `handle_io_errors`: summarize I/O errors here, and exit?
            - `mode`: how the file is to be opened (see standard function
              `open`). The default 'rU' provides universal newline
              support for text files.
        """
        Input.__init__(self, source, source_path, encoding, error_handler)
        self.autoclose = autoclose
        self.handle_io_errors = handle_io_errors
        self._stderr = ErrorOutput()

        if source is None:
            if source_path:
                # Specify encoding in Python 3
                if sys.version_info >= (3,0):
                    kwargs = {'encoding': self.encoding,
                              'errors': self.error_handler}
                else:
                    kwargs = {}
                try:
                    self.source = open(source_path, mode, **kwargs)
                except IOError as error:
                    if handle_io_errors:
                        # Legacy command-line behavior: report and exit
                        # instead of raising.
                        print(ErrorString(error), file=self._stderr)
                        print((
                            'Unable to open source file for reading ("%s").'
                            'Exiting.' % source_path), file=self._stderr)
                        sys.exit(1)
                    raise InputError(error.errno, error.strerror, source_path)
            else:
                self.source = sys.stdin
        elif (sys.version_info >= (3,0) and
              check_encoding(self.source, self.encoding) is False):
            # TODO: re-open, warn or raise error?
            raise UnicodeError('Encoding clash: encoding given is "%s" '
                               'but source is opened with encoding "%s".' %
                               (self.encoding, self.source.encoding))
        if not source_path:
            try:
                self.source_path = self.source.name
            except AttributeError:
                pass

    def read(self):
        """
        Read and decode a single file and return the data (Unicode string).
        """
        try: # In Python < 2.5, try...except has to be nested in try...finally.
            try:
                if self.source is sys.stdin and sys.version_info >= (3,0):
                    # read as binary data to circumvent auto-decoding
                    data = self.source.buffer.read()
                    # normalize newlines
                    data = b('\n').join(data.splitlines()) + b('\n')
                else:
                    data = self.source.read()
            except (UnicodeError, LookupError) as err: # (in Py3k read() decodes)
                if not self.encoding and self.source_path:
                    # re-read in binary mode and decode with heuristics
                    b_source = open(self.source_path, 'rb')
                    data = b_source.read()
                    b_source.close()
                    # normalize newlines
                    data = b('\n').join(data.splitlines()) + b('\n')
                else:
                    raise
        finally:
            if self.autoclose:
                self.close()
        return self.decode(data)

    def readlines(self):
        """
        Return lines of a single file as list of Unicode strings.
        """
        return self.read().splitlines(True)

    def close(self):
        # Never close the interpreter's own stdin.
        if self.source is not sys.stdin:
            self.source.close()
class FileOutput(Output):

    """
    Output for single, simple file-like objects.
    """

    mode = 'w'
    """The mode argument for `open()`."""
    # 'wb' for binary (e.g. OpenOffice) files.
    # (Do not use binary mode ('wb') for text files, as this prevents the
    # conversion of newlines to the system specific default.)

    def __init__(self, destination=None, destination_path=None,
                 encoding=None, error_handler='strict', autoclose=True,
                 handle_io_errors=True, mode=None):
        """
        :Parameters:
            - `destination`: either a file-like object (which is written
              directly) or `None` (which implies `sys.stdout` if no
              `destination_path` given).
            - `destination_path`: a path to a file, which is opened and then
              written.
            - `encoding`: the text encoding of the output file.
            - `error_handler`: the encoding error handler to use.
            - `autoclose`: close automatically after write (except when
              `sys.stdout` or `sys.stderr` is the destination).
            - `handle_io_errors`: summarize I/O errors here, and exit?
            - `mode`: how the file is to be opened (see standard function
              `open`). The default is 'w', providing universal newline
              support for text files.
        """
        Output.__init__(self, destination, destination_path,
                        encoding, error_handler)
        self.opened = True
        self.autoclose = autoclose
        self.handle_io_errors = handle_io_errors
        if mode is not None:
            self.mode = mode
        self._stderr = ErrorOutput()
        if destination is None:
            if destination_path:
                # Defer actually opening the file until the first write().
                self.opened = False
            else:
                self.destination = sys.stdout
        elif (# destination is file-type object -> check mode:
              mode and hasattr(self.destination, 'mode')
              and mode != self.destination.mode):
            print(('Destination mode "%s" '
                   'differs from specified mode "%s"' %
                   (self.destination.mode, mode)), file=self._stderr)
        if not destination_path:
            try:
                self.destination_path = self.destination.name
            except AttributeError:
                pass
        # Special cases under Python 3: different encoding or binary output
        if sys.version_info >= (3,0):
            if ('b' in self.mode
                and self.destination in (sys.stdout, sys.stderr)):
                # Binary output to the standard streams goes through the
                # underlying byte buffer.
                self.destination = self.destination.buffer
            if check_encoding(self.destination, self.encoding) is False:
                if self.destination in (sys.stdout, sys.stderr):
                    self.destination = self.destination.buffer
                else: # TODO: try the `write to .buffer` scheme instead?
                    raise ValueError('Encoding of %s (%s) differs \n'
                                     ' from specified encoding (%s)' %
                                     (self.destination_path or 'destination',
                                      destination.encoding, encoding))

    def open(self):
        # Specify encoding in Python 3.
        if sys.version_info >= (3,0):
            kwargs = {'encoding': self.encoding,
                      'errors': self.error_handler}
        else:
            kwargs = {}
        try:
            self.destination = open(self.destination_path, self.mode, **kwargs)
        except IOError as error:
            if self.handle_io_errors:
                # Legacy command-line behavior: report and exit instead of
                # raising.
                print(ErrorString(error), file=self._stderr)
                print(('Unable to open destination file'
                       " for writing ('%s'). Exiting." % self.destination_path),
                      file=self._stderr)
                sys.exit(1)
            raise OutputError(error.errno, error.strerror,
                              self.destination_path)
        self.opened = True

    def write(self, data):
        """Encode `data`, write it to a single file, and return it.

        With Python 3 or binary output mode, `data` is returned unchanged,
        except when specified encoding and output encoding differ.
        """
        if not self.opened:
            self.open()
        try: # In Python < 2.5, try...except has to be nested in try...finally.
            try:
                if 'b' not in self.mode and (sys.version_info < (3,0) or
                    check_encoding(self.destination, self.encoding) is False):
                    data = self.encode(data)
                    if sys.version_info >= (3,0) and os.linesep != '\n':
                        # writing as binary data -> fix endings
                        data = data.replace('\n', os.linesep)
                self.destination.write(data)
            except (UnicodeError, LookupError) as err:
                raise UnicodeError(
                    'Unable to encode output data. output-encoding is: '
                    '%s.\n(%s)' % (self.encoding, ErrorString(err)))
        finally:
            if self.autoclose:
                self.close()
        return data

    def close(self):
        # Leave the interpreter's standard streams open.
        if self.destination not in (sys.stdout, sys.stderr):
            self.destination.close()
            self.opened = False
class BinaryFileOutput(FileOutput):
    """
    A version of docutils.io.FileOutput which writes to a binary file.
    """
    # Used by core.publish_cmdline_to_binary() which in turn is used by
    # rst2odt (OpenOffice writer)
    # Binary mode: bytes are written verbatim, no newline translation.
    mode = 'wb'
class StringInput(Input):

    """Direct string input."""

    default_source_path = '<string>'

    def read(self):
        """Return the source string, decoded to unicode."""
        decoded_source = self.decode(self.source)
        return decoded_source
class StringOutput(Output):

    """Direct string output."""

    default_destination_path = '<string>'

    def write(self, data):
        """Encode `data`, store it in `self.destination`, and return it."""
        encoded = self.encode(data)
        self.destination = encoded
        return encoded
class NullInput(Input):

    """Degenerate input: read nothing."""

    default_source_path = 'null input'

    def read(self):
        """Return an empty (null) string."""
        return ''
class NullOutput(Output):

    """Degenerate output: write nothing."""

    default_destination_path = 'null output'

    def write(self, data):
        """Discard `data` without sending it anywhere."""
        pass
class DocTreeInput(Input):

    """Adapter for document tree input.

    The document tree must be passed in the ``source`` parameter.
    """

    default_source_path = 'doctree input'

    def read(self):
        """Return the document tree unchanged."""
        return self.source
| agpl-3.0 |
jshiv/turntable | test/lib/python2.7/site-packages/setuptools/command/alias.py | 467 | 2381 | from distutils.errors import DistutilsOptionError
from setuptools.command.setopt import edit_config, option_base, config_file
def shquote(arg):
    """Quote an argument for later parsing by shlex.split()"""
    # repr() yields a quoted, escaped literal that shlex.split() can parse
    # back into the original word.
    special_chars = ('"', "'", "\\", "#")
    if any(ch in arg for ch in special_chars):
        return repr(arg)
    # Anything containing whitespace must be quoted too.
    if arg.split() != [arg]:
        return repr(arg)
    return arg
class alias(option_base):
    """Define a shortcut that invokes one or more commands"""

    description = "define a shortcut to invoke one or more commands"
    command_consumes_arguments = True

    user_options = [
        ('remove', 'r', 'remove (unset) the alias'),
    ] + option_base.user_options

    boolean_options = option_base.boolean_options + ['remove']

    def initialize_options(self):
        option_base.initialize_options(self)
        # self.args: positional arguments (alias name + optional command words)
        self.args = None
        # self.remove: True when --remove/-r was given
        self.remove = None

    def finalize_options(self):
        option_base.finalize_options(self)
        # --remove takes exactly one argument: the alias name to delete.
        if self.remove and len(self.args) != 1:
            raise DistutilsOptionError(
                "Must specify exactly one argument (the alias name) when "
                "using --remove"
            )

    def run(self):
        # {alias_name: (source_config_filename, command_string)}
        aliases = self.distribution.get_option_dict('aliases')

        if not self.args:
            # No arguments: list every currently defined alias and return.
            print("Command Aliases")
            print("---------------")
            for alias in aliases:
                print("setup.py alias", format_alias(alias, aliases))
            return

        elif len(self.args) == 1:
            alias, = self.args
            if self.remove:
                # Writing None as the value removes the entry from config.
                command = None
            elif alias in aliases:
                # Single existing name without --remove: just display it.
                print("setup.py alias", format_alias(alias, aliases))
                return
            else:
                print("No alias definition found for %r" % alias)
                return
        else:
            # First argument is the alias name; the rest form the command,
            # re-quoted so shlex.split() can recover the original words.
            alias = self.args[0]
            command = ' '.join(map(shquote, self.args[1:]))

        edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
def format_alias(name, aliases):
    """Render one alias as the ``setup.py alias`` invocation defining it."""
    source, command = aliases[name]
    # Translate the config file the alias came from into the matching
    # command-line option prefix.
    if source == config_file('global'):
        prefix = '--global-config '
    elif source == config_file('user'):
        prefix = '--user-config '
    elif source == config_file('local'):
        prefix = ''
    else:
        prefix = '--filename=%r' % source
    return prefix + name + ' ' + command
| mit |
pytroll/mpop | mpop/saturn/two_line_elements.py | 2 | 14593 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011, 2013.
# Author(s):
# Martin Raspaud <martin.raspaud@smhi.se>
# This file is part of mpop.
# mpop is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
# mpop is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# mpop. If not, see <http://www.gnu.org/licenses/>.
"""Module to compute satellite positions from TLE.
"""
import datetime
import sys
import urllib2
import numpy as np
# Constants used by the orbit propagation in Tle._preprocess() and
# Tle.get_position(); units follow the SGP convention (earth radii, minutes)
# — see the TLE/SGP4 literature (Spacetrack Report #3) for derivations.
CK2 = 5.413080e-4
CK4 = 0.62098875e-6
E6A = 1.0e-6
QOMS2T = 1.88027916e-9
S = 1.01222928
XJ3 = -0.253881e-5
XKE = 0.743669161e-1
XKMPER = 6378.137   # kilometers per earth (equatorial) radius
XMNPDA = 1440.0     # minutes per day
AE = 1.0            # distance unit: one earth radius
# earth flattening
F = 1/298.257223563
if sys.version_info < (2, 5):
    import time

    def strptime(string, fmt=None):
        """This function is available in the datetime module only
        from Python >= 2.5.
        """
        # Fall back to time.strptime and rebuild a datetime from the first
        # six fields (year, month, day, hour, minute, second).
        return datetime.datetime(*time.strptime(string, fmt)[:6])
else:
    strptime = datetime.datetime.strptime
class Tle(object):
"""The TLE object holds information and methods for orbit position
estimation.
"""
    def __init__(self, tle=None, satellite=None):
        # Either take a parsed TLE directly (`tle`) or look one up by
        # satellite name (`satellite`): first from the local TLE archive,
        # and failing that from celestrak.com over HTTP.
        self.tle = tle
        if satellite:
            tles_dict = {}
            import glob
            filelist = glob.glob("/data/24/saf/polar_in/tle/tle-*.txt")
            if len(filelist) > 0:
                # Pick the last file after sorting — presumably the newest,
                # assuming the filenames sort chronologically (TODO confirm).
                filelist.sort()
                tlef = open(filelist[-1])
                tles = [item.strip() for item in tlef]
                tlef.close()
                # TLE files come in 3-line records: name, line 1, line 2.
                for i in xrange(0, len(tles) - 2, 3):
                    tles_dict[tles[i]] = tles[i+1]+"\n"+tles[i+2]
            else:
                for fname in ["resource.txt", "weather.txt"]:
                    url = "http://celestrak.com/NORAD/elements/" + fname
                    tles = urllib2.urlopen(url).readlines()
                    tles = [item.strip() for item in tles]
                    for i in xrange(0, len(tles) - 2, 3):
                        tles_dict[tles[i]] = tles[i+1]+"\n"+tles[i+2]
            # Satellite names in the catalogs are upper-case.
            self._read_tle(tles_dict[satellite.upper()])
        self._preprocess()
    def _read_tle(self, lines):
        """Read the raw tle.
        """
        def _read_tle_decimal(rep):
            """Read tle decimal point numbers.
            """
            # TLE "assumed decimal point" notation: mantissa digits with an
            # implied leading "0." followed by a signed exponent,
            # e.g. "-11606-4" -> -0.11606e-4.
            num = int(rep[:-2]) * 1.0e-5
            exp = int(rep[-2:])
            return num * 10 ** exp

        # Split both TLE lines into whitespace-separated fields; the indices
        # below follow the standard NORAD two-line element layout.
        tlist = lines.split()
        self.tle = {}
        self.tle["satnumber"] = tlist[1][:5]
        self.tle["classification"] = tlist[1][5:]
        self.tle["id_launch_year"] = tlist[2][:2]
        self.tle["id_launch_number"] = tlist[2][2:5]
        self.tle["id_launch_piece"] = tlist[2][5:]
        self.tle["epoch_year"] = int(tlist[3][:2])
        self.tle["epoch_day"] = float(tlist[3][2:])
        # Day-of-year in the epoch field is 1-based, hence the "- 1".
        self.tle["epoch"] = (strptime(tlist[3][:2], "%y") +
                             datetime.timedelta(days=float(tlist[3][2:]) - 1))
        self.tle["mean_motion_derivative"] = float(tlist[4])
        self.tle["mean_motion_sec_derivative"] = _read_tle_decimal(tlist[5])
        self.tle["bstar"] = _read_tle_decimal(tlist[6])
        self.tle["ephemeris_type"] = int(tlist[7])
        # The trailing digit of this field is the line checksum; drop it.
        self.tle["element_number"] = int(tlist[8][:-1])

        self.tle["inclination"] = float(tlist[11])
        self.tle["right_ascension"] = float(tlist[12])
        # Eccentricity is written with an implied leading decimal point.
        self.tle["excentricity"] = int(tlist[13]) * 10 ** -7
        self.tle["arg_perigee"] = float(tlist[14])
        self.tle["mean_anomaly"] = float(tlist[15])
        self.tle["mean_motion"] = float(tlist[16][:11])
        # Revolution number at epoch, again with the checksum digit stripped.
        self.tle["orbit"] = int(tlist[16][11:-1])
    def _preprocess(self):
        """Derivate some values from raw tle.
        """
        # Convert angles to radians and rates to radians per minute
        # (TLE files use degrees and revolutions per day).
        self.tle["inclination"] = np.deg2rad(self.tle["inclination"])
        self.tle["right_ascension"] = np.deg2rad(self.tle["right_ascension"])
        self.tle["arg_perigee"] = np.deg2rad(self.tle["arg_perigee"])
        self.tle["mean_anomaly"] = np.deg2rad(self.tle["mean_anomaly"])

        self.tle["mean_motion"] *= (np.pi * 2 / XMNPDA)
        self.tle["mean_motion_derivative"] *= np.pi * 2 / XMNPDA ** 2
        self.tle["mean_motion_sec_derivative"] *= np.pi * 2 / XMNPDA ** 3
        self.tle["bstar"] *= AE

        n_0 = self.tle["mean_motion"]
        k_e = XKE
        k_2 = CK2
        i_0 = self.tle["inclination"]
        e_0 = self.tle["excentricity"]

        # Recover the un-perturbed ("original") mean motion and semi-major
        # axis from the TLE mean motion (SGP4-style initialisation).
        # NOTE(review): Spacetrack Report #3 has (1 - e**2)**(3/2.0) in the
        # delta terms; the (2.0/3) exponent here looks transposed.  The
        # difference is negligible for near-circular orbits but should be
        # confirmed against the spec before relying on eccentric orbits.
        a_1 = (k_e / n_0) ** (2.0/3)
        delta_1 = ((3/2.0) * (k_2 / a_1**2) * ((3 * np.cos(i_0)**2 - 1) /
                                               (1 - e_0**2)**(2.0/3)))
        a_0 = a_1 * (1 - delta_1/3 - delta_1**2 - (134.0/81) * delta_1**3)
        delta_0 = ((3/2.0) * (k_2 / a_0**2) * ((3 * np.cos(i_0)**2 - 1) /
                                               (1 - e_0**2)**(2.0/3)))

        # original mean motion
        n_0pp = n_0 / (1 + delta_0)
        self.tle["original_mean_motion"] = n_0pp

        # semi major axis
        a_0pp = a_0 / (1 - delta_0)
        self.tle["semi_major_axis"] = a_0pp

        self.tle["period"] = np.pi * 2 / n_0pp

        # Perigee altitude in kilometers (radius minus one earth radius).
        self.tle["perigee"] = (a_0pp * (1 - e_0) / AE - AE) * XKMPER

        now = self.tle["epoch"]
        # Right ascension of the ascending node expressed as an earth-fixed
        # longitude at epoch; gmst() is defined elsewhere in this module.
        self.tle["right_ascension_lon"] = (self.tle["right_ascension"]
                                           - gmst(now))

        if self.tle["right_ascension_lon"] > np.pi:
            self.tle["right_ascension_lon"] -= 2 * np.pi
# pylint: disable-msg=C0103
def get_position(self, current_time):
    """Get cartesian position and velocity at *current_time*.

    Propagates the orbit from the TLE epoch using an SGP4-style model:
    secular drag/gravity terms followed by long- and short-period
    periodic corrections.

    Args:
        current_time (datetime.datetime): time of interest (UTC).

    Returns:
        tuple: ``(r_x, r_y, r_z, rdot_x, rdot_y, rdot_z)``. Positions are
        in Earth radii (``get_latlonalt`` multiplies them by XKMPER);
        velocity units are the model's canonical units — confirm before
        relying on them.
    """
    # for near earth orbits, period must be < 255 minutes
    perigee = self.tle["perigee"]
    a_0pp = self.tle["semi_major_axis"]
    e_0 = self.tle["excentricity"]
    i_0 = self.tle["inclination"]
    n_0pp = self.tle["original_mean_motion"]
    k_2 = CK2
    k_4 = CK4
    k_e = XKE
    bstar = self.tle["bstar"]
    w_0 = self.tle["arg_perigee"]
    M_0 = self.tle["mean_anomaly"]
    W_0 = self.tle["right_ascension"]
    t_0 = self.tle["epoch"]
    A30 = -XJ3 * AE**3

    # Atmospheric density parameters, adjusted for low-perigee orbits.
    if perigee < 98:
        s = 20/XKMPER + AE
        qoms2t = (QOMS2T ** 0.25 + S - s) ** 4
    elif perigee < 156:
        s = a_0pp * (1 - e_0) - S + AE
        qoms2t = (QOMS2T ** 0.25 + S - s) ** 4
    else:
        qoms2t = QOMS2T
        s = S

    theta = np.cos(i_0)
    xi = 1 / (a_0pp - s)
    beta_0 = np.sqrt(1 - e_0 ** 2)
    eta = a_0pp * e_0 * xi

    # Drag coefficients C_1..C_5 and D_2..D_4.
    C_2 = (qoms2t * xi**4 * n_0pp * (1 - eta**2)**(-3.5) *
           (a_0pp * (1 + 1.5 * eta**2 + 4 * e_0 * eta + e_0 * eta**3) +
            1.5 * (k_2 * xi) / (1 - eta**2) * (-0.5 + 1.5 * theta**2) *
            (8 + 24 * eta**2 + 3 * eta**4)))

    C_1 = bstar * C_2

    C_3 = (qoms2t * xi ** 5 * A30 * n_0pp * AE * np.sin(i_0) / (k_2 * e_0))

    coef = 2 * qoms2t * xi**4 * a_0pp * beta_0**2 * (1 - eta**2)**(-7/2.0)

    C_4 = (coef * n_0pp *
           ((2 * eta * (1 + e_0 * eta) + e_0/2.0 + (eta**3)/2.0) -
            2 * k_2 * xi / (a_0pp * (1 - eta**2)) *
            (3*(1-3*theta**2) *
             (1 + (3*eta**2)/2.0 - 2*e_0*eta - e_0*eta**3/2.0) +
             3/4.0*(1-theta**2) *
             (2*eta**2 - e_0*eta - e_0*eta**3)*np.cos(2*w_0))))

    C_5 = coef * (1 + 11/4.0 * eta * (eta + e_0) + e_0 * eta**3)

    D_2 = 4 * a_0pp * xi * C_1**2
    D_3 = 4/3.0 * a_0pp * xi**2 * (17*a_0pp + s) * C_1**3
    D_4 = 2/3.0 * a_0pp * xi**3 * (221*a_0pp + 31*s) * C_1**4

    # Secular effects of atmospheric drag and gravitation.
    # dt is the elapsed time since epoch in canonical time units.
    dt = _days(current_time - t_0) * XMNPDA

    M_df = (M_0 + (1 +
                   3*k_2*(-1 + 3*theta**2)/(2*a_0pp**2 * beta_0**3) +
                   3*k_2**2*(13 - 78*theta**2 + 137*theta**4) /
                   (16*a_0pp**4*beta_0**7)) *
            n_0pp*dt)
    w_df = (w_0 + (-3*k_2*(1 - 5*theta**2)/(2*a_0pp**2*beta_0**4) +
                   3 * k_2**2 * (7 - 114*theta**2 + 395*theta**4) /
                   (16*a_0pp*beta_0**8) +
                   5*k_4*(3-36*theta**2+49*theta**4) /
                   (4*a_0pp**4*beta_0**8)) *
            n_0pp*dt)
    W_df = (W_0 + (-3*k_2*theta/(a_0pp**2*beta_0**4) +
                   3*k_2**2*(4*theta - 19*theta**3)/(2*a_0pp**4*beta_0**8) +
                   5*k_4*theta*(3-7*theta**2)/(2*a_0pp**4*beta_0**8)) *
            n_0pp*dt)
    deltaw = bstar * C_3 * np.cos(w_0)*dt
    deltaM = (-2/3.0 * qoms2t * bstar * xi**4 * AE / (e_0*eta) *
              ((1 + eta * np.cos(M_df))**3 - (1 + eta * np.cos(M_0))**3))
    M_p = M_df + deltaw + deltaM
    w = w_df - deltaw - deltaM
    W = (W_df - 21/2.0 * (n_0pp * k_2 * theta)/(a_0pp**2 * beta_0**2) *
         C_1 * dt**2)
    e = (e_0 -
         bstar * C_4 * dt -
         bstar * C_5 * (np.sin(M_p) - np.sin(M_0)))
    a = a_0pp * (1 - C_1 * dt - D_2 * dt**2 - D_3 * dt**3 - D_4 * dt**4)**2
    L = M_p + w + W + n_0pp * (3/2.0 * C_1 * dt**2 +
                               (D_2 + 2 * C_1 ** 2) * dt**3 +
                               1/4.0 *
                               (3*D_3 + 12*C_1*D_2 + 10*C_1**3)*dt**4 +
                               1.0/5 * (3*D_4 + 12*C_1*D_3 + 6*D_2**2 +
                                        30*C_1**2*D_2 + 15*C_1**4)*dt**5)
    beta = np.sqrt(1 - e**2)
    n = k_e / (a ** (3/2.0))

    # Long-period periodic terms
    a_xN = e * np.cos(w)
    a_yNL = A30 * np.sin(i_0) / (4.0 * k_2 * a * beta**2)
    L_L = a_yNL/2 * a_xN * ((3 + 5 * theta) / (1 + theta))
    L_T = L + L_L
    a_yN = e * np.sin(w) + a_yNL

    U = (L_T - W) % (np.pi * 2)

    # Solve Kepler's equation for (E + w) by Newton-Raphson iteration.
    Epw = U
    for i in range(10):
        DeltaEpw = ((U - a_yN * np.cos(Epw) + a_xN * np.sin(Epw) - Epw) /
                    (-a_yN * np.sin(Epw) - a_xN * np.cos(Epw) + 1))
        Epw = Epw + DeltaEpw
        # BUGFIX: convergence must be judged on the magnitude of the
        # correction. The previous test "DeltaEpw < 10e-12" was satisfied
        # by ANY negative correction, terminating the iteration before
        # convergence.
        if abs(DeltaEpw) < 10e-12:
            break

    # preliminary quantities for short-period periodics
    ecosE = a_xN * np.cos(Epw) + a_yN * np.sin(Epw)
    esinE = a_xN * np.sin(Epw) - a_yN * np.cos(Epw)
    e_L = (a_xN**2 + a_yN**2)**(0.5)
    p_L = a * (1 - e_L**2)
    r = a * (1 - ecosE)
    rdot = k_e * np.sqrt(a)/r * esinE
    rfdot = k_e * np.sqrt(p_L) / r
    cosu = a / r * (np.cos(Epw) - a_xN +
                    (a_yN * (esinE) / (1 + np.sqrt(1 - e_L**2))))
    sinu = a / r * (np.sin(Epw) - a_yN +
                    (a_xN * (esinE) / (1 + np.sqrt(1 - e_L**2))))
    u = np.arctan2(sinu, cosu)
    cos2u = np.cos(2*u)
    sin2u = np.sin(2*u)
    # Short-period periodic corrections.
    Deltar = k_2/(2*p_L) * (1 - theta**2) * cos2u
    Deltau = -k_2/(4*p_L**2) * (7*theta**2 - 1) * sin2u
    DeltaW = 3*k_2 * theta / (2 * p_L**2) * sin2u
    Deltai = 3*k_2 * theta / (2 * p_L**2) * cos2u * np.sin(i_0)
    Deltardot = - k_2 * n / p_L * (1 - theta**2) * sin2u
    Deltarfdot = k_2 * n / p_L * ((1 - theta**2) * cos2u -
                                  3/2.0 * (1 - 3*theta**2))

    # osculating quantities
    r_k = r * (1 - 3/2.0 * k_2 * np.sqrt(1 - e_L**2)/p_L**2 *
               (3 * theta**2 - 1)) + Deltar
    u_k = u + Deltau
    W_k = W + DeltaW
    i_k = i_0 + Deltai
    rdot_k = rdot + Deltardot
    rfdot_k = rfdot + Deltarfdot

    # Orientation unit vectors of the orbital plane (M along the node
    # normal, N along the ascending node), then radial/transverse axes.
    M_x = -np.sin(W_k) * np.cos(i_k)
    M_y = np.cos(W_k) * np.cos(i_k)
    M_z = np.sin(i_k)
    N_x = np.cos(W_k)
    N_y = np.sin(W_k)
    N_z = 0

    U_x = M_x * np.sin(u_k) + N_x * np.cos(u_k)
    U_y = M_y * np.sin(u_k) + N_y * np.cos(u_k)
    U_z = M_z * np.sin(u_k) + N_z * np.cos(u_k)
    V_x = M_x * np.cos(u_k) - N_x * np.sin(u_k)
    V_y = M_y * np.cos(u_k) - N_y * np.sin(u_k)
    V_z = M_z * np.cos(u_k) - N_z * np.sin(u_k)

    # Assemble cartesian position and velocity.
    r_x = r_k * U_x
    r_y = r_k * U_y
    r_z = r_k * U_z
    rdot_x = rdot_k * U_x + rfdot_k * V_x
    rdot_y = rdot_k * U_y + rfdot_k * V_y
    rdot_z = rdot_k * U_z + rfdot_k * V_z

    return r_x, r_y, r_z, rdot_x, rdot_y, rdot_z
def get_latlonalt(self, current_time):
    """Get geodetic latitude, longitude and altitude for *current_time*.

    Args:
        current_time (datetime.datetime): time of interest (UTC).

    Returns:
        tuple: ``(lat, lon, alt)`` — latitude and longitude in radians
        (longitude normalised to (-pi, pi]) and altitude scaled by
        XKMPER (presumably kilometres above the reference ellipsoid —
        confirm against the F flattening constant in use).
    """
    pos_x, pos_y, pos_z, vel_x, vel_y, vel_z = \
        self.get_position(current_time)
    # Velocity is not needed for the ground position.
    del vel_x, vel_y, vel_z
    # Longitude: rotate the inertial position into the Earth-fixed frame
    # by subtracting Greenwich mean sidereal time.
    lon = ((np.arctan2(pos_y * XKMPER, pos_x * XKMPER) - gmst(current_time))
           % (2 * np.pi))

    if lon > np.pi:
        lon -= np.pi * 2
    if lon <= -np.pi:
        lon += np.pi * 2

    r = np.sqrt(pos_x ** 2 + pos_y ** 2)
    # Start from geocentric latitude and iterate to geodetic latitude.
    lat = np.arctan2(pos_z, r)
    e2 = F * (2 - F)  # squared eccentricity of the reference ellipsoid
    while True:
        lat2 = lat
        c = 1 / (np.sqrt(1 - e2 * (np.sin(lat2) ** 2)))
        lat = np.arctan2(pos_z + c * e2 * np.sin(lat2), r)
        if abs(lat - lat2) < 1e-10:
            break
    alt = r / np.cos(lat) - c
    alt *= XKMPER
    return lat, lon, alt
# pylint: enable-msg=C0103
def _jdays(current_time):
    """Return the fractional days of *current_time* since the J2000 epoch
    (2000-01-01 12:00)."""
    j2000 = datetime.datetime(2000, 1, 1, 12, 0)
    return _days(current_time - j2000)
def _days(d_t):
"""Get the days (floating point) from *d_t*.
"""
return (d_t.days +
(d_t.seconds +
d_t.microseconds / (1000000.0)) / (24 * 3600.0))
def gmst(current_time):
    """Greenwich mean sidereal time at *current_time*, in radians.

    Implements the GMST polynomial from http://celestrak.com/columns/v02n02/

    Args:
        current_time (datetime.datetime): UTC time of interest.

    Returns:
        float: sidereal time in radians, in [0, 2*pi).
    """
    now = current_time
    now0 = datetime.datetime(now.year, now.month, now.day)
    epoch = datetime.datetime(2000, 1, 1, 12, 0)
    du2 = _days(now - epoch)
    d_u = _days(now0 - epoch)

    dus = (du2 - d_u) * 86400
    t_u = d_u / 36525.0
    # GMST at 0h UT in seconds:
    #   24110.54841 + 8640184.812866*T_u + 0.093104*T_u**2 - 6.2e-6*T_u**3
    # BUGFIX: the cubic coefficient was written "6.2 * 10e-6" (= 6.2e-5),
    # ten times the documented value 6.2e-6.
    theta_g_0 = (24110.54841 + t_u * (8640184.812866 +
                                      t_u * (0.093104 - t_u * 6.2e-6)))
    theta_g = (theta_g_0 + dus * 1.00273790934) % 86400
    return (theta_g / 86400.0) * 2 * np.pi
if __name__ == "__main__":
    # No command-line behaviour; this module is intended to be imported.
    pass
| gpl-3.0 |
evidation-health/bokeh | bokeh/util/testing.py | 9 | 5179 | """ Functions to help with testing Bokeh and reporting issues.
"""
from __future__ import absolute_import, print_function
import mock
import threading
import time
import uuid
import unittest
import requests
from requests.exceptions import ConnectionError
from bokeh.server import start, configure
from bokeh.server.app import bokeh_app, app
from bokeh.server.models import user
from bokeh.server.settings import settings as server_settings
def skipIfPy3(message):
    """unittest decorator to skip a test when running under Python 3.

    Args:
        message (str): reason reported for the skipped test.
    """
    from unittest import skipIf
    from .platform import is_py3
    return skipIf(is_py3(), message)
def skipIfPyPy(message):
    """unittest decorator to skip a test when running under PyPy.

    Args:
        message (str): reason reported for the skipped test.
    """
    from unittest import skipIf
    from .platform import is_pypy
    return skipIf(is_pypy(), message)
def print_versions():
    """Print the Bokeh version together with the current Python version,
    implementation, and platform.

    Returns:
        None
    """
    import platform as pt
    from .. import __version__
    fields = (__version__, pt.python_version(),
              pt.python_implementation(), pt.platform())
    message = """
    Bokeh version: %s
    Python version: %s-%s
    Platform: %s
    """ % fields
    print(message)
def runtests(args=None):
    """Run the Bokeh tests under the bokeh python directory using pytest.

    Does not run tests from bokehjs or examples.

    Args:
        args(list, optional): List of command line arguments accepted by py.test
            e.g. args=['-s', '-k charts'] prevents capture of standard
            out and only runs tests that match charts. For more py.test options see
            http://pytest.org/latest/usage.html#usage.

    Returns:
        int: pytest exitcode
    """
    import os

    import pytest

    try:
        import faulthandler
    except ImportError:
        # We can live without it on python 2.7
        pass
    else:
        faulthandler.enable()

    # change to the bokeh python source directory, for test collection
    rootdir = os.path.join(os.path.dirname(__file__), os.pardir)
    os.chdir(rootdir)
    return pytest.main(args=args)
#----------------------------
# Bokeh server test utils
#----------------------------
def wait_flask():
    """Poll the local Bokeh server's ping endpoint until it answers
    (or the wait_until timeout elapses)."""
    def ping():
        try:
            return requests.get('http://localhost:5006/bokeh/ping')
        except ConnectionError:
            return False
    return wait_until(ping)
def wait_until(func, timeout=1.0, interval=0.01):
    """Repeatedly call *func* until it returns a truthy value.

    Args:
        func (callable): predicate to poll.
        timeout (float): maximum time, in seconds, to keep polling.
        interval (float): pause between successive calls, in seconds.

    Returns:
        bool: True as soon as ``func()`` is truthy, False on timeout.
    """
    deadline = time.time() + timeout
    while not func():
        if time.time() > deadline:
            return False
        time.sleep(interval)
    return True
class BaseBokehServerTestCase(unittest.TestCase):
    """Common base class for Bokeh server test cases.

    Subclasses override ``options`` to tweak server settings in setUp.
    """

    # mapping of server_settings attribute name -> value, applied by
    # the concrete subclasses' setUp implementations
    options = {}
class MemoryBokehServerTestCase(BaseBokehServerTestCase):
    """Runs a real Bokeh server with the in-memory model backend in a
    background thread for the duration of each test."""

    def setUp(self):
        # clear tornado ioloop instance
        server_settings.reset()
        server_settings.model_backend = {'type': 'memory'}
        # apply per-test-case overrides declared on the subclass
        for k, v in self.options.items():
            setattr(server_settings, k, v)
        bokeh_app.stdout = None
        bokeh_app.stderr = None
        self.serverthread = threading.Thread(target=start.start_simple_server)
        self.serverthread.start()
        # block until the server's HTTP endpoint responds
        wait_flask()
        # not great - but no good way to wait for zmq to come up
        time.sleep(0.1)
        make_default_user(bokeh_app)

    def tearDown(self):
        # stop the server and wait for the background thread to exit
        start.stop()
        self.serverthread.join()
# Default server test case: the in-memory backend variant.
BokehServerTestCase = MemoryBokehServerTestCase
def make_default_user(bokeh_app):
    """Create and return the "defaultuser" user (with a throwaway password
    and the 'nokey' API key) in *bokeh_app*'s model storage."""
    return user.new_user(bokeh_app.servermodel_storage, "defaultuser",
                         str(uuid.uuid4()), apikey='nokey', docs=[])
class FlaskClientTestCase(BaseBokehServerTestCase):
    """Exercises the Flask app through its test client, without starting
    a real server process."""

    def setUp(self):
        server_settings.reset()
        # apply per-test-case overrides declared on the subclass
        for k, v in self.options.items():
            setattr(server_settings, k, v)
        server_settings.model_backend = {'type': 'memory'}
        configure.configure_flask()
        # silence blueprint-registration logging during tests
        with mock.patch('bokeh.server.configure.logging'):
            configure.register_blueprint()
        # TODO: find a better way to initialize the app for tests
        app.secret_key = server_settings.secret_key
        app.debug = True
        self.client = app.test_client()

    def tearDown(self):
        pass
#----------------------
# For testing charts
#----------------------
def create_chart(klass, values, compute_values=True, **kws):
    """Instantiate chart class *klass* with a canned set of test options.

    Args:
        klass (class): chart class to be created
        values (iterable): chart data series
        compute_values (bool): accepted for API compatibility; currently
            not used by this helper — TODO confirm whether callers rely
            on it.
        **kws: extra keyword arguments forwarded verbatim to *klass*
            (refer to klass arguments specification details).

    Return:
        klass chart instance
    """
    return klass(
        values,
        title="title", xlabel="xlabel", ylabel="ylabel",
        legend="top_left", xscale="linear", yscale="linear",
        width=800, height=600, tools=True,
        filename=False, server=False, notebook=False,
        **kws
    )
| bsd-3-clause |
gmatteo/pymatgen | pymatgen/transformations/standard_transformations.py | 4 | 35611 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module defines standard transformations which transforms a structure into
another structure. Standard transformations operate in a structure-wide manner,
rather than site-specific manner.
All transformations should inherit the AbstractTransformation ABC.
"""
import logging
from fractions import Fraction
from typing import Optional, Union
from numpy import around
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.analysis.elasticity.strain import Deformation
from pymatgen.analysis.ewald import EwaldMinimizer, EwaldSummation
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import Lattice, Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.site_transformations import (
PartialRemoveSitesTransformation,
)
from pymatgen.transformations.transformation_abc import AbstractTransformation
logger = logging.getLogger(__name__)
class RotationTransformation(AbstractTransformation):
    """Apply a rigid rotation to a structure."""

    def __init__(self, axis, angle, angle_in_radians=False):
        """
        Args:
            axis (3x1 array): Axis of rotation, e.g., [1, 0, 0]
            angle (float): Angle to rotate
            angle_in_radians (bool): Set to True if angle is supplied in
                radians. Else degrees are assumed.
        """
        self.axis = axis
        self.angle = angle
        self.angle_in_radians = angle_in_radians
        self._symmop = SymmOp.from_axis_angle_and_translation(
            self.axis, self.angle, self.angle_in_radians
        )

    def apply_transformation(self, structure):
        """
        Apply the transformation.

        Args:
            structure (Structure): Input Structure

        Returns:
            Rotated Structure.
        """
        rotated = structure.copy()
        rotated.apply_operation(self._symmop)
        return rotated

    def __str__(self):
        unit = "radians" if self.angle_in_radians else "degrees"
        return "Rotation Transformation about axis {} with angle = {:.4f} {}".format(
            self.axis, self.angle, unit
        )

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """
        Returns:
            Inverse Transformation: rotation by -angle about the same axis.
        """
        return RotationTransformation(self.axis, -self.angle, self.angle_in_radians)

    @property
    def is_one_to_many(self):
        """False: exactly one structure is produced."""
        return False
class OxidationStateDecorationTransformation(AbstractTransformation):
    """Decorate a structure with user-supplied oxidation states."""

    def __init__(self, oxidation_states):
        """
        Args:
            oxidation_states (dict): Oxidation states supplied as a dict,
                e.g., {"Li":1, "O":-2}
        """
        self.oxidation_states = oxidation_states

    def apply_transformation(self, structure):
        """
        Apply the transformation.

        Args:
            structure (Structure): Input Structure

        Returns:
            Oxidation state decorated Structure.
        """
        decorated = structure.copy()
        decorated.add_oxidation_state_by_element(self.oxidation_states)
        return decorated

    @property
    def inverse(self):
        """None: this transformation is not invertible."""
        return None

    @property
    def is_one_to_many(self):
        """False: exactly one structure is produced."""
        return False
class AutoOxiStateDecorationTransformation(AbstractTransformation):
    """Automatically decorate a structure with oxidation states using a
    bond-valence analysis."""

    def __init__(
        self,
        symm_tol=0.1,
        max_radius=4,
        max_permutations=100000,
        distance_scale_factor=1.015,
    ):
        """
        Args:
            symm_tol (float): Symmetry tolerance used to determine which sites
                are symmetrically equivalent. Set to 0 to turn off symmetry.
            max_radius (float): Maximum radius in Angstrom used to find
                nearest neighbors.
            max_permutations (int): Maximum number of permutations of
                oxidation states to test.
            distance_scale_factor (float): A scale factor to be applied.
                Useful for scaling distances, esp in the case of
                calculation-relaxed structures, which may tend to under (GGA)
                or over bind (LDA). The default of 1.015 works for GGA. For
                experimental structures, set this to 1.
        """
        self.symm_tol = symm_tol
        self.max_radius = max_radius
        self.max_permutations = max_permutations
        self.distance_scale_factor = distance_scale_factor
        self.analyzer = BVAnalyzer(
            self.symm_tol,
            self.max_radius,
            self.max_permutations,
            self.distance_scale_factor,
        )

    def apply_transformation(self, structure):
        """
        Apply the transformation.

        Args:
            structure (Structure): Input Structure

        Returns:
            Oxidation state decorated Structure.
        """
        return self.analyzer.get_oxi_state_decorated_structure(structure)

    @property
    def inverse(self):
        """None: this transformation is not invertible."""
        return None

    @property
    def is_one_to_many(self):
        """False: exactly one structure is produced."""
        return False
class OxidationStateRemovalTransformation(AbstractTransformation):
    """Strip all oxidation states from a structure."""

    def __init__(self):
        """No arg needed."""
        pass

    def apply_transformation(self, structure):  # pylint: disable=R0201
        """
        Apply the transformation.

        Args:
            structure (Structure): Input Structure

        Returns:
            Non-oxidation state decorated Structure.
        """
        undecorated = structure.copy()
        undecorated.remove_oxidation_states()
        return undecorated

    @property
    def inverse(self):
        """None: the removed states cannot be recovered."""
        return None

    @property
    def is_one_to_many(self):
        """False: exactly one structure is produced."""
        return False
class SupercellTransformation(AbstractTransformation):
    """Generate a supercell by transforming the lattice vectors with an
    integer scaling matrix."""

    def __init__(self, scaling_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1))):
        """
        Args:
            scaling_matrix: A matrix of transforming the lattice vectors.
                Defaults to the identity matrix. Has to be all integers. e.g.,
                [[2,1,0],[0,3,0],[0,0,1]] generates a new structure with
                lattice vectors a" = 2a + b, b" = 3b, c" = c where a, b, and c
                are the lattice vectors of the original structure.
        """
        self.scaling_matrix = scaling_matrix

    @staticmethod
    def from_scaling_factors(scale_a=1, scale_b=1, scale_c=1):
        """
        Convenience constructor scaling each lattice vector independently.
        Equivalent to a diagonal scaling matrix
        diag(scale_a, scale_b, scale_c).

        Args:
            scale_a: Scaling factor for lattice direction a. Defaults to 1.
            scale_b: Scaling factor for lattice direction b. Defaults to 1.
            scale_c: Scaling factor for lattice direction c. Defaults to 1.

        Returns:
            SupercellTransformation.
        """
        diagonal = [[scale_a, 0, 0], [0, scale_b, 0], [0, 0, scale_c]]
        return SupercellTransformation(diagonal)

    def apply_transformation(self, structure):
        """
        Apply the transformation.

        Args:
            structure (Structure): Input Structure

        Returns:
            Supercell Structure.
        """
        return structure * self.scaling_matrix

    def __str__(self):
        return "Supercell Transformation with scaling matrix {}".format(
            self.scaling_matrix
        )

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """
        Raises: NotImplementedError
        """
        raise NotImplementedError()

    @property
    def is_one_to_many(self):
        """False: exactly one structure is produced."""
        return False
class SubstitutionTransformation(AbstractTransformation):
    """Substitute one species for another in a structure."""

    def __init__(self, species_map):
        """
        Args:
            species_map: A dict or list of tuples with string-string species
                pairs, e.g., {"Li": "Na"} or [("Fe2+", "Mn2+")]. Multiple
                substitutions can be done at once. A value may itself be a
                species-to-occupancy dict, e.g.
                {"Si": {"Ge": 0.75, "C": 0.25}}, which substitutes a single
                species with several to generate a disordered structure.
        """
        self.species_map = species_map
        self._species_map = dict(species_map)
        for orig_sp, new_sp in self._species_map.items():
            # Normalise [(sp, occu), ...] values into plain dicts.
            if isinstance(new_sp, (tuple, list)):
                self._species_map[orig_sp] = dict(new_sp)

    def apply_transformation(self, structure):
        """
        Apply the transformation.

        Args:
            structure (Structure): Input Structure

        Returns:
            Substituted Structure.
        """
        converted = {}
        for orig_sp, new_sp in self._species_map.items():
            if isinstance(new_sp, dict):
                converted[get_el_sp(orig_sp)] = {
                    get_el_sp(sp): occu for sp, occu in new_sp.items()
                }
            else:
                converted[get_el_sp(orig_sp)] = get_el_sp(new_sp)
        substituted = structure.copy()
        substituted.replace_species(converted)
        return substituted

    def __str__(self):
        mappings = ["{}->{}".format(k, v) for k, v in self._species_map.items()]
        return "Substitution Transformation :" + ", ".join(mappings)

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """
        Returns:
            Inverse Transformation (the reversed species mapping).
        """
        return SubstitutionTransformation(
            {v: k for k, v in self._species_map.items()}
        )

    @property
    def is_one_to_many(self):
        """False: exactly one structure is produced."""
        return False
class RemoveSpeciesTransformation(AbstractTransformation):
    """Remove every occurrence of the given species from a structure."""

    def __init__(self, species_to_remove):
        """
        Args:
            species_to_remove: List of species to remove. E.g., ["Li", "Mn"]
        """
        self.species_to_remove = species_to_remove

    def apply_transformation(self, structure):
        """
        Apply the transformation.

        Args:
            structure (Structure): Input Structure

        Returns:
            Structure with species removed.
        """
        stripped = structure.copy()
        for specie in self.species_to_remove:
            stripped.remove_species([get_el_sp(specie)])
        return stripped

    def __str__(self):
        return "Remove Species Transformation :" + ", ".join(self.species_to_remove)

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """None: removed species cannot be restored."""
        return None

    @property
    def is_one_to_many(self):
        """False: exactly one structure is produced."""
        return False
class PartialRemoveSpecieTransformation(AbstractTransformation):
    """Remove a fraction of a specie from a structure.

    Requires an oxidation-state decorated structure, since an Ewald sum is
    used to rank candidate removals. Selecting the right removals is
    NP-hard, so several algorithms with varying degrees of accuracy and
    speed are provided. Please see
    :class:`pymatgen.transformations.site_transformations.PartialRemoveSitesTransformation`.
    """

    ALGO_FAST = 0
    ALGO_COMPLETE = 1
    ALGO_BEST_FIRST = 2
    ALGO_ENUMERATE = 3

    def __init__(self, specie_to_remove, fraction_to_remove, algo=ALGO_FAST):
        """
        Args:
            specie_to_remove: Species to remove. Must have oxidation state.
                E.g., "Li+"
            fraction_to_remove: Fraction of specie to remove. E.g., 0.5
            algo: One of the PartialRemoveSpecieTransformation.ALGO_*
                constants, selecting the ordering algorithm.
        """
        self.specie_to_remove = specie_to_remove
        self.fraction_to_remove = fraction_to_remove
        self.algo = algo

    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Apply the transformation.

        Args:
            structure: input structure
            return_ranked_list (bool/int): Whether or not multiple structures
                are returned. If an int, that number of structures is
                returned.

        Returns:
            Depending on return_ranked_list, either a transformed structure
            or a list of dictionaries of the form
            {"structure" = ..., "other_arguments"}; the key "transformation"
            is reserved for the transformation that was actually applied.
            This is parsed by the alchemy classes for generating a more
            specific transformation history; any other information is stored
            in the transformation_parameters dictionary of the transmuted
            structure class.
        """
        removal_sp = get_el_sp(self.specie_to_remove)
        target = Composition({removal_sp: 1})
        matching_indices = [
            i for i, site in enumerate(structure) if site.species == target
        ]
        # Delegate the actual (NP-hard) selection to the site-level
        # transformation.
        delegate = PartialRemoveSitesTransformation(
            [matching_indices], [self.fraction_to_remove], algo=self.algo
        )
        return delegate.apply_transformation(structure, return_ranked_list)

    @property
    def is_one_to_many(self):
        """True: several ranked orderings can be produced."""
        return True

    def __str__(self):
        details = ", ".join(
            [
                "Species = {}".format(self.specie_to_remove),
                "Fraction to remove = {}".format(self.fraction_to_remove),
                "ALGO = {}".format(self.algo),
            ]
        )
        return "PartialRemoveSpecieTransformation : " + details

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """None: removals are not invertible."""
        return None
class OrderDisorderedStructureTransformation(AbstractTransformation):
    """
    Order a disordered structure. The disordered structure must be oxidation
    state decorated for ewald sum to be computed. No attempt is made to perform
    symmetry determination to reduce the number of combinations.

    Hence, attempting to performing ordering on a large number of disordered
    sites may be extremely expensive. The time scales approximately with the
    number of possible combinations. The algorithm can currently compute
    approximately 5,000,000 permutations per minute.

    Also, simple rounding of the occupancies are performed, with no attempt
    made to achieve a target composition. This is usually not a problem for
    most ordering problems, but there can be times where rounding errors may
    result in structures that do not have the desired composition.
    This second step will be implemented in the next iteration of the code.

    If multiple fractions for a single species are found for different sites,
    these will be treated separately if the difference is above a threshold
    tolerance. currently this is .1

    For example, if a fraction of .25 Li is on sites 0,1,2,3 and .5 on sites
    4, 5, 6, 7 then 1 site from [0,1,2,3] will be filled and 2 sites from
    [4,5,6,7] will be filled, even though a lower energy combination might be
    found by putting all lithium in sites [4,5,6,7].

    USE WITH CARE.
    """

    ALGO_FAST = 0
    ALGO_COMPLETE = 1
    ALGO_BEST_FIRST = 2

    def __init__(self, algo=ALGO_FAST, symmetrized_structures=False, no_oxi_states=False):
        """
        Args:
            algo (int): Algorithm to use.
            symmetrized_structures (bool): Whether the input structures are
                instances of SymmetrizedStructure, and that their symmetry
                should be used for the grouping of sites.
            no_oxi_states (bool): Whether to remove oxidation states prior to
                ordering.
        """
        self.algo = algo
        # Filled by apply_transformation with every ranked ordering found.
        self._all_structures = []
        self.no_oxi_states = no_oxi_states
        self.symmetrized_structures = symmetrized_structures

    def apply_transformation(self, structure, return_ranked_list=False):
        """
        For this transformation, the apply_transformation method will return
        only the ordered structure with the lowest Ewald energy, to be
        consistent with the method signature of the other transformations.
        However, all structures are stored in the all_structures attribute in
        the transformation object for easy access.

        Args:
            structure: Oxidation state decorated disordered structure to order
            return_ranked_list (bool): Whether or not multiple structures are
                returned. If return_ranked_list is a number, that number of
                structures is returned.

        Returns:
            Depending on returned_ranked list, either a transformed structure
            or a list of dictionaries, where each dictionary is of the form
            {"structure" = .... , "other_arguments"}
            the key "transformation" is reserved for the transformation that
            was actually applied to the structure.
            This transformation is parsed by the alchemy classes for generating
            a more specific transformation history. Any other information will
            be stored in the transformation_parameters dictionary in the
            transmuted structure class.
        """
        try:
            num_to_return = int(return_ranked_list)
        except ValueError:
            num_to_return = 1
        # NOTE(review): a non-numeric, non-bool argument (e.g. None) would
        # raise TypeError here rather than being caught — confirm callers
        # only pass bool or int.

        num_to_return = max(1, num_to_return)

        if self.no_oxi_states:
            # Decorate every species with a dummy 0+ oxidation state so the
            # Ewald machinery below still has charges to work with.
            structure = Structure.from_sites(structure)
            for i, site in enumerate(structure):
                structure[i] = {"%s0+" % k.symbol: v for k, v in site.species.items()}

        equivalent_sites = []
        exemplars = []
        # generate list of equivalent sites to order
        # equivalency is determined by sp_and_occu and symmetry
        # if symmetrized structure is true
        for i, site in enumerate(structure):
            if site.is_ordered:
                continue
            for j, ex in enumerate(exemplars):
                sp = ex.species
                if not site.species.almost_equals(sp):
                    continue
                if self.symmetrized_structures:
                    sym_equiv = structure.find_equivalent_sites(ex)
                    sym_test = site in sym_equiv
                else:
                    sym_test = True
                if sym_test:
                    equivalent_sites[j].append(i)
                    break
            else:
                # No matching group found: this site starts a new group.
                equivalent_sites.append([i])
                exemplars.append(site)

        # generate the list of manipulations and input structure
        s = Structure.from_sites(structure)

        m_list = []
        for g in equivalent_sites:
            total_occupancy = sum([structure[i].species for i in g], Composition())
            total_occupancy = dict(total_occupancy.items())
            # round total occupancy to possible values
            for k, v in total_occupancy.items():
                if abs(v - round(v)) > 0.25:
                    raise ValueError("Occupancy fractions not consistent " "with size of unit cell")
                total_occupancy[k] = int(round(v))
            # start with an ordered structure: fill the whole group with the
            # species of largest |oxidation state|
            initial_sp = max(total_occupancy.keys(), key=lambda x: abs(x.oxi_state))
            for i in g:
                s[i] = initial_sp
            # determine the manipulations (charge ratio, count, sites, specie)
            for k, v in total_occupancy.items():
                if k == initial_sp:
                    continue
                m = [
                    k.oxi_state / initial_sp.oxi_state if initial_sp.oxi_state else 0,
                    v,
                    list(g),
                    k,
                ]
                m_list.append(m)
            # determine the number of empty sites
            empty = len(g) - sum(total_occupancy.values())
            if empty > 0.5:
                m_list.append([0, empty, list(g), None])

        matrix = EwaldSummation(s).total_energy_matrix
        ewald_m = EwaldMinimizer(matrix, m_list, num_to_return, self.algo)

        self._all_structures = []

        lowest_energy = ewald_m.output_lists[0][0]
        num_atoms = sum(structure.composition.values())

        for output in ewald_m.output_lists:
            s_copy = s.copy()
            # do deletions afterwards because they screw up the indices of the
            # structure
            del_indices = []
            for manipulation in output[1]:
                if manipulation[1] is None:
                    del_indices.append(manipulation[0])
                else:
                    s_copy[manipulation[0]] = manipulation[1]
            s_copy.remove_sites(del_indices)

            if self.no_oxi_states:
                s_copy.remove_oxidation_states()

            self._all_structures.append(
                {
                    "energy": output[0],
                    "energy_above_minimum": (output[0] - lowest_energy) / num_atoms,
                    "structure": s_copy.get_sorted_structure(),
                }
            )

        if return_ranked_list:
            return self._all_structures[:num_to_return]
        return self._all_structures[0]["structure"]

    def __str__(self):
        return "Order disordered structure transformation"

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        """
        Returns: None
        """
        return None

    @property
    def is_one_to_many(self):
        """
        Returns: True
        """
        return True

    @property
    def lowest_energy_structure(self):
        """
        :return: Lowest energy structure found by the last
            apply_transformation call.
        """
        return self._all_structures[0]["structure"]
class PrimitiveCellTransformation(AbstractTransformation):
    """Find the primitive cell of the input structure.

    The returned structure is not necessarily orthogonalized.

    Author: Will Richards
    """

    def __init__(self, tolerance=0.5):
        """
        Args:
            tolerance (float): Tolerance for each coordinate of a particular
                site. For example, [0.5, 0, 0.5] in cartesian coordinates
                will be considered to be on the same coordinates as [0, 0, 0]
                for a tolerance of 0.5. Defaults to 0.5.
        """
        self.tolerance = tolerance

    def apply_transformation(self, structure):
        """
        Returns most primitive cell for structure.

        Args:
            structure: A structure

        Returns:
            The most primitive structure found. The returned structure is
            guaranteed to have len(new structure) <= len(structure).
        """
        return structure.get_primitive_structure(tolerance=self.tolerance)

    def __str__(self):
        return "Primitive cell transformation"

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """None: not invertible."""
        return None

    @property
    def is_one_to_many(self):
        """False: exactly one structure is produced."""
        return False
class ConventionalCellTransformation(AbstractTransformation):
    """Find the conventional cell of the input structure."""

    def __init__(self, symprec=0.01, angle_tolerance=5, international_monoclinic=True):
        """
        Args:
            symprec (float): tolerance as in SpacegroupAnalyzer
            angle_tolerance (float): angle tolerance as in SpacegroupAnalyzer
            international_monoclinic (bool): whether to use beta (True) or
                alpha (False) as the non-right-angle in the unit cell
        """
        self.symprec = symprec
        self.angle_tolerance = angle_tolerance
        self.international_monoclinic = international_monoclinic

    def apply_transformation(self, structure):
        """
        Returns the same structure in a conventional standard setting.

        Args:
            structure: A structure
        """
        analyzer = SpacegroupAnalyzer(
            structure,
            symprec=self.symprec,
            angle_tolerance=self.angle_tolerance,
        )
        return analyzer.get_conventional_standard_structure(
            international_monoclinic=self.international_monoclinic
        )

    def __str__(self):
        return "Conventional cell transformation"

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """None: not invertible."""
        return None

    @property
    def is_one_to_many(self):
        """False: exactly one structure is produced."""
        return False
class PerturbStructureTransformation(AbstractTransformation):
    """Perturb every site of a structure by a random displacement.

    Used for breaking symmetries.
    """

    def __init__(
        self,
        distance: float = 0.01,
        min_distance: Optional[Union[int, float]] = None,
    ):
        """
        Args:
            distance: Distance of perturbation in angstroms. If
                ``min_distance`` is None, all sites are displaced by exactly
                this distance in a random direction.
            min_distance: If an int or float, each site is displaced by a
                distance drawn from the uniform distribution between
                'min_distance' and 'distance'.
        """
        self.distance = distance
        self.min_distance = min_distance

    def apply_transformation(self, structure: Structure) -> Structure:
        """
        Apply the transformation.

        Args:
            structure: Input Structure

        Returns:
            Structure with sites perturbed.
        """
        perturbed = structure.copy()
        perturbed.perturb(self.distance, min_distance=self.min_distance)
        return perturbed

    def __str__(self):
        return "PerturbStructureTransformation : " + "Min_distance = {}".format(
            self.min_distance
        )

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """None: the random displacements are not recorded."""
        return None

    @property
    def is_one_to_many(self):
        """False: exactly one structure is produced."""
        return False
class DeformStructureTransformation(AbstractTransformation):
    """Deform a structure's lattice by a deformation gradient matrix."""

    def __init__(self, deformation=((1, 0, 0), (0, 1, 0), (0, 0, 1))):
        """
        Args:
            deformation (array): deformation gradient for the transformation
                (defaults to the identity, i.e. no deformation)
        """
        self._deform = Deformation(deformation)
        # Keep a plain-list copy so the transformation serializes cleanly.
        self.deformation = self._deform.tolist()

    def apply_transformation(self, structure):
        """Apply the stored deformation gradient.

        Args:
            structure (Structure): Input Structure

        Returns:
            Deformed Structure.
        """
        return self._deform.apply_to_structure(structure)

    def __str__(self):
        return "DeformStructureTransformation : Deformation = {}".format(str(self.deformation))

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """
        Returns:
            A DeformStructureTransformation built from the inverse gradient.
        """
        return DeformStructureTransformation(self._deform.inv)

    @property
    def is_one_to_many(self):
        """A single output structure is produced.

        Returns: False
        """
        return False
class DiscretizeOccupanciesTransformation(AbstractTransformation):
    """
    Discretizes the site occupancies in a disordered structure; useful for
    grouping similar structures or as a pre-processing step for order-disorder
    transformations.
    """

    def __init__(self, max_denominator=5, tol=None, fix_denominator=False):
        """
        Args:
            max_denominator:
                An integer maximum denominator for discretization. A higher
                denominator allows for finer resolution in the site occupancies.
            tol:
                A float that sets the maximum difference between the original and
                discretized occupancies before throwing an error. If None, it is
                set to 1 / (4 * max_denominator).
            fix_denominator(bool):
                If True, will enforce a common denominator for all species.
                This prevents a mix of denominators (for example, 1/3, 1/4)
                that might require large cell sizes to perform an enumeration.
                'tol' needs to be > 1.0 in some cases.
        """
        self.max_denominator = max_denominator
        # Default tolerance scales with the discretization resolution.
        self.tol = tol if tol is not None else 1 / (4 * max_denominator)
        self.fix_denominator = fix_denominator

    def apply_transformation(self, structure):
        """
        Discretizes the site occupancies in the structure.

        Args:
            structure: disordered Structure to discretize occupancies

        Returns:
            A new disordered Structure with occupancies discretized

        Raises:
            RuntimeError: if any occupancy moves by more than `tol`.
        """
        if structure.is_ordered:
            return structure

        species = [dict(sp) for sp in structure.species_and_occu]

        for sp in species:
            # Iterate (key, occupancy) pairs directly; the original code
            # ignored the value and re-looked-up sp[k]. Only compute the
            # branch that is actually used.
            for k, old_occ in sp.items():
                if self.fix_denominator:
                    # Force a common denominator across all species.
                    new_occ = around(old_occ * self.max_denominator) / self.max_denominator
                else:
                    new_occ = float(Fraction(old_occ).limit_denominator(self.max_denominator))

                if round(abs(old_occ - new_occ), 6) > self.tol:
                    raise RuntimeError("Cannot discretize structure within tolerance!")
                sp[k] = new_occ

        return Structure(structure.lattice, species, structure.frac_coords)

    def __str__(self):
        return "DiscretizeOccupanciesTransformation"

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        """
        Returns: None
        """
        return None

    @property
    def is_one_to_many(self):
        """
        Returns: False
        """
        return False
class ChargedCellTransformation(AbstractTransformation):
    """Assign an overall charge to a structure (or defect object)."""

    def __init__(self, charge=0):
        """
        Args:
            charge: A integer charge to apply to the structure.
                Defaults to zero. Has to be a single integer. e.g. 2
        """
        self.charge = charge

    def apply_transformation(self, structure):
        """Return a copy of the structure carrying the stored charge.

        Args:
            structure (Structure): Input Structure

        Returns:
            Charged Structure.
        """
        charged = structure.copy()
        charged.set_charge(self.charge)
        return charged

    def __str__(self):
        return "Structure with charge {}".format(self.charge)

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """
        Raises: NotImplementedError
        """
        raise NotImplementedError()

    @property
    def is_one_to_many(self):
        """A single output structure is produced.

        Returns: False
        """
        return False
class ScaleToRelaxedTransformation(AbstractTransformation):
    """
    Takes the unrelaxed and relaxed structure and applies its site and volume
    relaxation to a structurally similar structures (e.g. bulk: NaCl and PbTe
    (rock-salt), slab: Sc(10-10) and Mg(10-10) (hcp), GB: Mo(001) sigma 5 GB,
    Fe(001) sigma 5). Useful for finding an initial guess of a set of similar
    structures closer to its most relaxed state.
    """

    def __init__(self, unrelaxed_structure, relaxed_structure, species_map=None):
        """
        Args:
            unrelaxed_structure (Structure): Initial, unrelaxed structure
            relaxed_structure (Structure): Relaxed structure
            species_map (dict): A dict or list of tuples containing the species mapping in
                string-string pairs. The first species corresponds to the relaxed
                structure while the second corresponds to the species in the
                structure to be scaled. E.g., {"Li":"Na"} or [("Fe2+","Mn2+")].
                Multiple substitutions can be done. Overloaded to accept
                sp_and_occu dictionary E.g. {"Si: {"Ge":0.75, "C":0.25}},
                which substitutes a single species with multiple species to
                generate a disordered structure.
        """
        # Per-parameter ratio (a, b, c, alpha, beta, gamma) between the
        # relaxed and unrelaxed reference lattices; applying these ratios to
        # a similar structure simulates its volumetric relaxation.
        relaxed = list(relaxed_structure.lattice.abc) + list(relaxed_structure.lattice.angles)
        unrelaxed = list(unrelaxed_structure.lattice.abc) + list(unrelaxed_structure.lattice.angles)
        self.params_percent_change = [r / u for r, u in zip(relaxed, unrelaxed)]

        self.unrelaxed_structure = unrelaxed_structure
        self.relaxed_structure = relaxed_structure
        self.species_map = species_map

    def apply_transformation(self, structure):
        """Scale lattice parameters and sites of `structure` by the same
        ratios observed between the reference unrelaxed/relaxed pair.

        Arg:
            structure (Structure): A structurally similar structure in
                regards to crystal and site positions.
        """
        if self.species_map is None:
            matcher = StructureMatcher()
            s_map = matcher.get_best_electronegativity_anonymous_mapping(self.unrelaxed_structure, structure)
        else:
            s_map = self.species_map

        params = list(structure.lattice.abc) + list(structure.lattice.angles)
        scaled = [p * ratio for p, ratio in zip(params, self.params_percent_change)]
        new_lattice = Lattice.from_parameters(*scaled)

        species = [s_map[site.specie] for site in self.relaxed_structure]
        frac_coords = [site.frac_coords for site in self.relaxed_structure]
        return Structure(new_lattice, species, frac_coords)

    def __str__(self):
        return "ScaleToRelaxedTransformation"

    def __repr__(self):
        return str(self)

    @property
    def inverse(self):
        """No inverse is defined.

        Returns: None
        """
        return None

    @property
    def is_one_to_many(self):
        """A single output structure is produced.

        Returns: False
        """
        return False
| mit |
crobinso/pkgdb2 | tests/test_flask_ui_collections.py | 7 | 8467 | # -*- coding: utf-8 -*-
#
# Copyright © 2013-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
pkgdb tests for the Flask application.
'''
__requires__ = ['SQLAlchemy >= 0.8']
import pkg_resources
import unittest
import sys
import os
from mock import patch
sys.path.insert(0, os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..'))
import pkgdb2
from pkgdb2.lib import model
from tests import (Modeltests, FakeFasUser, FakeFasUserAdmin,
create_package_acl, user_set)
class FlaskUiCollectionsTest(Modeltests):
    """ Flask tests for the collections UI endpoints of pkgdb2. """

    def setUp(self):
        """ Set up the environment, ran before every tests. """
        super(FlaskUiCollectionsTest, self).setUp()

        pkgdb2.APP.config['TESTING'] = True
        # Point every blueprint at the per-test database session so that
        # all views under test share the same transactional state.
        pkgdb2.SESSION = self.session
        pkgdb2.ui.SESSION = self.session
        pkgdb2.ui.acls.SESSION = self.session
        pkgdb2.ui.admin.SESSION = self.session
        pkgdb2.ui.collections.SESSION = self.session
        pkgdb2.ui.packagers.SESSION = self.session
        pkgdb2.ui.packages.SESSION = self.session
        self.app = pkgdb2.APP.test_client()

    def test_list_collections(self):
        """ Test the list_collections function. """
        output = self.app.get('/collections/')
        self.assertEqual(output.status_code, 200)
        self.assertTrue('<h1>Search collections</h1>' in output.data)

        # A non-numeric ?limit= must be handled gracefully, not error out.
        output = self.app.get('/collections/?limit=abc')
        self.assertEqual(output.status_code, 200)
        self.assertTrue('<h1>Search collections</h1>' in output.data)

    def test_collection_info(self):
        """ Test the collection_info function. """
        create_package_acl(self.session)

        # Known collection renders its page.
        output = self.app.get('/collection/master/')
        self.assertEqual(output.status_code, 200)
        self.assertTrue('<h1>Fedora devel</h1>' in output.data)

        # Unknown collection shows an error message (still HTTP 200).
        output = self.app.get('/collection/random/')
        self.assertEqual(output.status_code, 200)
        self.assertTrue('<li class="errors">No collection of this name '
                        'found.</li>' in output.data)

    @patch('pkgdb2.is_admin')
    def test_collection_new(self, login_func):
        """ Test the collection_new function. """
        login_func.return_value = None
        create_package_acl(self.session)

        # A regular (non-admin) user is redirected away from the form.
        user = FakeFasUser()
        with user_set(pkgdb2.APP, user):
            output = self.app.get('/new/collection/')
            self.assertEqual(output.status_code, 302)

        # An admin sees the form; submitting it empty triggers one
        # "required" error per mandatory field (6 of them).
        user = FakeFasUserAdmin()
        with user_set(pkgdb2.APP, user):
            output = self.app.get('/new/collection/')
            self.assertEqual(output.status_code, 200)
            self.assertTrue(
                '<h1>Create a new collection</h1>' in output.data)
            self.assertTrue(
                '<input id="csrf_token" name="csrf_token"' in output.data)

            # Extract the CSRF token from the rendered form so the POST
            # passes form validation.
            csrf_token = output.data.split(
                'name="csrf_token" type="hidden" value="')[1].split('">')[0]

            data = {
                'clt_name': '',
                'version': '',
                'clt_status': '',
                'branchname': '',
                'dist_tag': '',
                'csrf_token': csrf_token,
            }

            output = self.app.post('/new/collection/', data=data)
            self.assertEqual(output.status_code, 200)
            self.assertEqual(
                output.data.count(
                    '<td class="errors">This field is required.</td>'
                ), 6)

        # A complete, valid submission creates the collection.
        user = FakeFasUserAdmin()
        with user_set(pkgdb2.APP, user):
            output = self.app.get('/new/collection/')
            self.assertEqual(output.status_code, 200)
            self.assertTrue(
                '<h1>Create a new collection</h1>' in output.data)
            self.assertTrue(
                '<input id="csrf_token" name="csrf_token"' in output.data)

            csrf_token = output.data.split(
                'name="csrf_token" type="hidden" value="')[1].split('">')[0]

            data = {
                'clt_name': 'Fedora',
                'version': '19',
                'clt_status': 'Active',
                'branchname': 'f19',
                'dist_tag': '.fc19',
                'kojiname': 'f19',
                'csrf_token': csrf_token,
            }

            output = self.app.post('/new/collection/', data=data,
                                   follow_redirects=True)
            self.assertEqual(output.status_code, 200)
            self.assertTrue(
                '<li class="message">Collection "f19" created</li>'
                in output.data)

    @patch('pkgdb2.is_admin')
    def test_collection_edit(self, login_func):
        """ Test the collection_edit function. """
        login_func.return_value = None
        create_package_acl(self.session)

        # Non-admin users are redirected away from the edit form.
        user = FakeFasUser()
        with user_set(pkgdb2.APP, user):
            output = self.app.get('/collection/master/edit')
            self.assertEqual(output.status_code, 302)

        # Admin sees the edit form for an existing collection.
        user = FakeFasUserAdmin()
        with user_set(pkgdb2.APP, user):
            output = self.app.get('/collection/master/edit')
            self.assertEqual(output.status_code, 200)
            self.assertTrue(
                '<h1>Edit collection</h1>' in output.data)
            self.assertTrue(
                '<input id="csrf_token" name="csrf_token"' in output.data)

        # Editing an unknown collection shows an error message.
        user = FakeFasUserAdmin()
        with user_set(pkgdb2.APP, user):
            output = self.app.get('/collection/random/edit')
            self.assertEqual(output.status_code, 200)
            self.assertTrue(
                '<li class="errors">No collection of this name found.</li>'
                in output.data)

        # A valid edit round-trips: the collection is unchanged here since
        # the POSTed values match the existing record.
        user = FakeFasUserAdmin()
        with user_set(pkgdb2.APP, user):
            output = self.app.get('/collection/f17/edit')
            self.assertEqual(output.status_code, 200)
            self.assertTrue(
                '<h1>Edit collection</h1>' in output.data)
            self.assertTrue(
                '<input id="csrf_token" name="csrf_token"' in output.data)

            csrf_token = output.data.split(
                'name="csrf_token" type="hidden" value="')[1].split('">')[0]

            collections = model.Collection.by_name(self.session, 'f17')
            self.assertEqual(
                "Collection(u'Fedora', u'17', u'Active', owner:u'toshio')",
                collections.__repr__())
            self.assertEqual(collections.branchname, 'f17')

            data = {
                'clt_name': 'Fedora',
                'version': '17',
                'clt_status': 'Active',
                'branchname': 'f17',
                'dist_tag': '.fc17',
                'kojiname': 'f17',
                'csrf_token': csrf_token,
            }

            output = self.app.post('/collection/f17/edit', data=data,
                                   follow_redirects=True)
            self.assertEqual(output.status_code, 200)
            self.assertTrue(
                '<li class="message">Collection "f17" edited</li>'
                in output.data)

            collections = model.Collection.by_name(self.session, 'f17')
            self.assertEqual(
                "Collection(u'Fedora', u'17', u'Active', owner:u'toshio')",
                collections.__repr__())
            self.assertEqual(collections.branchname, 'f17')
if __name__ == '__main__':
    # Run only this module's tests when executed directly.
    SUITE = unittest.TestLoader().loadTestsFromTestCase(FlaskUiCollectionsTest)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-2.0 |
michaelgugino/turbo-lister | sqlalchemy/dialects/postgresql/zxjdbc.py | 18 | 1395 | # postgresql/zxjdbc.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: postgresql+zxjdbc://scott:tiger@localhost/db
:driverurl: http://jdbc.postgresql.org/
"""
from ...connectors.zxJDBC import ZxJDBCConnector
from .base import PGDialect, PGExecutionContext
class PGExecutionContext_zxjdbc(PGExecutionContext):
    """Execution context that installs the PostgreSQL zxJDBC data handler
    on every cursor it creates."""

    def create_cursor(self):
        # Wrap the default datahandler so PostgreSQL-specific types
        # round-trip correctly through zxJDBC.
        c = self._dbapi_connection.cursor()
        c.datahandler = self.dialect.DataHandler(c.datahandler)
        return c
class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect):
    """PostgreSQL dialect running over zxJDBC (Jython)."""

    jdbc_db_name = 'postgresql'
    jdbc_driver_name = 'org.postgresql.Driver'

    execution_ctx_cls = PGExecutionContext_zxjdbc

    supports_native_decimal = True

    def __init__(self, *args, **kwargs):
        super(PGDialect_zxjdbc, self).__init__(*args, **kwargs)
        # Imported lazily: the com.ziclix Java package only exists under
        # Jython, so this class must not import it at module load time.
        from com.ziclix.python.sql.handler import PostgresqlDataHandler
        self.DataHandler = PostgresqlDataHandler

    def _get_server_version_info(self, connection):
        # dbversion is e.g. "9.4.5"; return it as an int tuple (9, 4, 5).
        return tuple(
            int(part)
            for part in connection.connection.dbversion.split('.')
        )

dialect = PGDialect_zxjdbc
| gpl-3.0 |
hhkaos/awesome-arcgis | node_modules/gitbook/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py | 1284 | 100329 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
from gyp.common import OrderedSet
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
# Mapping from gyp's generic variable names to values ninja understands.
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_PREFIX': 'lib',

  # Gyp expects the following variables to be expandable by the build
  # system to the appropriate locations.  Ninja prefers paths to be
  # known at gyp time.  To resolve this, introduce special
  # variables starting with $! and $| (which begin with a $ so gyp knows it
  # should be treated specially, but is otherwise an invalid
  # ninja/shell variable) that are passed to gyp here but expanded
  # before writing out into the target .ninja files; see
  # ExpandSpecial.
  # $! is used for variables that represent a path and that can only appear at
  # the start of a string, while $| is used for variables that can appear
  # anywhere in a string.
  'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
  'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
  'PRODUCT_DIR': '$!PRODUCT_DIR',
  'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',

  # Special variables that may be used by gyp 'rule' targets.
  # We generate definitions for these variables on the fly when processing a
  # rule.
  'RULE_INPUT_ROOT': '${root}',
  'RULE_INPUT_DIRNAME': '${dirname}',
  'RULE_INPUT_PATH': '${source}',
  'RULE_INPUT_EXT': '${ext}',
  'RULE_INPUT_NAME': '${name}',
}

# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None

# Whether a separate host toolset is supported (cross-compilation).
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
def StripPrefix(arg, prefix):
  """Return |arg| with a leading |prefix| removed; unchanged otherwise."""
  return arg[len(prefix):] if arg.startswith(prefix) else arg
def QuoteShellArgument(arg, flavor):
  """Quote a string such that it will be interpreted as a single argument
  by the shell."""
  # Whitelist characters that never need quoting and wrap everything else;
  # enumerating every dangerous shell character would be fragile.
  if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
    return arg  # No quoting necessary.
  if flavor == 'win':
    return gyp.msvs_emulation.QuoteForRspFile(arg)
  # POSIX: single-quote, with embedded single quotes spliced as '"'"'.
  escaped = arg.replace("'", "'\"'\"'")
  return "'" + escaped + "'"
def Define(d, flavor):
  """Takes a preprocessor define and returns a -D parameter that's ninja- and
  shell-escaped."""
  if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocessor definitions
    # for some reason. Octal-encode to work around that.
    d = d.replace('#', '\\%03o' % ord('#'))
  return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
def AddArch(output, arch):
  """Adds an arch string to an output path (before the extension)."""
  root, ext = os.path.splitext(output)
  return '%s.%s%s' % (root, arch, ext)
class Target(object):
  """Paths produced while building a single gyp target.

  Building a target proceeds through up to four optional phases:
  1) actions/rules/copies generate sources/resources,
  2) compiles generate .o files,
  3) a link generates a binary,
  4) a mac bundle merges the above.

  Dependent targets usually consume only the last output of that chain, but
  some build steps need intermediate paths (e.g. the static library path
  when linking a dependent), so those are recorded individually here.
  Member variables hold concrete single-file paths; methods derive
  aggregate values such as the final output.
  """

  def __init__(self, type):
    # Gyp type ("static_library", etc.) of this target.
    self.type = type
    # Stamp marking completion of input deps needed by dependent actions.
    self.preaction_stamp = None
    # Stamp marking completion of input deps needed by dependent compiles.
    self.precompile_stamp = None
    # Stamp marking completion of this target's actions/rules/copies, if any.
    self.actions_stamp = None
    # Output of the link step, if any.
    self.binary = None
    # File marking completion of the mac bundle build, if any.
    self.bundle = None
    # Windows incremental linking links against the .objs that compose a
    # .lib rather than the .lib itself; those .objs, plus the compile deps
    # needed to build them, are tracked here so direct dependents can
    # depend on them too.
    self.component_objs = None
    self.compile_deps = None
    # Windows import library (.lib) produced alongside a .dll; dependents
    # link against this rather than the dll itself.
    self.import_lib = None

  def Linkable(self):
    """Whether other targets can link against this one."""
    return self.type in ('static_library', 'shared_library')

  def UsesToc(self, flavor):
    """Whether a restat rule based on a .TOC file should be produced."""
    # The naive approach would place the .TOC inside a bundle, so bundles
    # are excluded for now; Windows never uses TOC files.
    if flavor == 'win' or self.bundle:
      return False
    return self.type in ('shared_library', 'loadable_module')

  def PreActionInput(self, flavor):
    """Path, if any, that dependent action steps should depend on."""
    if self.UsesToc(flavor):
      return self.FinalOutput() + '.TOC'
    return self.FinalOutput() or self.preaction_stamp

  def PreCompileInput(self):
    """Path, if any, that dependent compile steps should depend on."""
    return self.actions_stamp or self.precompile_stamp

  def FinalOutput(self):
    """The last output of the target, which depends on all prior steps."""
    return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
class NinjaWriter(object):
def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir,
             output_file, toplevel_build, output_file_name, flavor,
             toplevel_dir=None):
  """
  base_dir: path from source root to directory containing this gyp file,
            by gyp semantics, all input paths are relative to this
  build_dir: path from source root to build output
  toplevel_dir: path to the toplevel directory
  """

  self.hash_for_rules = hash_for_rules
  self.target_outputs = target_outputs
  self.base_dir = base_dir
  self.build_dir = build_dir
  # All rules/builds for this target are emitted through this writer.
  self.ninja = ninja_syntax.Writer(output_file)
  self.toplevel_build = toplevel_build
  self.output_file_name = output_file_name

  self.flavor = flavor
  self.abs_build_dir = None
  if toplevel_dir is not None:
    self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
                                                      build_dir))
  self.obj_ext = '.obj' if flavor == 'win' else '.o'
  if flavor == 'win':
    # See docstring of msvs_emulation.GenerateEnvironmentFiles().
    self.win_env = {}
    for arch in ('x86', 'x64'):
      self.win_env[arch] = 'environment.' + arch

  # Relative path from build output dir to base dir.
  build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
  self.build_to_base = os.path.join(build_to_top, base_dir)
  # Relative path from base dir to build dir.
  base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
  self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
  """Expand specials like $!PRODUCT_DIR in |path|.

  If |product_dir| is None, assumes the cwd is already the product
  dir.  Otherwise, |product_dir| is the relative path to the product
  dir.
  """

  product_token = '$!PRODUCT_DIR'
  if product_token in path:
    if product_dir:
      path = path.replace(product_token, product_dir)
    else:
      # The product dir is the cwd: drop the token (and a trailing
      # separator); a bare occurrence becomes the current directory.
      for old, new in ((product_token + '/', ''),
                       (product_token + '\\', ''),
                       (product_token, '.')):
        path = path.replace(old, new)

  intermediate_token = '$!INTERMEDIATE_DIR'
  if intermediate_token in path:
    int_dir = self.GypPathToUniqueOutput('gen')
    # GypPathToUniqueOutput generates a path relative to the product dir,
    # so insert product_dir in front if it is provided.
    path = path.replace(intermediate_token,
                        os.path.join(product_dir or '', int_dir))

  return path.replace('$|CONFIGURATION_NAME', self.config_name)
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
  """Substitute the RULE_INPUT_* placeholders in |path| with the pieces
  of the rule's input file."""
  if self.flavor == 'win':
    path = self.msvs_settings.ConvertVSMacros(
        path, config=self.config_name)
  # Table-driven substitution; order matches the original replacements.
  substitutions = (
      ('RULE_INPUT_ROOT', root),
      ('RULE_INPUT_DIRNAME', dirname),
      ('RULE_INPUT_PATH', source),
      ('RULE_INPUT_EXT', ext),
      ('RULE_INPUT_NAME', name),
  )
  for key, value in substitutions:
    path = path.replace(generator_default_variables[key], value)
  return path
def GypPathToNinja(self, path, env=None):
  """Translate a gyp path to a ninja path, optionally expanding environment
  variable references in |path| with |env|.

  See the above discourse on path conversions."""
  if env:
    if self.flavor == 'mac':
      path = gyp.xcode_emulation.ExpandEnvVars(path, env)
    elif self.flavor == 'win':
      path = gyp.msvs_emulation.ExpandMacros(path, env)
  if path.startswith('$!'):
    # $!-variables (PRODUCT_DIR / INTERMEDIATE_DIR) may only appear at the
    # start of a path; the result is already relative to the build dir.
    expanded = self.ExpandSpecial(path)
    if self.flavor == 'win':
      expanded = os.path.normpath(expanded)
    return expanded
  if '$|' in path:
    # $|-variables (CONFIGURATION_NAME) can appear anywhere in the string.
    path = self.ExpandSpecial(path)
  assert '$' not in path, path
  return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
  """Translate a gyp path to a ninja path for writing output.

  If qualified is True, qualify the resulting filename with the name
  of the target.  This is necessary when e.g. compiling the same
  path twice for two separate output targets.

  See the above discourse on path conversions."""

  path = self.ExpandSpecial(path)
  assert not path.startswith('$'), path

  # Translate the path following this scheme:
  #   Input: foo/bar.gyp, target targ, references baz/out.o
  #   Output: obj/foo/baz/targ.out.o (if qualified)
  #           obj/foo/baz/out.o (otherwise)
  #     (and obj.host instead of obj for cross-compiles)
  #
  # Why this scheme and not some other one?
  # 1) for a given input, you can compute all derived outputs by matching
  #    its path, even if the input is brought via a gyp file with '..'.
  # 2) simple files like libraries and stamps have a simple filename.

  obj = 'obj'
  if self.toolset != 'target':
    # Host-toolset outputs live in a parallel tree, e.g. obj.host/.
    obj += '.' + self.toolset

  path_dir, path_basename = os.path.split(path)
  assert not os.path.isabs(path_dir), (
      "'%s' can not be absolute path (see crbug.com/462153)." % path_dir)

  if qualified:
    path_basename = self.name + '.' + path_basename
  return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
                                       path_basename))
def WriteCollapsedDependencies(self, name, targets, order_only=None):
  """Given a list of targets, return a path for a single file
  representing the result of building all the targets or None.

  Uses a stamp file if necessary."""

  # No falsy entries allowed; callers must filter first.
  assert targets == filter(None, targets), targets
  if not targets:
    assert not order_only
    return None
  if len(targets) == 1 and not order_only:
    # Single plain input: no stamp needed, pass it through.
    return targets[0]
  # Multiple inputs (or an order-only dep): collapse behind one stamp file.
  stamp = self.GypPathToUniqueOutput(name + '.stamp')
  targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only)
  self.ninja.newline()
  return targets[0]
def _SubninjaNameForArch(self, arch):
  """Name of the per-arch subninja file derived from this target's
  output file name."""
  base, _ = os.path.splitext(self.output_file_name)
  return '%s.%s.ninja' % (base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
  """The main entry point for NinjaWriter: write the build rules for a spec.

  Returns a Target object, which represents the output paths for this spec.
  Returns None if there are no outputs (e.g. a settings-only 'none' type
  target)."""

  self.config_name = config_name
  self.name = spec['target_name']
  self.toolset = spec['toolset']
  config = spec['configurations'][config_name]
  self.target = Target(spec['type'])
  self.is_standalone_static_library = bool(
      spec.get('standalone_static_library', 0))
  # Track if this target contains any C++ files, to decide if gcc or g++
  # should be used for linking.
  self.uses_cpp = False

  self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
  self.xcode_settings = self.msvs_settings = None
  if self.flavor == 'mac':
    self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
  if self.flavor == 'win':
    self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
                                                         generator_flags)
    # Bind the per-arch toolchain variables (set up by the top-level
    # generator) to this target's configured architecture.
    arch = self.msvs_settings.GetArch(config_name)
    self.ninja.variable('arch', self.win_env[arch])
    self.ninja.variable('cc', '$cl_' + arch)
    self.ninja.variable('cxx', '$cl_' + arch)
    self.ninja.variable('cc_host', '$cl_' + arch)
    self.ninja.variable('cxx_host', '$cl_' + arch)
    self.ninja.variable('asm', '$ml_' + arch)

  if self.flavor == 'mac':
    self.archs = self.xcode_settings.GetActiveArchs(config_name)
    if len(self.archs) > 1:
      # Fat binaries get one subninja writer per architecture.
      self.arch_subninjas = dict(
          (arch, ninja_syntax.Writer(
              OpenOutput(os.path.join(self.toplevel_build,
                                      self._SubninjaNameForArch(arch)),
                         'w')))
          for arch in self.archs)

  # Compute predepends for all rules.
  # actions_depends is the dependencies this target depends on before running
  # any of its action/rule/copy steps.
  # compile_depends is the dependencies this target depends on before running
  # any of its compile steps.
  actions_depends = []
  compile_depends = []
  # TODO(evan): it is rather confusing which things are lists and which
  # are strings.  Fix these.
  if 'dependencies' in spec:
    for dep in spec['dependencies']:
      if dep in self.target_outputs:
        target = self.target_outputs[dep]
        actions_depends.append(target.PreActionInput(self.flavor))
        compile_depends.append(target.PreCompileInput())
    actions_depends = filter(None, actions_depends)
    compile_depends = filter(None, compile_depends)
    actions_depends = self.WriteCollapsedDependencies('actions_depends',
                                                      actions_depends)
    compile_depends = self.WriteCollapsedDependencies('compile_depends',
                                                      compile_depends)
    self.target.preaction_stamp = actions_depends
    self.target.precompile_stamp = compile_depends

  # Write out actions, rules, and copies.  These must happen before we
  # compile any sources, so compute a list of predependencies for sources
  # while we do it.
  extra_sources = []
  mac_bundle_depends = []
  self.target.actions_stamp = self.WriteActionsRulesCopies(
      spec, extra_sources, actions_depends, mac_bundle_depends)

  # If we have actions/rules/copies, we depend directly on those, but
  # otherwise we depend on dependent target's actions/rules/copies etc.
  # We never need to explicitly depend on previous target's link steps,
  # because no compile ever depends on them.
  compile_depends_stamp = (self.target.actions_stamp or compile_depends)

  # Write out the compilation steps, if any.
  link_deps = []
  sources = extra_sources + spec.get('sources', [])
  if sources:
    if self.flavor == 'mac' and len(self.archs) > 1:
      # Write subninja file containing compile and link commands scoped to
      # a single arch if a fat binary is being built.
      for arch in self.archs:
        self.ninja.subninja(self._SubninjaNameForArch(arch))

    pch = None
    if self.flavor == 'win':
      gyp.msvs_emulation.VerifyMissingSources(
          sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
      pch = gyp.msvs_emulation.PrecompiledHeader(
          self.msvs_settings, config_name, self.GypPathToNinja,
          self.GypPathToUniqueOutput, self.obj_ext)
    else:
      pch = gyp.xcode_emulation.MacPrefixHeader(
          self.xcode_settings, self.GypPathToNinja,
          lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
    link_deps = self.WriteSources(
        self.ninja, config_name, config, sources, compile_depends_stamp, pch,
        spec)
    # Some actions/rules output 'sources' that are already object files.
    obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
    if obj_outputs:
      if self.flavor != 'mac' or len(self.archs) == 1:
        link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
      else:
        print "Warning: Actions/rules writing object files don't work with " \
            "multiarch targets, dropping. (target %s)" % spec['target_name']
  elif self.flavor == 'mac' and len(self.archs) > 1:
    link_deps = collections.defaultdict(list)

  compile_deps = self.target.actions_stamp or actions_depends
  if self.flavor == 'win' and self.target.type == 'static_library':
    # Record the pieces dependents need for incremental linking (see the
    # component_objs comment on Target).
    self.target.component_objs = link_deps
    self.target.compile_deps = compile_deps

  # Write out a link step, if needed.
  output = None
  is_empty_bundle = not link_deps and not mac_bundle_depends
  if link_deps or self.target.actions_stamp or actions_depends:
    output = self.WriteTarget(spec, config_name, config, link_deps,
                              compile_deps)
    if self.is_mac_bundle:
      mac_bundle_depends.append(output)

  # Bundle all of the above together, if needed.
  if self.is_mac_bundle:
    output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)

  if not output:
    return None

  assert self.target.FinalOutput(), output
  return self.target
  def _WinIdlRule(self, source, prebuild, outputs):
    """Handle the implicit VS .idl rule for one source file. Fills |outputs|
    with files that are generated."""
    # Ask the MSVS emulation layer where the generated files go and which
    # midl variables/flags apply for this source/config pair.
    outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
        source, self.config_name)
    outdir = self.GypPathToNinja(outdir)
    def fix_path(path, rel=None):
      # Expand rule variables (root/dirname/ext/...) in |path| relative to
      # |source|, optionally re-expressing the result relative to |rel|.
      path = os.path.join(outdir, path)
      dirname, basename = os.path.split(source)
      root, ext = os.path.splitext(basename)
      path = self.ExpandRuleVariables(
          path, root, dirname, source, ext, basename)
      if rel:
        path = os.path.relpath(path, rel)
      return path
    vars = [(name, fix_path(value, outdir)) for name, value in vars]
    output = [fix_path(p) for p in output]
    vars.append(('outdir', outdir))
    vars.append(('idlflags', flags))
    input = self.GypPathToNinja(source)
    # One build edge per .idl file, all using the shared 'idl' rule.
    self.ninja.build(output, 'idl', input,
                     variables=vars, order_only=prebuild)
    outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRulesOrActions(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
  def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
                              mac_bundle_depends):
    """Write out the Actions, Rules, and Copies steps.  Return a path
    representing the outputs of these steps."""
    outputs = []
    if self.is_mac_bundle:
      # Copy the list: rules may remove entries that they consume themselves.
      mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
    else:
      mac_bundle_resources = []
    extra_mac_bundle_resources = []
    if 'actions' in spec:
      outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
                                   extra_mac_bundle_resources)
    if 'rules' in spec:
      outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
                                 mac_bundle_resources,
                                 extra_mac_bundle_resources)
    if 'copies' in spec:
      outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
    if 'sources' in spec and self.flavor == 'win':
      outputs += self.WriteWinIdlFiles(spec, prebuild)
    # Collapse all outputs into a single stamp so later steps have one dep.
    stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
    if self.is_mac_bundle:
      xcassets = self.WriteMacBundleResources(
          extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
      partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends)
      self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends)
    return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
  def WriteActions(self, actions, extra_sources, prebuild,
                   extra_mac_bundle_resources):
    """Write one ninja rule + build edge per gyp 'action'.

    Appends to |extra_sources| / |extra_mac_bundle_resources| in place when
    an action asks for its outputs to be treated as such. Returns the list
    of all output paths.
    """
    # Actions cd into the base directory.
    env = self.GetToolchainEnv()
    all_outputs = []
    for action in actions:
      # First write out a rule for the action.
      name = '%s_%s' % (action['action_name'], self.hash_for_rules)
      description = self.GenerateDescription('ACTION',
                                             action.get('message', None),
                                             name)
      is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
                   if self.flavor == 'win' else False)
      args = action['action']
      depfile = action.get('depfile', None)
      if depfile:
        depfile = self.ExpandSpecial(depfile, self.base_to_build)
      pool = 'console' if int(action.get('ninja_use_console', 0)) else None
      rule_name, _ = self.WriteNewNinjaRule(name, args, description,
                                            is_cygwin, env, pool,
                                            depfile=depfile)
      inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += action['outputs']
      if int(action.get('process_outputs_as_mac_bundle_resources', False)):
        extra_mac_bundle_resources += action['outputs']
      outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
      # Then write out an edge using the rule.
      self.ninja.build(outputs, rule_name, inputs,
                       order_only=prebuild)
      all_outputs += outputs
      self.ninja.newline()
    return all_outputs
  def WriteRules(self, rules, extra_sources, prebuild,
                 mac_bundle_resources, extra_mac_bundle_resources):
    """Write ninja rules + build edges for gyp 'rules'.

    For each rule, one ninja rule is emitted plus one build edge per matching
    source file. Mutates |extra_sources|, |mac_bundle_resources| and
    |extra_mac_bundle_resources| in place. Returns all output paths.
    """
    env = self.GetToolchainEnv()
    all_outputs = []
    for rule in rules:
      # Skip a rule with no action and no inputs.
      if 'action' not in rule and not rule.get('rule_sources', []):
        continue
      # First write out a rule for the rule action.
      name = '%s_%s' % (rule['rule_name'], self.hash_for_rules)
      args = rule['action']
      description = self.GenerateDescription(
          'RULE',
          rule.get('message', None),
          ('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
      is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
                   if self.flavor == 'win' else False)
      pool = 'console' if int(rule.get('ninja_use_console', 0)) else None
      rule_name, args = self.WriteNewNinjaRule(
          name, args, description, is_cygwin, env, pool)
      # TODO: if the command references the outputs directly, we should
      # simplify it to just use $out.
      # Rules can potentially make use of some special variables which
      # must vary per source file.
      # Compute the list of variables we'll need to provide.
      special_locals = ('source', 'root', 'dirname', 'ext', 'name')
      needed_variables = set(['source'])
      for argument in args:
        for var in special_locals:
          if '${%s}' % var in argument:
            needed_variables.add(var)
      def cygwin_munge(path):
        # pylint: disable=cell-var-from-loop
        if is_cygwin:
          return path.replace('\\', '/')
        return path
      inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])]
      # If there are n source files matching the rule, and m additional rule
      # inputs, then adding 'inputs' to each build edge written below will
      # write m * n inputs. Collapsing reduces this to m + n.
      sources = rule.get('rule_sources', [])
      num_inputs = len(inputs)
      if prebuild:
        num_inputs += 1
      if num_inputs > 2 and len(sources) > 2:
        inputs = [self.WriteCollapsedDependencies(
            rule['rule_name'], inputs, order_only=prebuild)]
        prebuild = []
      # For each source file, write an edge that generates all the outputs.
      for source in sources:
        source = os.path.normpath(source)
        dirname, basename = os.path.split(source)
        root, ext = os.path.splitext(basename)
        # Gather the list of inputs and outputs, expanding $vars if possible.
        outputs = [self.ExpandRuleVariables(o, root, dirname,
                                            source, ext, basename)
                   for o in rule['outputs']]
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources += outputs
        was_mac_bundle_resource = source in mac_bundle_resources
        if was_mac_bundle_resource or \
            int(rule.get('process_outputs_as_mac_bundle_resources', False)):
          extra_mac_bundle_resources += outputs
          # Note: This is n_resources * n_outputs_in_rule.  Put to-be-removed
          # items in a set and remove them all in a single pass if this becomes
          # a performance issue.
          if was_mac_bundle_resource:
            mac_bundle_resources.remove(source)
        extra_bindings = []
        for var in needed_variables:
          if var == 'root':
            extra_bindings.append(('root', cygwin_munge(root)))
          elif var == 'dirname':
            # '$dirname' is a parameter to the rule action, which means
            # it shouldn't be converted to a Ninja path.  But we don't
            # want $!PRODUCT_DIR in there either.
            dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
            extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
          elif var == 'source':
            # '$source' is a parameter to the rule action, which means
            # it shouldn't be converted to a Ninja path.  But we don't
            # want $!PRODUCT_DIR in there either.
            source_expanded = self.ExpandSpecial(source, self.base_to_build)
            extra_bindings.append(('source', cygwin_munge(source_expanded)))
          elif var == 'ext':
            extra_bindings.append(('ext', ext))
          elif var == 'name':
            extra_bindings.append(('name', cygwin_munge(basename)))
          else:
            assert var == None, repr(var)
        outputs = [self.GypPathToNinja(o, env) for o in outputs]
        if self.flavor == 'win':
          # WriteNewNinjaRule uses unique_name for creating an rsp file on win.
          extra_bindings.append(('unique_name',
                                 hashlib.md5(outputs[0]).hexdigest()))
        self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
                         implicit=inputs,
                         order_only=prebuild,
                         variables=extra_bindings)
        all_outputs.extend(outputs)
    return all_outputs
  def WriteCopies(self, copies, prebuild, mac_bundle_depends):
    """Write one 'copy' build edge per file in gyp 'copies'.

    Returns the list of destination paths; bundle-internal destinations are
    also appended to |mac_bundle_depends|.
    """
    outputs = []
    env = self.GetToolchainEnv()
    for copy in copies:
      for path in copy['files']:
        # Normalize the path so trailing slashes don't confuse us.
        path = os.path.normpath(path)
        basename = os.path.split(path)[1]
        src = self.GypPathToNinja(path, env)
        dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
                                  env)
        outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
        if self.is_mac_bundle:
          # gyp has mac_bundle_resources to copy things into a bundle's
          # Resources folder, but there's no built-in way to copy files to
          # other places in the bundle. Hence, some targets use copies for
          # this. Check if this file is copied into the current bundle, and
          # if so add it to the bundle depends so that dependent targets get
          # rebuilt if the copy input changes.
          if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
            mac_bundle_depends.append(dst)
    return outputs
  def WriteMacBundleResources(self, resources, bundle_depends):
    """Writes ninja edges for 'mac_bundle_resources'.

    Returns the .xcassets catalogs found among |resources|; those are not
    copied here but handled separately by WriteMacXCassets.
    """
    xcassets = []
    for output, res in gyp.xcode_emulation.GetMacBundleResources(
        generator_default_variables['PRODUCT_DIR'],
        self.xcode_settings, map(self.GypPathToNinja, resources)):
      output = self.ExpandSpecial(output)
      if os.path.splitext(output)[-1] != '.xcassets':
        isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
        self.ninja.build(output, 'mac_tool', res,
                         variables=[('mactool_cmd', 'copy-bundle-resource'), \
                                    ('binary', isBinary)])
        bundle_depends.append(output)
      else:
        xcassets.append(res)
    return xcassets
  def WriteMacXCassets(self, xcassets, bundle_depends):
    """Writes ninja edges for 'mac_bundle_resources' .xcassets files.

    This add an invocation of 'actool' via the 'mac_tool.py' helper script.
    It assumes that the assets catalogs define at least one imageset and
    thus an Assets.car file will be generated in the application resources
    directory. If this is not the case, then the build will probably be done
    at each invocation of ninja.

    Returns the path of the generated partial Info.plist, or None when no
    relevant Xcode settings are present.
    """
    if not xcassets:
      return
    # Map Xcode settings onto the corresponding actool command-line flags.
    extra_arguments = {}
    settings_to_arg = {
        'XCASSETS_APP_ICON': 'app-icon',
        'XCASSETS_LAUNCH_IMAGE': 'launch-image',
    }
    settings = self.xcode_settings.xcode_settings[self.config_name]
    for settings_key, arg_name in settings_to_arg.iteritems():
      value = settings.get(settings_key)
      if value:
        extra_arguments[arg_name] = value
    partial_info_plist = None
    if extra_arguments:
      partial_info_plist = self.GypPathToUniqueOutput(
          'assetcatalog_generated_info.plist')
      extra_arguments['output-partial-info-plist'] = partial_info_plist
    outputs = []
    outputs.append(
        os.path.join(
            self.xcode_settings.GetBundleResourceFolder(),
            'Assets.car'))
    if partial_info_plist:
      outputs.append(partial_info_plist)
    # The extra arguments are passed to mac_tool.py as a JSON blob.
    keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor)
    extra_env = self.xcode_settings.GetPerTargetSettings()
    env = self.GetSortedXcodeEnv(additional_settings=extra_env)
    env = self.ComputeExportEnvString(env)
    bundle_depends.extend(self.ninja.build(
        outputs, 'compile_xcassets', xcassets,
        variables=[('env', env), ('keys', keys)]))
    return partial_info_plist
  def WriteMacInfoPlist(self, partial_info_plist, bundle_depends):
    """Write build rules for bundle Info.plist files.

    |partial_info_plist|, when not None, is merged into the main plist
    (it comes from the asset catalog step, see WriteMacXCassets).
    """
    info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
        generator_default_variables['PRODUCT_DIR'],
        self.xcode_settings, self.GypPathToNinja)
    if not info_plist:
      return
    out = self.ExpandSpecial(out)
    if defines:
      # Create an intermediate file to store preprocessed results.
      intermediate_plist = self.GypPathToUniqueOutput(
          os.path.basename(info_plist))
      defines = ' '.join([Define(d, self.flavor) for d in defines])
      info_plist = self.ninja.build(
          intermediate_plist, 'preprocess_infoplist', info_plist,
          variables=[('defines',defines)])
    env = self.GetSortedXcodeEnv(additional_settings=extra_env)
    env = self.ComputeExportEnvString(env)
    if partial_info_plist:
      intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist')
      info_plist = self.ninja.build(
          intermediate_plist, 'merge_infoplist',
          [partial_info_plist, info_plist])
    keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
    keys = QuoteShellArgument(json.dumps(keys), self.flavor)
    isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
    self.ninja.build(out, 'copy_infoplist', info_plist,
                     variables=[('env', env), ('keys', keys),
                                ('binary', isBinary)])
    bundle_depends.append(out)
  def WriteSources(self, ninja_file, config_name, config, sources, predepends,
                   precompiled_header, spec):
    """Write build rules to compile all of |sources|.

    Returns the link deps: a list of object files, or — for mac multi-arch
    builds — a dict mapping arch name to that arch's object file list.
    """
    if self.toolset == 'host':
      # Redirect the tool variables to their host-toolchain equivalents for
      # everything written after this point.
      self.ninja.variable('ar', '$ar_host')
      self.ninja.variable('cc', '$cc_host')
      self.ninja.variable('cxx', '$cxx_host')
      self.ninja.variable('ld', '$ld_host')
      self.ninja.variable('ldxx', '$ldxx_host')
      self.ninja.variable('nm', '$nm_host')
      self.ninja.variable('readelf', '$readelf_host')
    if self.flavor != 'mac' or len(self.archs) == 1:
      return self.WriteSourcesForArch(
          self.ninja, config_name, config, sources, predepends,
          precompiled_header, spec)
    else:
      # Fat mac builds: compile each arch into its own subninja.
      return dict((arch, self.WriteSourcesForArch(
            self.arch_subninjas[arch], config_name, config, sources, predepends,
            precompiled_header, spec, arch=arch))
          for arch in self.archs)
  def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
                          predepends, precompiled_header, spec, arch=None):
    """Write build rules to compile all of |sources| (for one arch on mac).

    Emits per-target ninja variables (defines, includes, cflags, ...) then
    one compile edge per recognized source file. Returns the list of object
    file outputs.
    """
    # Flavor-specific flag gathering: mac/win flags come from the emulation
    # layers; other platforms read them straight from the gyp config.
    extra_defines = []
    if self.flavor == 'mac':
      cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
      cflags_c = self.xcode_settings.GetCflagsC(config_name)
      cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
      cflags_objc = ['$cflags_c'] + \
                    self.xcode_settings.GetCflagsObjC(config_name)
      cflags_objcc = ['$cflags_cc'] + \
                     self.xcode_settings.GetCflagsObjCC(config_name)
    elif self.flavor == 'win':
      asmflags = self.msvs_settings.GetAsmflags(config_name)
      cflags = self.msvs_settings.GetCflags(config_name)
      cflags_c = self.msvs_settings.GetCflagsC(config_name)
      cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
      extra_defines = self.msvs_settings.GetComputedDefines(config_name)
      # See comment at cc_command for why there's two .pdb files.
      pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
          config_name, self.ExpandSpecial)
      if not pdbpath_c:
        obj = 'obj'
        if self.toolset != 'target':
          obj += '.' + self.toolset
        pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
        pdbpath_c = pdbpath + '.c.pdb'
        pdbpath_cc = pdbpath + '.cc.pdb'
      self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
      self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
      self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
    else:
      cflags = config.get('cflags', [])
      cflags_c = config.get('cflags_c', [])
      cflags_cc = config.get('cflags_cc', [])
    # Respect environment variables related to build, but target-specific
    # flags can still override them.
    if self.toolset == 'target':
      cflags_c = (os.environ.get('CPPFLAGS', '').split() +
                  os.environ.get('CFLAGS', '').split() + cflags_c)
      cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
                   os.environ.get('CXXFLAGS', '').split() + cflags_cc)
    elif self.toolset == 'host':
      cflags_c = (os.environ.get('CPPFLAGS_host', '').split() +
                  os.environ.get('CFLAGS_host', '').split() + cflags_c)
      cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() +
                   os.environ.get('CXXFLAGS_host', '').split() + cflags_cc)
    defines = config.get('defines', []) + extra_defines
    self.WriteVariableList(ninja_file, 'defines',
                           [Define(d, self.flavor) for d in defines])
    if self.flavor == 'win':
      self.WriteVariableList(ninja_file, 'asmflags',
                             map(self.ExpandSpecial, asmflags))
      self.WriteVariableList(ninja_file, 'rcflags',
          [QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
           for f in self.msvs_settings.GetRcflags(config_name,
                                                  self.GypPathToNinja)])
    include_dirs = config.get('include_dirs', [])
    env = self.GetToolchainEnv()
    if self.flavor == 'win':
      include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
                                                          config_name)
    self.WriteVariableList(ninja_file, 'includes',
        [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
         for i in include_dirs])
    if self.flavor == 'win':
      midl_include_dirs = config.get('midl_include_dirs', [])
      midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs(
          midl_include_dirs, config_name)
      self.WriteVariableList(ninja_file, 'midl_includes',
          [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
           for i in midl_include_dirs])
    pch_commands = precompiled_header.GetPchBuildCommands(arch)
    if self.flavor == 'mac':
      # Most targets use no prefix headers, so only write these if needed.
      for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
                       ('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
        include = precompiled_header.GetInclude(ext, arch)
        if include: ninja_file.variable(var, include)
    arflags = config.get('arflags', [])
    self.WriteVariableList(ninja_file, 'cflags',
                           map(self.ExpandSpecial, cflags))
    self.WriteVariableList(ninja_file, 'cflags_c',
                           map(self.ExpandSpecial, cflags_c))
    self.WriteVariableList(ninja_file, 'cflags_cc',
                           map(self.ExpandSpecial, cflags_cc))
    if self.flavor == 'mac':
      self.WriteVariableList(ninja_file, 'cflags_objc',
                             map(self.ExpandSpecial, cflags_objc))
      self.WriteVariableList(ninja_file, 'cflags_objcc',
                             map(self.ExpandSpecial, cflags_objcc))
    self.WriteVariableList(ninja_file, 'arflags',
                           map(self.ExpandSpecial, arflags))
    ninja_file.newline()
    outputs = []
    has_rc_source = False
    # Choose a compile rule per source extension; unrecognized extensions
    # are silently skipped.
    for source in sources:
      filename, ext = os.path.splitext(source)
      ext = ext[1:]
      obj_ext = self.obj_ext
      if ext in ('cc', 'cpp', 'cxx'):
        command = 'cxx'
        self.uses_cpp = True
      elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
        command = 'cc'
      elif ext == 's' and self.flavor != 'win':  # Doesn't generate .o.d files.
        command = 'cc_s'
      elif (self.flavor == 'win' and ext == 'asm' and
            not self.msvs_settings.HasExplicitAsmRules(spec)):
        command = 'asm'
        # Add the _asm suffix as msvs is capable of handling .cc and
        # .asm files of the same name without collision.
        obj_ext = '_asm.obj'
      elif self.flavor == 'mac' and ext == 'm':
        command = 'objc'
      elif self.flavor == 'mac' and ext == 'mm':
        command = 'objcxx'
        self.uses_cpp = True
      elif self.flavor == 'win' and ext == 'rc':
        command = 'rc'
        obj_ext = '.res'
        has_rc_source = True
      else:
        # Ignore unhandled extensions.
        continue
      input = self.GypPathToNinja(source)
      output = self.GypPathToUniqueOutput(filename + obj_ext)
      if arch is not None:
        output = AddArch(output, arch)
      implicit = precompiled_header.GetObjDependencies([input], [output], arch)
      variables = []
      if self.flavor == 'win':
        variables, output, implicit = precompiled_header.GetFlagsModifications(
            input, output, implicit, command, cflags_c, cflags_cc,
            self.ExpandSpecial)
      ninja_file.build(output, command, input,
                       implicit=[gch for _, _, gch in implicit],
                       order_only=predepends, variables=variables)
      outputs.append(output)
    if has_rc_source:
      resource_include_dirs = config.get('resource_include_dirs', include_dirs)
      self.WriteVariableList(ninja_file, 'resource_includes',
          [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
           for i in resource_include_dirs])
    self.WritePchTargets(ninja_file, pch_commands)
    ninja_file.newline()
    return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
  def WriteLink(self, spec, config_name, config, link_deps):
    """Write out a link step. Fills out target.binary. """
    if self.flavor != 'mac' or len(self.archs) == 1:
      return self.WriteLinkForArch(
          self.ninja, spec, config_name, config, link_deps)
    else:
      # Fat mac builds: link each arch in its own subninja, then combine
      # the per-arch binaries with lipo.
      output = self.ComputeOutput(spec)
      inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
                                      config_name, config, link_deps[arch],
                                      arch=arch)
                for arch in self.archs]
      extra_bindings = []
      build_output = output
      if not self.is_mac_bundle:
        self.AppendPostbuildVariable(extra_bindings, spec, output, output)
      # TODO(yyanagisawa): more work needed to fix:
      # https://code.google.com/p/gyp/issues/detail?id=411
      if (spec['type'] in ('shared_library', 'loadable_module') and
          not self.is_mac_bundle):
        # Shared libraries also get a .TOC file, so use the 'solipo' rule.
        extra_bindings.append(('lib', output))
        self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
                         variables=extra_bindings)
      else:
        self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
      return output
  def WriteLinkForArch(self, ninja_file, spec, config_name, config,
                       link_deps, arch=None):
    """Write out a link step. Fills out target.binary.

    Returns the path of the linked binary (even when the build edge lists
    additional outputs such as import libs, pdbs, or .TOC files).
    """
    command = {
      'executable':      'link',
      'loadable_module': 'solink_module',
      'shared_library':  'solink',
    }[spec['type']]
    command_suffix = ''
    implicit_deps = set()
    solibs = set()
    order_deps = set()
    if 'dependencies' in spec:
      # Two kinds of dependencies:
      # - Linkable dependencies (like a .a or a .so): add them to the link
      #   line.
      # - Non-linkable dependencies (like a rule that generates a file
      #   and writes a stamp file): add them to implicit_deps
      extra_link_deps = set()
      for dep in spec['dependencies']:
        target = self.target_outputs.get(dep)
        if not target:
          continue
        linkable = target.Linkable()
        if linkable:
          new_deps = []
          if (self.flavor == 'win' and
              target.component_objs and
              self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
            new_deps = target.component_objs
            if target.compile_deps:
              order_deps.add(target.compile_deps)
          elif self.flavor == 'win' and target.import_lib:
            new_deps = [target.import_lib]
          elif target.UsesToc(self.flavor):
            # Link against the .TOC so re-links only happen when the
            # library's interface changes, not on every rebuild.
            solibs.add(target.binary)
            implicit_deps.add(target.binary + '.TOC')
          else:
            new_deps = [target.binary]
          for new_dep in new_deps:
            if new_dep not in extra_link_deps:
              extra_link_deps.add(new_dep)
              link_deps.append(new_dep)
        final_output = target.FinalOutput()
        if not linkable or final_output != target.binary:
          implicit_deps.add(final_output)
    extra_bindings = []
    if self.uses_cpp and self.flavor != 'win':
      extra_bindings.append(('ld', '$ldxx'))
    output = self.ComputeOutput(spec, arch)
    if arch is None and not self.is_mac_bundle:
      self.AppendPostbuildVariable(extra_bindings, spec, output, output)
    is_executable = spec['type'] == 'executable'
    # The ldflags config key is not used on mac or win. On those platforms
    # linker flags are set via xcode_settings and msvs_settings, respectively.
    env_ldflags = os.environ.get('LDFLAGS', '').split()
    if self.flavor == 'mac':
      ldflags = self.xcode_settings.GetLdflags(config_name,
          self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
          self.GypPathToNinja, arch)
      ldflags = env_ldflags + ldflags
    elif self.flavor == 'win':
      manifest_base_name = self.GypPathToUniqueOutput(
          self.ComputeOutputFileName(spec))
      ldflags, intermediate_manifest, manifest_files = \
          self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
                                        self.ExpandSpecial, manifest_base_name,
                                        output, is_executable,
                                        self.toplevel_build)
      ldflags = env_ldflags + ldflags
      self.WriteVariableList(ninja_file, 'manifests', manifest_files)
      implicit_deps = implicit_deps.union(manifest_files)
      if intermediate_manifest:
        self.WriteVariableList(
            ninja_file, 'intermediatemanifest', [intermediate_manifest])
      command_suffix = _GetWinLinkRuleNameSuffix(
          self.msvs_settings.IsEmbedManifest(config_name))
      def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
      if def_file:
        implicit_deps.add(def_file)
    else:
      # Respect environment variables related to build, but target-specific
      # flags can still override them.
      ldflags = env_ldflags + config.get('ldflags', [])
      if is_executable and len(solibs):
        rpath = 'lib/'
        if self.toolset != 'target':
          rpath += self.toolset
        ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath)
        ldflags.append('-Wl,-rpath-link=%s' % rpath)
    self.WriteVariableList(ninja_file, 'ldflags',
                           map(self.ExpandSpecial, ldflags))
    library_dirs = config.get('library_dirs', [])
    if self.flavor == 'win':
      library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
                      for l in library_dirs]
      library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
                                                       self.flavor)
                      for l in library_dirs]
    else:
      library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
                                         self.flavor)
                      for l in library_dirs]
    libraries = gyp.common.uniquer(map(self.ExpandSpecial,
                                       spec.get('libraries', [])))
    if self.flavor == 'mac':
      libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
    elif self.flavor == 'win':
      libraries = self.msvs_settings.AdjustLibraries(libraries)
    self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
    linked_binary = output
    if command in ('solink', 'solink_module'):
      extra_bindings.append(('soname', os.path.split(output)[1]))
      extra_bindings.append(('lib',
                            gyp.common.EncodePOSIXShellArgument(output)))
      if self.flavor != 'win':
        link_file_list = output
        if self.is_mac_bundle:
          # 'Dependency Framework.framework/Versions/A/Dependency Framework' ->
          # 'Dependency Framework.framework.rsp'
          link_file_list = self.xcode_settings.GetWrapperName()
        if arch:
          link_file_list += '.' + arch
        link_file_list += '.rsp'
        # If an rspfile contains spaces, ninja surrounds the filename with
        # quotes around it and then passes it to open(), creating a file with
        # quotes in its name (and when looking for the rsp file, the name
        # makes it through bash which strips the quotes) :-/
        link_file_list = link_file_list.replace(' ', '_')
        extra_bindings.append(
          ('link_file_list',
            gyp.common.EncodePOSIXShellArgument(link_file_list)))
      if self.flavor == 'win':
        extra_bindings.append(('binary', output))
        if ('/NOENTRY' not in ldflags and
            not self.msvs_settings.GetNoImportLibrary(config_name)):
          self.target.import_lib = output + '.lib'
          extra_bindings.append(('implibflag',
                                 '/IMPLIB:%s' % self.target.import_lib))
          pdbname = self.msvs_settings.GetPDBName(
              config_name, self.ExpandSpecial, output + '.pdb')
          output = [output, self.target.import_lib]
          if pdbname:
            output.append(pdbname)
      elif not self.is_mac_bundle:
        output = [output, output + '.TOC']
      else:
        command = command + '_notoc'
    elif self.flavor == 'win':
      extra_bindings.append(('binary', output))
      pdbname = self.msvs_settings.GetPDBName(
          config_name, self.ExpandSpecial, output + '.pdb')
      if pdbname:
        output = [output, pdbname]
    if len(solibs):
      extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
    ninja_file.build(output, command + command_suffix, link_deps,
                     implicit=list(implicit_deps),
                     order_only=list(order_deps),
                     variables=extra_bindings)
    return linked_binary
  def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
    """Write the final link/archive/stamp step for the target.

    Fills in self.target.binary (and possibly self.target.type) and returns
    the binary path.
    """
    extra_link_deps = any(self.target_outputs.get(dep).Linkable()
                          for dep in spec.get('dependencies', [])
                          if dep in self.target_outputs)
    if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
      # TODO(evan): don't call this function for 'none' target types, as
      # it doesn't do anything, and we fake out a 'binary' with a stamp file.
      self.target.binary = compile_deps
      self.target.type = 'none'
    elif spec['type'] == 'static_library':
      self.target.binary = self.ComputeOutput(spec)
      if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
          self.is_standalone_static_library):
        # Thin archives are faster; use them where the platform allows and
        # the library is not meant to stand alone.
        self.ninja.build(self.target.binary, 'alink_thin', link_deps,
                         order_only=compile_deps)
      else:
        variables = []
        if self.xcode_settings:
          libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
          if libtool_flags:
            variables.append(('libtool_flags', libtool_flags))
        if self.msvs_settings:
          libflags = self.msvs_settings.GetLibFlags(config_name,
                                                    self.GypPathToNinja)
          variables.append(('libflags', libflags))
        if self.flavor != 'mac' or len(self.archs) == 1:
          self.AppendPostbuildVariable(variables, spec,
                                       self.target.binary, self.target.binary)
          self.ninja.build(self.target.binary, 'alink', link_deps,
                           order_only=compile_deps, variables=variables)
        else:
          # Fat mac static library: archive each arch, then combine.
          inputs = []
          for arch in self.archs:
            output = self.ComputeOutput(spec, arch)
            self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
                                            order_only=compile_deps,
                                            variables=variables)
            inputs.append(output)
          # TODO: It's not clear if libtool_flags should be passed to the alink
          # call that combines single-arch .a files into a fat .a file.
          self.AppendPostbuildVariable(variables, spec,
                                       self.target.binary, self.target.binary)
          self.ninja.build(self.target.binary, 'alink', inputs,
                           # FIXME: test proving order_only=compile_deps isn't
                           # needed.
                           variables=variables)
    else:
      self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
    return self.target.binary
  def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
    """Write the step that assembles the mac bundle from its dependencies.

    Fills in self.target.bundle and returns the bundle output path (a
    '.stamp' placeholder when |is_empty|).
    """
    assert self.is_mac_bundle
    package_framework = spec['type'] in ('shared_library', 'loadable_module')
    output = self.ComputeMacBundleOutput()
    if is_empty:
      output += '.stamp'
    variables = []
    self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
                                 is_command_start=not package_framework)
    if package_framework and not is_empty:
      variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
      self.ninja.build(output, 'package_framework', mac_bundle_depends,
                       variables=variables)
    else:
      self.ninja.build(output, 'stamp', mac_bundle_depends,
                       variables=variables)
    self.target.bundle = output
    return output
def GetToolchainEnv(self, additional_settings=None):
"""Returns the variables toolchain would set for build steps."""
env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
if self.flavor == 'win':
env = self.GetMsvsToolchainEnv(
additional_settings=additional_settings)
return env
def GetMsvsToolchainEnv(self, additional_settings=None):
"""Returns the variables Visual Studio would set for build steps."""
return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=self.config_name)
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
  def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
    """Returns a shell command that runs all the postbuilds, and removes
    |output| if any of them fails. If |is_command_start| is False, then the
    returned string will start with ' && '."""
    if not self.xcode_settings or spec['type'] == 'none' or not output:
      return ''
    output = QuoteShellArgument(output, self.flavor)
    postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
    if output_binary is not None:
      postbuilds = self.xcode_settings.AddImplicitPostbuilds(
          self.config_name,
          os.path.normpath(os.path.join(self.base_to_build, output)),
          QuoteShellArgument(
              os.path.normpath(os.path.join(self.base_to_build, output_binary)),
              self.flavor),
          postbuilds, quiet=True)
    if not postbuilds:
      return ''
    # Postbuilds expect to be run in the gyp file's directory, so insert an
    # implicit postbuild to cd to there.
    postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
        ['cd', self.build_to_base]))
    env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
    # G will be non-null if any postbuild fails. Run all postbuilds in a
    # subshell.
    commands = env + ' (' + \
        ' && '.join([ninja_syntax.escape(command) for command in postbuilds])
    command_string = (commands + '); G=$$?; '
                      # Remove the final output if any postbuild failed.
                      '((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
    if is_command_start:
      return '(' + command_string + ' && '
    else:
      # '$ ' is a ninja-escaped space, so the returned fragment starts with
      # ' && (' once ninja expands it.
      return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar;'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
  def ComputeOutput(self, spec, arch=None):
    """Compute the path for the final output of the spec.

    When |arch| is not None (mac multi-arch builds), returns the location of
    the per-arch partial output instead of the final (fat/bundle) one.
    """
    type = spec['type']

    if self.flavor == 'win':
      # An msvs OutputFile setting overrides everything else.
      override = self.msvs_settings.GetOutputName(self.config_name,
                                                  self.ExpandSpecial)
      if override:
        return override

    if arch is None and self.flavor == 'mac' and type in (
        'static_library', 'executable', 'shared_library', 'loadable_module'):
      # On mac the executable may live inside a bundle wrapper.
      filename = self.xcode_settings.GetExecutablePath()
    else:
      filename = self.ComputeOutputFileName(spec, type)

    if arch is None and 'product_dir' in spec:
      # An explicit product_dir bypasses the per-type placement rules below.
      path = os.path.join(spec['product_dir'], filename)
      return self.ExpandSpecial(path)

    # Some products go into the output root, libraries go into shared library
    # dir, and everything else goes into the normal place.
    type_in_output_root = ['executable', 'loadable_module']
    if self.flavor == 'mac' and self.toolset == 'target':
      type_in_output_root += ['shared_library', 'static_library']
    elif self.flavor == 'win' and self.toolset == 'target':
      type_in_output_root += ['shared_library']

    if arch is not None:
      # Make sure partial executables don't end up in a bundle or the regular
      # output directory.
      archdir = 'arch'
      if self.toolset != 'target':
        archdir = os.path.join('arch', '%s' % self.toolset)
      return os.path.join(archdir, AddArch(filename, arch))
    elif type in type_in_output_root or self.is_standalone_static_library:
      return filename
    elif type == 'shared_library':
      libdir = 'lib'
      if self.toolset != 'target':
        libdir = os.path.join('lib', '%s' % self.toolset)
      return os.path.join(libdir, filename)
    else:
      # Everything else is scoped under a unique obj/-style directory.
      return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
  def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
                        depfile=None):
    """Write out a new ninja "rule" statement for a given command.

    Returns the name of the new rule, and a copy of |args| with variables
    expanded.

    Args:
      name: short rule name; qualified with the target name below because the
          ninja rule namespace is global.
      args: the command line as a list of arguments (may contain VS macros on
          win, environment references on mac).
      is_cygwin: win only — run the command via a cygwin bash sub-shell.
      env: mac only — environment to expand into |args| (empty list elsewhere).
      pool: optional ninja pool used to throttle concurrent invocations.
      depfile: optional dependency file the command writes.
    """
    if self.flavor == 'win':
      args = [self.msvs_settings.ConvertVSMacros(
                  arg, self.base_to_build, config=self.config_name)
              for arg in args]
      description = self.msvs_settings.ConvertVSMacros(
          description, config=self.config_name)
    elif self.flavor == 'mac':
      # |env| is an empty list on non-mac.
      args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
      description = gyp.xcode_emulation.ExpandEnvVars(description, env)

    # TODO: we shouldn't need to qualify names; we do it because
    # currently the ninja rule namespace is global, but it really
    # should be scoped to the subninja.
    rule_name = self.name
    if self.toolset == 'target':
      rule_name += '.' + self.toolset
    rule_name += '.' + name
    # Ninja rule names may only contain a restricted character set.
    rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)

    # Remove variable references, but not if they refer to the magic rule
    # variables. This is not quite right, as it also protects these for
    # actions, not just for rules where they are valid. Good enough.
    protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
    protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
    description = re.sub(protect + r'\$', '_', description)

    # gyp dictates that commands are run from the base directory.
    # cd into the directory before running, and adjust paths in
    # the arguments to point to the proper locations.
    rspfile = None
    rspfile_content = None
    args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
    if self.flavor == 'win':
      # Long command lines go through a response file on Windows.
      rspfile = rule_name + '.$unique_name.rsp'
      # The cygwin case handles this inside the bash sub-shell.
      run_in = '' if is_cygwin else ' ' + self.build_to_base
      if is_cygwin:
        rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
            args, self.build_to_base)
      else:
        rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
      command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
                 rspfile + run_in)
    else:
      env = self.ComputeExportEnvString(env)
      command = gyp.common.EncodePOSIXShellList(args)
      command = 'cd %s; ' % self.build_to_base + env + command

    # GYP rules/actions express being no-ops by not touching their outputs.
    # Avoid executing downstream dependencies in this case by specifying
    # restat=1 to ninja.
    self.ninja.rule(rule_name, command, description, depfile=depfile,
                    restat=True, pool=pool,
                    rspfile=rspfile, rspfile_content=rspfile_content)
    self.ninja.newline()

    return rule_name, args
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp).

  Fills flavor-dependent defaults (OS, library prefixes/suffixes, output
  dirs) into |default_variables| and, for mac/win, copies shared generator
  configuration from the xcode/msvs generators into module globals.
  """
  global generator_additional_non_configuration_keys
  global generator_additional_path_sections
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    default_variables.setdefault('LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])

    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Ninja generator.
    import gyp.generator.xcode as xcode_generator
    generator_additional_non_configuration_keys = getattr(xcode_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(xcode_generator,
        'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(xcode_generator,
        'generator_extra_sources_for_rules', [])
  elif flavor == 'win':
    exts = gyp.MSVSUtil.TARGET_TYPE_EXT
    default_variables.setdefault('OS', 'win')
    default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
    default_variables['STATIC_LIB_PREFIX'] = ''
    default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
    default_variables['SHARED_LIB_PREFIX'] = ''
    default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']

    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 os.path.join('$!PRODUCT_DIR', 'lib'))
    default_variables.setdefault('LIB_DIR',
                                 os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
  """Return the relative path from the toplevel source dir to the build
  output directory (e.g. 'out')."""
  options = params['options']
  # Where make would put generated build files (pwd-relative). Ninja writes
  # nothing there itself, but honoring it eases make -> ninja migration.
  generator_dir = os.path.relpath(options.generator_output or '.')
  # The build directory, relative to generator_dir.
  output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
  return os.path.normpath(os.path.join(generator_dir, output_dir))
def CalculateGeneratorInputInfo(params):
  """Called by __init__ to initialize generator values based on params."""
  global generator_filelist_paths
  toplevel = params['options'].toplevel_dir
  # E.g. "out/gypfiles".
  qualified_out_dir = os.path.normpath(
      os.path.join(toplevel, ComputeOutputDir(params), 'gypfiles'))
  generator_filelist_paths = {
      'toplevel': toplevel,
      'qualified_out_dir': qualified_out_dir,
  }
def OpenOutput(path, mode='w'):
  """Open |path| for writing, creating directories if necessary.

  Returns the open file object; the caller is responsible for closing it.
  """
  gyp.common.EnsureDirExists(path)  # mkdir -p the parent directory chain.
  return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
  """Return |prog| prefixed by the wrapper configured for tool |cmd| (e.g.
  'ccache gcc'); |prog| unchanged when no wrapper is registered."""
  prefix = wrappers.get(cmd, '')
  return prefix + ' ' + prog if prefix else prog
def GetDefaultConcurrentLinks():
  """Returns a best-guess for a number of concurrent links.

  Linking is memory-hungry, so the link pool size is derived from total
  physical memory per platform. GYP_LINK_CONCURRENCY overrides the guess
  outright; GYP_LINK_CONCURRENCY_MAX caps it (win only).
  """
  pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
  if pool_size:
    return pool_size

  if sys.platform in ('win32', 'cygwin'):
    import ctypes

    class MEMORYSTATUSEX(ctypes.Structure):
      _fields_ = [
        ("dwLength", ctypes.c_ulong),
        ("dwMemoryLoad", ctypes.c_ulong),
        ("ullTotalPhys", ctypes.c_ulonglong),
        ("ullAvailPhys", ctypes.c_ulonglong),
        ("ullTotalPageFile", ctypes.c_ulonglong),
        ("ullAvailPageFile", ctypes.c_ulonglong),
        ("ullTotalVirtual", ctypes.c_ulonglong),
        ("ullAvailVirtual", ctypes.c_ulonglong),
        ("sullAvailExtendedVirtual", ctypes.c_ulonglong),
      ]

    stat = MEMORYSTATUSEX()
    stat.dwLength = ctypes.sizeof(stat)
    ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))

    # VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
    # on a 64 GB machine.
    mem_limit = max(1, stat.ullTotalPhys / (5 * (2 ** 30)))  # total / 5GB
    hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
    return min(mem_limit, hard_cap)
  elif sys.platform.startswith('linux'):
    if os.path.exists("/proc/meminfo"):
      with open("/proc/meminfo") as meminfo:
        memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
        for line in meminfo:
          match = memtotal_re.match(line)
          if not match:
            continue
          # Allow 8Gb per link on Linux because Gold is quite memory hungry
          return max(1, int(match.group(1)) / (8 * (2 ** 20)))
    return 1
  elif sys.platform == 'darwin':
    try:
      avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
      # A static library debug build of Chromium's unit_tests takes ~2.7GB, so
      # 4GB per ld process allows for some more bloat.
      return max(1, avail_bytes / (4 * (2 ** 30)))  # total / 4GB
    except (subprocess.CalledProcessError, OSError, ValueError):
      # sysctl may be missing, fail, or print garbage; fall back to serial
      # linking. (The previous bare 'except:' also swallowed
      # KeyboardInterrupt/SystemExit, which must propagate.)
      return 1
  else:
    # TODO(scottmg): Implement this for other platforms.
    return 1
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
  """Adds link rules for Windows platform to |master_ninja|.

  Emits 'link', 'solink' and 'solink_module' rules (suffixed with '_embed'
  when |embed_manifest| is set) that route through gyp-win-tool so manifests
  are generated/embedded as a single build step.
  """
  def FullLinkCommand(ldcmd, out, binary_type):
    # Wrap the raw linker command in gyp-win-tool's manifest handling;
    # resource id 1 is used for exes and 2 for dlls by Windows convention.
    resource_name = {
      'exe': '1',
      'dll': '2',
    }[binary_type]
    return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
           '%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
           '$manifests' % {
               'python': sys.executable,
               'out': out,
               'ldcmd': ldcmd,
               'resname': resource_name,
               'embed': embed_manifest }
  rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
  use_separate_mspdbsrv = (
      int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
  dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
  dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
            '$ld /nologo $implibflag /DLL /OUT:$binary '
            '@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
  dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
  master_ninja.rule('solink' + rule_name_suffix,
                    description=dlldesc, command=dllcmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$libs $in_newline $ldflags',
                    restat=True,
                    pool='link_pool')
  master_ninja.rule('solink_module' + rule_name_suffix,
                    description=dlldesc, command=dllcmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$libs $in_newline $ldflags',
                    restat=True,
                    pool='link_pool')
  # Note that ldflags goes at the end so that it has the option of
  # overriding default settings earlier in the command line.
  exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
             '$ld /nologo /OUT:$binary @$binary.rsp' %
             (sys.executable, use_separate_mspdbsrv))
  exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
  master_ninja.rule('link' + rule_name_suffix,
                    description='LINK%s $binary' % rule_name_suffix.upper(),
                    command=exe_cmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$in_newline $libs $ldflags',
                    pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name):
  """Write the complete ninja build description for one configuration.

  Produces out/<config>/build.ninja (toolchain variables + shared rules) and
  one subninja per non-empty target, plus phony short-name / 'all' targets.
  """
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})

  # build_dir: relative path from source root to our output files.
  # e.g. "out/Debug"
  build_dir = os.path.normpath(
      os.path.join(ComputeOutputDir(params), config_name))

  toplevel_build = os.path.join(options.toplevel_dir, build_dir)

  master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
  master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)

  # Put build-time support tools in out/{config_name}.
  gyp.common.CopyTool(flavor, toplevel_build)

  # Grab make settings for CC/CXX.
  # The rules are
  # - The priority from low to high is gcc/g++, the 'make_global_settings' in
  #   gyp, the environment variable.
  # - If there is no 'make_global_settings' for CC.host/CXX.host or
  #   'CC_host'/'CXX_host' enviroment variable, cc_host/cxx_host should be set
  #   to cc/cxx.
  if flavor == 'win':
    ar = 'lib.exe'
    # cc and cxx must be set to the correct architecture by overriding with one
    # of cl_x86 or cl_x64 below.
    cc = 'UNSET'
    cxx = 'UNSET'
    ld = 'link.exe'
    ld_host = '$ld'
  else:
    ar = 'ar'
    cc = 'cc'
    cxx = 'c++'
    ld = '$cc'
    ldxx = '$cxx'
    ld_host = '$cc_host'
    ldxx_host = '$cxx_host'

  ar_host = 'ar'
  cc_host = None
  cxx_host = None
  cc_host_global_setting = None
  cxx_host_global_setting = None
  clang_cl = None
  nm = 'nm'
  nm_host = 'nm'
  readelf = 'readelf'
  readelf_host = 'readelf'

  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings = data[build_file].get('make_global_settings', [])
  build_to_root = gyp.common.InvertRelativePath(build_dir,
                                                options.toplevel_dir)
  wrappers = {}
  # Apply any tool overrides from the gyp file's make_global_settings.
  for key, value in make_global_settings:
    if key == 'AR':
      ar = os.path.join(build_to_root, value)
    if key == 'AR.host':
      ar_host = os.path.join(build_to_root, value)
    if key == 'CC':
      cc = os.path.join(build_to_root, value)
      if cc.endswith('clang-cl'):
        clang_cl = cc
    if key == 'CXX':
      cxx = os.path.join(build_to_root, value)
    if key == 'CC.host':
      cc_host = os.path.join(build_to_root, value)
      cc_host_global_setting = value
    if key == 'CXX.host':
      cxx_host = os.path.join(build_to_root, value)
      cxx_host_global_setting = value
    if key == 'LD':
      ld = os.path.join(build_to_root, value)
    if key == 'LD.host':
      ld_host = os.path.join(build_to_root, value)
    if key == 'NM':
      nm = os.path.join(build_to_root, value)
    if key == 'NM.host':
      nm_host = os.path.join(build_to_root, value)
    if key == 'READELF':
      readelf = os.path.join(build_to_root, value)
    if key == 'READELF.host':
      readelf_host = os.path.join(build_to_root, value)
    if key.endswith('_wrapper'):
      wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)

  # Support wrappers from environment variables too.
  for key, value in os.environ.iteritems():
    if key.lower().endswith('_wrapper'):
      key_prefix = key[:-len('_wrapper')]
      key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
      wrappers[key_prefix] = os.path.join(build_to_root, value)

  if flavor == 'win':
    configs = [target_dicts[qualified_target]['configurations'][config_name]
               for qualified_target in target_list]
    shared_system_includes = None
    if not generator_flags.get('ninja_use_custom_environment_files', 0):
      shared_system_includes = \
          gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
              configs, generator_flags)
    cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
        toplevel_build, generator_flags, shared_system_includes, OpenOutput)
    for arch, path in cl_paths.iteritems():
      if clang_cl:
        # If we have selected clang-cl, use that instead.
        path = clang_cl
      command = CommandWithWrapper('CC', wrappers,
                                   QuoteShellArgument(path, 'win'))
      if clang_cl:
        # Use clang-cl to cross-compile for x86 or x86_64.
        command += (' -m32' if arch == 'x86' else ' -m64')
      master_ninja.variable('cl_' + arch, command)

  # Environment variables have the highest priority for tool selection.
  cc = GetEnvironFallback(['CC_target', 'CC'], cc)
  master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
  cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
  master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))

  if flavor == 'win':
    master_ninja.variable('ld', ld)
    master_ninja.variable('idl', 'midl.exe')
    master_ninja.variable('ar', ar)
    master_ninja.variable('rc', 'rc.exe')
    master_ninja.variable('ml_x86', 'ml.exe')
    master_ninja.variable('ml_x64', 'ml64.exe')
    master_ninja.variable('mt', 'mt.exe')
  else:
    master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
    master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
    master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
    if flavor != 'mac':
      # Mac does not use readelf/nm for .TOC generation, so avoiding polluting
      # the master ninja with extra unused variables.
      master_ninja.variable(
          'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
      master_ninja.variable(
          'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf))

  if generator_supports_multiple_toolsets:
    if not cc_host:
      cc_host = cc
    if not cxx_host:
      cxx_host = cxx

    master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
    master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
    master_ninja.variable('readelf_host',
                          GetEnvironFallback(['READELF_host'], readelf_host))
    cc_host = GetEnvironFallback(['CC_host'], cc_host)
    cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)

    # The environment variable could be used in 'make_global_settings', like
    # ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
    if '$(CC)' in cc_host and cc_host_global_setting:
      cc_host = cc_host_global_setting.replace('$(CC)', cc)
    if '$(CXX)' in cxx_host and cxx_host_global_setting:
      cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
    master_ninja.variable('cc_host',
                          CommandWithWrapper('CC.host', wrappers, cc_host))
    master_ninja.variable('cxx_host',
                          CommandWithWrapper('CXX.host', wrappers, cxx_host))
    if flavor == 'win':
      master_ninja.variable('ld_host', ld_host)
    else:
      master_ninja.variable('ld_host', CommandWithWrapper(
          'LINK', wrappers, ld_host))
      master_ninja.variable('ldxx_host', CommandWithWrapper(
          'LINK', wrappers, ldxx_host))

  master_ninja.newline()

  master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
  master_ninja.newline()

  deps = 'msvc' if flavor == 'win' else 'gcc'

  # Compile rules, specialized per flavor.
  if flavor != 'win':
    master_ninja.rule(
      'cc',
      description='CC $out',
      command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
              '$cflags_pch_c -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'cc_s',
      description='CC $out',
      command=('$cc $defines $includes $cflags $cflags_c '
              '$cflags_pch_c -c $in -o $out'))
    master_ninja.rule(
      'cxx',
      description='CXX $out',
      command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
              '$cflags_pch_cc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
  else:
    # TODO(scottmg) Separate pdb names is a test to see if it works around
    # http://crbug.com/142362. It seems there's a race between the creation of
    # the .pdb by the precompiled header step for .cc and the compilation of
    # .c files. This should be handled by mspdbsrv, but rarely errors out with
    #   c1xx : fatal error C1033: cannot open program database
    # By making the rules target separate pdb files this might be avoided.
    cc_command = ('ninja -t msvc -e $arch ' +
                  '-- '
                  '$cc /nologo /showIncludes /FC '
                  '@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
    cxx_command = ('ninja -t msvc -e $arch ' +
                   '-- '
                   '$cxx /nologo /showIncludes /FC '
                   '@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
    master_ninja.rule(
      'cc',
      description='CC $out',
      command=cc_command,
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_c',
      deps=deps)
    master_ninja.rule(
      'cxx',
      description='CXX $out',
      command=cxx_command,
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_cc',
      deps=deps)
    master_ninja.rule(
      'idl',
      description='IDL $in',
      command=('%s gyp-win-tool midl-wrapper $arch $outdir '
               '$tlb $h $dlldata $iid $proxy $in '
               '$midl_includes $idlflags' % sys.executable))
    master_ninja.rule(
      'rc',
      description='RC $in',
      # Note: $in must be last otherwise rc.exe complains.
      command=('%s gyp-win-tool rc-wrapper '
               '$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
               sys.executable))
    master_ninja.rule(
      'asm',
      description='ASM $out',
      command=('%s gyp-win-tool asm-wrapper '
               '$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
               sys.executable))

  # Link/archive rules: posix, then win, then mac.
  if flavor != 'mac' and flavor != 'win':
    master_ninja.rule(
      'alink',
      description='AR $out',
      command='rm -f $out && $ar rcs $arflags $out $in')
    master_ninja.rule(
      'alink_thin',
      description='AR $out',
      command='rm -f $out && $ar rcsT $arflags $out $in')

    # This allows targets that only need to depend on $lib's API to declare an
    # order-only dependency on $lib.TOC and avoid relinking such downstream
    # dependencies when $lib changes only in non-public ways.
    # The resulting string leaves an uninterpolated %{suffix} which
    # is used in the final substitution below.
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
        '%(solink)s && %(extract_toc)s > $lib.TOC; else '
        '%(solink)s && %(extract_toc)s > $lib.tmp && '
        'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
        'fi; fi'
        % { 'solink':
              '$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
            'extract_toc':
              ('{ $readelf -d $lib | grep SONAME ; '
               '$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})

    master_ninja.rule(
      'solink',
      description='SOLINK $lib',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
      rspfile='$link_file_list',
      rspfile_content=
          '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
      rspfile='$link_file_list',
      rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'link',
      description='LINK $out',
      command=('$ld $ldflags -o $out '
               '-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
      pool='link_pool')
  elif flavor == 'win':
    master_ninja.rule(
        'alink',
        description='LIB $out',
        command=('%s gyp-win-tool link-wrapper $arch False '
                 '$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
                 sys.executable),
        rspfile='$out.rsp',
        rspfile_content='$in_newline $libflags')
    _AddWinLinkRules(master_ninja, embed_manifest=True)
    _AddWinLinkRules(master_ninja, embed_manifest=False)
  else:
    master_ninja.rule(
      'objc',
      description='OBJC $out',
      command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
               '$cflags_pch_objc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'objcxx',
      description='OBJCXX $out',
      command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
               '$cflags_pch_objcc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'alink',
      description='LIBTOOL-STATIC $out, POSTBUILDS',
      command='rm -f $out && '
              './gyp-mac-tool filter-libtool libtool $libtool_flags '
              '-static -o $out $in'
              '$postbuilds')
    master_ninja.rule(
      'lipo',
      description='LIPO $out, POSTBUILDS',
      command='rm -f $out && lipo -create $in -output $out$postbuilds')
    master_ninja.rule(
      'solipo',
      description='SOLIPO $out, POSTBUILDS',
      command=(
          'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
          '%(extract_toc)s > $lib.TOC'
          % { 'extract_toc':
                '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
                'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))

    # Record the public interface of $lib in $lib.TOC. See the corresponding
    # comment in the posix section above for details.
    solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e $lib.TOC ] || '
             # Always force dependent targets to relink if this library
             # reexports something. Handling this correctly would require
             # recursive TOC dumping but this is rare in practice, so punt.
             'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
          '%(solink)s && %(extract_toc)s > $lib.TOC; '
        'else '
          '%(solink)s && %(extract_toc)s > $lib.tmp && '
          'if ! cmp -s $lib.tmp $lib.TOC; then '
            'mv $lib.tmp $lib.TOC ; '
          'fi; '
        'fi'
        % { 'solink': solink_base,
            'extract_toc':
              '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
              'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})

    solink_suffix = '@$link_file_list$postbuilds'
    master_ninja.rule(
      'solink',
      description='SOLINK $lib, POSTBUILDS',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': solink_suffix,
                                              'type': '-shared'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_notoc',
      description='SOLINK $lib, POSTBUILDS',
      restat=True,
      command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib, POSTBUILDS',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': solink_suffix,
                                              'type': '-bundle'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'solink_module_notoc',
      description='SOLINK(module) $lib, POSTBUILDS',
      restat=True,
      command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
      rspfile='$link_file_list',
      rspfile_content='$in $solibs $libs',
      pool='link_pool')
    master_ninja.rule(
      'link',
      description='LINK $out, POSTBUILDS',
      command=('$ld $ldflags -o $out '
               '$in $solibs $libs$postbuilds'),
      pool='link_pool')
    master_ninja.rule(
      'preprocess_infoplist',
      description='PREPROCESS INFOPLIST $out',
      command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
               'plutil -convert xml1 $out $out'))
    master_ninja.rule(
      'copy_infoplist',
      description='COPY INFOPLIST $in',
      command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
    master_ninja.rule(
      'merge_infoplist',
      description='MERGE INFOPLISTS $in',
      command='$env ./gyp-mac-tool merge-info-plist $out $in')
    master_ninja.rule(
      'compile_xcassets',
      description='COMPILE XCASSETS $in',
      command='$env ./gyp-mac-tool compile-xcassets $keys $in')
    master_ninja.rule(
      'mac_tool',
      description='MACTOOL $mactool_cmd $in',
      command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
    master_ninja.rule(
      'package_framework',
      description='PACKAGE FRAMEWORK $out, POSTBUILDS',
      command='./gyp-mac-tool package-framework $out $version$postbuilds '
              '&& touch $out')
  if flavor == 'win':
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='%s gyp-win-tool stamp $out' % sys.executable)
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
  else:
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='${postbuilds}touch $out')
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='rm -rf $out && cp -af $in $out')
  master_ninja.newline()

  # Per-target subninja generation.
  all_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list,
                                        target_dicts,
                                        os.path.normpath(build_file)):
      all_targets.add(target)
  all_outputs = set()

  # target_outputs is a map from qualified target name to a Target object.
  target_outputs = {}
  # target_short_names is a map from target short name to a list of Target
  # objects.
  target_short_names = {}

  # short name of targets that were skipped because they didn't contain anything
  # interesting.
  # NOTE: there may be overlap between this an non_empty_target_names.
  empty_target_names = set()

  # Set of non-empty short target names.
  # NOTE: there may be overlap between this an empty_target_names.
  non_empty_target_names = set()

  for qualified_target in target_list:
    # qualified_target is like: third_party/icu/icu.gyp:icui18n#target
    build_file, name, toolset = \
        gyp.common.ParseQualifiedTarget(qualified_target)

    this_make_global_settings = data[build_file].get('make_global_settings', [])
    assert make_global_settings == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets. %s vs. %s" %
        (this_make_global_settings, make_global_settings))

    spec = target_dicts[qualified_target]
    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)

    # If build_file is a symlink, we must not follow it because there's a chance
    # it could point to a path above toplevel_dir, and we cannot correctly deal
    # with that case at the moment.
    build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
                                         False)

    qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
                                                           toolset)
    hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()

    base_path = os.path.dirname(build_file)
    obj = 'obj'
    if toolset != 'target':
      obj += '.' + toolset
    output_file = os.path.join(obj, base_path, name + '.ninja')

    ninja_output = StringIO()
    writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
                         ninja_output,
                         toplevel_build, output_file,
                         flavor, toplevel_dir=options.toplevel_dir)

    target = writer.WriteSpec(spec, config_name, generator_flags)

    if ninja_output.tell() > 0:
      # Only create files for ninja files that actually have contents.
      with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
        ninja_file.write(ninja_output.getvalue())
      ninja_output.close()
      master_ninja.subninja(output_file)

    if target:
      if name != target.FinalOutput() and spec['toolset'] == 'target':
        target_short_names.setdefault(name, []).append(target)
      target_outputs[qualified_target] = target
      if qualified_target in all_targets:
        all_outputs.add(target.FinalOutput())
      non_empty_target_names.add(name)
    else:
      empty_target_names.add(name)

  if target_short_names:
    # Write a short name to build this target.  This benefits both the
    # "build chrome" case as well as the gyp tests, which expect to be
    # able to run actions and build libraries by their short name.
    master_ninja.newline()
    master_ninja.comment('Short names for targets.')
    for short_name in target_short_names:
      master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
                                               target_short_names[short_name]])

  # Write phony targets for any empty targets that weren't written yet. As
  # short names are not necessarily unique only do this for short names that
  # haven't already been output for another target.
  empty_target_names = empty_target_names - non_empty_target_names
  if empty_target_names:
    master_ninja.newline()
    master_ninja.comment('Empty targets (output for completeness).')
    for name in sorted(empty_target_names):
      master_ninja.build(name, 'phony')

  if all_outputs:
    master_ninja.newline()
    master_ninja.build('all', 'phony', list(all_outputs))
    master_ninja.default(generator_flags.get('default_target', 'all'))

  master_ninja_file.close()
def PerformBuild(data, configurations, params):
    """Run ninja for each configuration in its own out/<config> directory.

    Called by gyp when a build is requested after generation.  Raises
    subprocess.CalledProcessError if any ninja invocation fails, aborting
    the remaining configurations.
    """
    options = params['options']
    for config in configurations:
        # Build directories follow the layout produced by GenerateOutputForConfig.
        builddir = os.path.join(options.toplevel_dir, 'out', config)
        arguments = ['ninja', '-C', builddir]
        print 'Building [%s]: %s' % (config, arguments)
        subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
    """Multiprocessing worker wrapper: unpack one argument tuple and generate.

    Workers ignore SIGINT so that a Ctrl-C is delivered only to the parent,
    which then terminates the whole pool cleanly.
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    target_list, target_dicts, data, params, config_name = arglist
    GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
    """Generator entry point: emit ninja files for the requested configs.

    Honors the 'config' generator flag to generate a single configuration;
    otherwise generates every configuration, optionally in parallel worker
    processes when params['parallel'] is set.
    """
    # Update target_dicts for iOS device builds.
    target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
        target_dicts)

    user_config = params.get('generator_flags', {}).get('config', None)
    if gyp.common.GetFlavor(params) == 'win':
        # MSVS-style target sharding and large-PDB shims only apply on Windows.
        target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
        target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
            target_list, target_dicts, generator_default_variables)

    if user_config:
        GenerateOutputForConfig(target_list, target_dicts, data, params,
                                user_config)
    else:
        # All targets are assumed to share the same set of configurations.
        config_names = target_dicts[target_list[0]]['configurations'].keys()
        if params['parallel']:
            try:
                pool = multiprocessing.Pool(len(config_names))
                arglists = []
                for config_name in config_names:
                    arglists.append(
                        (target_list, target_dicts, data, params, config_name))
                pool.map(CallGenerateOutputForConfig, arglists)
            except KeyboardInterrupt, e:
                # Kill the workers too; otherwise they linger after Ctrl-C.
                pool.terminate()
                raise e
        else:
            for config_name in config_names:
                GenerateOutputForConfig(target_list, target_dicts, data, params,
                                        config_name)
| gpl-3.0 |
SCSSG/Odoo-SCS | addons/mail/report/__init__.py | 438 | 1057 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mrnamingo/vix4-34-enigma2-bcm | lib/python/Plugins/SystemPlugins/SABnzbdSetup/plugin.py | 82 | 7222 | from boxbranding import getMachineBrand, getMachineName
import time
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.Console import Console
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.ActionMap import ActionMap
from Tools.Directories import fileExists
class SABnzbdSetupScreen(Screen):
    """Enigma2 screen to install/remove the sabnzbd package and control the
    SABnzbd service (start/stop and init-script autostart).

    Buttons: red = remove package, green = start/stop service,
    yellow = toggle autostart.  Status labels are refreshed from the
    process list and the presence of the /etc/rc2.d symlink.
    """
    # NOTE(review): the skin's title attribute says "Samba Setup" — it looks
    # copy-pasted from the Samba plugin; the on-screen title is set to
    # "SABnzbd Setup" in __init__ via Screen.setTitle, so only the skin
    # attribute is stale.
    skin = """
<screen position="center,center" size="560,310" title="Samba Setup">
<widget name="lab1" position="20,90" size="150,30" font="Regular;20" valign="center" transparent="0"/>
<widget name="labactive" position="180,90" size="250,30" font="Regular;20" valign="center" transparent="0"/>
<widget name="lab2" position="20,160" size="150,30" font="Regular;20" valign="center" transparent="0"/>
<widget name="labstop" position="180,160" size="100,30" font="Regular;20" valign="center" halign="center" backgroundColor="red"/>
<widget name="labrun" position="180,160" size="100,30" zPosition="1" font="Regular;20" valign="center" halign="center" backgroundColor="green"/>
<ePixmap pixmap="buttons/red.png" position="0,260" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,260" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,260" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,260" size="140,40" alphatest="on" />
<widget name="key_red" position="0,260" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget name="key_green" position="140,260" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="key_yellow" position="280,260" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget name="key_blue" position="420,260" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
</screen>"""

    def __init__(self, session):
        Screen.__init__(self, session)
        Screen.setTitle(self, _("SABnzbd Setup"))
        # Reuse the generic network-service skin name for consistent look.
        self.skinName = "NetworkServiceSetup"
        self.onChangedEntry = [ ]
        self['lab1'] = Label(_("Autostart:"))
        self['labactive'] = Label(_(_("Disabled")))
        self['lab2'] = Label(_("Current Status:"))
        self['labstop'] = Label(_("Stopped"))
        self['labrun'] = Label(_("Running"))
        self['key_red'] = Label(_("Remove Service"))
        self['key_green'] = Label(_("Start"))
        self['key_yellow'] = Label(_("Autostart"))
        self['key_blue'] = Label()
        self['status_summary'] = StaticText()
        self['autostartstatus_summary'] = StaticText()
        self.Console = Console()
        # Cached service state, refreshed by updateService().
        self.my_sabnzbd_active = False
        self.my_sabnzbd_run = False
        self['actions'] = ActionMap(['WizardActions', 'ColorActions'], {'ok': self.close, 'back': self.close, 'red': self.UninstallCheck, 'green': self.SABnzbdStartStop, 'yellow': self.activateSABnzbd})
        self.service_name = 'sabnzbd'
        # Defer the opkg check until the layout exists so dialogs can open.
        self.onLayoutFinish.append(self.InstallCheck)

    def InstallCheck(self):
        """Query opkg for the package; offer installation if missing."""
        self.Console.ePopen('/usr/bin/opkg list_installed ' + self.service_name, self.InstalldataAvail)

    def InstalldataAvail(self, str, retval, extra_args):
        # Empty opkg output means the package is not installed.
        if not str:
            restartbox = self.session.openWithCallback(self.InstallPackage,MessageBox,_('Your %s %s will be restarted after the installation of service.\n\nDo you want to install now ?') % (getMachineBrand(), getMachineName()), MessageBox.TYPE_YESNO)
            restartbox.setTitle(_('Ready to install "%s" ?') % self.service_name)
        else:
            self.updateService()

    def InstallPackage(self, val):
        """MessageBox callback: install on yes, close the screen on no."""
        if val:
            self.doInstall(self.installComplete, self.service_name)
        else:
            self.close()

    def doInstall(self, callback, pkgname):
        """Run 'opkg install' asynchronously, showing a wait dialog."""
        self["actions"].setEnabled(False)
        self.message = self.session.open(MessageBox,_("please wait..."), MessageBox.TYPE_INFO)
        self.message.setTitle(_('Installing Service'))
        self.Console.ePopen('/usr/bin/opkg install ' + pkgname + ' sync', callback)

    def installComplete(self,result = None, retval = None, extra_args = None):
        # Restart the GUI so the freshly installed service is picked up.
        self["actions"].setEnabled(True)
        from Screens.Standby import TryQuitMainloop
        self.session.open(TryQuitMainloop, 2)

    def UninstallCheck(self):
        """Query opkg for the package; offer removal if installed."""
        self.Console.ePopen('/usr/bin/opkg list_installed ' + self.service_name, self.UninstalldataAvail)

    def UninstalldataAvail(self, str, retval, extra_args):
        # Non-empty opkg output means the package is installed.
        if str:
            restartbox = self.session.openWithCallback(self.RemovePackage,MessageBox,_('Your %s %s will be restarted after the removal of service\nDo you want to remove now ?') % (getMachineBrand(), getMachineName()), MessageBox.TYPE_YESNO)
            restartbox.setTitle(_('Ready to remove "%s" ?') % self.service_name)
        else:
            self.updateService()

    def RemovePackage(self, val):
        """MessageBox callback: remove the package if confirmed."""
        if val:
            self.doRemove(self.removeComplete, self.service_name)

    def doRemove(self, callback, pkgname):
        """Run 'opkg remove' asynchronously, showing a wait dialog."""
        self["actions"].setEnabled(False)
        self.message = self.session.open(MessageBox,_("please wait..."), MessageBox.TYPE_INFO)
        self.message.setTitle(_('Removing Service'))
        self.Console.ePopen('/usr/bin/opkg remove ' + pkgname + ' --force-remove --autoremove sync', callback)

    def removeComplete(self,result = None, retval = None, extra_args = None):
        # Restart the GUI after removal, mirroring installComplete.
        self["actions"].setEnabled(True)
        from Screens.Standby import TryQuitMainloop
        self.session.open(TryQuitMainloop, 2)

    def createSummary(self):
        # LCD/front-display summary screen for this setup page.
        from Screens.NetworkSetup import NetworkServicesSummary
        return NetworkServicesSummary

    def SABnzbdStartStop(self):
        """Green button: toggle the running state via the init script."""
        if not self.my_sabnzbd_run:
            self.Console.ePopen('/etc/init.d/sabnzbd start')
            # Give the daemon a moment to come up before re-reading state.
            time.sleep(3)
            self.updateService()
        elif self.my_sabnzbd_run:
            self.Console.ePopen('/etc/init.d/sabnzbd stop')
            time.sleep(3)
            self.updateService()

    def activateSABnzbd(self):
        """Yellow button: toggle autostart by (un)registering the rc symlink."""
        if fileExists('/etc/rc2.d/S20sabnzbd'):
            self.Console.ePopen('update-rc.d -f sabnzbd remove')
        else:
            self.Console.ePopen('update-rc.d -f sabnzbd defaults')
        time.sleep(3)
        self.updateService()

    def updateService(self,result = None, retval = None, extra_args = None):
        """Refresh labels from the process list and the rc2.d symlink."""
        import process
        p = process.ProcessList()
        sabnzbd_process = str(p.named('SABnzbd.py')).strip('[]')
        self['labrun'].hide()
        self['labstop'].hide()
        self['labactive'].setText(_("Disabled"))
        self.my_sabnzbd_active = False
        self.my_sabnzbd_run = False
        if fileExists('/etc/rc2.d/S20sabnzbd'):
            self['labactive'].setText(_("Enabled"))
            self['labactive'].show()
            self.my_sabnzbd_active = True
        if sabnzbd_process:
            self.my_sabnzbd_run = True
        if self.my_sabnzbd_run:
            self['labstop'].hide()
            self['labactive'].show()
            self['labrun'].show()
            self['key_green'].setText(_("Stop"))
            status_summary= self['lab2'].text + ' ' + self['labrun'].text
        else:
            self['labrun'].hide()
            self['labstop'].show()
            self['labactive'].show()
            self['key_green'].setText(_("Start"))
            status_summary= self['lab2'].text + ' ' + self['labstop'].text
        title = _("SABnzbd Setup")
        autostartstatus_summary = self['lab1'].text + ' ' + self['labactive'].text
        # Notify any attached summary screens of the new state.
        for cb in self.onChangedEntry:
            cb(title, status_summary, autostartstatus_summary)
def Plugins(**kwargs):
    """Enigma2 plugin entry point; this module registers no plugin descriptors."""
    return []
| gpl-2.0 |
jonathanlurie/timelapseComposer | lib/python/gooey/_tmp/responding_to_error.py | 9 | 1594 | '''
Created on Dec 21, 2013
@author: Chris
'''
import sys
import hashlib
from time import time as _time
from time import sleep as _sleep
from gooey import Gooey
from gooey import GooeyParser
def main():
    """Demo entry point: builds a GooeyParser with a FileChooser widget and a
    mutually-exclusive verbosity group, then runs a countdown loop.
    """
    desc = "Example application to show Gooey's various widgets"
    my_cool_parser = GooeyParser(description=desc)
    my_cool_parser.add_argument("Example", help="fill ", widget="FileChooser") # positional
    verbosity = my_cool_parser.add_mutually_exclusive_group()
    verbosity.add_argument('-t', '--verbozze', dest='verbose', action="store_true", help="Show more details")
    verbosity.add_argument('-q', '--quiet', dest='quiet', action="store_true", help="Only output on error")
    print my_cool_parser._actions
    print 'inside of main(), my_cool_parser =', my_cool_parser

    args = my_cool_parser.parse_args()
    print sys.argv
    # NOTE(review): the parser above never defines 'countdown' or 'showtime',
    # so the attribute accesses below raise AttributeError at runtime —
    # it looks like arguments were removed without updating this demo loop;
    # confirm against the other Gooey examples.
    print args.countdown
    print args.showtime

    start_time = _time()
    print 'Counting down from %s' % args.countdown
    while _time() - start_time < args.countdown:
        if args.showtime:
            print 'printing message at: %s' % _time()
        else:
            # Obfuscate the timestamp so each line still differs visibly.
            print 'printing message at: %s' % hashlib.md5(str(_time())).hexdigest()
        _sleep(.5)
    print 'Finished running the program. Byeeeeesss!'
def here_is_smore():
    """Placeholder with no behavior, kept to illustrate multiple defs."""
    return None
if __name__ == '__main__':
    # Echo the raw CLI arguments before handing control to main().
    print sys.argv
    main()
# import inspect
# import dis
# # print dir(main.__code__)
# # for i in dir(main.__code__):
# # print i, getattr(main.__code__, i)
# print dis.dis(main.__code__)
# # for i in inspect.getmembers(main):
# # print i
| mit |
tcstewar/telluride2014 | tank/tankudp.py | 1 | 2078 | import time
import socket
import nengo
class TankUDP(nengo.Node):
    """Nengo node that drives a tank robot over UDP and reads back sensors.

    Node input (size_in=3): [left_motor, right_motor, gripper], motors in
    roughly [-1, 1] with a small dead zone; gripper closes when > 0.5.
    Node output (size_out=4): [prox_right, prox_center, prox_left, grip],
    with proximities scaled into [0, 1] (1 when the raw reading is 0 —
    presumably "closest"; confirm against the robot firmware).
    """
    def __init__(self, address, port=8889, period=0.1):
        # Robot endpoint; commands are rate-limited to one per `period` s.
        self.target = (address, port)
        self.period = period
        # Last decoded sensor packet, returned between updates.
        self.data = [0,0,0,0]
        self.last_time = None
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Short timeout: a missed reply must not stall the simulation step.
        self.socket.settimeout(0.01)
        super(TankUDP, self).__init__(output=self.send, size_in=3,
                                      size_out=4)
    def send(self, t, x):
        """Per-step callback: throttle, encode motors, send, poll sensors."""
        now = time.time()
        if self.last_time is None or now > self.last_time + self.period:
            dead_zone = 0.05
            # Motor byte encoding: 0..120 forward, 136+ reverse — assumed
            # to match the tank firmware's wire protocol; TODO confirm.
            if -dead_zone < x[0] < dead_zone:
                left = 0
            elif x[0] >= 0:
                left = int(x[0]*120)
            else:
                left = int((-x[0])*120)+136
            if -dead_zone < x[1] < dead_zone:
                right = 0
            elif x[1] >= 0:
                right = int(x[1]*128)
            else:
                right = int((-x[1])*120)+136
            grip = 'close' if x[2] > 0.5 else 'open'

            msg = "%d,%d,%s" % (left, right, grip)
            print 'send to tank', msg
            self.socket.sendto(msg, self.target)

            try:
                data = self.socket.recv(2048)
                data = data.strip().split(',')
                # Raw proximity readings are clamped to [0, max_sensor] and
                # inverted so 0 distance maps to 1.0 output.
                max_sensor = 25.0
                if len(data) == 5:
                    p_r, p_c, p_l = float(data[1]), float(data[2]), float(data[3])
                    print 1, p_r, p_c, p_l
                    p_r = (max_sensor - min(max(p_r, 0), max_sensor))/max_sensor
                    p_c = (max_sensor - min(max(p_c, 0), max_sensor))/max_sensor
                    p_l = (max_sensor - min(max(p_l, 0), max_sensor))/max_sensor
                    print 2, p_r, p_c, p_l
                    grip = 1 if data[4]=='catch' else 0
                    self.data = [p_r, p_c, p_l, grip]
            except socket.error:
                # Lost/late reply: keep the previous sensor values.
                print 'missed packet'

            self.last_time = now
        return self.data
| gpl-3.0 |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/answers/browser/tests/test_question.py | 1 | 6315 | # Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for the question module."""
__metaclass__ = type
__all__ = []
from zope.security.proxy import removeSecurityProxy
from lp.answers.browser.question import QuestionTargetWidget
from lp.answers.interfaces.question import IQuestion
from lp.answers.publisher import AnswersLayer
from lp.app.enums import ServiceUsage
from lp.services.webapp.servers import LaunchpadTestRequest
from lp.testing import (
login_person,
logout,
person_logged_in,
TestCaseWithFactory,
)
from lp.testing.layers import DatabaseFunctionalLayer
from lp.testing.views import create_initialized_view
class TestQuestionAddView(TestCaseWithFactory):
    """Verify the behavior of the QuestionAddView."""

    layer = DatabaseFunctionalLayer

    def setUp(self):
        super(TestQuestionAddView, self).setUp()
        self.question_target = self.factory.makeProduct()
        self.user = self.factory.makePerson()
        login_person(self.user)

    def getSearchForm(self, title, language='en'):
        """Build minimal POST data for the +addquestion search step."""
        return {
            'field.title': title,
            'field.language': language,
            'field.actions.continue': 'Continue',
            }

    def test_question_title_within_max_display_width(self):
        # Titles (summary in the view) less than 250 characters are accepted.
        # '123456789 ' * 10 is exactly 100 characters.
        form = self.getSearchForm('123456789 ' * 10)
        view = create_initialized_view(
            self.question_target, name='+addquestion', layer=AnswersLayer,
            form=form, principal=self.user)
        self.assertEqual([], view.errors)

    def test_question_title_exceeds_max_display_width(self):
        # Titles (summary in the view) cannot exceed 250 characters.
        # '123456789 ' * 26 is 260 characters, just over the limit.
        form = self.getSearchForm('123456789 ' * 26)
        view = create_initialized_view(
            self.question_target, name='+addquestion', layer=AnswersLayer,
            form=form, principal=self.user)
        self.assertEqual(1, len(view.errors))
        self.assertEqual(
            'The summary cannot exceed 250 characters.', view.errors[0])

    def test_context_uses_answers(self):
        # If a target doesn't use answers, it doesn't provide the form.
        #logout()
        owner = removeSecurityProxy(self.question_target).owner
        with person_logged_in(owner):
            self.question_target.answers_usage = ServiceUsage.NOT_APPLICABLE
        login_person(self.user)
        view = create_initialized_view(
            self.question_target, name='+addquestion', layer=AnswersLayer,
            principal=self.user)
        self.assertFalse(view.context_uses_answers)
        contents = view.render()
        msg = "<strong>does not use</strong> Launchpad as its answer forum"
        self.assertIn(msg, contents)
class QuestionEditViewTestCase(TestCaseWithFactory):
    """Verify the behavior of the QuestionEditView."""

    layer = DatabaseFunctionalLayer

    def getForm(self, question):
        """Build a complete +edit POST mirroring the question's current state.

        Tests mutate individual fields before submitting.
        """
        if question.assignee is None:
            assignee = ''
        else:
            assignee = question.assignee.name
        return {
            'field.title': question.title,
            'field.description': question.description,
            'field.language': question.language.code,
            'field.assignee': assignee,
            'field.target': 'product',
            'field.target.distribution': '',
            'field.target.package': '',
            'field.target.product': question.target.name,
            'field.whiteboard': question.whiteboard,
            'field.actions.change': 'Change',
            }

    def test_retarget_with_other_changed(self):
        # Retargeting must be the last change made to the question
        # to ensure that user permission do not change while there
        # are more changes to make.
        target = self.factory.makeProduct()
        question = self.factory.makeQuestion(target=target)
        other_target = self.factory.makeProduct()
        login_person(target.owner)
        form = self.getForm(question)
        # Change both the whiteboard and the target in one submission.
        form['field.whiteboard'] = 'comment'
        form['field.target.product'] = other_target.name
        view = create_initialized_view(
            question, name='+edit', layer=AnswersLayer, form=form)
        self.assertEqual([], view.errors)
        self.assertEqual(other_target, question.target)
        self.assertEqual('comment', question.whiteboard)
class QuestionTargetWidgetTestCase(TestCaseWithFactory):
    """Test that QuestionTargetWidgetTestCase behaves as expected."""

    layer = DatabaseFunctionalLayer

    def getWidget(self, question):
        """Return a QuestionTargetWidget bound to the question's target field."""
        field = IQuestion['target']
        bound_field = field.bind(question)
        request = LaunchpadTestRequest()
        return QuestionTargetWidget(bound_field, request)

    def test_getDistributionVocabulary_with_product_question(self):
        # The vocabulary does not contain distros that do not use
        # launchpad to track answers.
        distribution = self.factory.makeDistribution()
        product = self.factory.makeProduct()
        question = self.factory.makeQuestion(target=product)
        target_widget = self.getWidget(question)
        vocabulary = target_widget.getDistributionVocabulary()
        # A product question has no distribution context.
        self.assertEqual(None, vocabulary.distribution)
        self.assertFalse(
            distribution in vocabulary,
            "Vocabulary contains distros that do not use Launchpad Answers.")

    def test_getDistributionVocabulary_with_distribution_question(self):
        # The vocabulary does not contain distros that do not use
        # launchpad to track answers.
        distribution = self.factory.makeDistribution()
        other_distribution = self.factory.makeDistribution()
        question = self.factory.makeQuestion(target=distribution)
        target_widget = self.getWidget(question)
        vocabulary = target_widget.getDistributionVocabulary()
        # The question's own distribution is always present.
        self.assertEqual(distribution, vocabulary.distribution)
        self.assertTrue(
            distribution in vocabulary,
            "Vocabulary missing context distribution.")
        self.assertFalse(
            other_distribution in vocabulary,
            "Vocabulary contains distros that do not use Launchpad Answers.")
| agpl-3.0 |
eykd/syml | syml/parser.py | 1 | 3207 | from parsimonious import NodeVisitor, Grammar, VisitationError
from . import grammars
from . import nodes
from .exceptions import OutOfContextNodeError
class TextOnlySymlParser(NodeVisitor):
    """Parsimonious visitor turning text-only SYML source into a node tree."""

    grammar = Grammar(grammars.text_only_syml_grammar)

    def reduce_children(self, children):
        """Drop Nones; return None, the lone survivor, or the surviving list."""
        kept = [child for child in children if child is not None]
        if not kept:
            return None
        return kept[0] if len(kept) == 1 else kept

    def visit_blank(self, node, children):
        # Blank lines contribute nothing to the tree.
        return None

    def visit_line(self, node, children):
        indent, value, _ = children
        if value is not None:
            # Record nesting depth so visit_lines can place the node.
            value.level = indent
        return value

    def generic_visit(self, node, children):
        return self.reduce_children(children)

    def get_text(self, node, children):
        return nodes.TextLeafNode(node, node.text)

    def visit_comment(self, node, children):
        _, text = children
        return nodes.Comment(text)

    visit_text = get_text
    visit_key = get_text

    def visit_indent(self, node, children):
        # A tab counts as four spaces; depth is measured in characters.
        return len(node.text.replace('\t', '    ').strip('\n'))

    def visit_key_value(self, node, children):
        section, _, value = children
        section.incorporate_node(value)
        return section

    def visit_section(self, node, children):
        key, _ = children
        return nodes.KeyValue(node, key)

    def visit_list_item(self, node, children):
        _, _, value = children
        item = nodes.ListItem(node)
        item.incorporate_node(value)
        return item

    def visit_lines(self, node, children):
        root = nodes.Root(node)
        current = root
        reduced = self.reduce_children(children)
        if isinstance(reduced, nodes.LeafNode):
            # Normalize a single leaf to a one-element list.
            reduced = [reduced]
        for child in reduced:
            if isinstance(child, nodes.Comment):
                current.comments.append(child)
            else:
                current = current.incorporate_node(child)
        return root

    def parse(self, *args, **kwargs):
        try:
            return super().parse(*args, **kwargs)
        except VisitationError as e:
            # Parsimonious swallows errors raised inside `visit_` handlers
            # and wraps them in VisitationError cruft; unwrap our own
            # OutOfContextNodeError so callers see the original message.
            if e.args[0].startswith('OutOfContextNodeError'):
                message = e.args[0].split('\n\n\n')[0].split(':', 1)[1]
                raise OutOfContextNodeError(message)
            else:
                raise  # pragma: no cover
class BooleanSymlParser(TextOnlySymlParser):
    """Syml with support for YAML-like boolean values.

    Inherits all text handling from TextOnlySymlParser and adds raw-value
    leaves for the grammar's truthy/falsey literals.
    """
    grammar = Grammar(grammars.boolean_syml_grammar)

    def visit_truthy(self, node, children):
        # Keep the literal source text, but carry the Python value True.
        return nodes.RawValueLeafNode(node, node.text, value=True)

    def visit_falsey(self, node, children):
        # Keep the literal source text, but carry the Python value False.
        return nodes.RawValueLeafNode(node, node.text, value=False)
def parse(source_syml, filename='', raw=True, booleans=False):
    """Parse SYML source text and return it as plain Python data."""
    parser_cls = BooleanSymlParser if booleans else TextOnlySymlParser
    return parser_cls().parse(source_syml).as_data(filename, raw=raw)
| mit |
remenska/rootpy | rootpy/plotting/contrib/plot_corrcoef_matrix.py | 5 | 12192 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
from ...extern.six.moves import range
from ...extern.six import string_types
__all__ = [
'plot_corrcoef_matrix',
'corrcoef',
'cov',
]
def plot_corrcoef_matrix(matrix, names=None,
                         cmap=None, cmap_text=None,
                         fontsize=12, grid=False,
                         axes=None):
    """
    This function will draw a lower-triangular correlation matrix

    Parameters
    ----------

    matrix : 2-dimensional numpy array/matrix
        A correlation coefficient matrix

    names : list of strings, optional (default=None)
        List of the parameter names corresponding to the rows in ``matrix``.

    cmap : matplotlib color map, optional (default=None)
        Color map used to color the matrix cells.

    cmap_text : matplotlib color map, optional (default=None)
        Color map used to color the cell value text. If None, then
        all values will be black.

    fontsize : int, optional (default=12)
        Font size of parameter name and correlation value text.

    grid : bool, optional (default=False)
        If True, then draw dashed grid lines around the matrix elements.

    axes : matplotlib Axes instance, optional (default=None)
        The axes to plot on. If None then use the global current axes.

    Notes
    -----

    NumPy and matplotlib are required

    Examples
    --------

    >>> matrix = corrcoef(data.T, weights=weights)
    >>> plot_corrcoef_matrix(matrix, names)

    """
    import numpy as np
    from matplotlib import pyplot as plt
    from matplotlib import cm

    if axes is None:
        axes = plt.gca()

    matrix = np.asarray(matrix)

    if matrix.ndim != 2:
        raise ValueError("matrix is not a 2-dimensional array or matrix")
    if matrix.shape[0] != matrix.shape[1]:
        raise ValueError("matrix is not square")
    if names is not None and len(names) != matrix.shape[0]:
        raise ValueError("the number of names does not match the number of "
                         "rows/columns in the matrix")

    # mask out the upper triangular matrix
    # NOTE(review): np.asarray returns the input object unchanged when it is
    # already an ndarray, so this writes NaNs into the caller's matrix in
    # place — confirm whether a defensive copy was intended.
    matrix[np.triu_indices(matrix.shape[0])] = np.nan

    if isinstance(cmap_text, string_types):
        cmap_text = cm.get_cmap(cmap_text, 201)
    if cmap is None:
        cmap = cm.get_cmap('jet', 201)
    elif isinstance(cmap, string_types):
        cmap = cm.get_cmap(cmap, 201)
    # make NaN pixels white
    cmap.set_bad('w')

    axes.imshow(matrix, interpolation='nearest',
                cmap=cmap, origin='upper',
                vmin=-1, vmax=1)

    axes.set_frame_on(False)
    plt.setp(axes.get_yticklabels(), visible=False)
    plt.setp(axes.get_yticklines(), visible=False)
    plt.setp(axes.get_xticklabels(), visible=False)
    plt.setp(axes.get_xticklines(), visible=False)

    if grid:
        # draw grid lines
        for slot in range(1, matrix.shape[0] - 1):
            # vertical
            axes.plot((slot - 0.5, slot - 0.5),
                      (slot - 0.5, matrix.shape[0] - 0.5), 'k:', linewidth=1)
            # horizontal
            axes.plot((-0.5, slot + 0.5),
                      (slot + 0.5, slot + 0.5), 'k:', linewidth=1)
        if names is not None:
            for slot in range(1, matrix.shape[0]):
                # diagonal
                axes.plot((slot - 0.5, slot + 1.5),
                          (slot - 0.5, slot - 2.5), 'k:', linewidth=1)

    # label cell values
    for row, col in zip(*np.tril_indices(matrix.shape[0], k=-1)):
        value = matrix[row][col]
        if cmap_text is not None:
            # Map [-1, 1] onto the [0, 1] colormap domain.
            color = cmap_text((value + 1.) / 2.)
        else:
            color = 'black'
        axes.text(
            col, row,
            "{0:d}%".format(int(value * 100)),
            color=color,
            ha='center', va='center',
            fontsize=fontsize)

    if names is not None:
        # write parameter names along the diagonal
        for i, name in enumerate(names):
            axes.annotate(
                name, (i, i),
                rotation=45,
                ha='left', va='bottom',
                transform=axes.transData,
                fontsize=fontsize)
def cov(m, y=None, rowvar=1, bias=0, ddof=None, weights=None, repeat_weights=0):
    """
    Estimate a covariance matrix from data, with optional observation weights.

    Parameters
    ----------
    m : array_like
        A 1-D or 2-D array of variables and observations.  By default each
        row is a variable and each column an observation (see `rowvar`).
    y : array_like, optional
        An additional set of variables and observations, stacked onto `m`.
    rowvar : int, optional
        If non-zero (default), each row represents a variable; otherwise
        each column does.
    bias : int, optional
        Default (0) normalizes by ``N - 1`` (unbiased); 1 normalizes by
        ``N``.  Overridden by `ddof` when that is not None.
    ddof : int, optional
        Explicit delta degrees of freedom; overrides `bias`.
    weights : array-like, optional
        A 1-D array of weights, one per observation.
    repeat_weights : int, optional
        When 1, `weights` are integer occurrence counts, so the total
        sample size is known and the usual ``N - ddof`` normalization
        applies.  Otherwise weights are normalized to unit sum and the
        ``1 - sum(w**2)`` normalization is used.

    Returns
    -------
    out : ndarray
        The covariance matrix of the variables (empty input is returned
        unchanged; a single variable yields a scalar).

    See Also
    --------
    corrcoef : Normalized covariance matrix

    Examples
    --------
    >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
    >>> np.allclose(cov(x), [[1., -1.], [-1., 1.]])
    True
    """
    import numpy as np

    if ddof is not None and ddof != int(ddof):
        raise ValueError(
            "ddof must be integer")

    X = np.array(m, ndmin=2, dtype=float)
    if X.size == 0:
        # Preserve empty input unchanged.
        return np.array(m)
    if X.shape[0] == 1:
        # A single row is always one variable of observations.
        rowvar = 1

    if rowvar:
        axis = 0
        tup = (slice(None), np.newaxis)
    else:
        axis = 1
        tup = (np.newaxis, slice(None))

    if y is not None:
        y = np.array(y, copy=False, ndmin=2, dtype=float)
        X = np.concatenate((X, y), axis)

    if ddof is None:
        ddof = 1 if bias == 0 else 0

    if weights is not None:
        weights = np.array(weights, dtype=float)
        wsum = weights.sum()
        if wsum <= 0:
            raise ValueError(
                "sum of weights is non-positive")
        # Center each variable about its weighted mean.
        X -= np.average(X, axis=1 - axis, weights=weights)[tup]

        if repeat_weights:
            # Weights are occurrence counts: total sample size is known,
            # so both biased and unbiased estimates are defined.
            fact = wsum - ddof
        else:
            # Normalize to unit sum; only the biased weighted estimator
            # is defined for non-integral weights.
            weights = weights / wsum
            fact = 1. - (weights ** 2).sum()
    else:
        weights = 1
        X -= X.mean(axis=1 - axis)[tup]
        n_obs = X.shape[1] if rowvar else X.shape[0]
        fact = float(n_obs - ddof)

    if rowvar:
        return (np.dot(weights * X, X.T.conj()) / fact).squeeze()
    return (np.dot(weights * X.T, X.conj()) / fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None, weights=None,
             repeat_weights=0):
    """
    Return correlation coefficients.

    This normalizes the (optionally weighted) covariance matrix:

    .. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }

    so all values lie in [-1, 1].  All parameters are forwarded to `cov`;
    see its documentation for details on `x`, `y`, `rowvar`, `bias`,
    `ddof`, `weights` and `repeat_weights`.

    Returns
    -------
    out : ndarray
        The correlation coefficient matrix of the variables (empty input
        is returned unchanged; a single variable yields 1).

    See Also
    --------
    cov : Covariance matrix
    """
    import numpy as np

    c = cov(x, y, rowvar, bias, ddof, weights, repeat_weights)
    if c.size == 0:
        # Propagate empty input unchanged, mirroring cov().
        return c
    try:
        d = np.diag(c)
    except ValueError:
        # cov() collapsed to a scalar: a single variable is perfectly
        # correlated with itself.
        return 1
    return c / np.sqrt(np.multiply.outer(d, d))
| gpl-3.0 |
wangsix/vmo | vmo/plot.py | 1 | 5141 | """
plot.py
drawing routines for vmo
Copyright (C) 8.20.2014 Cheng-i Wang
This file is part of vmo.
vmo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
vmo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with vmo. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import music21
import pretty_midi
try:
from PIL import Image, ImageDraw, ImageFilter #@UnresolvedImport @UnusedImport
except:
print('pil not loaded - hopefully running in max')
width = 900 * 4
height = 400 * 4
lrs_threshold = 0
def start_draw(_oracle, size=(900*4, 400*4)):
    """Create a blank RGB canvas and render *_oracle* onto it.

    :param _oracle: input vmo object
    :param size: (width, height) of the output image in pixels
    :return: the PIL image produced by draw()
    """
    canvas_w, canvas_h = size
    blank = Image.new('RGB', (canvas_w, canvas_h))
    return draw(_oracle, 0, blank, canvas_w, canvas_h)
def draw(oracle, current_state, image, width=width, height=height):
    """
    Render the oracle's forward transitions and suffix links onto *image*.

    :param oracle: input vmo object (uses its trn, sfx and lrs attributes)
    :param current_state: currently active state (not used while rendering)
    :param image: a PIL image object to draw on
    :param width: width of the drawing area in pixels
    :param height: height of the drawing area in pixels
    :return: the image, downscaled to 900x400 with bilinear filtering
    """
    trn = oracle.trn
    sfx = oracle.sfx
    lrs = oracle.lrs

    n_states = len(sfx)
    # Renamed from `draw` so the pen does not shadow this function.
    pen = ImageDraw.Draw(image)
    for i in range(n_states):
        # x position of the point representing state i
        x_pos = (float(i) / n_states * width) + 0.5 * 1.0 / n_states * width
        # iterate over forward transitions
        for tran in trn[i]:
            if tran == i + 1:
                # straight line for the transition to the immediate next state
                next_x = (float(i + 1) / n_states * width) + 0.5 * 1.0 / n_states * width
                current_x = x_pos + (0.25 / n_states * width)
                pen.line((current_x, height / 2, next_x, height / 2),
                         width=1, fill='white')
            elif lrs[tran] >= lrs_threshold:
                # arc above the baseline for a longer forward jump
                current_x = x_pos
                next_x = (float(tran) / n_states * width) + (0.5 / n_states * width)
                arc_height = (height / 2) + (tran - i) * 0.125
                pen.arc((int(current_x), int(height / 2 - arc_height / 2),
                         int(next_x), int(height / 2 + arc_height / 2)),
                        180, 0, fill='White')
        if sfx[i] is not None and sfx[i] != 0 and lrs[sfx[i]] >= lrs_threshold:
            # arc below the baseline for the suffix link
            current_x = x_pos
            next_x = (float(sfx[i]) / n_states * width) + (0.5 / n_states * width)
            arc_height = (height / 2) - (sfx[i] - i) * 0.125
            pen.arc((int(next_x),
                     int(height / 2 - arc_height / 2),
                     int(current_x),
                     int(height / 2 + arc_height / 2)),
                    0,
                    180,
                    fill='White')
    # BUG FIX: Image.resize() returns a *new* image; the original call
    # discarded the result and returned the full-size image. Assign it so
    # the intended 900x400 thumbnail is actually returned.
    image = image.resize((900, 400), Image.BILINEAR)
    return image
def draw_compror():
    """Placeholder for Compror visualization; always raises."""
    raise NotImplementedError(
        "Compror drawing is under construction, coming soon!")
def get_pattern_mat(oracle, pattern):
    """Build a binary matrix with one row per extracted pattern.

    :param oracle: input vmo object (only its ``n_states`` is read)
    :param pattern: sequence of ``(end_points, length)`` pairs extracted
        from the oracle
    :return: numpy array of shape (len(pattern), oracle.n_states - 1) that
        can be used to visualize the extracted patterns
    """
    mat = np.zeros((len(pattern), oracle.n_states - 1))
    for row, entry in enumerate(pattern):
        length = entry[1]
        for end in entry[0]:
            mat[row, end - length:end - 1] = 1
    return mat
def plot_midi_frame(midi_data, beat_positions, frame_ind):
    """Collect all non-drum notes sounding in one beat window as a chord.

    :param midi_data: object with an ``instruments`` list whose items have
        ``is_drum`` and ``notes`` (presumably a pretty_midi.PrettyMIDI --
        verify against callers)
    :param beat_positions: sequence of beat onset times; needs at least
        ``frame_ind + 2`` entries
    :param frame_ind: index of the beat window [frame_ind, frame_ind + 1)
    :return: a music21.chord.Chord built from the collected pitch names
    """
    beat_start = beat_positions[frame_ind]
    beat_end = beat_positions[frame_ind + 1]
    n_list = []
    for i in midi_data.instruments:
        if not i.is_drum:
            for n in i.notes:
                # A note belongs to the window if it starts inside it, ends
                # inside it, or spans it entirely. (`&` works here because
                # both sides are plain bools.)
                if (n.start >= beat_start) & (n.start < beat_end) \
                        or (n.end >= beat_start) & (n.end < beat_end)\
                        or (n.start <= beat_start) & (n.end > beat_end):
                    note = music21.note.Note(pretty_midi.utilities.note_number_to_name(n.pitch))
                    # NOTE(review): de-duplication relies on
                    # music21.note.Note equality -- confirm equal pitches
                    # actually compare equal here.
                    if not note in n_list:
                        n_list.append(note)
    chord = music21.chord.Chord(n_list)
    return chord
def plot_chroma_as_chord(chroma_frame, n_pitch=3):
    """Turn the *n_pitch* strongest chroma bins into a music21 chord.

    :param chroma_frame: 1-D array of chroma energies
    :param n_pitch: number of top bins to keep (default 3)
    :return: a music21.chord.Chord built from the selected pitch names
        (bin index offset by 60, i.e. bin 0 maps to MIDI note 60)
    """
    strongest = np.argsort(chroma_frame)[-n_pitch:]
    names = [pretty_midi.utilities.note_number_to_name(b + 60)
             for b in strongest]
    return music21.chord.Chord(names)
| gpl-3.0 |
yashLadha/coala | tests/bearlib/aspects/ModuleTest.py | 10 | 1967 | from types import ModuleType
import coalib.bearlib.aspects
import pytest
class aspectsModuleTest:
    """Tests for the wrapped ``coalib.bearlib.aspects`` module object."""

    def test_module(self):
        # check that module is correctly wrapped: it is still a module
        # instance, but of a custom subclass (aspectsModule), not the
        # plain ModuleType.
        assert isinstance(coalib.bearlib.aspects, ModuleType)
        assert type(coalib.bearlib.aspects) is not ModuleType
        assert (type(coalib.bearlib.aspects) is
                coalib.bearlib.aspects.aspectsModule)

    def test__getitem__(self):
        # Item lookup is case-insensitive and accepts partially or fully
        # qualified aspect names.
        # check a leaf aspect
        for aspectname in ['aspectsYEAH', 'spelling.aspectsYEAH',
                           'root.SPELLING.aspectsYEAH']:
            assert (coalib.bearlib.aspects[aspectname] is
                    coalib.bearlib.aspects.Root.Spelling.aspectsYEAH)
        # check a container aspect
        for aspectname in ['Spelling', 'SPELLING', 'ROOT.spelling']:
            assert (coalib.bearlib.aspects[aspectname] is
                    coalib.bearlib.aspects.Root.Spelling)
        # check root aspect
        for aspectname in ['Root', 'root', 'ROOT']:
            assert (coalib.bearlib.aspects[aspectname] is
                    coalib.bearlib.aspects.Root)

    def test__getitem__no_match(self):
        # Unknown names raise LookupError with an exact message.
        for aspectname in ['noaspect', 'NOASPECT', 'Root.aspectsYEAH']:
            with pytest.raises(LookupError) as exc:
                coalib.bearlib.aspects[aspectname]
            exc.match(r"^no aspect named '%s'$" % aspectname)

    def test__getitem__multi_match(self):
        # Ambiguous short names raise LookupError listing every candidate.
        for aspectname in ['Length', 'length', 'LENGTH']:
            with pytest.raises(LookupError) as exc:
                coalib.bearlib.aspects[aspectname]
            exc.match(r"^multiple aspects named '%s'. " % aspectname +
                      r'choose from '
                      r'\[<aspectclass'
                      r" 'Root.Metadata.CommitMessage.Body.Length'>,"
                      r' <aspectclass'
                      r" 'Root.Metadata.CommitMessage.Shortlog.Length'>"
                      r'\]$')
| agpl-3.0 |
an7oine/WinVHS | Cygwin/lib/python2.7/subprocess.py | 4 | 59046 | # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several older modules and functions:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as a text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
check_output(*popenargs, **kwargs):
Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
output = check_output(["ls", "-l", "/dev/null"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() and check_output() will raise CalledProcessError, if the
called process returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen("cmd", mode='r', bufsize)
==>
pipe = Popen("cmd", shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen("cmd", mode='w', bufsize)
==>
pipe = Popen("cmd", shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2("cmd", mode, bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3("cmd", mode, bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4("cmd", mode,
bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
On Unix, os.popen2, os.popen3 and os.popen4 also accept a sequence as
the command to execute, in which case arguments will be passed
directly to the program without shell intervention. This usage can be
replaced as follows:
(child_stdin, child_stdout) = os.popen2(["/bin/ls", "-l"], mode,
bufsize)
==>
p = Popen(["/bin/ls", "-l"], bufsize=bufsize, stdin=PIPE, stdout=PIPE)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
Return code handling translates as follows:
pipe = os.popen("cmd", 'w')
...
rc = pipe.close()
if rc is not None and rc % 256:
print "There were some errors"
==>
process = Popen("cmd", 'w', shell=True, stdin=PIPE)
...
process.stdin.close()
if process.wait() != 0:
print "There were some errors"
Replacing popen2.*
------------------
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
On Unix, popen2 also accepts a sequence as the command to execute, in
which case arguments will be passed directly to the program without
shell intervention. This usage can be replaced as follows:
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize,
mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen2.Popen4 basically works as subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all filedescriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
import sys
mswindows = (sys.platform == "win32")
import os
import types
import traceback
import gc
import signal
import errno
# Exception classes used by this module.
class CalledProcessError(Exception):
    """Raised when a process run by check_call() or check_output()
    returns a non-zero exit status.

    Attributes:
        returncode -- exit status of the child process
        cmd        -- the command that was executed
        output     -- captured output, if any (set by check_output())
    """

    def __init__(self, returncode, cmd, output=None):
        self.returncode = returncode
        self.cmd = cmd
        self.output = output

    def __str__(self):
        return "Command '%s' returned non-zero exit status %d" % (
            self.cmd, self.returncode)
if mswindows:
    # Windows: communicate() uses reader threads, msvcrt converts Win32
    # handles to CRT file descriptors, and _subprocess wraps CreateProcess.
    import threading
    import msvcrt
    import _subprocess

    class STARTUPINFO:
        # Minimal stand-in for the Win32 STARTUPINFO structure handed to
        # CreateProcess(); fields are filled in by _execute_child().
        dwFlags = 0
        hStdInput = None
        hStdOutput = None
        hStdError = None
        wShowWindow = 0

    class pywintypes:
        # Shim so `except pywintypes.error` works without pywin32 installed.
        error = IOError
else:
    # POSIX: select()/poll() drive the pipe multiplexing; fcntl and pickle
    # are imported for use later in the file (outside this chunk).
    import select
    _has_poll = hasattr(select, 'poll')
    import fcntl
    import pickle

    # When select or poll has indicated that the file is writable,
    # we can write up to _PIPE_BUF bytes without risk of blocking.
    # POSIX defines PIPE_BUF as >= 512.
    _PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
# Public API of this module.
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call",
           "check_output", "CalledProcessError"]

if mswindows:
    # Re-export the Windows-only CreateProcess constants so callers can
    # build creationflags/startupinfo without importing _subprocess.
    from _subprocess import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
                             STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
                             STD_ERROR_HANDLE, SW_HIDE,
                             STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)
    __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
                    "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
                    "STD_ERROR_HANDLE", "SW_HIDE",
                    "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
try:
    # Upper bound on file descriptors (used by the close_fds machinery);
    # fall back to a conservative default if sysconf is unavailable.
    MAXFD = os.sysconf("SC_OPEN_MAX")
except:
    MAXFD = 256

# Popen instances abandoned before wait(); reaped by _cleanup().
_active = []
def _cleanup():
    """Poll every abandoned Popen instance and drop the finished ones.

    Instances whose owner dropped them before wait() completed land in the
    module-level _active list; this reaps any that have since terminated.
    """
    for inst in _active[:]:
        if inst._internal_poll(_deadstate=sys.maxint) is None:
            continue
        try:
            _active.remove(inst)
        except ValueError:
            # This can happen if two threads create a new Popen instance.
            # It's harmless that it was already removed, so ignore.
            pass
# Sentinel values for Popen's stdin/stdout/stderr arguments: PIPE requests
# a new pipe to the child; STDOUT (valid for stderr only) redirects the
# child's stderr into the same handle as its stdout.
PIPE = -1
STDOUT = -2
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except (OSError, IOError) as e:
if e.errno == errno.EINTR:
continue
raise
# XXX This function is only used by multiprocessing and the test suite,
# but it's here so that it can be imported when Python is compiled without
# threads.
def _args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
flag_opt_map = {
'debug': 'd',
# 'inspect': 'i',
# 'interactive': 'i',
'optimize': 'O',
'dont_write_bytecode': 'B',
'no_user_site': 's',
'no_site': 'S',
'ignore_environment': 'E',
'verbose': 'v',
'bytes_warning': 'b',
'hash_randomization': 'R',
'py3k_warning': '3',
}
args = []
for flag, opt in flag_opt_map.items():
v = getattr(sys.flags, flag)
if v > 0:
args.append('-' + opt * v)
for opt in sys.warnoptions:
args.append('-W' + opt)
return args
def call(*popenargs, **kwargs):
    """Run command with arguments, wait for it to complete, and return
    its returncode attribute.

    The arguments are the same as for the Popen constructor. Example:

    retcode = call(["ls", "-l"])
    """
    process = Popen(*popenargs, **kwargs)
    return process.wait()
def check_call(*popenargs, **kwargs):
    """Run command with arguments and wait for it to complete.

    Return 0 if the exit code was zero; otherwise raise
    CalledProcessError, which carries the return code in its
    returncode attribute.

    The arguments are the same as for the Popen constructor. Example:

    check_call(["ls", "-l"])
    """
    retcode = call(*popenargs, **kwargs)
    if retcode == 0:
        return 0
    cmd = kwargs.get("args")
    if cmd is None:
        cmd = popenargs[0]
    raise CalledProcessError(retcode, cmd)
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.

    If the exit code was non-zero, raise CalledProcessError with the
    return code in its returncode attribute and the captured output in
    its output attribute.

    The arguments are the same as for the Popen constructor. Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    process = Popen(stdout=PIPE, *popenargs, **kwargs)
    output, unused_err = process.communicate()
    retcode = process.poll()
    if not retcode:
        return output
    cmd = kwargs.get("args")
    if cmd is None:
        cmd = popenargs[0]
    raise CalledProcessError(retcode, cmd, output=output)
def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line string using
    the quoting rules of the MS C runtime:

    1) Arguments are delimited by white space (space or tab).

    2) A string surrounded by double quotation marks is interpreted as a
       single argument, regardless of white space contained within.

    3) A double quotation mark preceded by a backslash is interpreted as
       a literal double quotation mark.

    4) Backslashes are interpreted literally, unless they immediately
       precede a double quotation mark.

    5) If backslashes immediately precede a double quotation mark, every
       pair of backslashes is interpreted as a literal backslash.  If the
       number of backslashes is odd, the last backslash escapes the next
       double quotation mark as described in rule 3.

    See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
    ("Parsing C++ Command-Line Arguments").
    """
    pieces = []
    for arg in seq:
        pending_backslashes = []

        # Separate this argument from the previous one.
        if pieces:
            pieces.append(' ')

        quote = ' ' in arg or '\t' in arg or not arg
        if quote:
            pieces.append('"')

        for ch in arg:
            if ch == '\\':
                # Defer emitting: doubling depends on what follows.
                pending_backslashes.append(ch)
            elif ch == '"':
                # Backslashes before a quote must be doubled, and the
                # quote itself escaped (rules 3 and 5).
                pieces.append('\\' * len(pending_backslashes) * 2)
                pending_backslashes = []
                pieces.append('\\"')
            else:
                # Ordinary character: flush any pending backslashes
                # literally (rule 4), then emit the character.
                if pending_backslashes:
                    pieces.extend(pending_backslashes)
                    pending_backslashes = []
                pieces.append(ch)

        # Trailing backslashes are emitted literally...
        if pending_backslashes:
            pieces.extend(pending_backslashes)
        if quote:
            # ...and doubled again when quoting, so they do not escape
            # the closing quote.
            pieces.extend(pending_backslashes)
            pieces.append('"')
    return ''.join(pieces)
class Popen(object):
    # Set here since __del__ checks it even when __init__ bailed out
    # before assigning any instance attributes.
    _child_created = False  # Set here since __del__ checks it
    def __init__(self, args, bufsize=0, executable=None,
                 stdin=None, stdout=None, stderr=None,
                 preexec_fn=None, close_fds=False, shell=False,
                 cwd=None, env=None, universal_newlines=False,
                 startupinfo=None, creationflags=0):
        """Create new Popen instance.

        Validates platform-specific arguments, creates the pipe handles
        requested by stdin/stdout/stderr, launches the child via
        _execute_child(), and wraps the parent-side pipe ends in file
        objects (self.stdin/self.stdout/self.stderr).
        """
        _cleanup()  # reap any abandoned, already-finished children first

        if not isinstance(bufsize, (int, long)):
            raise TypeError("bufsize must be an integer")

        if mswindows:
            # preexec_fn and close_fds-with-redirection are POSIX-only.
            if preexec_fn is not None:
                raise ValueError("preexec_fn is not supported on Windows "
                                 "platforms")
            if close_fds and (stdin is not None or stdout is not None or
                              stderr is not None):
                raise ValueError("close_fds is not supported on Windows "
                                 "platforms if you redirect stdin/stdout/stderr")
        else:
            # POSIX: startupinfo and creationflags are Windows-only.
            if startupinfo is not None:
                raise ValueError("startupinfo is only supported on Windows "
                                 "platforms")
            if creationflags != 0:
                raise ValueError("creationflags is only supported on Windows "
                                 "platforms")

        self.stdin = None
        self.stdout = None
        self.stderr = None
        self.pid = None
        self.returncode = None
        self.universal_newlines = universal_newlines

        # Input and output objects. The general principle is like
        # this:
        #
        # Parent                   Child
        # ------                   -----
        # p2cwrite   ---stdin--->  p2cread
        # c2pread    <--stdout---  c2pwrite
        # errread    <--stderr---  errwrite
        #
        # On POSIX, the child objects are file descriptors.  On
        # Windows, these are Windows file handles.  The parent objects
        # are file descriptors on both platforms.  The parent objects
        # are None when not using PIPEs. The child objects are None
        # when not redirecting.

        (p2cread, p2cwrite,
         c2pread, c2pwrite,
         errread, errwrite), to_close = self._get_handles(stdin, stdout, stderr)

        try:
            self._execute_child(args, executable, preexec_fn, close_fds,
                                cwd, env, universal_newlines,
                                startupinfo, creationflags, shell, to_close,
                                p2cread, p2cwrite,
                                c2pread, c2pwrite,
                                errread, errwrite)
        except Exception:
            # Preserve original exception in case os.close raises.
            exc_type, exc_value, exc_trace = sys.exc_info()
            for fd in to_close:
                try:
                    if mswindows:
                        fd.Close()
                    else:
                        os.close(fd)
                except EnvironmentError:
                    pass
            raise exc_type, exc_value, exc_trace

        if mswindows:
            # Convert the parent-side Windows handles into CRT file
            # descriptors so os.fdopen() below can wrap them.
            if p2cwrite is not None:
                p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
            if c2pread is not None:
                c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
            if errread is not None:
                errread = msvcrt.open_osfhandle(errread.Detach(), 0)

        # Wrap the parent ends of the pipes; 'U' mode enables universal
        # newline translation on read.
        if p2cwrite is not None:
            self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
        if c2pread is not None:
            if universal_newlines:
                self.stdout = os.fdopen(c2pread, 'rU', bufsize)
            else:
                self.stdout = os.fdopen(c2pread, 'rb', bufsize)
        if errread is not None:
            if universal_newlines:
                self.stderr = os.fdopen(errread, 'rU', bufsize)
            else:
                self.stderr = os.fdopen(errread, 'rb', bufsize)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
    def __del__(self, _maxint=sys.maxint):
        # _maxint is bound as a default argument so the value stays
        # reachable during interpreter shutdown, when module globals may
        # already have been torn down.
        #
        # If __init__ hasn't had a chance to execute (e.g. if it
        # was passed an undeclared keyword argument), we don't
        # have a _child_created attribute at all.
        if not self._child_created:
            # We didn't get to successfully create a child process.
            return
        # In case the child hasn't been waited on, check if it's done.
        self._internal_poll(_deadstate=_maxint)
        if self.returncode is None and _active is not None:
            # Child is still running, keep us alive until we can wait on it.
            _active.append(self)
    def communicate(self, input=None):
        """Interact with process: Send data to stdin.  Read data from
        stdout and stderr, until end-of-file is reached.  Wait for
        process to terminate.  The optional input argument should be a
        string to be sent to the child process, or None, if no data
        should be sent to the child.

        communicate() returns a tuple (stdout, stderr).

        Note: the data read is buffered in memory, so do not use this
        method if the data size is large or unlimited."""
        # Optimization: If we are only using one pipe, or no pipe at
        # all, using select() or threads is unnecessary.
        if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
            stdout = None
            stderr = None
            if self.stdin:
                if input:
                    try:
                        self.stdin.write(input)
                    except IOError as e:
                        # EPIPE/EINVAL mean the child already exited;
                        # treat that like a normal short write.
                        if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
                            raise
                self.stdin.close()
            elif self.stdout:
                stdout = _eintr_retry_call(self.stdout.read)
                self.stdout.close()
            elif self.stderr:
                stderr = _eintr_retry_call(self.stderr.read)
                self.stderr.close()
            self.wait()
            return (stdout, stderr)

        # More than one pipe in play: use the platform-specific
        # multiplexing implementation.
        return self._communicate(input)
    def poll(self):
        """Check if the child has terminated; return returncode or None."""
        return self._internal_poll()
    if mswindows:
        #
        # Windows methods
        #
        def _get_handles(self, stdin, stdout, stderr):
            """Construct and return tuple with IO objects:
            p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite

            Also returns to_close: the set of duplicated handles that the
            caller must close once the child has been spawned.
            """
            to_close = set()
            if stdin is None and stdout is None and stderr is None:
                # Nothing redirected: the child inherits everything.
                return (None, None, None, None, None, None), to_close

            p2cread, p2cwrite = None, None
            c2pread, c2pwrite = None, None
            errread, errwrite = None, None

            if stdin is None:
                # Inherit the parent's stdin; create a dummy pipe end when
                # the parent has no stdin handle (e.g. GUI applications).
                p2cread = _subprocess.GetStdHandle(_subprocess.STD_INPUT_HANDLE)
                if p2cread is None:
                    p2cread, _ = _subprocess.CreatePipe(None, 0)
            elif stdin == PIPE:
                p2cread, p2cwrite = _subprocess.CreatePipe(None, 0)
            elif isinstance(stdin, int):
                p2cread = msvcrt.get_osfhandle(stdin)
            else:
                # Assuming file-like object
                p2cread = msvcrt.get_osfhandle(stdin.fileno())
            p2cread = self._make_inheritable(p2cread)
            # We just duplicated the handle, it has to be closed at the end
            to_close.add(p2cread)
            if stdin == PIPE:
                to_close.add(p2cwrite)

            if stdout is None:
                c2pwrite = _subprocess.GetStdHandle(_subprocess.STD_OUTPUT_HANDLE)
                if c2pwrite is None:
                    _, c2pwrite = _subprocess.CreatePipe(None, 0)
            elif stdout == PIPE:
                c2pread, c2pwrite = _subprocess.CreatePipe(None, 0)
            elif isinstance(stdout, int):
                c2pwrite = msvcrt.get_osfhandle(stdout)
            else:
                # Assuming file-like object
                c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
            c2pwrite = self._make_inheritable(c2pwrite)
            # We just duplicated the handle, it has to be closed at the end
            to_close.add(c2pwrite)
            if stdout == PIPE:
                to_close.add(c2pread)

            if stderr is None:
                errwrite = _subprocess.GetStdHandle(_subprocess.STD_ERROR_HANDLE)
                if errwrite is None:
                    _, errwrite = _subprocess.CreatePipe(None, 0)
            elif stderr == PIPE:
                errread, errwrite = _subprocess.CreatePipe(None, 0)
            elif stderr == STDOUT:
                # Merge stderr into the same handle as stdout.
                errwrite = c2pwrite
            elif isinstance(stderr, int):
                errwrite = msvcrt.get_osfhandle(stderr)
            else:
                # Assuming file-like object
                errwrite = msvcrt.get_osfhandle(stderr.fileno())
            errwrite = self._make_inheritable(errwrite)
            # We just duplicated the handle, it has to be closed at the end
            to_close.add(errwrite)
            if stderr == PIPE:
                to_close.add(errread)

            return (p2cread, p2cwrite,
                    c2pread, c2pwrite,
                    errread, errwrite), to_close
        def _make_inheritable(self, handle):
            """Return a duplicate of handle, which is inheritable"""
            # DuplicateHandle with bInheritHandle=1 so the child created by
            # CreateProcess can inherit the duplicated handle.
            return _subprocess.DuplicateHandle(_subprocess.GetCurrentProcess(),
                                handle, _subprocess.GetCurrentProcess(), 0, 1,
                                _subprocess.DUPLICATE_SAME_ACCESS)
        def _find_w9xpopen(self):
            """Find and return absolute path to w9xpopen.exe"""
            # Look next to the interpreter DLL first, then fall back to
            # sys.exec_prefix for embedded interpreters.
            w9xpopen = os.path.join(
                            os.path.dirname(_subprocess.GetModuleFileName(0)),
                            "w9xpopen.exe")
            if not os.path.exists(w9xpopen):
                # Eeek - file-not-found - possibly an embedding
                # situation - see if we can locate it in sys.exec_prefix
                w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
                                        "w9xpopen.exe")
                if not os.path.exists(w9xpopen):
                    raise RuntimeError("Cannot locate w9xpopen.exe, which is "
                                       "needed for Popen to work with your "
                                       "shell or platform.")
            return w9xpopen
        def _execute_child(self, args, executable, preexec_fn, close_fds,
                           cwd, env, universal_newlines,
                           startupinfo, creationflags, shell, to_close,
                           p2cread, p2cwrite,
                           c2pread, c2pwrite,
                           errread, errwrite):
            """Execute program (MS Windows version)

            Launches the child via CreateProcess, closes the child-side
            pipe handles in the parent, and records pid/handle on self.
            """
            if not isinstance(args, types.StringTypes):
                # CreateProcess takes a single command-line string.
                args = list2cmdline(args)

            # Process startup details
            if startupinfo is None:
                startupinfo = STARTUPINFO()
            if None not in (p2cread, c2pwrite, errwrite):
                # All three std handles are redirected; pass them through.
                startupinfo.dwFlags |= _subprocess.STARTF_USESTDHANDLES
                startupinfo.hStdInput = p2cread
                startupinfo.hStdOutput = c2pwrite
                startupinfo.hStdError = errwrite

            if shell:
                # Run through the command interpreter, window hidden.
                startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
                startupinfo.wShowWindow = _subprocess.SW_HIDE
                comspec = os.environ.get("COMSPEC", "cmd.exe")
                args = '{} /c "{}"'.format (comspec, args)
                if (_subprocess.GetVersion() >= 0x80000000 or
                        os.path.basename(comspec).lower() == "command.com"):
                    # Win9x, or using command.com on NT. We need to
                    # use the w9xpopen intermediate program. For more
                    # information, see KB Q150956
                    # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
                    w9xpopen = self._find_w9xpopen()
                    args = '"%s" %s' % (w9xpopen, args)
                    # Not passing CREATE_NEW_CONSOLE has been known to
                    # cause random failures on win9x.  Specifically a
                    # dialog: "Your program accessed mem currently in
                    # use at xxx" and a hopeful warning about the
                    # stability of your system.  Cost is Ctrl+C wont
                    # kill children.
                    creationflags |= _subprocess.CREATE_NEW_CONSOLE

            def _close_in_parent(fd):
                # Close the child-side handle in the parent and stop
                # tracking it for error cleanup.
                fd.Close()
                to_close.remove(fd)

            # Start the process
            try:
                hp, ht, pid, tid = _subprocess.CreateProcess(executable, args,
                                         # no special security
                                         None, None,
                                         int(not close_fds),
                                         creationflags,
                                         env,
                                         cwd,
                                         startupinfo)
            except pywintypes.error, e:
                # Translate pywintypes.error to WindowsError, which is
                # a subclass of OSError.  FIXME: We should really
                # translate errno using _sys_errlist (or similar), but
                # how can this be done from Python?
                raise WindowsError(*e.args)
            finally:
                # Child is launched. Close the parent's copy of those pipe
                # handles that only the child should have open.  You need
                # to make sure that no handles to the write end of the
                # output pipe are maintained in this process or else the
                # pipe will not close when the child process exits and the
                # ReadFile will hang.
                if p2cread is not None:
                    _close_in_parent(p2cread)
                if c2pwrite is not None:
                    _close_in_parent(c2pwrite)
                if errwrite is not None:
                    _close_in_parent(errwrite)

            # Retain the process handle, but close the thread handle
            self._child_created = True
            self._handle = hp
            self.pid = pid
            ht.Close()
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_subprocess.WaitForSingleObject,
_WAIT_OBJECT_0=_subprocess.WAIT_OBJECT_0,
_GetExitCodeProcess=_subprocess.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
_subprocess.WaitForSingleObject(self._handle,
_subprocess.INFINITE)
self.returncode = _subprocess.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
def _communicate(self, input):
stdout = None # Return
stderr = None # Return
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread,
args=(self.stdout, stdout))
stdout_thread.setDaemon(True)
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread,
args=(self.stderr, stderr))
stderr_thread.setDaemon(True)
stderr_thread.start()
if self.stdin:
if input is not None:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE:
raise
self.stdin.close()
if self.stdout:
stdout_thread.join()
if self.stderr:
stderr_thread.join()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process
"""
try:
_subprocess.TerminateProcess(self._handle, 1)
except OSError as e:
# ERROR_ACCESS_DENIED (winerror 5) is received when the
# process already died.
if e.winerror != 5:
raise
rc = _subprocess.GetExitCodeProcess(self._handle)
if rc == _subprocess.STILL_ACTIVE:
raise
self.returncode = rc
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
to_close = set()
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = self.pipe_cloexec()
to_close.update((p2cread, p2cwrite))
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = self.pipe_cloexec()
to_close.update((c2pread, c2pwrite))
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = self.pipe_cloexec()
to_close.update((errread, errwrite))
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite), to_close
def _set_cloexec_flag(self, fd, cloexec=True):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
if cloexec:
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
else:
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
def pipe_cloexec(self):
"""Create a pipe with FDs set CLOEXEC."""
# Pipes' FDs are set CLOEXEC by default because we don't want them
# to be inherited by other subprocesses: the CLOEXEC flag is removed
# from the child's FDs by _dup2(), between fork() and exec().
# This is not atomic: we would need the pipe2() syscall for that.
r, w = os.pipe()
self._set_cloexec_flag(r)
self._set_cloexec_flag(w)
return r, w
def _close_fds(self, but):
if hasattr(os, 'closerange'):
os.closerange(3, but)
os.closerange(but + 1, MAXFD)
else:
for i in xrange(3, MAXFD):
if i == but:
continue
try:
os.close(i)
except:
pass
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell, to_close,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (POSIX version)"""
if isinstance(args, types.StringTypes):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
def _close_in_parent(fd):
os.close(fd)
to_close.remove(fd)
# For transferring possible exec failure from child to parent
# The first char specifies the exception type: 0 means
# OSError, 1 means some other error.
errpipe_read, errpipe_write = self.pipe_cloexec()
try:
try:
gc_was_enabled = gc.isenabled()
# Disable gc to avoid bug where gc -> file_dealloc ->
# write to stderr -> hang. http://bugs.python.org/issue1336
gc.disable()
try:
self.pid = os.fork()
except:
if gc_was_enabled:
gc.enable()
raise
self._child_created = True
if self.pid == 0:
# Child
try:
# Close parent's pipe ends
if p2cwrite is not None:
os.close(p2cwrite)
if c2pread is not None:
os.close(c2pread)
if errread is not None:
os.close(errread)
os.close(errpipe_read)
# When duping fds, if there arises a situation
# where one of the fds is either 0, 1 or 2, it
# is possible that it is overwritten (#12607).
if c2pwrite == 0:
c2pwrite = os.dup(c2pwrite)
if errwrite == 0 or errwrite == 1:
errwrite = os.dup(errwrite)
# Dup fds for child
def _dup2(a, b):
# dup2() removes the CLOEXEC flag but
# we must do it ourselves if dup2()
# would be a no-op (issue #10806).
if a == b:
self._set_cloexec_flag(a, False)
elif a is not None:
os.dup2(a, b)
_dup2(p2cread, 0)
_dup2(c2pwrite, 1)
_dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the
# same fd more than once, or standard fds.
closed = { None }
for fd in [p2cread, c2pwrite, errwrite]:
if fd not in closed and fd > 2:
os.close(fd)
closed.add(fd)
if cwd is not None:
os.chdir(cwd)
if preexec_fn:
preexec_fn()
# Close all other fds, if asked for - after
# preexec_fn(), which may open FDs.
if close_fds:
self._close_fds(but=errpipe_write)
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except:
exc_type, exc_value, tb = sys.exc_info()
# Save the traceback and attach it to the exception object
exc_lines = traceback.format_exception(exc_type,
exc_value,
tb)
exc_value.child_traceback = ''.join(exc_lines)
os.write(errpipe_write, pickle.dumps(exc_value))
# This exitcode won't be reported to applications, so it
# really doesn't matter what we return.
os._exit(255)
# Parent
if gc_was_enabled:
gc.enable()
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
# Wait for exec to fail or succeed; possibly raising exception
# Exception limited to 1M
data = _eintr_retry_call(os.read, errpipe_read, 1048576)
finally:
if p2cread is not None and p2cwrite is not None:
_close_in_parent(p2cread)
if c2pwrite is not None and c2pread is not None:
_close_in_parent(c2pwrite)
if errwrite is not None and errread is not None:
_close_in_parent(errwrite)
# be sure the FD is closed no matter what
os.close(errpipe_read)
if data != "":
try:
_eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
child_exception = pickle.loads(data)
raise child_exception
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _os_error=os.error, _ECHILD=errno.ECHILD):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
try:
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except _os_error as e:
if _deadstate is not None:
self.returncode = _deadstate
if e.errno == _ECHILD:
# This happens if SIGCLD is set to be ignored or
# waiting for child processes has otherwise been
# disabled for our process. This child is dead, we
# can't get the status.
# http://bugs.python.org/issue15756
self.returncode = 0
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
while self.returncode is None:
try:
pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
pid = self.pid
sts = 0
# Check the pid and loop as waitpid has been known to return
# 0 even without WNOHANG in odd situations. issue14396.
if pid == self.pid:
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input):
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
if _has_poll:
stdout, stderr = self._communicate_with_poll(input)
else:
stdout, stderr = self._communicate_with_select(input)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = ''.join(stdout)
if stderr is not None:
stderr = ''.join(stderr)
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def _communicate_with_poll(self, input):
stdout = None # Return
stderr = None # Return
fd2file = {}
fd2output = {}
poller = select.poll()
def register_and_append(file_obj, eventmask):
poller.register(file_obj.fileno(), eventmask)
fd2file[file_obj.fileno()] = file_obj
def close_unregister_and_remove(fd):
poller.unregister(fd)
fd2file[fd].close()
fd2file.pop(fd)
if self.stdin and input:
register_and_append(self.stdin, select.POLLOUT)
select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI
if self.stdout:
register_and_append(self.stdout, select_POLLIN_POLLPRI)
fd2output[self.stdout.fileno()] = stdout = []
if self.stderr:
register_and_append(self.stderr, select_POLLIN_POLLPRI)
fd2output[self.stderr.fileno()] = stderr = []
input_offset = 0
while fd2file:
try:
ready = poller.poll()
except select.error, e:
if e.args[0] == errno.EINTR:
continue
raise
for fd, mode in ready:
if mode & select.POLLOUT:
chunk = input[input_offset : input_offset + _PIPE_BUF]
try:
input_offset += os.write(fd, chunk)
except OSError as e:
if e.errno == errno.EPIPE:
close_unregister_and_remove(fd)
else:
raise
else:
if input_offset >= len(input):
close_unregister_and_remove(fd)
elif mode & select_POLLIN_POLLPRI:
data = os.read(fd, 4096)
if not data:
close_unregister_and_remove(fd)
fd2output[fd].append(data)
else:
# Ignore hang up or errors.
close_unregister_and_remove(fd)
return (stdout, stderr)
def _communicate_with_select(self, input):
read_set = []
write_set = []
stdout = None # Return
stderr = None # Return
if self.stdin and input:
write_set.append(self.stdin)
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr:
read_set.append(self.stderr)
stderr = []
input_offset = 0
while read_set or write_set:
try:
rlist, wlist, xlist = select.select(read_set, write_set, [])
except select.error, e:
if e.args[0] == errno.EINTR:
continue
raise
if self.stdin in wlist:
chunk = input[input_offset : input_offset + _PIPE_BUF]
try:
bytes_written = os.write(self.stdin.fileno(), chunk)
except OSError as e:
if e.errno == errno.EPIPE:
self.stdin.close()
write_set.remove(self.stdin)
else:
raise
else:
input_offset += bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if data == "":
self.stdout.close()
read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if data == "":
self.stderr.close()
read_set.remove(self.stderr)
stderr.append(data)
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
def _demo_posix():
#
# Example 1: Simple redirection: Get process list
#
plist = Popen(["ps"], stdout=PIPE).communicate()[0]
print "Process list:"
print plist
#
# Example 2: Change uid before executing child
#
if os.getuid() == 0:
p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
p.wait()
#
# Example 3: Connecting several subprocesses
#
print "Looking for 'hda'..."
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 4: Catch execution error
#
print
print "Trying a weird file..."
try:
print Popen(["/this/path/does/not/exist"]).communicate()
except OSError, e:
if e.errno == errno.ENOENT:
print "The file didn't exist. I thought so..."
print "Child traceback:"
print e.child_traceback
else:
print "Error", e.errno
else:
print >>sys.stderr, "Gosh. No error."
def _demo_windows():
#
# Example 1: Connecting several subprocesses
#
print "Looking for 'PROMPT' in set output..."
p1 = Popen("set", stdout=PIPE, shell=True)
p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 2: Simple execution of program
#
print "Executing calc..."
p = Popen("calc")
p.wait()
if __name__ == "__main__":
if mswindows:
_demo_windows()
else:
_demo_posix()
| gpl-3.0 |
dude56987/DoThis | speakthis.py | 1 | 7865 | #! /usr/bin/python
########################################################################
# SpeakThis speaks the currently highlighted text using espeak.
# Copyright (C) 2013 Carl J Smith
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
########################################################################
from os import popen
from os import system
import sys
import os
########################################################################
def loadFile(fileName):
try:
print "Loading :",fileName
fileObject=open(fileName,'r');
except:
print "Failed to load :",fileName
return False
fileText=''
lineCount = 0
for line in fileObject:
fileText += line
sys.stdout.write('Loading line '+str(lineCount)+'...\r')
lineCount += 1
print "Finished Loading :",fileName
fileObject.close()
if fileText == None:
return False
else:
return fileText
#if somehow everything fails return false
return False
########################################################################
def writeFile(fileName,contentToWrite):
# figure out the file path
filepath = fileName.split(os.sep)
filepath.pop()
filepath = os.sep.join(filepath)
# check if path exists
if os.path.exists(filepath):
try:
fileObject = open(fileName,'w')
fileObject.write(contentToWrite)
fileObject.close()
print 'Wrote file:',fileName
except:
print 'Failed to write file:',fileName
return False
else:
print 'Failed to write file, path:',filepath,'does not exist!'
return False
########################################################################
def speakText():
system('killall espeak')
# grab text with xsel and pipe to espeak
system('espeak "'+popen('xsel').read().replace('"','').replace("'","").replace('$','')+'"')
########################################################################
def setupShortcuts():
newSettingsArray = []
# in this array the 0th value is searched for and if found the 1th
# value is used to replace the current line in the file
newSettingsArray.append(['<property name="<Super>s" type="string" value="speakthis"/>',''])
newSettingsArray.append([' <property name="<Alt>F1" type="string" value="xfce4-popup-applicationsmenu"/>',' <property name="<Alt>F1" type="string" value="xfce4-popup-applicationsmenu"/>\n <property name="<Super>s" type="string" value="speakthis"/>'])
# sets up shortcuts for all users on the system
for folder in os.listdir('/home'):
filePath = os.path.join('/home',folder,'.config','xfce4','xfconf','xfce-perchannel-xml','xfce4-keyboard-shortcuts.xml')
if os.path.exists(filePath):
# split the config file by lines into an array to loop though
tempConfig=loadFile(filePath).split('\n')
newConfig = '' # stores the new file as a string
for line in tempConfig:
foundString = False # this is used to prevent double line writes
for item in newSettingsArray:
if line.find(item[0]) != -1:
# write new setting and remove old one
newConfig += item[1]+'\n'
foundString = True
if foundString == False:
newConfig += line+'\n'
writeFile(filePath,newConfig.replace('\n\n','\n'))
# make user the owner of this file once more since root is editing the files
os.system(('chown '+folder+' '+filePath))
print 'The system has successfully been configured for all users to'
print 'speak the highlighted text when you press "<super>+s", you'
print 'must log out and back in before the program can be used!'
########################################################################
def setupShortcut():
newSettingsArray = []
# in this array the 0th value is searched for and if found the 1th
# value is used to replace the current line in the file
newSettingsArray.append([' <property name="<Super>s" type="string" value="speakthis"/>',''])
newSettingsArray.append([' <property name="<Alt>F1" type="string" value="xfce4-popup-applicationsmenu"/>',' <property name="<Alt>F1" type="string" value="xfce4-popup-applicationsmenu"/>\n <property name="<Super>s" type="string" value="speakthis"/>'])
# store the filepath to the config file in the user running the programs
# home directory
filePath = os.path.join(os.getenv('HOME'),'.config','xfce4','xfconf','xfce-perchannel-xml','xfce4-keyboard-shortcuts.xml')
if os.path.exists(filePath):
# split the config file by lines into an array to loop though
tempConfig=loadFile(filePath).split('\n')
newConfig = '' # stores the new file as a string
for line in tempConfig:
foundString = False # this is used to prevent double line writes
for item in newSettingsArray:
if line.find(item[0]) != -1:
# write new setting and remove old one
newConfig += item[1]+'\n'
print 'Wrote line', item[1] #DEBUG
foundString = True
if foundString == False:
newConfig += line+'\n'
print 'Wrote line', line #DEBUG
writeFile(filePath,newConfig.replace('\n\n','\n'))
print 'The system has successfully been configured to speak the'
print 'highlighted text when you press "<super>+s", you must log out'
print 'and back in before the program can be used.'
########################################################################
if (('-S' in sys.argv)==True) or (('--setup-shortcuts' in sys.argv)==True):
#check for root since shortcuts need to be installed for all users
if os.geteuid() != 0:
print 'ERROR: this argument must be ran as root!'
print 'This parameter will install shortcuts for all users!'
exit()
else:
# setup shortcuts for everyone on the system
setupShortcuts();
elif (('-s' in sys.argv)==True) or (('--setup-shortcut' in sys.argv)==True):
setupShortcut()
elif (('-h' in sys.argv)==True) or (('--help' in sys.argv)==True):
print "SpeakThis speaks the currently highlighted text using espeak."
print "Copyright (C) 2013 Carl J Smith"
print ""
print "This program is free software: you can redistribute it and/or modify"
print "it under the terms of the GNU General Public License as published by"
print "the Free Software Foundation, either version 3 of the License, or"
print "(at your option) any later version."
print ""
print "This program is distributed in the hope that it will be useful,"
print "but WITHOUT ANY WARRANTY; without even the implied warranty of"
print "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the"
print "GNU General Public License for more details."
print ""
print "You should have received a copy of the GNU General Public License"
print "along with this program. If not, see <http://www.gnu.org/licenses/>."
print "#############################################################"
print "-h or --help"
print " Displays this menu"
print "-s or --setup-shortcut"
print " Installs shortcuts for xfce <super>+s to run the program"
print "-S or --setup-shortcuts"
print " Installs shortcuts for xfce <super>+s to all users on the system"
print "Running the program without arguments will speak the currently"
print " highlighted text."
print '#############################################################'
print 'Highlight some of the above text and hit "<super>+s" to test'
print 'the program.'
else:
# by default run calcText on program launch with no arguments
speakText();
| gpl-3.0 |
pshen/ansible | lib/ansible/modules/network/panos/panos_pg.py | 78 | 6521 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_pg
short_description: create a security profiles group
description:
- Create a security profile group
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
pg_name:
description:
- name of the security profile group
required: true
data_filtering:
description:
- name of the data filtering profile
required: false
default: None
file_blocking:
description:
- name of the file blocking profile
required: false
default: None
spyware:
description:
- name of the spyware profile
required: false
default: None
url_filtering:
description:
- name of the url filtering profile
required: false
default: None
virus:
description:
- name of the anti-virus profile
required: false
default: None
vulnerability:
description:
- name of the vulnerability profile
required: false
default: None
wildfire:
description:
- name of the wildfire analysis profile
required: false
default: None
commit:
description:
- commit if changed
required: false
default: true
'''
EXAMPLES = '''
- name: setup security profile group
panos_pg:
ip_address: "192.168.1.1"
password: "admin"
username: "admin"
pg_name: "pg-default"
virus: "default"
spyware: "default"
vulnerability: "default"
'''
RETURN='''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
import pan.xapi
from pan.xapi import PanXapiError
HAS_LIB = True
except ImportError:
HAS_LIB = False
_PG_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/vsys/entry[@name='vsys1']" +\
"/profile-group/entry[@name='%s']"
def pg_exists(xapi, pg_name):
xapi.get(_PG_XPATH % pg_name)
e = xapi.element_root.find('.//entry')
if e is None:
return False
return True
def add_pg(xapi, pg_name, data_filtering, file_blocking, spyware,
url_filtering, virus, vulnerability, wildfire):
if pg_exists(xapi, pg_name):
return False
exml = []
if data_filtering is not None:
exml.append('<data-filtering><member>%s</member></data-filtering>' %
data_filtering)
if file_blocking is not None:
exml.append('<file-blocking><member>%s</member></file-blocking>' %
file_blocking)
if spyware is not None:
exml.append('<spyware><member>%s</member></spyware>' %
spyware)
if url_filtering is not None:
exml.append('<url-filtering><member>%s</member></url-filtering>' %
url_filtering)
if virus is not None:
exml.append('<virus><member>%s</member></virus>' %
virus)
if vulnerability is not None:
exml.append('<vulnerability><member>%s</member></vulnerability>' %
vulnerability)
if wildfire is not None:
exml.append('<wildfire-analysis><member>%s</member></wildfire-analysis>' %
wildfire)
exml = ''.join(exml)
xapi.set(xpath=_PG_XPATH % pg_name, element=exml)
return True
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
username=dict(default='admin'),
pg_name=dict(required=True),
data_filtering=dict(),
file_blocking=dict(),
spyware=dict(),
url_filtering=dict(),
virus=dict(),
vulnerability=dict(),
wildfire=dict(),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
pg_name = module.params['pg_name']
data_filtering = module.params['data_filtering']
file_blocking = module.params['file_blocking']
spyware = module.params['spyware']
url_filtering = module.params['url_filtering']
virus = module.params['virus']
vulnerability = module.params['vulnerability']
wildfire = module.params['wildfire']
commit = module.params['commit']
try:
changed = add_pg(xapi, pg_name, data_filtering, file_blocking,
spyware, url_filtering, virus, vulnerability, wildfire)
if changed and commit:
xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
module.exit_json(changed=changed, msg="okey dokey")
if __name__ == '__main__':
main()
| gpl-3.0 |
RPGOne/scikit-learn | sklearn/datasets/kddcup99.py | 14 | 13152 | """KDDCUP 99 dataset.
A classic dataset for anomaly detection.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
"""
import sys
import errno
from gzip import GzipFile
from io import BytesIO
import logging
import os
from os.path import exists, join
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
from .base import get_data_home
from .base import Bunch
from ..externals import joblib, six
from ..utils import check_random_state
from ..utils import shuffle as shuffle_method
URL10 = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz')
URL = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/kddcup99-mld/kddcup.data.gz')
logger = logging.getLogger()
def fetch_kddcup99(subset=None, shuffle=False, random_state=None,
                   percent10=True, download_if_missing=True):
    """Load and return the kddcup 99 dataset (classification).

    The KDD Cup '99 dataset was created by processing the tcpdump portions
    of the 1998 DARPA Intrusion Detection System (IDS) Evaluation dataset,
    created by MIT Lincoln Lab [1]. The artificial data was generated using
    a closed network and hand-injected attacks to produce a large number of
    different types of attack with normal activity in the background.
    As the initial goal was to produce a large training set for supervised
    learning algorithms, there is a large proportion (80.1%) of abnormal
    data which is unrealistic in real world, and inappropriate for
    unsupervised anomaly detection which aims at detecting 'abnormal' data,
    ie

    1) qualitatively different from normal data.

    2) in large minority among the observations.

    We thus transform the KDD Data set into two different data sets: SA and
    SF.

    - SA is obtained by simply selecting all the normal data, and a small
      proportion of abnormal data to gives an anomaly proportion of 1%.

    - SF is obtained as in [2]
      by simply picking up the data whose attribute logged_in is positive,
      thus focusing on the intrusion attack, which gives a proportion of
      0.3% of attack.

    - http and smtp are two subsets of SF corresponding with third feature
      equal to 'http' (resp. to 'smtp')

    General KDD structure :

    ================      ==========================================
    Samples total         4898431
    Dimensionality        41
    Features              discrete (int) or continuous (float)
    Targets               str, 'normal.' or name of the anomaly type
    ================      ==========================================

    SA structure :

    ================      ==========================================
    Samples total         976158
    Dimensionality        41
    Features              discrete (int) or continuous (float)
    Targets               str, 'normal.' or name of the anomaly type
    ================      ==========================================

    SF structure :

    ================      ==========================================
    Samples total         699691
    Dimensionality        4
    Features              discrete (int) or continuous (float)
    Targets               str, 'normal.' or name of the anomaly type
    ================      ==========================================

    http structure :

    ================      ==========================================
    Samples total         619052
    Dimensionality        3
    Features              discrete (int) or continuous (float)
    Targets               str, 'normal.' or name of the anomaly type
    ================      ==========================================

    smtp structure :

    ================      ==========================================
    Samples total         95373
    Dimensionality        3
    Features              discrete (int) or continuous (float)
    Targets               str, 'normal.' or name of the anomaly type
    ================      ==========================================

    .. versionadded:: 0.18

    Parameters
    ----------
    subset : None, 'SA', 'SF', 'http', 'smtp'
        To return the corresponding classical subsets of kddcup 99.
        If None, return the entire kddcup 99 dataset.

    random_state : int, RandomState instance or None, optional (default=None)
        Random state for shuffling the dataset.
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    shuffle : bool, default=False
        Whether to shuffle dataset.

    percent10 : bool, default=True
        Whether to load only 10 percent of the data.

    download_if_missing : bool, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn and 'target', the regression target for each
        sample.

    References
    ----------
    .. [1] Analysis and Results of the 1999 DARPA Off-Line Intrusion
           Detection Evaluation Richard Lippmann, Joshua W. Haines,
           David J. Fried, Jonathan Korba, Kumar Das

    .. [2] A Geometric Framework for Unsupervised Anomaly Detection: Detecting
           Intrusions in Unlabeled Data (2002) by Eleazar Eskin, Andrew Arnold,
           Michael Prerau, Leonid Portnoy, Sal Stolfo
    """
    kddcup99 = _fetch_brute_kddcup99(shuffle=shuffle, percent10=percent10,
                                     download_if_missing=download_if_missing)
    data = kddcup99.data
    target = kddcup99.target
    if subset == 'SA':
        # Targets are bytes, hence the b'normal.' comparison.
        s = target == b'normal.'
        t = np.logical_not(s)
        normal_samples = data[s, :]
        normal_targets = target[s]
        abnormal_samples = data[t, :]
        abnormal_targets = target[t]
        n_samples_abnormal = abnormal_samples.shape[0]
        # selected abnormal samples:
        # drawn with replacement; 3377 gives ~1% anomalies vs the normal data
        random_state = check_random_state(random_state)
        r = random_state.randint(0, n_samples_abnormal, 3377)
        abnormal_samples = abnormal_samples[r]
        abnormal_targets = abnormal_targets[r]
        data = np.r_[normal_samples, abnormal_samples]
        target = np.r_[normal_targets, abnormal_targets]
    if subset == 'SF' or subset == 'http' or subset == 'smtp':
        # select all samples with positive logged_in attribute:
        s = data[:, 11] == 1
        # drop column 11 (logged_in) once it has been used for filtering
        data = np.c_[data[s, :11], data[s, 12:]]
        target = target[s]
        # log-transform the three continuous features (duration, src_bytes,
        # dst_bytes); +0.1 avoids log(0)
        data[:, 0] = np.log((data[:, 0] + 0.1).astype(float))
        data[:, 4] = np.log((data[:, 4] + 0.1).astype(float))
        data[:, 5] = np.log((data[:, 5] + 0.1).astype(float))
        if subset == 'http':
            s = data[:, 2] == b'http'
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
        if subset == 'smtp':
            s = data[:, 2] == b'smtp'
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
        if subset == 'SF':
            data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]
    return Bunch(data=data, target=target)
def _fetch_brute_kddcup99(subset=None, data_home=None,
                          download_if_missing=True, random_state=None,
                          shuffle=False, percent10=False):
    """Load the kddcup99 dataset, downloading it if necessary.

    Parameters
    ----------
    subset : None, 'SA', 'SF', 'http', 'smtp'
        To return the corresponding classical subsets of kddcup 99.
        If None, return the entire kddcup 99 dataset.

    data_home : string, optional
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing : boolean, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    random_state : int, RandomState instance or None, optional (default=None)
        Random state for shuffling the dataset.
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    shuffle : bool, default=False
        Whether to shuffle dataset.

    percent10 : bool, default=False
        Whether to load only 10 percent of the data.

    Returns
    -------
    dataset : dict-like object with the following attributes:
        dataset.data : numpy array of shape (494021, 41)
            Each row corresponds to the 41 features in the dataset.
        dataset.target : numpy array of shape (494021,)
            Each value corresponds to one of the 21 attack types or to the
            label 'normal.'.
        dataset.DESCR : string
            Description of the kddcup99 dataset.

    """
    data_home = get_data_home(data_home=data_home)
    if sys.version_info[0] == 3:
        # The zlib compression format use by joblib is not compatible when
        # switching from Python 2 to Python 3, let us use a separate folder
        # under Python 3:
        dir_suffix = "-py3"
    else:
        # Backward compat for Python 2 users
        dir_suffix = ""
    if percent10:
        kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
    else:
        kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
    samples_path = join(kddcup_dir, "samples")
    targets_path = join(kddcup_dir, "targets")
    available = exists(samples_path)
    if download_if_missing and not available:
        # Download, parse and cache the archive with joblib.
        _mkdirp(kddcup_dir)
        URL_ = URL10 if percent10 else URL
        logger.warning("Downloading %s" % URL_)
        f = BytesIO(urlopen(URL_).read())
        # dtype of each of the 41 features plus the trailing label column
        dt = [('duration', int),
              ('protocol_type', 'S4'),
              ('service', 'S11'),
              ('flag', 'S6'),
              ('src_bytes', int),
              ('dst_bytes', int),
              ('land', int),
              ('wrong_fragment', int),
              ('urgent', int),
              ('hot', int),
              ('num_failed_logins', int),
              ('logged_in', int),
              ('num_compromised', int),
              ('root_shell', int),
              ('su_attempted', int),
              ('num_root', int),
              ('num_file_creations', int),
              ('num_shells', int),
              ('num_access_files', int),
              ('num_outbound_cmds', int),
              ('is_host_login', int),
              ('is_guest_login', int),
              ('count', int),
              ('srv_count', int),
              ('serror_rate', float),
              ('srv_serror_rate', float),
              ('rerror_rate', float),
              ('srv_rerror_rate', float),
              ('same_srv_rate', float),
              ('diff_srv_rate', float),
              ('srv_diff_host_rate', float),
              ('dst_host_count', int),
              ('dst_host_srv_count', int),
              ('dst_host_same_srv_rate', float),
              ('dst_host_diff_srv_rate', float),
              ('dst_host_same_src_port_rate', float),
              ('dst_host_srv_diff_host_rate', float),
              ('dst_host_serror_rate', float),
              ('dst_host_srv_serror_rate', float),
              ('dst_host_rerror_rate', float),
              ('dst_host_srv_rerror_rate', float),
              ('labels', 'S16')]
        DT = np.dtype(dt)
        file_ = GzipFile(fileobj=f, mode='r')
        Xy = []
        for line in file_.readlines():
            if six.PY3:
                line = line.decode()
            Xy.append(line.replace('\n', '').split(','))
        file_.close()
        print('extraction done')
        # Object array so each column can be cast to its own dtype below.
        Xy = np.asarray(Xy, dtype=object)
        for j in range(42):
            Xy[:, j] = Xy[:, j].astype(DT[j])
        X = Xy[:, :-1]
        y = Xy[:, -1]
        # XXX bug when compress!=0:
        # (error: 'Incorrect data length while decompressing[...] the file
        # could be corrupted.')
        joblib.dump(X, samples_path, compress=0)
        joblib.dump(y, targets_path, compress=0)
    # If the download branch above did not run, X/y are unbound and the
    # NameError fallback loads them from the joblib cache on disk (which
    # raises IOError there if the cache is missing).
    try:
        X, y
    except NameError:
        X = joblib.load(samples_path)
        y = joblib.load(targets_path)
    if shuffle:
        X, y = shuffle_method(X, y, random_state=random_state)
    return Bunch(data=X, target=y, DESCR=__doc__)
def _mkdirp(d):
    """Create directory *d* and any missing parents (like ``mkdir -p``).

    A directory that already exists is not an error.  No guarantee that
    the directory is writable.
    """
    try:
        os.makedirs(d)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return  # already present -- nothing to do
        raise
| bsd-3-clause |
longman694/youtube-dl | youtube_dl/extractor/tvplay.py | 26 | 15914 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urlparse,
)
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
parse_iso8601,
qualities,
try_get,
update_url_query,
)
class TVPlayIE(InfoExtractor):
    """Extractor for MTG's "TV play" site family (Baltic/Nordic/Bulgarian).

    Also accepts internal ``mtg:<id>`` URLs, which ViafreeIE delegates to.
    Metadata and stream URLs come from the playapi.mtgx.tv JSON API.
    """
    IE_NAME = 'mtg'
    IE_DESC = 'MTG services'
    _VALID_URL = r'''(?x)
                    (?:
                        mtg:|
                        https?://
                            (?:www\.)?
                            (?:
                                tvplay(?:\.skaties)?\.lv/parraides|
                                (?:tv3play|play\.tv3)\.lt/programos|
                                tv3play(?:\.tv3)?\.ee/sisu|
                                (?:tv(?:3|6|8|10)play|viafree)\.se/program|
                                (?:(?:tv3play|viasat4play|tv6play|viafree)\.no|(?:tv3play|viafree)\.dk)/programmer|
                                play\.novatv\.bg/programi
                            )
                            /(?:[^/]+/)+
                    )
                    (?P<id>\d+)
                '''
    _TESTS = [
        {
            'url': 'http://www.tvplay.lv/parraides/vinas-melo-labak/418113?autostart=true',
            'md5': 'a1612fe0849455423ad8718fe049be21',
            'info_dict': {
                'id': '418113',
                'ext': 'mp4',
                'title': 'Kādi ir īri? - Viņas melo labāk',
                'description': 'Baiba apsmej īrus, kādi tie ir un ko viņi dara.',
                'series': 'Viņas melo labāk',
                'season': '2.sezona',
                'season_number': 2,
                'duration': 25,
                'timestamp': 1406097056,
                'upload_date': '20140723',
            },
        },
        {
            'url': 'http://play.tv3.lt/programos/moterys-meluoja-geriau/409229?autostart=true',
            'info_dict': {
                'id': '409229',
                'ext': 'flv',
                'title': 'Moterys meluoja geriau',
                'description': 'md5:9aec0fc68e2cbc992d2a140bd41fa89e',
                'series': 'Moterys meluoja geriau',
                'episode_number': 47,
                'season': '1 sezonas',
                'season_number': 1,
                'duration': 1330,
                'timestamp': 1403769181,
                'upload_date': '20140626',
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.tv3play.ee/sisu/kodu-keset-linna/238551?autostart=true',
            'info_dict': {
                'id': '238551',
                'ext': 'flv',
                'title': 'Kodu keset linna 398537',
                'description': 'md5:7df175e3c94db9e47c0d81ffa5d68701',
                'duration': 1257,
                'timestamp': 1292449761,
                'upload_date': '20101215',
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.tv3play.se/program/husraddarna/395385?autostart=true',
            'info_dict': {
                'id': '395385',
                'ext': 'mp4',
                'title': 'Husräddarna S02E07',
                'description': 'md5:f210c6c89f42d4fc39faa551be813777',
                'duration': 2574,
                'timestamp': 1400596321,
                'upload_date': '20140520',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.tv6play.se/program/den-sista-dokusapan/266636?autostart=true',
            'info_dict': {
                'id': '266636',
                'ext': 'mp4',
                'title': 'Den sista dokusåpan S01E08',
                'description': 'md5:295be39c872520221b933830f660b110',
                'duration': 1492,
                'timestamp': 1330522854,
                'upload_date': '20120229',
                'age_limit': 18,
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.tv8play.se/program/antikjakten/282756?autostart=true',
            'info_dict': {
                'id': '282756',
                'ext': 'mp4',
                'title': 'Antikjakten S01E10',
                'description': 'md5:1b201169beabd97e20c5ad0ad67b13b8',
                'duration': 2646,
                'timestamp': 1348575868,
                'upload_date': '20120925',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.tv3play.no/programmer/anna-anka-soker-assistent/230898?autostart=true',
            'info_dict': {
                'id': '230898',
                'ext': 'mp4',
                'title': 'Anna Anka søker assistent - Ep. 8',
                'description': 'md5:f80916bf5bbe1c5f760d127f8dd71474',
                'duration': 2656,
                'timestamp': 1277720005,
                'upload_date': '20100628',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.viasat4play.no/programmer/budbringerne/21873?autostart=true',
            'info_dict': {
                'id': '21873',
                'ext': 'mp4',
                'title': 'Budbringerne program 10',
                'description': 'md5:4db78dc4ec8a85bb04fd322a3ee5092d',
                'duration': 1297,
                'timestamp': 1254205102,
                'upload_date': '20090929',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.tv6play.no/programmer/hotelinspektor-alex-polizzi/361883?autostart=true',
            'info_dict': {
                'id': '361883',
                'ext': 'mp4',
                'title': 'Hotelinspektør Alex Polizzi - Ep. 10',
                'description': 'md5:3ecf808db9ec96c862c8ecb3a7fdaf81',
                'duration': 2594,
                'timestamp': 1393236292,
                'upload_date': '20140224',
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'http://play.novatv.bg/programi/zdravei-bulgariya/624952?autostart=true',
            'info_dict': {
                'id': '624952',
                'ext': 'flv',
                'title': 'Здравей, България (12.06.2015 г.) ',
                'description': 'md5:99f3700451ac5bb71a260268b8daefd7',
                'duration': 8838,
                'timestamp': 1434100372,
                'upload_date': '20150612',
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://tvplay.skaties.lv/parraides/vinas-melo-labak/418113?autostart=true',
            'only_matching': True,
        },
        {
            # views is null
            'url': 'http://tvplay.skaties.lv/parraides/tv3-zinas/760183',
            'only_matching': True,
        },
        {
            'url': 'http://tv3play.tv3.ee/sisu/kodu-keset-linna/238551?autostart=true',
            'only_matching': True,
        },
        {
            'url': 'http://www.viafree.se/program/underhallning/i-like-radio-live/sasong-1/676869',
            'only_matching': True,
        },
        {
            'url': 'mtg:418113',
            'only_matching': True,
        }
    ]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Use the URL's country TLD (if any) to set up geo bypass.
        geo_country = self._search_regex(
            r'https?://[^/]+\.([a-z]{2})', url,
            'geo country', default=None)
        if geo_country:
            self._initialize_geo_bypass([geo_country.upper()])
        video = self._download_json(
            'http://playapi.mtgx.tv/v3/videos/%s' % video_id, video_id, 'Downloading video JSON')
        title = video['title']
        try:
            streams = self._download_json(
                'http://playapi.mtgx.tv/v3/videos/stream/%s' % video_id,
                video_id, 'Downloading streams JSON')
        except ExtractorError as e:
            # A 403 carries a JSON body with a human-readable reason.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                msg = self._parse_json(e.cause.read().decode('utf-8'), video_id)
                raise ExtractorError(msg['msg'], expected=True)
            raise
        quality = qualities(['hls', 'medium', 'high'])
        formats = []
        for format_id, video_url in streams.get('streams', {}).items():
            if not video_url or not isinstance(video_url, compat_str):
                continue
            ext = determine_ext(video_url)
            if ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    update_url_query(video_url, {
                        'hdcore': '3.5.0',
                        'plugin': 'aasp-3.5.0.151.81'
                    }), video_id, f4m_id='hds', fatal=False))
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    video_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                fmt = {
                    'format_id': format_id,
                    'quality': quality(format_id),
                    'ext': ext,
                }
                # RTMP URLs must be split into server URL, app and playpath.
                if video_url.startswith('rtmp'):
                    m = re.search(
                        r'^(?P<url>rtmp://[^/]+/(?P<app>[^/]+))/(?P<playpath>.+)$', video_url)
                    if not m:
                        continue
                    fmt.update({
                        'ext': 'flv',
                        'url': m.group('url'),
                        'app': m.group('app'),
                        'play_path': m.group('playpath'),
                    })
                else:
                    fmt.update({
                        'url': video_url,
                    })
                formats.append(fmt)
        if not formats and video.get('is_geo_blocked'):
            self.raise_geo_restricted(
                'This content might not be available in your country due to copyright reasons')
        self._sort_formats(formats)
        # TODO: webvtt in m3u8
        subtitles = {}
        sami_path = video.get('sami_path')
        if sami_path:
            # Subtitle language from the file name, else from the site TLD.
            lang = self._search_regex(
                r'_([a-z]{2})\.xml', sami_path, 'lang',
                default=compat_urlparse.urlparse(url).netloc.rsplit('.', 1)[-1])
            subtitles[lang] = [{
                'url': sami_path,
            }]
        series = video.get('format_title')
        episode_number = int_or_none(video.get('format_position', {}).get('episode'))
        season = video.get('_embedded', {}).get('season', {}).get('title')
        season_number = int_or_none(video.get('format_position', {}).get('season'))
        return {
            'id': video_id,
            'title': title,
            'description': video.get('description'),
            'series': series,
            'episode_number': episode_number,
            'season': season,
            'season_number': season_number,
            'duration': int_or_none(video.get('duration')),
            'timestamp': parse_iso8601(video.get('created_at')),
            'view_count': try_get(video, lambda x: x['views']['total'], int),
            'age_limit': int_or_none(video.get('age_limit', 0)),
            'formats': formats,
            'subtitles': subtitles,
        }
class ViafreeIE(InfoExtractor):
    """Extractor for viafree.se/.no/.dk pages.

    Resolves the numeric video id from the page and hands off to TVPlayIE
    via an ``mtg:<id>`` URL.
    """
    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?
                        viafree\.
                        (?:
                            (?:dk|no)/programmer|
                            se/program
                        )
                        /(?:[^/]+/)+(?P<id>[^/?#&]+)
                '''
    _TESTS = [{
        'url': 'http://www.viafree.se/program/livsstil/husraddarna/sasong-2/avsnitt-2',
        'info_dict': {
            'id': '395375',
            'ext': 'mp4',
            'title': 'Husräddarna S02E02',
            'description': 'md5:4db5c933e37db629b5a2f75dfb34829e',
            'series': 'Husräddarna',
            'season': 'Säsong 2',
            'season_number': 2,
            'duration': 2576,
            'timestamp': 1400596321,
            'upload_date': '20140520',
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [TVPlayIE.ie_key()],
    }, {
        # with relatedClips
        'url': 'http://www.viafree.se/program/reality/sommaren-med-youtube-stjarnorna/sasong-1/avsnitt-1',
        'info_dict': {
            'id': '758770',
            'ext': 'mp4',
            'title': 'Sommaren med YouTube-stjärnorna S01E01',
            'description': 'md5:2bc69dce2c4bb48391e858539bbb0e3f',
            'series': 'Sommaren med YouTube-stjärnorna',
            'season': 'Säsong 1',
            'season_number': 1,
            'duration': 1326,
            'timestamp': 1470905572,
            'upload_date': '20160811',
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [TVPlayIE.ie_key()],
    }, {
        # Different og:image URL schema
        'url': 'http://www.viafree.se/program/reality/sommaren-med-youtube-stjarnorna/sasong-1/avsnitt-2',
        'only_matching': True,
    }, {
        'url': 'http://www.viafree.no/programmer/underholdning/det-beste-vorspielet/sesong-2/episode-1',
        'only_matching': True,
    }, {
        'url': 'http://www.viafree.dk/programmer/reality/paradise-hotel/saeson-7/episode-5',
        'only_matching': True,
    }]
    @classmethod
    def suitable(cls, url):
        # Defer to TVPlayIE for URLs it already handles (viafree.se/program).
        return False if TVPlayIE.suitable(url) else super(ViafreeIE, cls).suitable(url)
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Primary source: the window.App JSON blob embedded in the page.
        # Inline function literals are replaced with null so it parses.
        data = self._parse_json(
            self._search_regex(
                r'(?s)window\.App\s*=\s*({.+?})\s*;\s*</script',
                webpage, 'data', default='{}'),
            video_id, transform_source=lambda x: re.sub(
                r'(?s)function\s+[a-zA-Z_][\da-zA-Z_]*\s*\([^)]*\)\s*{[^}]*}\s*',
                'null', x), fatal=False)
        video_id = None
        if data:
            video_id = try_get(
                data, lambda x: x['context']['dispatcher']['stores'][
                    'ContentPageProgramStore']['currentVideo']['id'],
                compat_str)
        # Fallback #1 (extract from og:image URL schema)
        if not video_id:
            thumbnail = self._og_search_thumbnail(webpage, default=None)
            if thumbnail:
                video_id = self._search_regex(
                    # Patterns seen:
                    #  http://cdn.playapi.mtgx.tv/imagecache/600x315/cloud/content-images/inbox/765166/a2e95e5f1d735bab9f309fa345cc3f25.jpg
                    #  http://cdn.playapi.mtgx.tv/imagecache/600x315/cloud/content-images/seasons/15204/758770/4a5ba509ca8bc043e1ebd1a76131cdf2.jpg
                    r'https?://[^/]+/imagecache/(?:[^/]+/)+(\d{6,})/',
                    thumbnail, 'video id', default=None)
        # Fallback #2. Extract from raw JSON string.
        # May extract wrong video id if relatedClips is present.
        if not video_id:
            video_id = self._search_regex(
                r'currentVideo["\']\s*:\s*.+?["\']id["\']\s*:\s*["\'](\d{6,})',
                webpage, 'video id')
        return self.url_result('mtg:%s' % video_id, TVPlayIE.ie_key())
| unlicense |
Windy-Ground/TextBlob | run_tests.py | 11 | 1778 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
The main test runner script.
Usage: ::
python run_tests.py
Skip slow tests
python run_tests.py fast
When there's no Internet
python run_tests.py no-internet
'''
from __future__ import unicode_literals
import nose
import sys
from textblob.compat import PY2
# Interpreter feature flags used by get_argv() to exclude incompatible tests.
PY26 = PY2 and int(sys.version_info[1]) < 7  # running on Python 2.6?
PYPY = "PyPy" in sys.version  # running under PyPy?
def main():
args = get_argv()
success = nose.run(argv=args)
sys.exit(0) if success else sys.exit(1)
def get_argv():
args = [sys.argv[0], "tests", '--verbosity', '2']
attr_conditions = [] # Use nose's attribselect plugin to filter tests
if "force-all" in sys.argv:
# Don't exclude any tests
return args
if "cover" in sys.argv:
args += ["--with-coverage", "--cover-html"]
if PY26:
# Exclude tests that don't work on python2.6
attr_conditions.append("not py27_only")
try:
__import__('numpy')
except ImportError:
# Exclude tests that require numpy
attr_conditions.append("not requires_numpy")
if not PY2:
# Exclude tests that only work on python2
attr_conditions.append("not py2_only")
if PYPY:
# Exclude tests that don't work on PyPY
attr_conditions.append("not no_pypy")
if "fast" in sys.argv:
attr_conditions.append("not slow")
if "no-internet" in sys.argv:
# Exclude tests that require internet
attr_conditions.append("not requires_internet")
# Skip tests with the "skip" attribute
attr_conditions.append("not skip")
attr_expression = " and ".join(attr_conditions)
if attr_expression:
args.extend(["-A", attr_expression])
return args
if __name__ == '__main__':
main()
| mit |
mancoast/CPythonPyc_test | crash/242_pystone.py | 35 | 7254 | #! /usr/bin/env python
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
LOOPS = 50000  # default number of benchmark passes
from time import clock
__version__ = "1.1"
# Ada-style enumeration constants (1..5) used throughout the benchmark.
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
    """Mutable record mirroring the Ada benchmark's record type.

    Holds a pointer-like reference, a discriminant, an enumeration
    value, an integer and a string component.
    """

    def __init__(self, PtrComp=None, Discr=0, EnumComp=0,
                 IntComp=0, StringComp=0):
        self.PtrComp = PtrComp
        self.Discr = Discr
        self.EnumComp = EnumComp
        self.IntComp = IntComp
        self.StringComp = StringComp

    def copy(self):
        # Shallow one-level duplicate; PtrComp is shared, not cloned.
        return Record(**vars(self))
# Integer booleans, kept from the original C translation.
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
    # Run the benchmark once and report elapsed time and pystones/second.
    benchtime, stones = pystones(loops)
    print "Pystone(%s) time for %d passes = %g" % \
          (__version__, loops, benchtime)
    print "This machine benchmarks at %g pystones/second" % stones
def pystones(loops=LOOPS):
    # Returns (benchtime_seconds, pystones_per_second) from the driver.
    return Proc0(loops)
# Global benchmark state, mutated by the Proc*/Func* routines below.
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
Array2Glob = map(lambda x: x[:], [Array1Glob]*51)  # 51 independent row copies
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
    # Benchmark driver: measures loop overhead first, then runs the
    # Dhrystone-style workload `loops` times and returns
    # (benchtime_seconds, pystones_per_second).
    global IntGlob
    global BoolGlob
    global Char1Glob
    global Char2Glob
    global Array1Glob
    global Array2Glob
    global PtrGlb
    global PtrGlbNext
    # Measure the cost of an empty loop so it can be subtracted later.
    starttime = clock()
    for i in range(loops):
        pass
    nulltime = clock() - starttime
    # Initialize the global record chain and arrays.
    PtrGlbNext = Record()
    PtrGlb = Record()
    PtrGlb.PtrComp = PtrGlbNext
    PtrGlb.Discr = Ident1
    PtrGlb.EnumComp = Ident3
    PtrGlb.IntComp = 40
    PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
    String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
    Array2Glob[8][7] = 10
    # The timed workload.
    starttime = clock()
    for i in range(loops):
        Proc5()
        Proc4()
        IntLoc1 = 2
        IntLoc2 = 3
        String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
        EnumLoc = Ident2
        BoolGlob = not Func2(String1Loc, String2Loc)
        while IntLoc1 < IntLoc2:
            IntLoc3 = 5 * IntLoc1 - IntLoc2
            IntLoc3 = Proc7(IntLoc1, IntLoc2)
            IntLoc1 = IntLoc1 + 1
        Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
        PtrGlb = Proc1(PtrGlb)
        CharIndex = 'A'
        while CharIndex <= Char2Glob:
            if EnumLoc == Func1(CharIndex, 'C'):
                EnumLoc = Proc6(Ident1)
            CharIndex = chr(ord(CharIndex)+1)
        IntLoc3 = IntLoc2 * IntLoc1
        IntLoc2 = IntLoc3 / IntLoc1
        IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
        IntLoc1 = Proc2(IntLoc1)
    benchtime = clock() - starttime - nulltime
    return benchtime, (loops / benchtime)
def Proc1(PtrParIn):
    # Copies the global record into the parameter's PtrComp and exercises
    # record-field manipulation (part of the benchmark workload).
    PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
    PtrParIn.IntComp = 5
    NextRecord.IntComp = PtrParIn.IntComp
    NextRecord.PtrComp = PtrParIn.PtrComp
    NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
    if NextRecord.Discr == Ident1:
        NextRecord.IntComp = 6
        NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
        NextRecord.PtrComp = PtrGlb.PtrComp
        NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
    else:
        PtrParIn = NextRecord.copy()
    # Break the self-reference so the record chain doesn't leak (v1.1 fix).
    NextRecord.PtrComp = None
    return PtrParIn
def Proc2(IntParIO):
    # Loop exercising conditional assignment; relies on Char1Glob being 'A'
    # (set by Proc5) so the loop terminates after one pass.
    IntLoc = IntParIO + 10
    while 1:
        if Char1Glob == 'A':
            IntLoc = IntLoc - 1
            IntParIO = IntLoc - IntGlob
            EnumLoc = Ident1
        if EnumLoc == Ident1:
            break
    return IntParIO
def Proc3(PtrParOut):
    # Dereference the global record pointer; sets IntGlob as a side effect
    # when the pointer is unset.
    global IntGlob
    if PtrGlb is not None:
        PtrParOut = PtrGlb.PtrComp
    else:
        IntGlob = 100
    PtrGlb.IntComp = Proc7(10, IntGlob)
    return PtrParOut
def Proc4():
    # Exercises boolean ops on globals; sets Char2Glob to 'B'.
    global Char2Glob
    BoolLoc = Char1Glob == 'A'
    BoolLoc = BoolLoc or BoolGlob
    Char2Glob = 'B'
def Proc5():
    # Resets global character/boolean state at the top of each pass.
    global Char1Glob
    global BoolGlob
    Char1Glob = 'A'
    BoolGlob = FALSE
def Proc6(EnumParIn):
    # Maps one enumeration value to another through a chain of tests
    # (mirrors the Ada/C case statement of the original benchmark).
    EnumParOut = EnumParIn
    if not Func3(EnumParIn):
        EnumParOut = Ident4
    if EnumParIn == Ident1:
        EnumParOut = Ident1
    elif EnumParIn == Ident2:
        if IntGlob > 100:
            EnumParOut = Ident1
        else:
            EnumParOut = Ident4
    elif EnumParIn == Ident3:
        EnumParOut = Ident2
    elif EnumParIn == Ident4:
        pass
    elif EnumParIn == Ident5:
        EnumParOut = Ident3
    return EnumParOut
def Proc7(IntParI1, IntParI2):
    """Return the sum of both inputs plus a fixed offset of 2."""
    increment = IntParI1 + 2
    return IntParI2 + increment
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
    """Write a fixed pattern of cells into the benchmark arrays.

    Mutates both arrays in place around index ``IntParI1 + 5`` and sets
    the global ``IntGlob`` to 5 as a side effect.
    """
    global IntGlob
    base = IntParI1 + 5
    Array1Par[base] = IntParI2
    Array1Par[base + 1] = Array1Par[base]
    Array1Par[base + 30] = base
    for col in (base, base + 1):
        Array2Par[base][col] = base
    Array2Par[base][base - 1] += 1
    Array2Par[base + 20][base] = Array1Par[base]
    IntGlob = 5
def Func1(CharPar1, CharPar2):
    """Return Ident1 when the two characters differ, Ident2 otherwise."""
    return Ident1 if CharPar1 != CharPar2 else Ident2
def Func2(StrParI1, StrParI2):
    # Deliberately convoluted string comparison from the original
    # benchmark; compares characters at fixed offsets, then the whole
    # strings. Returns TRUE/FALSE (integer booleans).
    IntLoc = 1
    while IntLoc <= 1:
        if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
            CharLoc = 'A'
            IntLoc = IntLoc + 1
    # NOTE(review): CharLoc is only bound inside the loop; the benchmark's
    # fixed inputs always take that branch, so no NameError in practice.
    if CharLoc >= 'W' and CharLoc <= 'Z':
        IntLoc = 7
    if CharLoc == 'X':
        return TRUE
    else:
        if StrParI1 > StrParI2:
            IntLoc = IntLoc + 7
            return TRUE
        else:
            return FALSE
def Func3(EnumParIn):
    """Return TRUE iff the argument equals Ident3 (integer boolean)."""
    return TRUE if EnumParIn == Ident3 else FALSE
# Command-line entry point: optional single argument is the loop count.
if __name__ == '__main__':
    import sys
    def error(msg):
        # Print the message plus usage to stderr and exit with status 100.
        print >>sys.stderr, msg,
        print >>sys.stderr, "usage: %s [number_of_loops]" % sys.argv[0]
        sys.exit(100)
    nargs = len(sys.argv) - 1
    if nargs > 1:
        error("%d arguments are too many;" % nargs)
    elif nargs == 1:
        try: loops = int(sys.argv[1])
        except ValueError:
            error("Invalid argument %r;" % sys.argv[1])
    else:
        loops = LOOPS
    main(loops)
| gpl-3.0 |
xrmx/django | tests/template_tests/filter_tests/test_truncatewords.py | 215 | 1705 | from django.template.defaultfilters import truncatewords
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class TruncatewordsTests(SimpleTestCase):
    """Template-level tests for the ``truncatewords`` filter.

    Verify that the "..." suffix stays unescaped both inside and outside
    ``autoescape`` blocks, while the input's '&' is escaped as expected.
    """
    @setup({'truncatewords01':
        '{% autoescape off %}{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}{% endautoescape %}'})
    def test_truncatewords01(self):
        output = self.engine.render_to_string('truncatewords01', {'a': 'alpha & bravo', 'b': mark_safe('alpha & bravo')})
        self.assertEqual(output, 'alpha & ... alpha & ...')
    @setup({'truncatewords02': '{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}'})
    def test_truncatewords02(self):
        output = self.engine.render_to_string('truncatewords02', {'a': 'alpha & bravo', 'b': mark_safe('alpha & bravo')})
        self.assertEqual(output, 'alpha & ... alpha & ...')
class FunctionTests(SimpleTestCase):
    """Direct tests of the ``truncatewords`` filter function."""
    def test_truncate(self):
        self.assertEqual(truncatewords('A sentence with a few words in it', 1), 'A ...')
    def test_truncate2(self):
        self.assertEqual(
            truncatewords('A sentence with a few words in it', 5),
            'A sentence with a few ...',
        )
    def test_overtruncate(self):
        # A limit larger than the word count leaves the string untouched.
        self.assertEqual(
            truncatewords('A sentence with a few words in it', 100),
            'A sentence with a few words in it',
        )
    def test_invalid_number(self):
        # A non-numeric limit is ignored rather than raising.
        self.assertEqual(
            truncatewords('A sentence with a few words in it', 'not a number'),
            'A sentence with a few words in it',
        )
    def test_non_string_input(self):
        # Non-string input is coerced to str before truncation.
        self.assertEqual(truncatewords(123, 2), '123')
| bsd-3-clause |
mabelcalim/tide-app | kivy/test8/.buildozer/android/app/python-for-android/tools/check_headers.py | 27 | 1267 | #!/usr/bin/env python2
import sys
from os import unlink
from os.path import exists
# HTTP response headers whose values make up the cache signature.
HEADERS = ('Content-Disposition', 'Content-Length', 'Content-Type',
           'ETag', 'Last-Modified')
def is_sig_header(header):
    """Return True if *header* starts with one of the signature HEADERS.

    The comparison is case-insensitive, matching HTTP header semantics.
    Always returns a bool (the original fell through to an implicit None
    on a miss, which callers relied on only for truthiness).
    """
    # str.startswith accepts a tuple of prefixes, so one call tests all.
    return header.lower().startswith(tuple(s.lower() for s in HEADERS))
def do():
    # Compare the signature headers of the latest HTTP response (argv[1])
    # against the cached signature file (argv[2]).  Returns 0 when the
    # signature is unchanged; otherwise rewrites the signature file and
    # implicitly returns None (treated as "changed" by the caller).
    headers_fn = sys.argv[1]
    signature_fn = sys.argv[2]
    # first, get all the headers from the latest request
    with open(headers_fn) as fd:
        headers = [line.strip() for line in fd.readlines()]
    # Keep only the final response in the file (after any redirects).
    last_index = 0
    for index, header in enumerate(headers):
        if header.startswith('HTTP/1.'):
            last_index = index
    headers = headers[last_index:]
    # select few headers for the signature
    headers = [header for header in headers if is_sig_header(header)]
    signature = '\n'.join(headers)
    # read the original signature
    if exists(signature_fn):
        with open(signature_fn) as fd:
            original_signature = fd.read()
        if original_signature == signature:
            return 0
        unlink(signature_fn)
    if signature:
        with open(signature_fn, 'w') as fd:
            fd.write(signature)
# Run the check; any failure whatsoever yields a non-zero exit status.
# Note: do() returns None ("changed") or 0 ("unchanged"); sys.exit(None)
# exits 0 as well, so only the unchanged/OK cases exit successfully.
try:
    ret = do()
except:
    ret = 1
sys.exit(ret)
| gpl-3.0 |
shaolinfry/litecoin | contrib/devtools/clang-format-diff.py | 90 | 6192 | #!/usr/bin/env python
#
#===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License.
#
# ============================================================
#
# University of Illinois/NCSA
# Open Source License
#
# Copyright (c) 2007-2015 University of Illinois at Urbana-Champaign.
# All rights reserved.
#
# Developed by:
#
# LLVM Team
#
# University of Illinois at Urbana-Champaign
#
# http://llvm.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal with
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the names of the LLVM Team, University of Illinois at
# Urbana-Champaign, nor the names of its contributors may be used to
# endorse or promote products derived from this Software without specific
# prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
# SOFTWARE.
#
# ============================================================
#
#===------------------------------------------------------------------------===#
r"""
ClangFormat Diff Reformatter
============================
This script reads input from a unified diff and reformats all the changed
lines. This is useful to reformat all the lines touched by a specific patch.
Example usage for git/svn users:
git diff -U0 HEAD^ | clang-format-diff.py -p1 -i
svn diff --diff-cmd=diff -x-U0 | clang-format-diff.py -i
"""
import argparse
import difflib
import re
import string
import subprocess
import StringIO
import sys
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
def main():
  """Read a unified diff on stdin and run clang-format on the changed ranges.

  NOTE: this script is Python 2 only (print statement, StringIO module,
  dict.iteritems, string.join).
  """
  parser = argparse.ArgumentParser(description=
                                   'Reformat changed lines in diff. Without -i '
                                   'option just output the diff that would be '
                                   'introduced.')
  parser.add_argument('-i', action='store_true', default=False,
                      help='apply edits to files instead of displaying a diff')
  parser.add_argument('-p', metavar='NUM', default=0,
                      help='strip the smallest prefix containing P slashes')
  parser.add_argument('-regex', metavar='PATTERN', default=None,
                      help='custom pattern selecting file paths to reformat '
                      '(case sensitive, overrides -iregex)')
  parser.add_argument('-iregex', metavar='PATTERN', default=
                      r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc|js|ts|proto'
                      r'|protodevel|java)',
                      help='custom pattern selecting file paths to reformat '
                      '(case insensitive, overridden by -regex)')
  parser.add_argument('-sort-includes', action='store_true', default=False,
                      help='let clang-format sort include blocks')
  parser.add_argument('-v', '--verbose', action='store_true',
                      help='be more verbose, ineffective without -i')
  args = parser.parse_args()

  # Extract changed lines for each file.
  filename = None
  lines_by_file = {}
  for line in sys.stdin:
    # "+++ b/path/to/file" header: remember which file the hunks belong to.
    match = re.search('^\+\+\+\ (.*?/){%s}(\S*)' % args.p, line)
    if match:
      filename = match.group(2)
    if filename == None:
      continue

    if args.regex is not None:
      if not re.match('^%s$' % args.regex, filename):
        continue
    else:
      if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
        continue

    # "@@ -a,b +c,d @@" hunk header: collect the post-image line range.
    match = re.search('^@@.*\+(\d+)(,(\d+))?', line)
    if match:
      start_line = int(match.group(1))
      line_count = 1
      if match.group(3):
        line_count = int(match.group(3))
      # Pure deletions (count 0) leave nothing to reformat.
      if line_count == 0:
        continue
      end_line = start_line + line_count - 1;
      lines_by_file.setdefault(filename, []).extend(
          ['-lines', str(start_line) + ':' + str(end_line)])

  # Reformat files containing changes in place.
  for filename, lines in lines_by_file.iteritems():
    if args.i and args.verbose:
      print 'Formatting', filename
    command = [binary, filename]
    if args.i:
      command.append('-i')
    if args.sort_includes:
      command.append('-sort-includes')
    command.extend(lines)
    command.extend(['-style=file', '-fallback-style=none'])
    p = subprocess.Popen(command, stdout=subprocess.PIPE,
                         stderr=None, stdin=subprocess.PIPE)
    stdout, stderr = p.communicate()
    # Propagate clang-format failures to our own exit status.
    if p.returncode != 0:
      sys.exit(p.returncode);

    if not args.i:
      # Without -i: show the formatting changes as a unified diff.
      with open(filename) as f:
        code = f.readlines()
      formatted_code = StringIO.StringIO(stdout).readlines()
      diff = difflib.unified_diff(code, formatted_code,
                                  filename, filename,
                                  '(before formatting)', '(after formatting)')
      diff_string = string.join(diff, '')
      if len(diff_string) > 0:
        sys.stdout.write(diff_string)
# Allow use both as an importable module and as a command-line tool.
if __name__ == '__main__':
  main()
| mit |
sriprasanna/django-1.3.1 | django/contrib/auth/tests/forms.py | 58 | 9283 | from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm, PasswordChangeForm, SetPasswordForm, UserChangeForm, PasswordResetForm
from django.test import TestCase
class UserCreationFormTest(TestCase):
    """Validation scenarios for UserCreationForm."""

    fixtures = ['authtestdata.json']

    def test_user_already_exists(self):
        # The fixture already contains 'testclient'; re-registering it fails.
        form = UserCreationForm({
            'username': 'testclient',
            'password1': 'test123',
            'password2': 'test123',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [u'A user with that username already exists.'])

    def test_invalid_data(self):
        # '!' is not an allowed username character.
        form = UserCreationForm({
            'username': 'jsmith!',
            'password1': 'test123',
            'password2': 'test123',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [u'This value may contain only letters, numbers and @/./+/-/_ characters.'])

    def test_password_verification(self):
        # The verification password is incorrect.
        form = UserCreationForm({
            'username': 'jsmith',
            'password1': 'test123',
            'password2': 'test',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password2"].errors,
                         [u"The two password fields didn't match."])

    def test_both_passwords(self):
        # Neither password given: both fields must complain.
        form = UserCreationForm({'username': 'jsmith'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors,
                         [u'This field is required.'])
        self.assertEqual(form['password2'].errors,
                         [u'This field is required.'])

        # Only the second password given: the first still complains.
        form = UserCreationForm({'username': 'jsmith', 'password2': 'test123'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors,
                         [u'This field is required.'])

    def test_success(self):
        # A well-formed submission creates and saves the user.
        form = UserCreationForm({
            'username': 'jsmith@example.com',
            'password1': 'test123',
            'password2': 'test123',
        })
        self.assertTrue(form.is_valid())
        u = form.save()
        self.assertEqual(repr(u), '<User: jsmith@example.com>')
class AuthenticationFormTest(TestCase):
    """Validation scenarios for AuthenticationForm."""

    fixtures = ['authtestdata.json']

    def test_invalid_username(self):
        # Unknown username: generic credentials error, no information leak.
        form = AuthenticationForm(None, {
            'username': 'jsmith_does_not_exist',
            'password': 'test123',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [u'Please enter a correct username and password. Note that both fields are case-sensitive.'])

    def test_inactive_user(self):
        # Correct credentials, but the account is disabled.
        form = AuthenticationForm(None, {
            'username': 'inactive',
            'password': 'password',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [u'This account is inactive.'])

    def test_success(self):
        # Valid credentials for an active user: no errors at all.
        form = AuthenticationForm(None, {
            'username': 'testclient',
            'password': 'password',
        })
        self.assertTrue(form.is_valid())
        self.assertEqual(form.non_field_errors(), [])
class SetPasswordFormTest(TestCase):
    """Validation scenarios for SetPasswordForm."""

    fixtures = ['authtestdata.json']

    def test_password_verification(self):
        # Mismatching new passwords are rejected on the second field.
        user = User.objects.get(username='testclient')
        form = SetPasswordForm(user, {
            'new_password1': 'abc123',
            'new_password2': 'abc',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [u"The two password fields didn't match."])

    def test_success(self):
        # Matching new passwords validate.
        user = User.objects.get(username='testclient')
        form = SetPasswordForm(user, {
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        })
        self.assertTrue(form.is_valid())
class PasswordChangeFormTest(TestCase):
    """Validation scenarios for PasswordChangeForm."""

    fixtures = ['authtestdata.json']

    def test_incorrect_password(self):
        # The old password must be correct before a new one is accepted.
        user = User.objects.get(username='testclient')
        form = PasswordChangeForm(user, {
            'old_password': 'test',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(form["old_password"].errors,
                         [u'Your old password was entered incorrectly. Please enter it again.'])

    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        form = PasswordChangeForm(user, {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [u"The two password fields didn't match."])

    def test_success(self):
        # Correct old password plus matching new passwords validate.
        user = User.objects.get(username='testclient')
        form = PasswordChangeForm(user, {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        })
        self.assertTrue(form.is_valid())

    def test_field_order(self):
        # Regression test: 'old_password' must be the first field.
        user = User.objects.get(username='testclient')
        self.assertEqual(PasswordChangeForm(user, {}).fields.keys(),
                         ['old_password', 'new_password1', 'new_password2'])
class UserChangeFormTest(TestCase):
    """Validation scenarios for UserChangeForm."""

    fixtures = ['authtestdata.json']

    def test_username_validity(self):
        # Spaces are not allowed in usernames.
        user = User.objects.get(username='testclient')
        form = UserChangeForm({'username': 'not valid'}, instance=user)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['username'].errors,
                         [u'This value may contain only letters, numbers and @/./+/-/_ characters.'])

    def test_bug_14242(self):
        # Regression test, introduced by adding an optimization for the
        # UserChangeForm: subclasses that restrict the field set must still
        # be constructible.
        class MyUserForm(UserChangeForm):
            def __init__(self, *args, **kwargs):
                super(MyUserForm, self).__init__(*args, **kwargs)
                self.fields['groups'].help_text = 'These groups give users different permissions'

            class Meta(UserChangeForm.Meta):
                fields = ('groups',)

        # Just check we can create it.
        MyUserForm({})
class PasswordResetFormTest(TestCase):
    """Validation scenarios for PasswordResetForm."""

    fixtures = ['authtestdata.json']

    def create_dummy_user(self):
        """Create a user and return a (user_object, username, email) tuple."""
        username = 'jsmith'
        email = 'jsmith@example.com'
        return (User.objects.create_user(username, email, 'test123'),
                username, email)

    def test_invalid_email(self):
        # A malformed address is rejected by field validation.
        form = PasswordResetForm({'email': 'not valid'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form['email'].errors,
                         [u'Enter a valid e-mail address.'])

    def test_nonexistant_email(self):
        # A syntactically valid address with no matching account is rejected.
        form = PasswordResetForm({'email': 'foo@bar.com'})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors,
            {'email': [u"That e-mail address doesn't have an associated user account. Are you sure you've registered?"]})

    def test_cleaned_data(self):
        # Regression test: cleaned_data must expose the submitted address.
        (user, username, email) = self.create_dummy_user()
        form = PasswordResetForm({'email': email})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['email'], email)

    def test_bug_5605(self):
        # Bug #5605: preserve the case of the user name (before the @ in the
        # email address) when creating a user.
        user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
        self.assertEqual(user.email, 'tesT@example.com')
        user = User.objects.create_user('forms_test3', 'tesT', 'test')
        self.assertEqual(user.email, 'tesT')

    def test_inactive_user(self):
        # An inactive user cannot receive a password-reset email.
        (user, username, email) = self.create_dummy_user()
        user.is_active = False
        user.save()
        form = PasswordResetForm({'email': email})
        self.assertFalse(form.is_valid())
| bsd-3-clause |
croot/blacksmith-2 | expansions/config/insc.py | 3 | 1347 | # coding: utf-8
if DefLANG in ("RU", "UA"):
AnsBase_temp = tuple([line.decode("utf-8") for line in (
"Изменённые пункты: %s", # 0
"Очевидно параметры неверны.", # 1
"Настройки:\n", # 2
"Конфиг пуст.", # 3
"Вниание! Текущий jid сейчас удаляется, сейчас я зайду с нового.", # 4
"смена jid'а", # 5
"Теперь '%s' - мой основной JID.", # 6
"Нельзя! Итак подключен всего один клиент.", # 7
"Система не может выделить ресурсы на ещё один клиент.", # 8
"Не коннектится.", # 9
"Этот jid уже есть в списках.", # 10
"«%s» нет в списке клиентов.", # 11
"«%s» сейчас оффлайн." # 12
)])
else:
AnsBase_temp = (
"Changed options: %s", # 0
"Parameters are incorrect.", # 1
"Config:\n", # 2
"Config is empty.", # 3
"Attention! Current jid deleting now. I'll rejoin with new.", # 4
"jid change", # 5
"'%s' - my main JID now.", # 6
"Forbidden!", # 7
"The system can not allocate resources to another client.", # 8
"No connection.", # 9
"This jid is already in the list.", # 10
"'%s' not in clients-list.", # 11
"'%s' is offline." # 12
) | apache-2.0 |
Zord13appdesa/python-for-android | python3-alpha/extra_modules/pyxmpp2/ext/legacyauth.py | 46 | 18162 | #
# (C) Copyright 2003-2010 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""XMPP stream support with fallback to legacy non-SASL Jabber authentication.
Normative reference:
- `JEP 78 <http://www.jabber.org/jeps/jep-0078.html>`__
"""
__docformat__="restructuredtext en"
raise ImportError("{0} is not yet rewritten for PyXMPP2".format(__name__))
import hashlib
import logging
from ..iq import Iq
from ..utils import to_utf8,from_utf8
from ..jid import JID
from ..clientstream import ClientStream
from .register import Register
from ..exceptions import ClientStreamError, LegacyAuthenticationError, RegistrationError
class LegacyClientStream(ClientStream):
"""Handles Jabber (both XMPP and legacy protocol) client connection stream.
Both client and server side of the connection is supported. This class handles
client SASL and legacy authentication, authorisation and XMPP resource binding.
"""
    def __init__(self, jid, password = None, server = None, port = 5222,
            auth_methods = ("sasl:DIGEST-MD5", "digest"),
            tls_settings = None, keepalive = 0, owner = None):
        """Initialize a LegacyClientStream object.

        :Parameters:
            - `jid`: local JID.
            - `password`: user's password.
            - `server`: server to use. If not given then address will be derived form the JID.
            - `port`: port number to use. If not given then address will be derived form the JID.
            - `auth_methods`: sallowed authentication methods. SASL authentication mechanisms
              in the list should be prefixed with "sasl:" string.
            - `tls_settings`: settings for StartTLS -- `TLSSettings` instance.
            - `keepalive`: keepalive output interval. 0 to disable.
            - `owner`: `Client`, `Component` or similar object "owning" this stream.

        :Types:
            - `jid`: `pyxmpp.JID`
            - `password`: `str`
            - `server`: `str`
            - `port`: `int`
            - `auth_methods`: sequence of `str`
            - `tls_settings`: `pyxmpp.TLSSettings`
            - `keepalive`: `int`
        """
        # Pre-declare all instance attributes (reset again in _reset()) so
        # they exist before the base-class constructor may use them.
        (self.authenticated, self.available_auth_methods, self.auth_stanza,
                self.peer_authenticated, self.auth_method_used,
                self.registration_callback, self.registration_form, self.__register) = (None,) * 8
        ClientStream.__init__(self, jid, password, server, port,
                    auth_methods, tls_settings, keepalive, owner)
        self.__logger=logging.getLogger("pyxmpp2.jabber.LegacyClientStream")
def _reset(self):
"""Reset the `LegacyClientStream` object state, making the object ready
to handle new connections."""
ClientStream._reset(self)
self.available_auth_methods = None
self.auth_stanza = None
self.registration_callback = None
    def _post_connect(self):
        """Initialize authentication when the connection is established
        and we are the initiator.

        On the receiving side install the jabber:iq:auth handlers; on the
        initiating side either start in-band registration (when a
        `registration_callback` is set) or fall through to the base-class
        authentication."""
        if not self.initiator:
            if "plain" in self.auth_methods or "digest" in self.auth_methods:
                self.set_iq_get_handler("query","jabber:iq:auth",
                            self.auth_in_stage1)
                self.set_iq_set_handler("query","jabber:iq:auth",
                            self.auth_in_stage2)
        elif self.registration_callback:
            # Registration requested: ask the server for the registration form
            # instead of authenticating now.
            iq = Iq(stanza_type = "get")
            iq.set_content(Register())
            self.set_response_handlers(iq, self.registration_form_received, self.registration_error)
            self.send(iq)
            return
        ClientStream._post_connect(self)
def _post_auth(self):
"""Unregister legacy authentication handlers after successfull
authentication."""
ClientStream._post_auth(self)
if not self.initiator:
self.unset_iq_get_handler("query","jabber:iq:auth")
self.unset_iq_set_handler("query","jabber:iq:auth")
    def _try_auth(self):
        """Try to authenticate using the first one of allowed authentication
        methods left.

        SASL methods are delegated to the base class; legacy "plain"/"digest"
        methods go through the two-stage jabber:iq:auth exchange.

        [client only]"""
        if self.authenticated:
            self.__logger.debug("try_auth: already authenticated")
            return
        self.__logger.debug("trying auth: %r" % (self._auth_methods_left,))
        if not self._auth_methods_left:
            raise LegacyAuthenticationError("No allowed authentication methods available")
        method=self._auth_methods_left[0]
        if method.startswith("sasl:"):
            # SASL mechanisms are handled by the XMPP base stream.
            return ClientStream._try_auth(self)
        elif method not in ("plain","digest"):
            # Unknown method name: drop it and recurse to try the next one.
            self._auth_methods_left.pop(0)
            self.__logger.debug("Skipping unknown auth method: %s" % method)
            return self._try_auth()
        elif self.available_auth_methods is not None:
            # Stage-1 response already received: pick the method if offered.
            if method in self.available_auth_methods:
                self._auth_methods_left.pop(0)
                self.auth_method_used=method
                if method=="digest":
                    self._digest_auth_stage2(self.auth_stanza)
                else:
                    self._plain_auth_stage2(self.auth_stanza)
                self.auth_stanza=None
                return
            else:
                # NOTE(review): the unavailable method is only logged here, not
                # popped — presumably retried/skipped elsewhere; confirm.
                self.__logger.debug("Skipping unavailable auth method: %s" % method)
        else:
            # No stage-1 response yet: query the server for available fields.
            self._auth_stage1()
    def auth_in_stage1(self,stanza):
        """Handle the first stage (<iq type='get'/>) of legacy ("plain" or
        "digest") authentication: advertise the fields we accept.

        [server only]"""
        self.lock.acquire()
        try:
            if "plain" not in self.auth_methods and "digest" not in self.auth_methods:
                # Legacy authentication disabled entirely.
                iq=stanza.make_error_response("not-allowed")
                self.send(iq)
                return

            # Advertise the required fields plus one per enabled method.
            iq=stanza.make_result_response()
            q=iq.new_query("jabber:iq:auth")
            q.newChild(None,"username",None)
            q.newChild(None,"resource",None)
            if "plain" in self.auth_methods:
                q.newChild(None,"password",None)
            if "digest" in self.auth_methods:
                q.newChild(None,"digest",None)
            self.send(iq)
            iq.free()
        finally:
            self.lock.release()
def auth_in_stage2(self,stanza):
"""Handle the second stage (<iq type='set'/>) of legacy ("plain" or
"digest") authentication.
[server only]"""
self.lock.acquire()
try:
if "plain" not in self.auth_methods and "digest" not in self.auth_methods:
iq=stanza.make_error_response("not-allowed")
self.send(iq)
return
username=stanza.xpath_eval("a:query/a:username",{"a":"jabber:iq:auth"})
if username:
username=from_utf8(username[0].getContent())
resource=stanza.xpath_eval("a:query/a:resource",{"a":"jabber:iq:auth"})
if resource:
resource=from_utf8(resource[0].getContent())
if not username or not resource:
self.__logger.debug("No username or resource found in auth request")
iq=stanza.make_error_response("bad-request")
self.send(iq)
return
if stanza.xpath_eval("a:query/a:password",{"a":"jabber:iq:auth"}):
if "plain" not in self.auth_methods:
iq=stanza.make_error_response("not-allowed")
self.send(iq)
return
else:
return self._plain_auth_in_stage2(username,resource,stanza)
if stanza.xpath_eval("a:query/a:digest",{"a":"jabber:iq:auth"}):
if "plain" not in self.auth_methods:
iq=stanza.make_error_response("not-allowed")
self.send(iq)
return
else:
return self._digest_auth_in_stage2(username,resource,stanza)
finally:
self.lock.release()
    def _auth_stage1(self):
        """Do the first stage (<iq type='get'/>) of legacy ("plain" or
        "digest") authentication: ask the server which fields it accepts.

        [client only]"""
        iq=Iq(stanza_type="get")
        q=iq.new_query("jabber:iq:auth")
        q.newTextChild(None,"username",to_utf8(self.my_jid.node))
        q.newTextChild(None,"resource",to_utf8(self.my_jid.resource))
        self.send(iq)
        # Expect the field list within 60 seconds, otherwise give up on this
        # method via auth_timeout.
        self.set_response_handlers(iq,self.auth_stage2,self.auth_error,
                self.auth_timeout,timeout=60)
        iq.free()
def auth_timeout(self):
"""Handle legacy authentication timeout.
[client only]"""
self.lock.acquire()
try:
self.__logger.debug("Timeout while waiting for jabber:iq:auth result")
if self._auth_methods_left:
self._auth_methods_left.pop(0)
finally:
self.lock.release()
    def auth_error(self,stanza):
        """Handle legacy authentication error: extract the error condition
        and raise it as a `LegacyAuthenticationError`.

        [client only]"""
        self.lock.acquire()
        try:
            err=stanza.get_error()
            # Prefer the legacy jabber:iq:auth:error condition, fall back to
            # the standard stanza error condition.
            ae=err.xpath_eval("e:*",{"e":"jabber:iq:auth:error"})
            if ae:
                ae=ae[0].name
            else:
                ae=err.get_condition().name
            raise LegacyAuthenticationError("Authentication error condition: %s"
                    % (ae,))
        finally:
            self.lock.release()
    def auth_stage2(self,stanza):
        """Handle the first stage authentication response (result of the <iq
        type="get"/>): record which legacy methods the server offers and
        continue authentication.

        [client only]"""
        self.lock.acquire()
        try:
            self.__logger.debug("Procesing auth response...")  # (sic: typo in log text)
            self.available_auth_methods=[]
            # "digest" requires the stream id as salt, so only offer it when
            # the server advertised <digest/> AND we have a stream id.
            if (stanza.xpath_eval("a:query/a:digest",{"a":"jabber:iq:auth"}) and self.stream_id):
                self.available_auth_methods.append("digest")
            if (stanza.xpath_eval("a:query/a:password",{"a":"jabber:iq:auth"})):
                self.available_auth_methods.append("plain")
            self.auth_stanza=stanza.copy()
            self._try_auth()
        finally:
            self.lock.release()
    def _plain_auth_stage2(self, _unused):
        """Do the second stage (<iq type='set'/>) of legacy "plain"
        authentication: send the password in clear text.

        [client only]"""
        iq=Iq(stanza_type="set")
        q=iq.new_query("jabber:iq:auth")
        q.newTextChild(None,"username",to_utf8(self.my_jid.node))
        q.newTextChild(None,"resource",to_utf8(self.my_jid.resource))
        q.newTextChild(None,"password",to_utf8(self.password))
        self.send(iq)
        self.set_response_handlers(iq,self.auth_finish,self.auth_error)
        iq.free()
    def _plain_auth_in_stage2(self, username, _unused, stanza):
        """Handle the second stage (<iq type='set'/>) of legacy "plain"
        authentication: check the submitted clear-text password.

        [server only]"""
        password=stanza.xpath_eval("a:query/a:password",{"a":"jabber:iq:auth"})
        if password:
            password=from_utf8(password[0].getContent())
        if not password:
            self.__logger.debug("No password found in plain auth request")
            iq=stanza.make_error_response("bad-request")
            self.send(iq)
            return
        if self.check_password(username,password):
            # Success: confirm and mark the peer authenticated.
            iq=stanza.make_result_response()
            self.send(iq)
            self.peer_authenticated=True
            self.auth_method_used="plain"
            self.state_change("authorized",self.peer)
            self._post_auth()
        else:
            self.__logger.debug("Plain auth failed")
            iq=stanza.make_error_response("bad-request")
            e=iq.get_error()
            e.add_custom_condition('jabber:iq:auth:error',"user-unauthorized")
            self.send(iq)
    def _digest_auth_stage2(self, _unused):
        """Do the second stage (<iq type='set'/>) of legacy "digest"
        authentication: send SHA1(stream_id + password) instead of the
        clear-text password.

        [client only]"""
        iq=Iq(stanza_type="set")
        q=iq.new_query("jabber:iq:auth")
        q.newTextChild(None,"username",to_utf8(self.my_jid.node))
        q.newTextChild(None,"resource",to_utf8(self.my_jid.resource))
        # Legacy digest as defined by the jabber:iq:auth protocol.
        digest = hashlib.sha1(to_utf8(self.stream_id)+to_utf8(self.password)).hexdigest()
        q.newTextChild(None,"digest",digest)
        self.send(iq)
        self.set_response_handlers(iq,self.auth_finish,self.auth_error)
        iq.free()
    def _digest_auth_in_stage2(self, username, _unused, stanza):
        """Handle the second stage (<iq type='set'/>) of legacy "digest"
        authentication: recompute the digest and compare it with the one
        submitted by the peer.

        [server only]"""
        digest=stanza.xpath_eval("a:query/a:digest",{"a":"jabber:iq:auth"})
        if digest:
            digest=digest[0].getContent()
        if not digest:
            self.__logger.debug("No digest found in digest auth request")
            iq=stanza.make_error_response("bad-request")
            self.send(iq)
            return
        # Digest verification needs the clear-text password on our side.
        password,pwformat=self.get_password(username)
        if not password or pwformat!="plain":
            iq=stanza.make_error_response("bad-request")
            e=iq.get_error()
            e.add_custom_condition('jabber:iq:auth:error',"user-unauthorized")
            self.send(iq)
            return
        mydigest = hashlib.sha1(to_utf8(self.stream_id)+to_utf8(password)).hexdigest()
        if mydigest==digest:
            iq=stanza.make_result_response()
            self.send(iq)
            self.peer_authenticated=True
            self.auth_method_used="digest"
            self.state_change("authorized",self.peer)
            self._post_auth()
        else:
            self.__logger.debug("Digest auth failed: %r != %r" % (digest,mydigest))
            iq=stanza.make_error_response("bad-request")
            e=iq.get_error()
            e.add_custom_condition('jabber:iq:auth:error',"user-unauthorized")
            self.send(iq)
def auth_finish(self, _unused):
"""Handle success of the legacy authentication."""
self.lock.acquire()
try:
self.__logger.debug("Authenticated")
self.authenticated=True
self.state_change("authorized",self.my_jid)
self._post_auth()
finally:
self.lock.release()
    def registration_error(self, stanza):
        """Handle in-band registration error: extract the error condition
        and raise it as a `RegistrationError`.

        [client only]

        :Parameters:
            - `stanza`: the error stanza received or `None` on timeout.
        :Types:
            - `stanza`: `pyxmpp.stanza.Stanza`"""
        self.lock.acquire()
        try:
            err=stanza.get_error()
            # Prefer the legacy jabber:iq:auth:error condition, fall back to
            # the standard stanza error condition.
            ae=err.xpath_eval("e:*",{"e":"jabber:iq:auth:error"})
            if ae:
                ae=ae[0].name
            else:
                ae=err.get_condition().name
            raise RegistrationError("Authentication error condition: %s" % (ae,))
        finally:
            self.lock.release()
    def registration_form_received(self, stanza):
        """Handle registration form received.

        [client only]

        Call self.registration_callback with the registration form received
        as the argument. The value returned by the callback should be a
        filled-in form, later passed to `submit_registration_form`.

        :Parameters:
            - `stanza`: the stanza received.
        :Types:
            - `stanza`: `pyxmpp.iq.Iq`"""
        self.lock.acquire()
        try:
            # Keep the Register payload so the filled-in form can be submitted.
            self.__register = Register(stanza.get_query())
            self.registration_callback(stanza, self.__register.get_form())
        finally:
            self.lock.release()
    def submit_registration_form(self, form):
        """Submit a registration form.

        [client only]

        :Parameters:
            - `form`: the filled-in form. When form is `None` or its type is
              "cancel" the registration is to be canceled.

        :Types:
            - `form`: `pyxmpp.jabber.dataforms.Form`"""
        self.lock.acquire()
        try:
            if form and form.type!="cancel":
                self.registration_form = form
                iq = Iq(stanza_type = "set")
                iq.set_content(self.__register.submit_form(form))
                self.set_response_handlers(iq, self.registration_success, self.registration_error)
                self.send(iq)
            else:
                # Canceled: drop the pending registration state.
                self.__register = None
        finally:
            self.lock.release()
    def registration_success(self, stanza):
        """Handle registration success.

        [client only]

        Clean up registration stuff, change state to "registered" and initialize
        authentication.

        :Parameters:
            - `stanza`: the stanza received.
        :Types:
            - `stanza`: `pyxmpp.iq.Iq`"""
        _unused = stanza
        self.lock.acquire()
        try:
            self.state_change("registered", self.registration_form)
            # Adopt the username/password the user registered with, so the
            # subsequent authentication uses the new credentials.
            if ('FORM_TYPE' in self.registration_form
                    and self.registration_form['FORM_TYPE'].value == 'jabber:iq:register'):
                if 'username' in self.registration_form:
                    self.my_jid = JID(self.registration_form['username'].value,
                            self.my_jid.domain, self.my_jid.resource)
                if 'password' in self.registration_form:
                    self.password = self.registration_form['password'].value
            self.registration_callback = None
            self._post_connect()
        finally:
            self.lock.release()
# vi: sts=4 et sw=4
| apache-2.0 |
gbaty/shiboken2 | tests/samplebinding/copy_test.py | 6 | 2212 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the Shiboken Python Bindings Generator project.
#
# Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
#
# Contact: PySide team <contact@pyside.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# version 2.1 as published by the Free Software Foundation. Please
# review the following information to ensure the GNU Lesser General
# Public License version 2.1 requirements will be met:
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
# #
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
'''Test cases for deep copy of objects'''
import copy
import unittest
try:
import cPickle as pickle
except ImportError:
import pickle
from sample import Point
class SimpleCopy(unittest.TestCase):
    '''Simple copy of objects'''

    def testCopy(self):
        '''copy.copy() must produce an equal but distinct Point.'''
        point = Point(0.1, 2.4)
        new_point = copy.copy(point)
        # assertTrue replaces the deprecated TestCase.assert_ alias.
        self.assertTrue(point is not new_point)
        self.assertEqual(point, new_point)
class DeepCopy(unittest.TestCase):
    '''Deep copy with shiboken objects'''

    def testDeepCopy(self):
        '''Deep copy of value types'''
        point = Point(3.1, 4.2)
        new_point = copy.deepcopy([point])[0]
        # assertTrue replaces the deprecated TestCase.assert_ alias.
        self.assertTrue(point is not new_point)
        self.assertEqual(point, new_point)
class PicklingTest(unittest.TestCase):
    '''Support pickling'''

    def testSimple(self):
        '''Simple pickling and unpickling'''
        point = Point(10.2, 43.5)
        data = pickle.dumps(point)
        new_point = pickle.loads(data)
        self.assertEqual(point, new_point)
        # assertTrue replaces the deprecated TestCase.assert_ alias.
        self.assertTrue(point is not new_point)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
akintoey/django | tests/gis_tests/utils.py | 327 | 1377 | from unittest import skip
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
def no_backend(test_func, backend):
    "Use this decorator to disable test on specified backend."
    engine = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1]
    if engine != backend:
        # Not the targeted backend: leave the test untouched.
        return test_func

    @skip("This test is skipped on '%s' backend" % backend)
    def inner():
        pass
    return inner
# Decorators to disable entire test functions for specific
# spatial backends.
def no_oracle(func):
    """Skip `func` when the default database backend is Oracle."""
    return no_backend(func, 'oracle')
# Shortcut booleans to omit only portions of tests.
_default_db = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1]
oracle = _default_db == 'oracle'
postgis = _default_db == 'postgis'
mysql = _default_db == 'mysql'
spatialite = _default_db == 'spatialite'

# MySQL spatial indices can't handle NULL geometries.
gisfield_may_be_null = not mysql

# Export the backend-specific SpatialRefSys model under a common name;
# None when the active backend has no spatial reference system table.
if oracle and 'gis' in settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE']:
    from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys as SpatialRefSys
elif postgis:
    from django.contrib.gis.db.backends.postgis.models import PostGISSpatialRefSys as SpatialRefSys
elif spatialite:
    from django.contrib.gis.db.backends.spatialite.models import SpatialiteSpatialRefSys as SpatialRefSys
else:
    SpatialRefSys = None
| bsd-3-clause |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/estimator/estimator_test.py | 9 | 58131 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import tempfile
import numpy as np
import six
from google.protobuf import text_format
from tensorflow.python.client import session
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.export import export_output
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import layers
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import saver
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.util import compat
# Fixed paths returned by the mocked tempfile.mkdtemp in the model-dir tests.
_TMP_DIR = '/tmp'
_ANOTHER_TMP_DIR = '/another_tmp'
def dummy_model_fn(features, labels, params):
  """Minimal model_fn stub: accepts the standard arguments, does nothing."""
  del features, labels, params  # explicitly unused
class EstimatorInheritanceConstraintTest(test.TestCase):
  """Tests that sub classes cannot override methods of Estimator."""

  def test_override_a_method(self):
    # A plain override of predict() must be rejected by the base __init__.
    class _Overriding(estimator.Estimator):

      def __init__(self):
        super(_Overriding, self).__init__(model_fn=dummy_model_fn)

      def predict(self, input_fn, predict_keys=None, hooks=None):
        pass

    with self.assertRaisesRegexp(
        ValueError, 'cannot override members of Estimator.*predict'):
      _Overriding()

  def test_override_a_method_with_tricks(self):
    # Disabling the guard method itself must not make the override legal.
    class _Sneaky(estimator.Estimator):

      def __init__(self):
        super(_Sneaky, self).__init__(model_fn=dummy_model_fn)

      def _assert_members_are_not_overridden(self):
        pass  # HAHA! I tricked you!

      def predict(self, input_fn, predict_keys=None, hooks=None):
        pass

    with self.assertRaisesRegexp(
        ValueError, 'cannot override members of Estimator.*predict'):
      _Sneaky()

  def test_extension_of_api_is_ok(self):
    # Adding brand-new methods to a subclass is allowed.
    class _Extended(estimator.Estimator):

      def __init__(self):
        super(_Extended, self).__init__(model_fn=dummy_model_fn)

      def predict_proba(self, input_fn, predict_keys=None, hooks=None):
        pass

    _Extended()
class EstimatorConstructorTest(test.TestCase):
  """Tests constructor argument validation and model_dir resolution."""
  def test_config_must_be_a_run_config(self):
    with self.assertRaisesRegexp(ValueError, 'an instance of RunConfig'):
      estimator.Estimator(model_fn=None, config='NotARunConfig')
  def test_model_fn_must_be_provided(self):
    with self.assertRaisesRegexp(ValueError, 'model_fn.* must be'):
      estimator.Estimator(model_fn=None)
  def test_property_accessors(self):
    """config/params/model_dir passed to the constructor round-trip."""
    def model_fn(features, labels, params):
      _, _, _ = features, labels, params
    class FakeConfig(run_config.RunConfig):
      pass
    params = {'hidden_layers': [3, 4]}
    est = estimator.Estimator(
        model_fn=model_fn, model_dir='bla', config=FakeConfig(), params=params)
    self.assertTrue(isinstance(est.config, FakeConfig))
    self.assertEqual(params, est.params)
    self.assertEqual('bla', est.model_dir)
  def test_default_config(self):
    def model_fn(features, labels):
      _, _ = features, labels
    est = estimator.Estimator(model_fn=model_fn)
    self.assertTrue(isinstance(est.config, run_config.RunConfig))
  def test_default_model_dir(self):
    """Without an explicit model_dir a temp directory is created."""
    def model_fn(features, labels):
      _, _ = features, labels
    with test.mock.patch.object(tempfile, 'mkdtemp', return_value=_TMP_DIR):
      est = estimator.Estimator(model_fn=model_fn)
      self.assertEqual(_TMP_DIR, est.config.model_dir)
      self.assertEqual(_TMP_DIR, est.model_dir)
  def test_model_dir_in_constructor(self):
    def model_fn(features, labels):
      _, _ = features, labels
    est = estimator.Estimator(model_fn=model_fn, model_dir=_TMP_DIR)
    self.assertEqual(_TMP_DIR, est.config.model_dir)
    self.assertEqual(_TMP_DIR, est.model_dir)
  def test_model_dir_in_run_config(self):
    """model_dir supplied only via RunConfig is honored."""
    class FakeConfig(run_config.RunConfig):
      @property
      def model_dir(self):
        return _TMP_DIR
    def model_fn(features, labels):
      _, _ = features, labels
    est = estimator.Estimator(model_fn=model_fn, config=FakeConfig())
    self.assertEqual(_TMP_DIR, est.config.model_dir)
    self.assertEqual(_TMP_DIR, est.model_dir)
  def test_same_model_dir_in_constructor_and_run_config(self):
    """Matching model_dir in both places is accepted."""
    class FakeConfig(run_config.RunConfig):
      @property
      def model_dir(self):
        return _TMP_DIR
    def model_fn(features, labels):
      _, _ = features, labels
    est = estimator.Estimator(
        model_fn=model_fn, config=FakeConfig(), model_dir=_TMP_DIR)
    self.assertEqual(_TMP_DIR, est.config.model_dir)
    self.assertEqual(_TMP_DIR, est.model_dir)
  def test_different_model_dir_in_constructor_and_run_config(self):
    """Conflicting model_dir values raise ValueError."""
    class FakeConfig(run_config.RunConfig):
      @property
      def model_dir(self):
        return _TMP_DIR
    def model_fn(features, labels):
      _, _ = features, labels
    with self.assertRaisesRegexp(
        ValueError,
        'model_dir are set both in constructor and RunConfig, but '
        'with different values'):
      estimator.Estimator(
          model_fn=model_fn, config=FakeConfig(), model_dir=_ANOTHER_TMP_DIR)
  def test_model_fn_args_must_include_features(self):
    def model_fn(x, labels):
      _, _ = x, labels
    with self.assertRaisesRegexp(ValueError, 'features'):
      estimator.Estimator(model_fn=model_fn)
  def test_model_fn_args_must_include_labels(self):
    def model_fn(features, y):
      _, _ = features, y
    with self.assertRaisesRegexp(ValueError, 'labels'):
      estimator.Estimator(model_fn=model_fn)
  def test_if_params_provided_then_model_fn_should_accept_it(self):
    def model_fn(features, labels):
      _, _ = features, labels
    estimator.Estimator(model_fn=model_fn)
    with self.assertRaisesRegexp(ValueError, 'params'):
      estimator.Estimator(model_fn=model_fn, params={'hidden_layers': 4})
  def test_not_known_model_fn_args(self):
    """Extra model_fn parameters without defaults are rejected."""
    def model_fn(features, labels, something):
      _, _, _ = features, labels, something
    with self.assertRaisesRegexp(ValueError, 'something'):
      estimator.Estimator(model_fn=model_fn)
  def test_not_known_model_fn_args_handled_by_lambda(self):
    """Wrapping in a lambda that binds the extra argument is acceptable."""
    def model_fn(features, labels, something):
      _, _, _ = features, labels, something
    new_model_fn = lambda features, labels: model_fn(  # pylint: disable=g-long-lambda
        features, labels, 'something')
    estimator.Estimator(model_fn=new_model_fn)
  def test_if_model_fn_is_a_member_function_of_a_class(self):
    """Bound methods are valid model_fns (the `self` arg is ignored)."""
    class ModelFnClass(object):
      def __init__(self):
        estimator.Estimator(model_fn=self.model_fn)
      def model_fn(self, features, labels, mode):
        _, _, _ = features, labels, mode
    ModelFnClass()
def dummy_input_fn():
  """Returns a constant (features, labels) pair for simple train/eval runs."""
  features = {'x': constant_op.constant([[1], [1]])}
  labels = constant_op.constant([[1], [1]])
  return features, labels
def model_fn_global_step_incrementer(features, labels, mode):
  """model_fn whose train op does nothing but increment the global step."""
  del features, labels  # unused
  step = training.get_global_step()
  return model_fn_lib.EstimatorSpec(
      mode,
      loss=constant_op.constant(1.),
      train_op=state_ops.assign_add(step, 1))
def _estimator_spec(
    expected_features, expected_labels, actual_features, actual_labels, mode):
  """Builds an EstimatorSpec whose ops first assert feature/label equality.

  Each feature key and the labels get an `assert_equal` op; the returned
  spec's tensors depend on those asserts, so running the spec verifies the
  inputs the model_fn received.
  """
  feature_asserts = [
      check_ops.assert_equal(
          expected_features[key], actual_features[key], name='assert_%s' % key)
      for key in expected_features
  ]
  label_assert = check_ops.assert_equal(
      expected_labels, actual_labels, name='assert_labels')
  with ops.control_dependencies(tuple(feature_asserts + [label_assert])):
    return model_fn_lib.EstimatorSpec(
        mode=mode,
        predictions=constant_op.constant(0.),
        loss=constant_op.constant(0.),
        train_op=constant_op.constant(0.))
def _make_input_fn(features, labels):
  """Wraps plain feature/label values into a constant-tensor input_fn."""
  def _input_fn():
    feature_tensors = {}
    for key, value in six.iteritems(features):
      feature_tensors[key] = constant_op.constant(value)
    return feature_tensors, constant_op.constant(labels)
  return _input_fn
class EstimatorTrainTest(test.TestCase):
  """Tests for `Estimator.train`: model_fn dispatch, checkpoints, hooks."""
  def test_minimal_model_fn_args(self):
    """A (features, labels)-only model_fn is accepted and invoked once."""
    expected_features = {'x': 42., 'y': 43.}
    expected_labels = 44.
    # TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
    # doesn't work with mock fns.
    model_fn_call_count = [0]  # list so the closure below can mutate it
    def _model_fn(features, labels):
      model_fn_call_count[0] += 1
      self.assertItemsEqual(expected_features.keys(), features.keys())
      return _estimator_spec(
          expected_features, expected_labels, features, labels,
          model_fn_lib.ModeKeys.TRAIN)
    with self.assertRaisesRegexp(ValueError, 'does not include params'):
      estimator.Estimator(model_fn=_model_fn, params={'a': 'b'})
    est = estimator.Estimator(model_fn=_model_fn, config=run_config.RunConfig())
    self.assertEqual(0, model_fn_call_count[0])
    est.train(
        input_fn=_make_input_fn(expected_features, expected_labels), steps=1)
    self.assertEqual(1, model_fn_call_count[0])
  def test_all_model_fn_args(self):
    """mode/params/config are passed through to a full-signature model_fn."""
    expected_features = {'x': 42., 'y': 43.}
    expected_labels = 44.
    expected_params = {'some_param': 'some_value'}
    expected_config = run_config.RunConfig()
    expected_config.i_am_test = True
    # TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
    # doesn't work with mock fns.
    model_fn_call_count = [0]
    # Note that args are all passed by keyword, so can be in any order.
    def _model_fn(mode, params, features, labels, config):
      model_fn_call_count[0] += 1
      self.assertItemsEqual(expected_features.keys(), features.keys())
      self.assertEqual(model_fn_lib.ModeKeys.TRAIN, mode)
      self.assertEqual(expected_params, params)
      self.assertTrue(config.i_am_test)
      return _estimator_spec(
          expected_features, expected_labels, features, labels, mode)
    est = estimator.Estimator(
        model_fn=_model_fn, params=expected_params, config=expected_config)
    self.assertEqual(0, model_fn_call_count[0])
    est.train(
        input_fn=_make_input_fn(expected_features, expected_labels), steps=1)
    self.assertEqual(1, model_fn_call_count[0])
  def test_partial_model_fn_args(self):
    """functools.partial-bound extra args do not break signature checks."""
    expected_features = {'x': 42., 'y': 43.}
    expected_labels = 44.
    expected_params = {'some_param': 'some_value'}
    expected_config = run_config.RunConfig()
    expected_config.i_am_test = True
    expected_foo = 45.
    expected_bar = 46.
    # TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
    # doesn't work with mock fns.
    model_fn_call_count = [0]
    def _model_fn(features, labels, foo, mode, params, config, bar):
      model_fn_call_count[0] += 1
      self.assertEqual(expected_foo, foo)
      self.assertEqual(expected_bar, bar)
      self.assertItemsEqual(expected_features.keys(), features.keys())
      self.assertEqual(model_fn_lib.ModeKeys.TRAIN, mode)
      self.assertEqual(expected_params, params)
      self.assertTrue(config.i_am_test)
      return _estimator_spec(
          expected_features, expected_labels, features, labels, mode)
    partial_model_fn = functools.partial(
        _model_fn, foo=expected_foo, bar=expected_bar)
    est = estimator.Estimator(
        model_fn=partial_model_fn, params=expected_params,
        config=expected_config)
    self.assertEqual(0, model_fn_call_count[0])
    est.train(
        input_fn=_make_input_fn(expected_features, expected_labels), steps=1)
    self.assertEqual(1, model_fn_call_count[0])
  def test_model_fn_must_return_estimator_spec(self):
    def model_fn(features, labels):
      _, _ = features, labels
      return 'NotGoodNotGood'
    est = estimator.Estimator(model_fn=model_fn)
    with self.assertRaisesRegexp(ValueError, 'EstimatorSpec'):
      est.train(dummy_input_fn, steps=1)
  def test_run_train_op_and_saves_at_the_end(self):
    est = estimator.Estimator(model_fn=model_fn_global_step_incrementer)
    est.train(dummy_input_fn, steps=5)
    self.assertEqual(
        5, estimator._load_global_step_from_checkpoint_dir(est.model_dir))
  def test_steps_and_saves_reloads(self):
    """Consecutive train calls resume from the saved checkpoint."""
    est = estimator.Estimator(model_fn=model_fn_global_step_incrementer)
    est.train(dummy_input_fn, steps=5)
    self.assertEqual(
        5, estimator._load_global_step_from_checkpoint_dir(est.model_dir))
    est.train(dummy_input_fn, steps=5)
    self.assertEqual(
        10, estimator._load_global_step_from_checkpoint_dir(est.model_dir))
  def test_max_step(self):
    """A second train call with the same max_steps is a no-op."""
    est = estimator.Estimator(model_fn=model_fn_global_step_incrementer)
    est.train(dummy_input_fn, max_steps=5)
    self.assertEqual(
        5, estimator._load_global_step_from_checkpoint_dir(est.model_dir))
    est.train(dummy_input_fn, max_steps=5)
    self.assertEqual(
        5, estimator._load_global_step_from_checkpoint_dir(est.model_dir))
  def test_checkpoint_contains_relative_paths(self):
    tmpdir = tempfile.mkdtemp()
    est = estimator.Estimator(
        model_dir=tmpdir,
        model_fn=model_fn_global_step_incrementer)
    est.train(dummy_input_fn, steps=5)
    checkpoint_file_content = file_io.read_file_to_string(
        os.path.join(tmpdir, 'checkpoint'))
    ckpt = checkpoint_state_pb2.CheckpointState()
    text_format.Merge(checkpoint_file_content, ckpt)
    self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
    self.assertAllEqual(
        ['model.ckpt-1', 'model.ckpt-5'], ckpt.all_model_checkpoint_paths)
  def test_train_save_copy_reload(self):
    """A moved model_dir (relative checkpoint paths) can resume training."""
    tmpdir = tempfile.mkdtemp()
    model_dir1 = os.path.join(tmpdir, 'model_dir1')
    est1 = estimator.Estimator(
        model_dir=model_dir1,
        model_fn=model_fn_global_step_incrementer)
    est1.train(dummy_input_fn, steps=5)
    # We have to clear the cache before we can rename the directory,
    # otherwise open file handles will prevent the delete on Windows.
    writer_cache.FileWriterCache.clear()
    model_dir2 = os.path.join(tmpdir, 'model_dir2')
    os.renames(model_dir1, model_dir2)
    est2 = estimator.Estimator(
        model_dir=model_dir2,
        model_fn=model_fn_global_step_incrementer)
    self.assertEqual(
        5, estimator._load_global_step_from_checkpoint_dir(est2.model_dir))
    est2.train(dummy_input_fn, steps=5)
    self.assertEqual(
        10, estimator._load_global_step_from_checkpoint_dir(est2.model_dir))
  def test_steps0_raises_error(self):
    est = estimator.Estimator(
        model_fn=_model_fn_with_eval_metric_ops)
    with self.assertRaisesRegexp(ValueError, 'Must specify steps > 0'):
      est.train(dummy_input_fn, steps=0)
  def test_steps_negative_raises_error(self):
    est = estimator.Estimator(
        model_fn=_model_fn_with_eval_metric_ops)
    with self.assertRaisesRegexp(ValueError, 'Must specify steps > 0'):
      est.train(dummy_input_fn, steps=-1)
  def test_max_steps0_raises_error(self):
    est = estimator.Estimator(
        model_fn=_model_fn_with_eval_metric_ops)
    with self.assertRaisesRegexp(ValueError, 'Must specify max_steps > 0'):
      est.train(dummy_input_fn, max_steps=0)
  def test_max_steps_negative_raises_error(self):
    est = estimator.Estimator(
        model_fn=_model_fn_with_eval_metric_ops)
    with self.assertRaisesRegexp(ValueError, 'Must specify max_steps > 0'):
      est.train(dummy_input_fn, max_steps=-1)
  def test_scaffold_is_used(self):
    """A Scaffold returned by the model_fn is used for initialization."""
    self.is_init_fn_called = False
    def _init_fn(scaffold, sess):
      _, _ = scaffold, sess
      self.is_init_fn_called = True
    def _model_fn_scaffold(features, labels, mode):
      _, _ = features, labels
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          loss=constant_op.constant(0.),
          train_op=constant_op.constant(0.),
          scaffold=training.Scaffold(init_fn=_init_fn))
    est = estimator.Estimator(model_fn=_model_fn_scaffold)
    est.train(dummy_input_fn, steps=1)
    self.assertTrue(self.is_init_fn_called)
  def test_hooks_should_be_session_run_hook(self):
    est = estimator.Estimator(model_fn=model_fn_global_step_incrementer)
    with self.assertRaisesRegexp(TypeError, 'must be a SessionRunHook'):
      est.train(dummy_input_fn, steps=1, hooks=['NotAHook'])
  def test_training_hooks_are_used(self):
    """Both training_chief_hooks and training_hooks run on the chief."""
    chief_hook = test.mock.MagicMock(
        wraps=training.SessionRunHook(), spec=training.SessionRunHook)
    hook = test.mock.MagicMock(
        wraps=training.SessionRunHook(), spec=training.SessionRunHook)
    def _model_fn_hooks(features, labels, mode):
      _, _ = features, labels
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          loss=constant_op.constant(0.),
          train_op=constant_op.constant(0.),
          training_chief_hooks=[chief_hook],
          training_hooks=[hook])
    est = estimator.Estimator(model_fn=_model_fn_hooks)
    self.assertFalse(chief_hook.begin.called)
    self.assertFalse(hook.begin.called)
    est.train(dummy_input_fn, steps=1)
    self.assertTrue(chief_hook.begin.called)
    self.assertTrue(hook.begin.called)
  def test_chief_only_hook_should_not_be_called_on_non_chief(self):
    """training_chief_hooks are skipped when RunConfig.is_chief is False."""
    chief_hook = test.mock.MagicMock(
        wraps=training.SessionRunHook(), spec=training.SessionRunHook)
    hook = test.mock.MagicMock(
        wraps=training.SessionRunHook(), spec=training.SessionRunHook)
    def _model_fn_hooks(features, labels, mode):
      _, _ = features, labels
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          loss=constant_op.constant(0.),
          train_op=constant_op.constant(0.),
          training_chief_hooks=[chief_hook],
          training_hooks=[hook])
    class NonChiefRunConfig(run_config.RunConfig):
      @property
      def is_chief(self):  # pylint: disable=g-wrong-blank-lines
        return False
    # Mocking the SessionManager.wait_for_session, so that worker doesn't wait
    # for chief.
    def get_initialized_session(*args, **kwargs):
      # Session doesn't take 'max_wait_secs' argument.
      kwargs.pop('max_wait_secs', None)
      scaffold = training.Scaffold().finalize()
      sess = session.Session(*args, **kwargs)
      sess.run(scaffold.init_op)
      return sess
    with test.mock.patch.object(
        training.SessionManager,
        'wait_for_session',
        side_effect=get_initialized_session):
      est = estimator.Estimator(
          model_fn=_model_fn_hooks, config=NonChiefRunConfig())
      self.assertFalse(chief_hook.begin.called)
      self.assertFalse(hook.begin.called)
      est.train(dummy_input_fn, steps=1)
      self.assertFalse(chief_hook.begin.called)
      self.assertTrue(hook.begin.called)
  def test_features_labels_mode(self):
    """The model_fn receives the input_fn's outputs and TRAIN mode."""
    given_features = {'test-features': [[1], [1]]}
    given_labels = {'test-labels': [[1], [1]]}
    def _input_fn():
      return given_features, given_labels
    def _model_fn(features, labels, mode):
      self.features, self.labels, self.mode = features, labels, mode
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          loss=constant_op.constant(0.),
          train_op=constant_op.constant(0.),
          predictions=constant_op.constant([[0.]]))
    est = estimator.Estimator(model_fn=_model_fn)
    est.train(_input_fn, steps=1)
    self.assertEqual(given_features, self.features)
    self.assertEqual(given_labels, self.labels)
    self.assertEqual(model_fn_lib.ModeKeys.TRAIN, self.mode)
  def test_graph_initialization_global_step_and_random_seed(self):
    """The train graph has a global step and the configured random seed."""
    expected_random_seed = run_config.RunConfig().tf_random_seed
    def _model_fn(features, labels, mode):
      _, _, _ = features, labels, mode
      self.assertIsNotNone(training.get_global_step())
      self.assertEqual(expected_random_seed, ops.get_default_graph().seed)
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          loss=constant_op.constant(0.),
          train_op=constant_op.constant(0.),
          predictions=constant_op.constant([[0.]]))
    est = estimator.Estimator(model_fn=_model_fn)
    est.train(dummy_input_fn, steps=1)
def _model_fn_with_eval_metric_ops(features, labels, mode, params):
  """model_fn producing one configurable eval metric.

  Args:
    features: Unused.
    labels: Unused.
    mode: A `ModeKeys` value, forwarded to the returned spec.
    params: Dict optionally containing 'metric_name' (str, default
      'metric') and 'metric_value' (float, default 2.) for the single
      eval metric.

  Returns:
    `EstimatorSpec` with loss 1., a global-step-incrementing train op, and
    one eval metric whose value tensor runs only after the update op.
  """
  _, _ = features, labels
  # Use explicit `is None` checks instead of `... or default` so that falsy
  # but valid values (e.g. metric_value=0. or metric_name='') are honored.
  metric_name = params.get('metric_name')
  if metric_name is None:
    metric_name = 'metric'
  metric_value = params.get('metric_value')
  if metric_value is None:
    metric_value = 2.
  global_step = training.get_global_step()
  loss = constant_op.constant(1.)
  # The metric value tensor depends on the update op, so reading the metric
  # forces the update to run first.
  metric_update_op = loss.op
  metric_tensor = control_flow_ops.with_dependencies(
      [metric_update_op], constant_op.constant(metric_value))
  return model_fn_lib.EstimatorSpec(
      mode,
      loss=loss,
      predictions={'predictions': constant_op.constant(1.)},
      train_op=state_ops.assign_add(global_step, 1),
      eval_metric_ops={metric_name: (metric_tensor, metric_update_op)})
class _StepCounterHook(session_run_hook.SessionRunHook):
  """Hook that counts how many times `before_run` is invoked."""
  def __init__(self):
    self._call_count = 0
  def before_run(self, run_context):
    del run_context  # unused
    self._call_count += 1
  @property
  def steps(self):
    """Number of `before_run` calls observed so far."""
    return self._call_count
class EstimatorEvaluateTest(test.TestCase):
  """Tests for `Estimator.evaluate`: metrics, checkpoints, hooks."""
  def test_model_fn_must_return_estimator_spec(self):
    def _model_fn(features, labels, mode):
      _, _ = features, labels
      # Return a valid spec for TRAIN but garbage for EVAL.
      if mode == model_fn_lib.ModeKeys.EVAL:
        return 'NotGoodNotGood'
      return model_fn_lib.EstimatorSpec(
          mode,
          loss=constant_op.constant(1.),
          train_op=control_flow_ops.no_op())
    est = estimator.Estimator(model_fn=_model_fn)
    est.train(dummy_input_fn, steps=1)
    with self.assertRaisesRegexp(
        ValueError, 'model_fn should return an EstimatorSpec'):
      est.evaluate(dummy_input_fn, steps=1)
  def test_no_trained_model(self):
    est = estimator.Estimator(model_fn=_model_fn_with_eval_metric_ops)
    with self.assertRaisesRegexp(
        ValueError, 'Could not find trained model in model_dir'):
      est.evaluate(dummy_input_fn, steps=1)
  def test_scores(self):
    """eval_metric_ops values appear in the returned scores dict."""
    est = estimator.Estimator(
        model_fn=_model_fn_with_eval_metric_ops,
        params={
            'metric_name': 'metric',
            'metric_value': 2.})
    est.train(dummy_input_fn, steps=5)
    scores = est.evaluate(dummy_input_fn, steps=1)
    self.assertIn('metric', scores)
    self.assertAlmostEqual(2., scores['metric'])
  def test_tuple_metrics(self):
    """Nested tuple metric values keep their structure in the result."""
    def _model_fn(features, labels, mode):
      del features  # unused
      del labels
      return model_fn_lib.EstimatorSpec(
          mode,
          train_op=control_flow_ops.no_op(),
          loss=constant_op.constant(1.),
          eval_metric_ops={
              'nested_metric': (
                  ((constant_op.constant(2.), constant_op.constant(1)),
                   constant_op.constant(3., dtype=dtypes.float64)),
                  control_flow_ops.no_op())})
    est = estimator.Estimator(model_fn=_model_fn)
    est.train(dummy_input_fn, steps=1)
    evaluation = est.evaluate(dummy_input_fn, steps=1)
    ((two_float, one_integer), three_double) = evaluation['nested_metric']
    self.assertAlmostEqual(2., two_float)
    self.assertEqual(1, one_integer)
    self.assertAlmostEqual(3., three_double)
  def test_steps0_raises_error(self):
    est = estimator.Estimator(
        model_fn=_model_fn_with_eval_metric_ops)
    est.train(dummy_input_fn, steps=5)
    with self.assertRaisesRegexp(ValueError, 'Must specify steps > 0'):
      est.evaluate(dummy_input_fn, steps=0)
  def test_steps_negative_raises_error(self):
    est = estimator.Estimator(
        model_fn=_model_fn_with_eval_metric_ops)
    est.train(dummy_input_fn, steps=5)
    with self.assertRaisesRegexp(ValueError, 'Must specify steps > 0'):
      est.evaluate(dummy_input_fn, steps=-1)
  def test_global_step_metric_raises_error(self):
    """User metrics may not shadow the reserved `global_step` key."""
    est = estimator.Estimator(
        model_fn=_model_fn_with_eval_metric_ops,
        params={
            'metric_name': 'global_step',
            'metric_value': 2.})
    est.train(dummy_input_fn, steps=5)
    with self.assertRaisesRegexp(
        ValueError, 'Metric with name `global_step` is not allowed'):
      est.evaluate(dummy_input_fn, steps=1)
  def test_global_step_is_reported(self):
    est = estimator.Estimator(
        model_fn=_model_fn_with_eval_metric_ops,
        params={'metric_name': 'metric',
                'metric_value': 2.})
    est.train(dummy_input_fn, steps=5)
    scores = est.evaluate(dummy_input_fn, steps=1)
    self.assertIn('global_step', scores)
    self.assertEqual(5, scores['global_step'])
  def test_loss_metric_is_reported(self):
    """The reported loss is averaged over all evaluation steps."""
    def _model_fn_with_incremental_loss(features, labels, mode):
      _, _ = features, labels
      local_weight = variables.Variable(
          0., name='local_weight', collections=[ops.GraphKeys.LOCAL_VARIABLES])
      # Loss will be 2, 4, 6, ...
      loss = 2 * state_ops.assign_add(local_weight, 1.)
      return model_fn_lib.EstimatorSpec(
          mode,
          loss=loss,
          train_op=state_ops.assign_add(training.get_global_step(), 1))
    est = estimator.Estimator(model_fn=_model_fn_with_incremental_loss)
    est.train(dummy_input_fn, steps=1)
    scores = est.evaluate(dummy_input_fn, steps=5)
    self.assertIn(model_fn_lib.MetricKeys.LOSS, scores)
    # Average loss will be (2 + 4 + 6 + 8 + 10)/5=6
    self.assertAlmostEqual(6., scores[model_fn_lib.MetricKeys.LOSS])
  def test_hooks_should_be_session_run_hook(self):
    est = estimator.Estimator(model_fn=model_fn_global_step_incrementer)
    est.train(dummy_input_fn, steps=1)
    with self.assertRaisesRegexp(TypeError, 'must be a SessionRunHook'):
      est.evaluate(dummy_input_fn, steps=5, hooks=['NotAHook'])
  def test_hooks_are_used(self):
    step_counter_hook = _StepCounterHook()
    est = estimator.Estimator(model_fn=_model_fn_with_eval_metric_ops)
    est.train(dummy_input_fn, steps=1)
    est.evaluate(dummy_input_fn, steps=5, hooks=[step_counter_hook])
    self.assertEqual(5, step_counter_hook.steps)
  def test_evaluate_from_checkpoint(self):
    """checkpoint_path lets one estimator evaluate another's checkpoint."""
    params = {
        'metric_name': 'metric',
        'metric_value': 2.}
    est1 = estimator.Estimator(
        model_fn=_model_fn_with_eval_metric_ops,
        params=params)
    est1.train(dummy_input_fn, steps=5)
    est2 = estimator.Estimator(
        model_fn=_model_fn_with_eval_metric_ops,
        params=params)
    scores = est2.evaluate(
        dummy_input_fn,
        steps=1,
        checkpoint_path=saver.latest_checkpoint(est1.model_dir))
    self.assertEqual(5, scores['global_step'])
  def test_scaffold_is_used(self):
    """The model_fn's Scaffold saver restores the checkpoint for eval."""
    def _model_fn_scaffold(features, labels, mode):
      _, _ = features, labels
      variables.Variable(1., name='weight')
      real_saver = saver.Saver()
      self.mock_saver = test.mock.Mock(
          wraps=real_saver, saver_def=real_saver.saver_def)
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          predictions=constant_op.constant([[1.]]),
          loss=constant_op.constant(0.),
          train_op=constant_op.constant(0.),
          scaffold=training.Scaffold(saver=self.mock_saver))
    est = estimator.Estimator(model_fn=_model_fn_scaffold)
    est.train(dummy_input_fn, steps=1)
    est.evaluate(dummy_input_fn, steps=1)
    self.assertTrue(self.mock_saver.restore.called)
  def test_features_labels_mode(self):
    """The model_fn receives the input_fn's outputs and EVAL mode."""
    given_features = {'test-features': [[1], [1]]}
    given_labels = {'test-labels': [[1], [1]]}
    def _input_fn():
      return given_features, given_labels
    def _model_fn(features, labels, mode):
      self.features, self.labels, self.mode = features, labels, mode
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          loss=constant_op.constant(0.),
          train_op=constant_op.constant(0.),
          predictions=constant_op.constant([[0.]]))
    est = estimator.Estimator(model_fn=_model_fn)
    est.train(_input_fn, steps=1)
    est.evaluate(_input_fn, steps=1)
    self.assertEqual(given_features, self.features)
    self.assertEqual(given_labels, self.labels)
    self.assertEqual(model_fn_lib.ModeKeys.EVAL, self.mode)
  def test_graph_initialization_global_step_and_random_seed(self):
    """The eval graph has a global step and the configured random seed."""
    expected_random_seed = run_config.RunConfig().tf_random_seed
    def _model_fn(features, labels, mode):
      _, _, _ = features, labels, mode
      self.assertIsNotNone(training.get_global_step())
      self.assertEqual(expected_random_seed, ops.get_default_graph().seed)
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          loss=constant_op.constant(0.),
          train_op=constant_op.constant(0.),
          predictions=constant_op.constant([[0.]]))
    est = estimator.Estimator(model_fn=_model_fn)
    est.train(dummy_input_fn, steps=1)
    est.evaluate(dummy_input_fn, steps=1)
class EstimatorPredictTest(test.TestCase):
def test_no_trained_model_in_model_dir(self):
est = estimator.Estimator(model_fn=model_fn_global_step_incrementer)
with self.assertRaisesRegexp(ValueError,
'Could not find trained model in model_dir'):
next(est.predict(dummy_input_fn))
def test_no_trained_model_invalid_checkpoint_path(self):
est = estimator.Estimator(model_fn=model_fn_global_step_incrementer)
with self.assertRaises(ValueError):
next(
est.predict(
dummy_input_fn,
checkpoint_path=saver.latest_checkpoint('fakedir')))
def test_tensor_predictions(self):
def _model_fn(features, labels, mode):
_, _ = features, labels
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
predictions=constant_op.constant([[10.]]))
est = estimator.Estimator(model_fn=_model_fn)
est.train(dummy_input_fn, steps=1)
self.assertEqual(10., next(est.predict(dummy_input_fn)))
def test_warn_if_no_queue_runner(self):
def _model_fn(features, labels, mode):
_, _ = features, labels
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
predictions=constant_op.constant([[10.]]))
est = estimator.Estimator(model_fn=_model_fn)
est.train(dummy_input_fn, steps=1)
with test.mock.patch.object(logging, 'warning') as mock_log:
next(est.predict(dummy_input_fn))
self.assertRegexpMatches(
str(mock_log.call_args),
'Input graph does not contain a QueueRunner.')
def test_input_fn_can_return_just_features(self):
def _model_fn(features, labels, mode):
_, _ = features, labels
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
predictions=constant_op.constant([[10.]]))
est = estimator.Estimator(model_fn=_model_fn)
est.train(dummy_input_fn, steps=1)
def _only_features():
return {'x': constant_op.constant([[0.]])}
self.assertEqual([10.], next(est.predict(_only_features)))
def test_batch_size_mismatch(self):
def _model_fn(features, labels, mode):
_, _ = features, labels
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
predictions={
'y1': constant_op.constant([[10.]]),
'y2': constant_op.constant([[12.], [13]])
})
est = estimator.Estimator(model_fn=_model_fn)
est.train(dummy_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError,
'Batch length of predictions should be same'):
next(est.predict(dummy_input_fn))
def test_predict_keys_defined_for_tensor(self):
def _model_fn(features, labels, mode):
_, _ = features, labels
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
predictions=constant_op.constant([[10.]]))
est = estimator.Estimator(model_fn=_model_fn)
est.train(dummy_input_fn, steps=1)
with self.assertRaisesRegexp(
ValueError,
'predict_keys argument is not valid in case of non-dict predictions'):
next(est.predict(dummy_input_fn, predict_keys=['y']))
def test_predict_keys_does_not_exists(self):
def _model_fn(features, labels, mode):
_, _ = features, labels
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
predictions={
'y1': constant_op.constant([[10.]]),
'y2': constant_op.constant([[12.]])
})
est = estimator.Estimator(model_fn=_model_fn)
est.train(dummy_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError,
'Expected to run at least one output from'):
next(est.predict(dummy_input_fn, predict_keys=['y3']))
def test_return_given_predict_keys(self):
def _model_fn(features, labels, mode):
_, _ = features, labels
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
predictions={
'y1': constant_op.constant([[10.]]),
'y2': constant_op.constant([[12.]])
})
est = estimator.Estimator(model_fn=_model_fn)
est.train(dummy_input_fn, steps=1)
results = next(est.predict(dummy_input_fn, predict_keys=['y1']))
self.assertIn('y1', results)
self.assertNotIn('y2', results)
def test_yield_rows_of_tensor(self):
def _model_fn(features, labels, mode):
_, _ = features, labels
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
predictions=constant_op.constant([[10.], [12.]]))
est = estimator.Estimator(model_fn=_model_fn)
est.train(dummy_input_fn, steps=1)
results = est.predict(dummy_input_fn)
self.assertEqual([10.], next(results))
self.assertEqual([12.], next(results))
def test_yield_rows_of_dict(self):
def _model_fn(features, labels, mode):
_, _ = features, labels
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
predictions={
'y1': constant_op.constant([[10.], [12]]),
'y2': constant_op.constant([[0.], [2.]])
})
est = estimator.Estimator(model_fn=_model_fn)
est.train(dummy_input_fn, steps=1)
results = est.predict(dummy_input_fn)
self.assertDictEqual({'y1': [10.], 'y2': [0.]}, next(results))
self.assertDictEqual({'y1': [12.], 'y2': [2.]}, next(results))
def test_hooks_should_be_session_run_hook(self):
est = estimator.Estimator(model_fn=model_fn_global_step_incrementer)
est.train(dummy_input_fn, steps=1)
with self.assertRaisesRegexp(TypeError, 'must be a SessionRunHook'):
next(est.predict(dummy_input_fn, hooks=['NotAHook']))
def test_hooks_are_used(self):
def _model_fn(features, labels, mode):
_, _ = features, labels
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
predictions=constant_op.constant([[10.], [12.]]))
step_counter_hook = _StepCounterHook()
est = estimator.Estimator(model_fn=_model_fn)
est.train(dummy_input_fn, steps=1)
results = est.predict(dummy_input_fn, hooks=[step_counter_hook])
self.assertEqual(0, step_counter_hook.steps) # not called yet
next(results)
self.assertEqual(1, step_counter_hook.steps) # first call
next(results)
self.assertEqual(1, step_counter_hook.steps) # it's in same batch
next(results)
self.assertEqual(2, step_counter_hook.steps) # next batch
def test_predict_from_old_model_dir(self):
def _model_fn(features, labels, mode):
_, _ = features, labels
v = variables.Variable([[16.]], name='weight')
prediction = v * 2
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
predictions=prediction)
est1 = estimator.Estimator(model_fn=_model_fn)
est1.train(dummy_input_fn, steps=1)
est2 = estimator.Estimator(model_fn=_model_fn, model_dir=est1.model_dir)
self.assertEqual([32.], next(est2.predict(dummy_input_fn)))
def test_predict_from_checkpoint_path(self):
def _model_fn(features, labels, mode):
_, _ = features, labels
v = variables.Variable([[16.]], name='weight')
prediction = v * 2
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
predictions=prediction)
est1 = estimator.Estimator(model_fn=_model_fn)
est1.train(dummy_input_fn, steps=1)
est2 = estimator.Estimator(model_fn=_model_fn, model_dir=est1.model_dir)
self.assertEqual(
[32.],
next(
est2.predict(
dummy_input_fn,
checkpoint_path=saver.latest_checkpoint(est1.model_dir))))
def test_scaffold_is_used(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables.Variable(1., name='weight')
real_saver = saver.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn_lib.EstimatorSpec(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=training.Scaffold(saver=self.mock_saver))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.train(dummy_input_fn, steps=1)
next(est.predict(dummy_input_fn))
self.assertTrue(self.mock_saver.restore.called)
def test_features_labels_mode(self):
given_features = {'test-features': [[1], [1]]}
given_labels = {'test-labels': [[1], [1]]}
def _input_fn():
return given_features, given_labels
def _model_fn(features, labels, mode):
self.features, self.labels, self.mode = features, labels, mode
return model_fn_lib.EstimatorSpec(
mode=mode,
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
predictions=constant_op.constant([[0.]]))
est = estimator.Estimator(model_fn=_model_fn)
est.train(_input_fn, steps=1)
next(est.predict(_input_fn))
self.assertEqual(given_features, self.features)
self.assertIsNone(self.labels)
self.assertEqual(model_fn_lib.ModeKeys.PREDICT, self.mode)
  def test_graph_initialization_global_step_and_random_seed(self):
    """predict() builds a graph with a global step and the config's seed."""
    expected_random_seed = run_config.RunConfig().tf_random_seed
    def _model_fn(features, labels, mode):
      _, _, _ = features, labels, mode
      # These assertions execute while predict() is constructing the graph.
      self.assertIsNotNone(training.get_global_step())
      self.assertEqual(expected_random_seed, ops.get_default_graph().seed)
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          loss=constant_op.constant(0.),
          train_op=constant_op.constant(0.),
          predictions=constant_op.constant([[0.]]))
    est = estimator.Estimator(model_fn=_model_fn)
    est.train(dummy_input_fn, steps=1)
    next(est.predict(dummy_input_fn))
def _model_fn_for_export_tests(features, labels, mode):
  """Toy model_fn with a classification export head, shared by export tests."""
  del features, labels  # unused
  variables.Variable(1., name='weight')
  scores = constant_op.constant([3.])
  classes = constant_op.constant(['wumpus'])
  classification_head = export_output.ClassificationOutput(scores, classes)
  return model_fn_lib.EstimatorSpec(
      mode,
      predictions=constant_op.constant(10.),
      loss=constant_op.constant(1.),
      train_op=constant_op.constant(2.),
      export_outputs={'test': classification_head})
def _model_fn_with_saveables_for_export_tests(features, labels, mode):
  """Toy model_fn whose state lives in a custom saveable (CheckpointedOp)."""
  del features, labels  # unused
  table = saver_test_utils.CheckpointedOp(name='v2')
  insert_op = table.insert('k1', 30.0)
  lookup = table.lookup('k1', 0.0)
  return model_fn_lib.EstimatorSpec(
      mode,
      predictions=lookup,
      loss=constant_op.constant(1.),
      train_op=insert_op,
      export_outputs={
          'test': export_output.PredictOutput({'prediction': lookup})})
# Fixture payloads written to fake asset files by the export tests below.
_VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
_EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorExportTest(test.TestCase):
  """Tests for Estimator.export_savedmodel: SavedModel layout, assets,
  custom scaffolds, and the model_fn contract during export."""
  def test_export_savedmodel_proto_roundtrip(self):
    tmpdir = tempfile.mkdtemp()
    est = estimator.Estimator(model_fn=_model_fn_for_export_tests)
    est.train(input_fn=dummy_input_fn, steps=1)
    feature_spec = {'x': parsing_ops.VarLenFeature(dtype=dtypes.int64),
                    'y': parsing_ops.VarLenFeature(dtype=dtypes.int64)}
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    # Perform the export.
    export_dir_base = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('export'))
    export_dir = est.export_savedmodel(
        export_dir_base, serving_input_receiver_fn)
    # Check that all the files are in the right places.
    self.assertTrue(gfile.Exists(export_dir_base))
    self.assertTrue(gfile.Exists(export_dir))
    self.assertTrue(gfile.Exists(os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes('saved_model.pb'))))
    self.assertTrue(gfile.Exists(os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes('variables'))))
    self.assertTrue(gfile.Exists(os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes('variables/variables.index'))))
    self.assertTrue(gfile.Exists(os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes('variables/variables.data-00000-of-00001'))))
    # Restore, to validate that the export was well-formed.
    with ops.Graph().as_default() as graph:
      with session.Session(graph=graph) as sess:
        loader.load(sess, [tag_constants.SERVING], export_dir)
        graph_ops = [x.name for x in graph.get_operations()]
        self.assertTrue('input_example_tensor' in graph_ops)
        self.assertTrue('ParseExample/ParseExample' in graph_ops)
        self.assertTrue('weight' in graph_ops)
    # Clean up.
    gfile.DeleteRecursively(tmpdir)
  def test_export_savedmodel_with_saveables_proto_roundtrip(self):
    tmpdir = tempfile.mkdtemp()
    est = estimator.Estimator(
        model_fn=_model_fn_with_saveables_for_export_tests)
    est.train(input_fn=dummy_input_fn, steps=1)
    feature_spec = {'x': parsing_ops.VarLenFeature(dtype=dtypes.int64),
                    'y': parsing_ops.VarLenFeature(dtype=dtypes.int64)}
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    # Perform the export.
    export_dir_base = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('export'))
    export_dir = est.export_savedmodel(
        export_dir_base, serving_input_receiver_fn)
    # Check that all the files are in the right places.
    self.assertTrue(gfile.Exists(export_dir_base))
    self.assertTrue(gfile.Exists(export_dir))
    self.assertTrue(gfile.Exists(os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes('saved_model.pb'))))
    self.assertTrue(gfile.Exists(os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes('variables'))))
    self.assertTrue(gfile.Exists(os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes('variables/variables.index'))))
    self.assertTrue(gfile.Exists(os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes('variables/variables.data-00000-of-00001'))))
    # Restore, to validate that the export was well-formed.
    with ops.Graph().as_default() as graph:
      with session.Session(graph=graph) as sess:
        loader.load(sess, [tag_constants.SERVING], export_dir)
        graph_ops = [x.name for x in graph.get_operations()]
        self.assertTrue('input_example_tensor' in graph_ops)
        self.assertTrue('ParseExample/ParseExample' in graph_ops)
        # The custom saveable must be restored via the saver subgraph.
        self.assertTrue('save/LookupTableImport' in graph_ops)
    # Clean up.
    gfile.DeleteRecursively(tmpdir)
  def test_export_savedmodel_assets(self):
    tmpdir = tempfile.mkdtemp()
    est = estimator.Estimator(model_fn=_model_fn_for_export_tests)
    est.train(input_fn=dummy_input_fn, steps=1)
    feature_spec = {'x': parsing_ops.VarLenFeature(dtype=dtypes.int64),
                    'y': parsing_ops.VarLenFeature(dtype=dtypes.int64)}
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    # Create a fake asset.
    vocab_file_name = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
    vocab_file = gfile.GFile(vocab_file_name, mode='w')
    vocab_file.write(_VOCAB_FILE_CONTENT)
    vocab_file.close()
    # hack in an op that uses the asset, in order to test asset export.
    # this is not actually valid, of course.
    def serving_input_receiver_with_asset_fn():
      features, receiver_tensor = serving_input_receiver_fn()
      filename = ops.convert_to_tensor(vocab_file_name,
                                       dtypes.string,
                                       name='asset_filepath')
      ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)
      features['bogus_filename'] = filename
      return export.ServingInputReceiver(features, receiver_tensor)
    # Perform the export.
    export_dir_base = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('export'))
    export_dir = est.export_savedmodel(
        export_dir_base, serving_input_receiver_with_asset_fn)
    # Check that the asset files are in the right places.
    expected_vocab_file_name = os.path.join(
        compat.as_bytes(export_dir), compat.as_bytes('assets/my_vocab_file'))
    self.assertTrue(gfile.Exists(os.path.join(
        compat.as_bytes(export_dir), compat.as_bytes('assets'))))
    self.assertTrue(gfile.Exists(expected_vocab_file_name))
    self.assertEqual(
        compat.as_bytes(_VOCAB_FILE_CONTENT),
        compat.as_bytes(gfile.GFile(expected_vocab_file_name).read()))
    # Restore, to validate that the export was well-formed.
    with ops.Graph().as_default() as graph:
      with session.Session(graph=graph) as sess:
        loader.load(sess, [tag_constants.SERVING], export_dir)
        assets = [
            x.eval()
            for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
        ]
        self.assertItemsEqual([vocab_file_name], assets)
        graph_ops = [x.name for x in graph.get_operations()]
        self.assertTrue('input_example_tensor' in graph_ops)
        self.assertTrue('ParseExample/ParseExample' in graph_ops)
        self.assertTrue('asset_filepath' in graph_ops)
        self.assertTrue('weight' in graph_ops)
    # cleanup
    gfile.DeleteRecursively(tmpdir)
  def test_export_savedmodel_extra_assets(self):
    tmpdir = tempfile.mkdtemp()
    est = estimator.Estimator(model_fn=_model_fn_for_export_tests)
    est.train(input_fn=dummy_input_fn, steps=1)
    feature_spec = {'x': parsing_ops.VarLenFeature(dtype=dtypes.int64),
                    'y': parsing_ops.VarLenFeature(dtype=dtypes.int64)}
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    # Create a fake asset.
    extra_file_name = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
    extra_file = gfile.GFile(extra_file_name, mode='w')
    extra_file.write(_EXTRA_FILE_CONTENT)
    extra_file.close()
    # Perform the export.
    assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
    export_dir_base = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('export'))
    export_dir = est.export_savedmodel(export_dir_base,
                                       serving_input_receiver_fn,
                                       assets_extra=assets_extra)
    # Check that the asset files are in the right places.
    expected_extra_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
    self.assertTrue(gfile.Exists(os.path.join(
        compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
    self.assertTrue(gfile.Exists(expected_extra_path))
    self.assertEqual(
        compat.as_bytes(_EXTRA_FILE_CONTENT),
        compat.as_bytes(gfile.GFile(expected_extra_path).read()))
    # cleanup
    gfile.DeleteRecursively(tmpdir)
  def test_scaffold_is_used_for_saver(self):
    tmpdir = tempfile.mkdtemp()
    def _model_fn_scaffold(features, labels, mode):
      _, _ = features, labels
      variables.Variable(1., name='weight')
      real_saver = saver.Saver()
      # Wrap a real Saver so restore() works while calls are recorded.
      self.mock_saver = test.mock.Mock(
          wraps=real_saver, saver_def=real_saver.saver_def)
      scores = constant_op.constant([3.])
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          predictions=constant_op.constant([[1.]]),
          loss=constant_op.constant(0.),
          train_op=constant_op.constant(0.),
          scaffold=training.Scaffold(saver=self.mock_saver),
          export_outputs={'test': export_output.ClassificationOutput(scores)})
    est = estimator.Estimator(model_fn=_model_fn_scaffold)
    est.train(dummy_input_fn, steps=1)
    feature_spec = {'x': parsing_ops.VarLenFeature(dtype=dtypes.int64),
                    'y': parsing_ops.VarLenFeature(dtype=dtypes.int64)}
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    # Perform the export.
    export_dir_base = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('export'))
    est.export_savedmodel(export_dir_base, serving_input_receiver_fn)
    self.assertTrue(self.mock_saver.restore.called)
  def test_scaffold_is_used_for_local_init(self):
    tmpdir = tempfile.mkdtemp()
    def _model_fn_scaffold(features, labels, mode):
      _, _ = features, labels
      my_int = variables.Variable(1, name='my_int',
                                  collections=[ops.GraphKeys.LOCAL_VARIABLES])
      scores = constant_op.constant([3.])
      with ops.control_dependencies([
          variables.local_variables_initializer(),
          lookup_ops.tables_initializer()
      ]):
        assign_op = state_ops.assign(my_int, 12345)
      # local_init_op must be an Operation, not a Tensor.
      custom_local_init_op = control_flow_ops.group(assign_op)
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          predictions=constant_op.constant([[1.]]),
          loss=constant_op.constant(0.),
          train_op=constant_op.constant(0.),
          scaffold=training.Scaffold(local_init_op=custom_local_init_op),
          export_outputs={'test': export_output.ClassificationOutput(scores)})
    est = estimator.Estimator(model_fn=_model_fn_scaffold)
    est.train(dummy_input_fn, steps=1)
    feature_spec = {'x': parsing_ops.VarLenFeature(dtype=dtypes.int64),
                    'y': parsing_ops.VarLenFeature(dtype=dtypes.int64)}
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    # Perform the export.
    export_dir_base = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('export'))
    export_dir = est.export_savedmodel(export_dir_base,
                                       serving_input_receiver_fn)
    # Restore, to validate that the custom local_init_op runs.
    with ops.Graph().as_default() as graph:
      with session.Session(graph=graph) as sess:
        loader.load(sess, [tag_constants.SERVING], export_dir)
        my_int = graph.get_tensor_by_name('my_int:0')
        my_int_value = sess.run(my_int)
        self.assertEqual(12345, my_int_value)
  def test_features_labels_mode(self):
    given_features = {'test-features': constant_op.constant([[1], [1]])}
    def serving_input_receiver_fn():
      return export.ServingInputReceiver(
          given_features, array_ops.placeholder(dtype=dtypes.string))
    def _model_fn(features, labels, mode):
      # Record what the framework passed in so the test can inspect it.
      self.features, self.labels, self.mode = features, labels, mode
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          loss=constant_op.constant(0.),
          train_op=constant_op.constant(0.),
          predictions=constant_op.constant([[0.]]),
          export_outputs={
              'test': export_output.ClassificationOutput(
                  constant_op.constant([[0.]]))
          })
    est = estimator.Estimator(model_fn=_model_fn)
    est.train(dummy_input_fn, steps=1)
    est.export_savedmodel(tempfile.mkdtemp(), serving_input_receiver_fn)
    self.assertEqual(given_features, self.features)
    self.assertIsNone(self.labels)
    self.assertEqual(model_fn_lib.ModeKeys.PREDICT, self.mode)
  def test_graph_initialization_global_step_and_random_seed(self):
    expected_random_seed = run_config.RunConfig().tf_random_seed
    def _model_fn(features, labels, mode):
      _, _, _ = features, labels, mode
      # These assertions execute while the export graph is being built.
      self.assertIsNotNone(training.get_global_step())
      self.assertEqual(expected_random_seed, ops.get_default_graph().seed)
      return model_fn_lib.EstimatorSpec(
          mode=mode,
          loss=constant_op.constant(0.),
          train_op=constant_op.constant(0.),
          predictions=constant_op.constant([[0.]]),
          export_outputs={
              'test': export_output.ClassificationOutput(
                  constant_op.constant([[0.]]))
          })
    def serving_input_receiver_fn():
      return export.ServingInputReceiver(
          {'test-features': constant_op.constant([[1], [1]])},
          array_ops.placeholder(dtype=dtypes.string))
    est = estimator.Estimator(model_fn=_model_fn)
    est.train(dummy_input_fn, steps=1)
    est.export_savedmodel(tempfile.mkdtemp(), serving_input_receiver_fn)
class EstimatorIntegrationTest(test.TestCase):
  """End-to-end train -> evaluate -> predict -> export on a linear model."""
  def test_complete_flow_with_a_simple_linear_model(self):
    def _model_fn(features, labels, mode):
      predictions = layers.dense(
          features['x'], 1, kernel_initializer=init_ops.zeros_initializer())
      export_outputs = {
          'predictions': export_output.RegressionOutput(predictions)
      }
      if mode == model_fn_lib.ModeKeys.PREDICT:
        return model_fn_lib.EstimatorSpec(
            mode, predictions=predictions, export_outputs=export_outputs)
      loss = losses.mean_squared_error(labels, predictions)
      train_op = training.GradientDescentOptimizer(learning_rate=0.5).minimize(
          loss, training.get_global_step())
      eval_metric_ops = {
          'absolute_error': metrics_lib.mean_absolute_error(
              labels, predictions)
      }
      return model_fn_lib.EstimatorSpec(
          mode,
          predictions=predictions,
          loss=loss,
          train_op=train_op,
          eval_metric_ops=eval_metric_ops,
          export_outputs=export_outputs)
    est = estimator.Estimator(model_fn=_model_fn)
    data = np.linspace(0., 1., 100, dtype=np.float32).reshape(-1, 1)
    # TRAIN
    # learn y = x
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data}, y=data, batch_size=50, num_epochs=None, shuffle=True)
    est.train(train_input_fn, steps=200)
    # EVALUATE
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data}, y=data, batch_size=50, num_epochs=1, shuffle=True)
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores['global_step'])
    self.assertGreater(0.1, scores['absolute_error'])
    # PREDICT
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data}, y=None, batch_size=10, num_epochs=1, shuffle=False)
    predictions = list(est.predict(predict_input_fn))
    self.assertAllClose(data, predictions, atol=0.01)
    # EXPORT
    feature_spec = {'x': parsing_ops.FixedLenFeature([1], dtypes.float32)}
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                       serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
| mit |
davidzchen/tensorflow | tensorflow/lite/tutorials/mnist_tflite.py | 18 | 2871 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to evaluate accuracy of TFLite flatbuffer model on mnist dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf # pylint: disable=g-bad-import-order
from tensorflow.lite.tutorials import dataset
flags = tf.app.flags
flags.DEFINE_string('data_dir', '/tmp/data_dir',
                    'Directory where data is stored.')
flags.DEFINE_string('model_file', '',
                    'The path to the TFLite flatbuffer model file.')
# NOTE(review): rebinding `flags` to the parsed FLAGS object shadows the
# flags module bound above; the rest of this file reads flags.data_dir
# and flags.model_file from it.
flags = flags.FLAGS
def test_image_generator():
  """Yield (image, label) pairs from the MNIST test set, one at a time."""
  with tf.compat.v1.Session() as sess:
    test_data = dataset.test(flags.data_dir)
    next_element = tf.compat.v1.data.make_one_shot_iterator(
        test_data).get_next()
    try:
      while True:
        yield sess.run(next_element)
    except tf.errors.OutOfRangeError:
      # Dataset exhausted -- end the generator.
      pass
def run_eval(interpreter, input_image):
  """Performs evaluation for input image over specified model.

  Args:
    interpreter: TFLite interpreter initialized with model to execute.
    input_image: Image input to the model.

  Returns:
    output: output tensor of model being executed.
  """
  input_details = interpreter.get_input_details()
  output_details = interpreter.get_output_details()

  # Feed the image, reshaped to the model's (single) input tensor shape.
  interpreter.set_tensor(
      input_details[0]['index'],
      np.reshape(input_image, input_details[0]['shape']))
  interpreter.invoke()

  # Read back the (single) output tensor and drop size-1 dimensions.
  return np.squeeze(interpreter.get_tensor(output_details[0]['index']))
def main(_):
  """Evaluate the TFLite model on the MNIST test set and print accuracy."""
  interpreter = tf.lite.Interpreter(model_path=flags.model_file)
  interpreter.allocate_tensors()
  num_correct = 0
  total = 0
  for image, label in test_image_generator():
    prediction = run_eval(interpreter, image)
    total += 1
    if prediction == label:
      num_correct += 1
    # Report running accuracy every 500 images.
    if total % 500 == 0:
      print('Accuracy after %i images: %f' %
            (total, float(num_correct) / float(total)))
if __name__ == '__main__':
  # Show INFO-level logs while evaluating.
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.compat.v1.app.run(main)
| apache-2.0 |
shtouff/django | django/core/cache/backends/memcached.py | 25 | 6889 | "Memcached cache backend"
import pickle
import time
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils import six
from django.utils.encoding import force_str
from django.utils.functional import cached_property
class BaseMemcachedCache(BaseCache):
    """Common behavior for memcached-based cache backends.

    Subclasses supply the concrete client ``library`` (python-memcached or
    pylibmc) and the exception that library raises for a missing key.
    """

    def __init__(self, server, params, library, value_not_found_exception):
        super(BaseMemcachedCache, self).__init__(params)
        if isinstance(server, six.string_types):
            self._servers = server.split(';')
        else:
            self._servers = server

        # The exception type to catch from the underlying library for a key
        # that was not found. This is a ValueError for python-memcache,
        # pylibmc.NotFound for pylibmc, and cmemcache will return None without
        # raising an exception.
        self.LibraryValueNotFoundException = value_not_found_exception

        self._lib = library
        self._options = params.get('OPTIONS')

    @property
    def _cache(self):
        """
        Implements transparent thread-safe access to a memcached client.
        """
        if getattr(self, '_client', None) is None:
            self._client = self._lib.Client(self._servers)
        return self._client

    def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
        """
        Memcached deals with long (> 30 days) timeouts in a special
        way. Call this function to obtain a safe value for your timeout.
        """
        if timeout == DEFAULT_TIMEOUT:
            timeout = self.default_timeout

        if timeout is None:
            # Using 0 in memcache sets a non-expiring timeout.
            return 0
        elif int(timeout) == 0:
            # Other cache backends treat 0 as set-and-expire. To achieve this
            # in memcache backends, a negative timeout must be passed.
            timeout = -1

        if timeout > 2592000:  # 60*60*24*30, 30 days
            # See http://code.google.com/p/memcached/wiki/FAQ
            # "You can set expire times up to 30 days in the future. After that
            # memcached interprets it as a date, and will expire the item after
            # said date. This is a simple (but obscure) mechanic."
            #
            # This means that we have to switch to absolute timestamps.
            timeout += int(time.time())
        return int(timeout)

    def make_key(self, key, version=None):
        # Python 2 memcache requires the key to be a byte string.
        return force_str(super(BaseMemcachedCache, self).make_key(key, version))

    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_key(key, version=version)
        return self._cache.add(key, value, self.get_backend_timeout(timeout))

    def get(self, key, default=None, version=None):
        key = self.make_key(key, version=version)
        val = self._cache.get(key)
        if val is None:
            return default
        return val

    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_key(key, version=version)
        if not self._cache.set(key, value, self.get_backend_timeout(timeout)):
            # make sure the key doesn't keep its old value in case of failure
            # to set (memcached's 1MB limit)
            self._cache.delete(key)

    def delete(self, key, version=None):
        key = self.make_key(key, version=version)
        self._cache.delete(key)

    def get_many(self, keys, version=None):
        # Query with the versioned/prefixed keys, then map the hits back to
        # the caller's original keys.
        new_keys = [self.make_key(key, version=version) for key in keys]
        ret = self._cache.get_multi(new_keys)
        if ret:
            reverse_map = dict(zip(new_keys, keys))
            ret = {reverse_map[k]: v for k, v in ret.items()}
        return ret

    def close(self, **kwargs):
        self._cache.disconnect_all()

    def incr(self, key, delta=1, version=None):
        key = self.make_key(key, version=version)
        # memcached doesn't support a negative delta
        if delta < 0:
            return self._cache.decr(key, -delta)
        try:
            val = self._cache.incr(key, delta)

        # python-memcache responds to incr on non-existent keys by
        # raising a ValueError, pylibmc by raising a pylibmc.NotFound
        # and Cmemcache returns None. In all cases,
        # we should raise a ValueError though.
        except self.LibraryValueNotFoundException:
            val = None
        if val is None:
            raise ValueError("Key '%s' not found" % key)
        return val

    def decr(self, key, delta=1, version=None):
        key = self.make_key(key, version=version)
        # memcached doesn't support a negative delta
        if delta < 0:
            return self._cache.incr(key, -delta)
        try:
            val = self._cache.decr(key, delta)

        # python-memcache responds to decr on non-existent keys the same
        # way as incr above; normalize all libraries to ValueError.
        except self.LibraryValueNotFoundException:
            val = None
        if val is None:
            raise ValueError("Key '%s' not found" % key)
        return val

    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        safe_data = {
            self.make_key(key, version=version): value
            for key, value in data.items()
        }
        self._cache.set_multi(safe_data, self.get_backend_timeout(timeout))

    def delete_many(self, keys, version=None):
        # A comprehension instead of assigning a lambda (PEP 8 / E731).
        self._cache.delete_multi(
            [self.make_key(key, version=version) for key in keys])

    def clear(self):
        self._cache.flush_all()
class MemcachedCache(BaseMemcachedCache):
    "An implementation of a cache binding using python-memcached"

    def __init__(self, server, params):
        import memcache
        super(MemcachedCache, self).__init__(
            server, params, library=memcache,
            value_not_found_exception=ValueError)

    @property
    def _cache(self):
        # Lazily create a client that pickles with the highest protocol.
        client = getattr(self, '_client', None)
        if client is None:
            client = self._lib.Client(self._servers,
                                      pickleProtocol=pickle.HIGHEST_PROTOCOL)
            self._client = client
        return client
class PyLibMCCache(BaseMemcachedCache):
    "An implementation of a cache binding using pylibmc"

    def __init__(self, server, params):
        import pylibmc
        super(PyLibMCCache, self).__init__(
            server, params, library=pylibmc,
            value_not_found_exception=pylibmc.NotFound)

    @cached_property
    def _cache(self):
        # cached_property ensures the client is built once per instance.
        client = self._lib.Client(self._servers)
        if self._options:
            client.behaviors = self._options
        return client
| bsd-3-clause |
andyh616/mne-python | mne/gui/_fiducials_gui.py | 14 | 16327 | """Mayavi/traits GUI for setting MRI fiducials"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
from glob import glob
import os
from ..externals.six.moves import map
# allow import without traits
try:
    from mayavi.core.ui.mayavi_scene import MayaviScene
    from mayavi.tools.mlab_scene_model import MlabSceneModel
    import numpy as np
    from pyface.api import confirm, FileDialog, OK, YES
    from traits.api import (HasTraits, HasPrivateTraits, on_trait_change,
                            cached_property, DelegatesTo, Event, Instance,
                            Property, Array, Bool, Button, Enum)
    from traitsui.api import HGroup, Item, VGroup, View
    from traitsui.menu import NoButtons
    from tvtk.pyface.scene_editor import SceneEditor
except Exception:  # was a bare ``except:``; don't swallow SystemExit etc.
    from ..utils import trait_wraith
    HasTraits = HasPrivateTraits = object
    cached_property = on_trait_change = MayaviScene = MlabSceneModel = \
        Array = Bool = Button = DelegatesTo = Enum = Event = Instance = \
        Property = View = Item = HGroup = VGroup = SceneEditor = \
        NoButtons = trait_wraith
from ..coreg import fid_fname, fid_fname_general, head_bem_fname
from ..io import write_fiducials
from ..io.constants import FIFF
from ..utils import get_subjects_dir, logger
from ._file_traits import (BemSource, fid_wildcard, FiducialsSource,
MRISubjectSource, SubjectSelectorPanel)
from ._viewer import (defaults, HeadViewController, PointObject, SurfaceObject,
headview_borders)
class MRIHeadWithFiducialsModel(HasPrivateTraits):
    """Represent an MRI head shape with fiducials

    Attributes
    ----------
    points : array (n_points, 3)
        MRI head surface points.
    tris : array (n_tris, 3)
        Triangles based on points.
    lpa : array (1, 3)
        Left peri-auricular point coordinates.
    nasion : array (1, 3)
        Nasion coordinates.
    rpa : array (1, 3)
        Right peri-auricular point coordinates.
    """
    subject_source = Instance(MRISubjectSource, ())
    bem = Instance(BemSource, ())
    fid = Instance(FiducialsSource, ())
    fid_file = DelegatesTo('fid', 'file')
    fid_fname = DelegatesTo('fid', 'fname')
    fid_points = DelegatesTo('fid', 'points')
    subjects_dir = DelegatesTo('subject_source')
    subject = DelegatesTo('subject_source')
    subject_has_bem = DelegatesTo('subject_source')
    points = DelegatesTo('bem')
    norms = DelegatesTo('bem')
    tris = DelegatesTo('bem')
    lpa = Array(float, (1, 3))
    nasion = Array(float, (1, 3))
    rpa = Array(float, (1, 3))
    reset = Event(desc="Reset fiducials to the file.")
    # info
    can_save = Property(depends_on=['file', 'can_save_as'])
    can_save_as = Property(depends_on=['lpa', 'nasion', 'rpa'])
    can_reset = Property(depends_on=['file', 'fid.points', 'lpa', 'nasion',
                                     'rpa'])
    fid_ok = Property(depends_on=['lpa', 'nasion', 'rpa'], desc="All points "
                      "are set")
    default_fid_fname = Property(depends_on=['subjects_dir', 'subject'],
                                 desc="the default file name for the "
                                 "fiducials fif file")
    # switch for the GUI (has no effect in the model)
    lock_fiducials = Bool(False, desc="Used by GIU, has no effect in the "
                          "model.")
    @on_trait_change('fid_points')
    def reset_fiducials(self):
        """Copy the three points loaded from the fiducials file into the
        editable lpa/nasion/rpa traits (rows 0, 1, 2 respectively)."""
        if self.fid_points is not None:
            self.lpa = self.fid_points[0:1]
            self.nasion = self.fid_points[1:2]
            self.rpa = self.fid_points[2:3]
    def save(self, fname=None):
        """Save the current fiducials to a file

        Parameters
        ----------
        fname : str
            Destination file path. If None, will use the current fid filename
            if available, or else use the default pattern.
        """
        if fname is None:
            fname = self.fid_file
        if not fname:
            fname = self.default_fid_fname
        # kind=1 is a cardinal point; ident 1/2/3 = LPA/nasion/RPA.
        dig = [{'kind': 1, 'ident': 1, 'r': np.array(self.lpa[0])},
               {'kind': 1, 'ident': 2, 'r': np.array(self.nasion[0])},
               {'kind': 1, 'ident': 3, 'r': np.array(self.rpa[0])}]
        write_fiducials(fname, dig, FIFF.FIFFV_COORD_MRI)
        self.fid_file = fname
    @cached_property
    def _get_can_reset(self):
        # True when any edited point differs from the file's points.
        if not self.fid_file:
            return False
        elif np.any(self.lpa != self.fid.points[0:1]):
            return True
        elif np.any(self.nasion != self.fid.points[1:2]):
            return True
        elif np.any(self.rpa != self.fid.points[2:3]):
            return True
        return False
    @cached_property
    def _get_can_save_as(self):
        # Saving makes sense only when the three points are distinct.
        can = not (np.all(self.nasion == self.lpa) or
                   np.all(self.nasion == self.rpa) or
                   np.all(self.lpa == self.rpa))
        return can
    @cached_property
    def _get_can_save(self):
        # Need either an explicit target file or a subject to derive one.
        if not self.can_save_as:
            return False
        elif self.fid_file:
            return True
        elif self.subjects_dir and self.subject:
            return True
        else:
            return False
    @cached_property
    def _get_default_fid_fname(self):
        fname = fid_fname.format(subjects_dir=self.subjects_dir,
                                 subject=self.subject)
        return fname
    @cached_property
    def _get_fid_ok(self):
        # A point is "set" when it is not all zeros.
        return all(np.any(pt) for pt in (self.nasion, self.lpa, self.rpa))
    def _reset_fired(self):
        self.reset_fiducials()
    # if subject changed because of a change of subjects_dir this was not
    # triggered
    @on_trait_change('subjects_dir,subject')
    def _subject_changed(self):
        """Reload the BEM head surface and fiducials for the new subject."""
        subject = self.subject
        subjects_dir = self.subjects_dir
        if not subjects_dir or not subject:
            return
        # update bem head
        path = head_bem_fname.format(subjects_dir=subjects_dir,
                                     subject=subject)
        self.bem.file = path
        # find fiducials file
        path = fid_fname.format(subjects_dir=subjects_dir, subject=subject)
        if os.path.exists(path):
            self.fid_file = path
            self.lock_fiducials = True
        else:
            # fall back to any "<head>-fiducials" style file for the subject
            path = fid_fname_general.format(subjects_dir=subjects_dir,
                                            subject=subject, head='*')
            fnames = glob(path)
            if fnames:
                path = fnames[0]
                self.fid.file = path
                self.lock_fiducials = True
            else:
                self.fid.reset_traits(['file'])
                self.lock_fiducials = False
        # does not seem to happen by itself ... so hard code it:
        self.reset_fiducials()
class FiducialsPanel(HasPrivateTraits):
    """Set fiducials on an MRI surface.

    The panel lets the user pick LPA/nasion/RPA on the rendered head
    surface and save them through the attached model.
    """
    model = Instance(MRIHeadWithFiducialsModel)

    fid_file = DelegatesTo('model')
    fid_fname = DelegatesTo('model')
    lpa = DelegatesTo('model')
    nasion = DelegatesTo('model')
    rpa = DelegatesTo('model')
    can_save = DelegatesTo('model')
    can_save_as = DelegatesTo('model')
    can_reset = DelegatesTo('model')
    fid_ok = DelegatesTo('model')
    locked = DelegatesTo('model', 'lock_fiducials')

    # which fiducial the next surface pick will set
    set = Enum('LPA', 'Nasion', 'RPA')
    current_pos = Array(float, (1, 3))  # for editing

    save_as = Button(label='Save As...')
    save = Button(label='Save')
    reset_fid = Button(label="Reset to File")

    headview = Instance(HeadViewController)
    hsp_obj = Instance(SurfaceObject)

    picker = Instance(object)

    # the layout of the dialog created
    view = View(VGroup(Item('fid_file', label='Fiducials File'),
                       Item('fid_fname', show_label=False, style='readonly'),
                       Item('set', style='custom'),
                       Item('current_pos', label='Pos'),
                       HGroup(Item('save', enabled_when='can_save',
                                   tooltip="If a filename is currently "
                                   "specified, save to that file, otherwise "
                                   "save to the default file name"),
                              Item('save_as', enabled_when='can_save_as'),
                              Item('reset_fid', enabled_when='can_reset'),
                              show_labels=False),
                       enabled_when="locked==False"))

    def __init__(self, *args, **kwargs):
        super(FiducialsPanel, self).__init__(*args, **kwargs)
        # current_pos mirrors the fiducial currently selected in `set`
        # (LPA by default; _on_set_change re-targets the sync).
        self.sync_trait('lpa', self, 'current_pos', mutual=True)

    def _reset_fid_fired(self):
        self.model.reset = True

    def _save_fired(self):
        self.model.save()

    def _save_as_fired(self):
        """Ask the user for a destination file and save the fiducials."""
        if self.fid_file:
            default_path = self.fid_file
        else:
            default_path = self.model.default_fid_fname

        dlg = FileDialog(action="save as", wildcard=fid_wildcard,
                         default_path=default_path)
        dlg.open()
        if dlg.return_code != OK:
            return

        path = dlg.path
        if not path.endswith('.fif'):
            path = path + '.fif'
            if os.path.exists(path):
                answer = confirm(None, "The file %r already exists. Should it "
                                 "be replaced?", "Overwrite File?")
                if answer != YES:
                    return

        self.model.save(path)

    def _on_pick(self, picker):
        """Assign a picked surface position to the selected fiducial."""
        if self.locked:
            return

        self.picker = picker
        n_pos = len(picker.picked_positions)

        if n_pos == 0:
            logger.debug("GUI: picked empty location")
            return

        if picker.actor is self.hsp_obj.surf.actor.actor:
            idxs = []
            idx = None
            pt = [picker.pick_position]
        elif self.hsp_obj.surf.actor.actor in picker.actors:
            idxs = [i for i in range(n_pos) if picker.actors[i] is
                    self.hsp_obj.surf.actor.actor]
            idx = idxs[-1]
            pt = [picker.picked_positions[idx]]
        else:
            logger.debug("GUI: picked object other than MRI")
            # BUG FIX: without this return, ``pt`` is unbound below and
            # assigning it to a fiducial raises NameError.
            return

        def round_(x):
            return round(x, 3)

        poss = [map(round_, pos) for pos in picker.picked_positions]
        pos = map(round_, picker.pick_position)
        msg = ["Pick Event: %i picked_positions:" % n_pos]

        line = str(pos)
        if idx is None:
            line += " <-pick_position"
        msg.append(line)

        for i, pos in enumerate(poss):
            line = str(pos)
            if i == idx:
                line += " <- MRI mesh"
            elif i in idxs:
                line += " (<- also MRI mesh)"
            msg.append(line)
        logger.debug(os.linesep.join(msg))

        if self.set == 'Nasion':
            self.nasion = pt
        elif self.set == 'LPA':
            self.lpa = pt
        elif self.set == 'RPA':
            self.rpa = pt
        else:
            raise ValueError("set = %r" % self.set)

    @on_trait_change('set')
    def _on_set_change(self, obj, name, old, new):
        # Re-point the current_pos sync at the newly selected fiducial and
        # rotate the head view toward it.
        self.sync_trait(old.lower(), self, 'current_pos', mutual=True,
                        remove=True)
        self.sync_trait(new.lower(), self, 'current_pos', mutual=True)
        if new == 'Nasion':
            self.headview.front = True
        elif new == 'LPA':
            self.headview.left = True
        elif new == 'RPA':
            self.headview.right = True
# FiducialsPanel view that allows manipulating all coordinates numerically
# (exposes the lpa/nasion/rpa arrays directly instead of a single 'Pos'
# field; installed on the panel by FiducialsFrame._panel_default).
view2 = View(VGroup(Item('fid_file', label='Fiducials File'),
                    Item('fid_fname', show_label=False, style='readonly'),
                    Item('set', style='custom'), 'lpa', 'nasion', 'rpa',
                    HGroup(Item('save', enabled_when='can_save'),
                           Item('save_as', enabled_when='can_save_as'),
                           Item('reset_fid', enabled_when='can_reset'),
                           show_labels=False),
                    enabled_when="locked==False"))
class FiducialsFrame(HasTraits):
    """GUI frame for setting the fiducials on an MRI head surface.

    (The previous docstring, "GUI for interpolating between two KIT marker
    files", was a copy-paste error from another frame.)

    Parameters
    ----------
    subject : None | str
        Set the subject which is initially selected.
    subjects_dir : None | str
        Override the SUBJECTS_DIR environment variable.
    """
    # Model holding the MRI head shape and the fiducial points.
    model = Instance(MRIHeadWithFiducialsModel, ())

    scene = Instance(MlabSceneModel, ())
    headview = Instance(HeadViewController)

    # Sub-panels: subject selection and fiducial editing.
    spanel = Instance(SubjectSelectorPanel)
    panel = Instance(FiducialsPanel)

    # 3D scene objects (created lazily in _init_plot).
    mri_obj = Instance(SurfaceObject)
    point_scale = float(defaults['mri_fid_scale'])
    lpa_obj = Instance(PointObject)
    nasion_obj = Instance(PointObject)
    rpa_obj = Instance(PointObject)

    def _headview_default(self):
        return HeadViewController(scene=self.scene, system='RAS')

    def _panel_default(self):
        panel = FiducialsPanel(model=self.model, headview=self.headview)
        # Use the numeric-coordinates view rather than the default one.
        panel.trait_view('view', view2)
        return panel

    def _spanel_default(self):
        return SubjectSelectorPanel(model=self.model.subject_source)

    view = View(HGroup(Item('scene',
                            editor=SceneEditor(scene_class=MayaviScene),
                            dock='vertical'),
                       VGroup(headview_borders,
                              VGroup(Item('spanel', style='custom'),
                                     label="Subject", show_border=True,
                                     show_labels=False),
                              VGroup(Item('panel', style="custom"),
                                     label="Fiducials", show_border=True,
                                     show_labels=False),
                              show_labels=False),
                       show_labels=False),
                resizable=True,
                buttons=NoButtons)

    def __init__(self, subject=None, subjects_dir=None, **kwargs):
        super(FiducialsFrame, self).__init__(**kwargs)

        subjects_dir = get_subjects_dir(subjects_dir)
        if subjects_dir is not None:
            self.spanel.subjects_dir = subjects_dir

        # Pre-select the requested subject if it is available.
        if subject is not None:
            if subject in self.spanel.subjects:
                self.spanel.subject = subject

    @on_trait_change('scene.activated')
    def _init_plot(self):
        # Build all scene objects once the Mayavi scene is ready; rendering
        # is disabled while objects are added to avoid redundant redraws.
        self.scene.disable_render = True

        lpa_color = defaults['lpa_color']
        nasion_color = defaults['nasion_color']
        rpa_color = defaults['rpa_color']

        # bem
        color = defaults['mri_color']
        self.mri_obj = SurfaceObject(points=self.model.points, color=color,
                                     tri=self.model.tris, scene=self.scene)
        # Re-plot the surface whenever the model's triangulation changes.
        self.model.on_trait_change(self._on_mri_src_change, 'tris')
        self.panel.hsp_obj = self.mri_obj

        # fiducials
        self.lpa_obj = PointObject(scene=self.scene, color=lpa_color,
                                   point_scale=self.point_scale)
        self.panel.sync_trait('lpa', self.lpa_obj, 'points', mutual=False)
        self.sync_trait('point_scale', self.lpa_obj, mutual=False)

        self.nasion_obj = PointObject(scene=self.scene, color=nasion_color,
                                      point_scale=self.point_scale)
        self.panel.sync_trait('nasion', self.nasion_obj, 'points',
                              mutual=False)
        self.sync_trait('point_scale', self.nasion_obj, mutual=False)

        self.rpa_obj = PointObject(scene=self.scene, color=rpa_color,
                                   point_scale=self.point_scale)
        self.panel.sync_trait('rpa', self.rpa_obj, 'points', mutual=False)
        self.sync_trait('point_scale', self.rpa_obj, mutual=False)

        self.headview.left = True
        self.scene.disable_render = False

        # picker
        self.scene.mayavi_scene.on_mouse_pick(self.panel._on_pick, type='cell')

    def _on_mri_src_change(self):
        # Clear the surface when the model has no geometry yet.
        if (not np.any(self.model.points)) or (not np.any(self.model.tris)):
            self.mri_obj.clear()
            return

        self.mri_obj.points = self.model.points
        self.mri_obj.tri = self.model.tris
        self.mri_obj.plot()
| bsd-3-clause |
bollu/vispy | codegen/headerparser.py | 18 | 11567 | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
""" Code to parse a header file and create a list of constants,
functions (with arguments). This information can then be used to
autogenerate our OpenGL API.
"""
import os
import sys
def getwords(line):
    """Split *line* on whitespace (tabs count as spaces), dropping empties."""
    return [word for word in line.replace('\t', ' ').strip().split(' ') if word]
# Keep track of all constants in case they are "reused" (i.e. aliases);
# maps original constant name -> resolved value, filled by
# ConstantDefinition.parse_line and consulted by _set_value_from_string.
CONSTANTS = {}
class Parser:
    """ Class to parse header files. It can deal with gl2.h and webgl.idl,
    as well as some desktop OpenGL header files. It produces a list of
    ConstantDefinition objects and FunctionDefinition objects, which can
    be accessed via a dict.
    """

    def __init__(self, header_file, parse_now=True):

        # Get filenames for C and Py
        self._c_fname = c_fname = os.path.split(header_file)[1]

        # Get absolute filenames
        self._c_filename = header_file

        # Init intermediate results
        self._functionDefs = []
        self._constantDefs = []

        # Init output
        self._functions = {}
        self._constants = {}

        # We are aware of the line number
        self._linenr = 0

        # Some stats
        self.stat_types = set()

        if parse_now:
            self.parse()

    def __iadd__(self, definition):
        """ Register a definition, tagging it with its source location.
        """
        # Create comment
        definition.comment = 'line %i of %s' % (self._linenr, self._c_fname)
        # Add to lists
        if isinstance(definition, FunctionDefinition):
            self._functionDefs.append(definition)
        elif isinstance(definition, ConstantDefinition):
            self._constantDefs.append(definition)
        return self

    def _get_line(self):
        # Get a stripped line, keep track of line nr, skip empty lines.
        # Signals end-of-file by raising StopIteration.
        line = ''
        while not line:
            line = self._file.readline()
            if not line:
                raise StopIteration()
            line = line.strip()
            self._linenr += 1
        return line

    def _get_lines(self):
        # Iterate over stripped lines until EOF. _get_line() signals EOF
        # by raising StopIteration; under PEP 479 (Python 3.7+) letting
        # that propagate out of a generator raises RuntimeError, so we
        # catch it and end the generator explicitly (BUGFIX).
        while True:
            try:
                yield self._get_line()
            except StopIteration:
                return

    def parse(self):
        """ Parse the header file!
        """

        # Open file; ensure it gets closed again (BUGFIX: the handle used
        # to be leaked).
        self._file = open(self._c_filename, 'rt', encoding='utf-8')
        try:
            # Parse the file
            for line in self._get_lines():
                if line.startswith('#define'):
                    self += ConstantDefinition(line)
                elif line.startswith('const GLenum'):
                    self += ConstantDefinition(line)
                elif '(' in line:
                    # Function declarations may span multiple lines
                    while ')' not in line:
                        line += self._get_line()
                    if line.endswith(');'):
                        self += FunctionDefinition(line)
        finally:
            self._file.close()

        # Remove invalid defs
        self._functionDefs = [d for d in self._functionDefs if d.isvalid]
        self._constantDefs = [d for d in self._constantDefs if d.isvalid]

        # Collect multiple similar functions in groups
        self._functionDefs.sort(key=lambda x: x.glname)
        keyDef = None
        keyDefs = []
        for funcDef in [f for f in self._functionDefs]:
            # Check if we need a new keydef
            if funcDef.extrachars:
                # Create new keydef or use old one?
                if keyDef and keyDef.glname == funcDef.keyname:
                    pass  # Keep same keydef
                else:
                    keyDef = FunctionGroup(funcDef.line)  # New keydef
                    keyDef._set_name(funcDef.keyname)
                    keyDefs.append(keyDef)
                # Add to group
                keyDef.group.append(funcDef)

        # Process function groups: only keep a group if it actually
        # collects more than one function.
        for keyDef in keyDefs:
            if len(keyDef.group) > 1:
                self._functionDefs.append(keyDef)
                for d in keyDef.group:
                    self._functionDefs.remove(d)

        # Sort constants and functions
        self._functionDefs.sort(key=lambda x: x.glname)
        self._constantDefs.sort(key=lambda x: x.glname)

        # Get dicts
        for definition in self._functionDefs:
            self._functions[definition.shortname] = definition
        for definition in self._constantDefs:
            self._constants[definition.shortname] = definition

        # Get some stats
        for funcDef in self._functionDefs:
            for arg in funcDef.args:
                self.stat_types.add(arg.ctype)

        # Show stats (BUGFIX: stray '").' characters removed from the
        # format string)
        n1 = len([d for d in self._constantDefs])
        n2 = len([d for d in self._functionDefs])
        n3 = len([d for d in self._functionDefs if d.group])
        n4 = sum([len(d.group) for d in self._functionDefs if d.group])
        print('Found %i constants and %i unique functions (%i groups contain %i functions).' % (
              n1, n2, n3, n4))
        print('C-types found in args:', self.stat_types)

    @property
    def constant_names(self):
        """ Sorted list of constant names.
        """
        return [d.shortname for d in self._constantDefs]

    @property
    def function_names(self):
        """ Sorted list of function names.
        """
        return [d.shortname for d in self._functionDefs]

    @property
    def constants(self):
        """ Dict with all the constants.
        """
        return self._constants

    @property
    def functions(self):
        """ Dict with all the functions.
        """
        return self._functions

    def show_groups(self):
        """ Print each function group and its member GL names. """
        for d in self._functionDefs:
            if isinstance(d.group, list):
                print(d.keyname)
                for d2 in d.group:
                    print(' ', d2.glname)
class Definition:
    """Base class representing one parsed constant or function definition.

    Subclasses implement parse_line(); _set_name() derives the short
    (prefix-less) and canonical GL names from the original identifier.
    """

    def __init__(self, line):
        self.line = line
        self.isvalid = True
        self.comment = ''
        self.oname = ''       # original name as found in the header
        self.shortname = ''   # name without the GL_/gl prefix
        self.glname = ''      # canonical GL name (prefix re-attached)
        self.parse_line(line)

    def parse_line(self, line):
        """Hook for subclasses: parse the (possibly multi-line) input."""
        pass

    def _set_name(self, name):
        self.oname = name
        # Strip the GL prefix to obtain the short name.
        if name.startswith('GL_'):
            name = name[3:]
        elif name.startswith('gl'):
            name = name[2].lower() + name[3:]
        self.shortname = name
        # Re-attach the prefix: all-caps means a constant, otherwise a
        # function whose first letter gets capitalized after 'gl'.
        if name == name.upper():
            self.glname = 'GL_' + name
        else:
            self.glname = 'gl' + name[0].upper() + name[1:]
class ConstantDefinition(Definition):
    """A constant from a ``#define NAME value`` or
    ``const GLenum NAME = value;`` header line.
    """

    def parse_line(self, line):
        """ Set name and value attributes from the given line.
        """
        self.value = None
        # Strip a trailing /* ... */ comment, then drop the leading keyword.
        line = line.split('/*', 1)[0]
        args = getwords(line)[1:]
        self.isvalid = False
        if len(args) == 1:
            # A #define without a value: ignore.
            pass
        elif len(args) == 2:
            # "#define NAME value"
            name, val = args
            self.isvalid = bool(name)
            self._set_name(name)
            self._set_value_from_string(val)
        elif '=' in args:
            # "const GLenum NAME = value;"
            name, val = args[-3], args[-1]
            self.isvalid = bool(name)
            self._set_name(name)
            self._set_value_from_string(val)
        else:
            print('Dont know what to do with "%s"' % line)

        # For when this constant is reused to set another constant
        if self.value is not None:
            CONSTANTS[self.oname] = self.value

    def _set_value_from_string(self, val):
        """Parse *val* (hex, decimal, char literal, or alias) into a value."""
        val = val.strip(';')
        if val.startswith('0x'):
            # Hex literal; drop C integer suffixes like 'ul'.
            self.value = int(val[2:].rstrip('ul'), 16)
        elif val[0] in '0123456789':
            self.value = int(val)
        elif val.startswith("'"):
            self.value = val
        elif val in CONSTANTS:
            # Alias of a previously seen constant.
            self.value = CONSTANTS[val]
        else:
            # BUGFIX: this used to reference the undefined name 'line',
            # raising NameError instead of printing the warning.
            print('Warning: Dont know what to do with "%s"' % val)
class FunctionDefinition(Definition):
    # Type-suffix characters that may be stripped when grouping similar
    # functions (e.g. glUniform1f / glUniform1i -> glUniform).
    SKIPTYPECHARS = 'if'  # 'bsifd'
    ALLSKIPCHARS = SKIPTYPECHARS + 'v1234'

    def parse_line(self, line):
        """ Set cname, keyname, cargs attributes.
        The list of args always has one entry and the first entry is always
        the output (can be void).
        """
        # Parse components: "<prefix...> <name> ( <args> ) ;"
        beforeBrace, args = line.split('(', 1)
        betweenBraces, _ = args.split(')', 1)
        outs = getwords(beforeBrace)
        prefix, name = outs[:-1], outs[-1]

        # Store name
        self._set_name(name)

        # Possibly, this function belongs to a collection of similar functions,
        # which we are going to replace with one function in Python.
        # keyname is the GL name with any type/arity suffix removed.
        self.keyname = self.glname.rstrip('v').rstrip(self.SKIPTYPECHARS).rstrip('1234')
        # extrachars is the stripped suffix (or None if it contains
        # characters other than the allowed skip chars).
        self.extrachars = self.matchKeyName(self.keyname)
        # If this is a list, this instance represents the group
        # If this is True, this instance is in a group (but not the
        # representative)
        self.group = None

        # Create list of Argument instances
        self.cargs = [arg.strip() for arg in betweenBraces.split(',')]
        self.args = []
        # Set output arg (return type, taken from the declaration prefix)
        self.args.append(Argument(' '.join(prefix), False))
        # Parse input arguments,
        for arg in self.cargs:
            if arg and arg != 'void':
                self.args.append(Argument(arg))

    def matchKeyName(self, keyname):
        # Return the suffix of glname beyond keyname if it consists only
        # of skip characters; otherwise return None (implicitly).
        if self.glname.startswith(keyname):
            extrachars = self.glname[len(keyname):]
            if all([(c in self.ALLSKIPCHARS) for c in extrachars]):
                return extrachars
class FunctionGroup(FunctionDefinition):
    # Represents a family of similar GL functions (e.g. glUniform1f/2f/...)
    # collapsed under one key name; member definitions live in self.group.
    def parse_line(self, line):
        FunctionDefinition.parse_line(self, line)
        self.group = []
class Argument:
    """One argument of a C function signature, parsed from its source text.

    Exposes the bare name, the C type (with pointer stars re-attached),
    the pointer depth, and whether it is an input argument.
    """

    def __init__(self, argAsString, cinput=True):
        # Split into whitespace-separated words, dropping empties.
        words = [w for w in argAsString.split(' ') if w]
        if len(words) > 1:
            name, ctype = words[-1], words[-2]
            # e.g. glShaderSource has "const GLchar* const* string"
            if 'const' in ctype:
                ctype = words[-3]
        else:
            # Bare type with no name (e.g. a return type like "GLenum").
            name, ctype = 'unknown_name', words[0]
        # Store stuff
        self.orig = tuple(words)
        self.name = name.lstrip('*')
        self.isptr = argAsString.count('*')  # number of stars
        self.ctype = ctype.strip('*') + self.isptr * '*'
        # Status flags
        self.cinput = cinput
if __name__ == '__main__':
    # Self-test: parse the bundled gl2.h and cross-check the result
    # against PyOpenGL's exported names and constant values.

    THISDIR = os.path.abspath(os.path.dirname(__file__))

    # Some tests ...
    gl2 = Parser(os.path.join(THISDIR, 'headers', 'gl2.h'))
    import OpenGL.GL

    pygl = set([name for name in dir(OpenGL.GL)])

    # Test if all functions are in pyopengl
    for keyfunc in gl2._functionDefs:
        # A FunctionGroup stands for several functions; check each member.
        group = keyfunc.group or [keyfunc]
        for f in group:
            if f.glname not in pygl:
                print('Not in pyopengl:', f.glname)

    # Test if constant match with these in pyopengl
    for d in gl2._constantDefs:
        v1 = d.value
        try:
            v2 = getattr(OpenGL.GL, d.glname)
        except AttributeError:
            print(d.glname, 'is not in pyopengl')
        else:
            if v1 != v2:
                print(d.glname, 'does not match: %r vs %r' % (v1, int(v2)))
| bsd-3-clause |
mdboom/pytest | extra/get_issues.py | 195 | 2286 | import json
import py
import textwrap
# Bitbucket REST 1.0 endpoint for the pytest issue tracker.
issues_url = "http://bitbucket.org/api/1.0/repositories/pytest-dev/pytest/issues"
import requests
def get_issues():
    """Fetch every issue from the bitbucket API, paging in chunks of 50."""
    chunksize = 50
    start = 0
    issues = []
    while True:
        params = {"accountname": "pytest-dev",
                  "repo_slug": "pytest",
                  "start": start,
                  "limit": chunksize}
        print("getting from", start)
        response = requests.get(issues_url, params=params)
        payload = response.json()
        issues.extend(payload["issues"])
        # Stop once the next page would start past the reported total.
        start += chunksize
        if start >= payload["count"]:
            return issues
# Ranking orders for sorting: the list index is the sort rank
# (bugs first, proposals last; new/open statuses first).
kind2num = "bug enhancement task proposal".split()
status2num = "new open resolved duplicate invalid wontfix".split()
def main(args):
    """Load issues (from the cache file or the API) and report open ones,
    sorted by kind and then newest-first.
    """
    cachefile = py.path.local(args.cache)
    if cachefile.exists() and not args.refresh:
        issues = json.loads(cachefile.read())
    else:
        issues = get_issues()
        cachefile.write(json.dumps(issues))

    open_issues = [issue for issue in issues
                   if issue["status"] in ("new", "open")]

    def sort_key(issue):
        # Group by kind (bug first); higher ids (newer issues) sort earlier.
        return (kind2num.index(issue["metadata"]["kind"]),
                len(issues) - int(issue["local_id"]))

    open_issues.sort(key=sort_key)
    report(open_issues)
def report(issues):
    """Print a short, three-line summary to stdout for each issue.

    Each entry shows a separator, then "<status> <kind> <link>", then the
    issue title. (Dead commented-out content preview and unused locals
    were removed.)
    """
    for issue in issues:
        kind = issue["metadata"]["kind"]
        status = issue["status"]
        issue_id = issue["local_id"]  # renamed from 'id' (shadowed builtin)
        link = "https://bitbucket.org/pytest-dev/pytest/issue/%s/" % issue_id
        print("----")
        print(status, kind, link)
        print(issue["title"])
if __name__ == "__main__":
    # Command-line interface: by default issues are read from a local JSON
    # cache; --refresh re-downloads them from bitbucket.
    import argparse
    parser = argparse.ArgumentParser("process bitbucket issues")
    parser.add_argument("--refresh", action="store_true",
                        help="invalidate cache, refresh issues")
    parser.add_argument("--cache", action="store", default="issues.json",
                        help="cache file")
    args = parser.parse_args()
    main(args)
| mit |
gioman/QGIS | tests/src/python/test_qgscolorscheme.py | 20 | 4650 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsColorScheme.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '25/07/2014'
__copyright__ = 'Copyright 2014, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.testing import unittest, start_app
from qgis.core import QgsColorScheme, QgsUserColorScheme, QgsRecentColorScheme, QgsSettings
from qgis.PyQt.QtCore import QCoreApplication
from qgis.PyQt.QtGui import QColor
# Make a dummy color scheme for testing
class DummyColorScheme(QgsColorScheme):
    """Minimal QgsColorScheme implementation used by the tests below."""

    def __init__(self, parent=None):
        QgsColorScheme.__init__(self)

    def schemeName(self):
        return "Dummy scheme"

    def fetchColors(self, context='', baseColor=QColor()):
        # Known test context returns a fixed yellow entry.
        if context == "testscheme":
            return [[QColor(255, 255, 0), 'schemetest']]
        # A valid base color is echoed back.
        if baseColor.isValid():
            return [[baseColor, 'base']]
        # Default palette: red (named) and green (unnamed).
        return [[QColor(255, 0, 0), 'red'], [QColor(0, 255, 0), None]]

    def clone(self):
        return DummyColorScheme()
class TestQgsColorScheme(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        # Use a dedicated settings scope so the tests do not disturb (or
        # get disturbed by) a real QGIS configuration.
        QCoreApplication.setOrganizationName("QGIS_Test")
        QCoreApplication.setOrganizationDomain("QGIS_TestPyQgsColorScheme.com")
        QCoreApplication.setApplicationName("QGIS_TestPyQgsColorScheme")
        QgsSettings().clear()
        start_app()

    def testCreateScheme(self):
        """Test creating a new color scheme"""
        dummyScheme = DummyColorScheme()
        self.assertTrue(dummyScheme)

    def testGetSchemeName(self):
        """Test getting color scheme name"""
        dummyScheme = DummyColorScheme()
        self.assertEqual(dummyScheme.schemeName(), "Dummy scheme")

    def testColorsNoBase(self):
        """Test getting colors without passing a base color"""
        dummyScheme = DummyColorScheme()
        colors = dummyScheme.fetchColors()
        self.assertEqual(len(colors), 2)
        self.assertEqual(colors[0][0], QColor(255, 0, 0))
        self.assertEqual(colors[0][1], 'red')
        self.assertEqual(colors[1][0], QColor(0, 255, 0))
        self.assertEqual(colors[1][1], None)

    def testColorsWithBase(self):
        """Test getting colors with a base color"""
        dummyScheme = DummyColorScheme()
        testColor = QColor(0, 0, 255)
        colors = dummyScheme.fetchColors(None, testColor)
        self.assertEqual(len(colors), 1)
        self.assertEqual(colors[0][0], testColor)
        self.assertEqual(colors[0][1], 'base')

    def testColorsWithScheme(self):
        """Test getting colors when specifying a scheme"""
        dummyScheme = DummyColorScheme()
        colors = dummyScheme.fetchColors('testscheme')
        self.assertEqual(len(colors), 1)
        self.assertEqual(colors[0][0], QColor(255, 255, 0))
        self.assertEqual(colors[0][1], 'schemetest')

    def testClone(self):
        """Test cloning a color scheme"""
        dummyScheme = DummyColorScheme()
        colors = dummyScheme.fetchColors()
        dummySchemeClone = dummyScheme.clone()
        colorsClone = dummySchemeClone.fetchColors()
        # A clone must report the same colors as the original.
        self.assertEqual(colors, colorsClone)

    def testUserScheme(self):
        """ Tests for user color schemes """

        scheme = QgsUserColorScheme("user_test.gpl")
        self.assertEqual(scheme.schemeName(), 'user_test.gpl')
        self.assertTrue(scheme.isEditable())

        # The menu-visibility flag should be toggleable both ways.
        self.assertFalse(scheme.flags() & QgsColorScheme.ShowInColorButtonMenu)
        scheme.setShowSchemeInMenu(True)
        self.assertTrue(scheme.flags() & QgsColorScheme.ShowInColorButtonMenu)
        scheme.setShowSchemeInMenu(False)
        self.assertFalse(scheme.flags() & QgsColorScheme.ShowInColorButtonMenu)

        # Remove the scheme file created on disk during this test.
        scheme.erase()

    def testRecentColors(self):
        """ test retrieving recent colors """

        # no colors
        self.assertFalse(QgsRecentColorScheme().lastUsedColor().isValid())

        # add a recent color
        QgsRecentColorScheme().addRecentColor(QColor(255, 0, 0))
        self.assertEqual(QgsRecentColorScheme().lastUsedColor(), QColor(255, 0, 0))
        QgsRecentColorScheme().addRecentColor(QColor(0, 255, 0))
        self.assertEqual(QgsRecentColorScheme().lastUsedColor(), QColor(0, 255, 0))
# Run the suite when this test module is executed directly.
if __name__ == "__main__":
    unittest.main()
| gpl-2.0 |
Dunkas12/BeepBoopBot | lib/youtube_dl/extractor/beeg.py | 15 | 4365 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_chr,
compat_ord,
compat_urllib_parse_unquote,
)
from ..utils import (
int_or_none,
parse_iso8601,
)
class BeegIE(InfoExtractor):
    # Extractor for beeg.com video pages; the numeric path segment is the
    # video id.
    _VALID_URL = r'https?://(?:www\.)?beeg\.com/(?P<id>\d+)'
    _TEST = {
        'url': 'http://beeg.com/5416503',
        'md5': '46c384def73b33dbc581262e5ee67cef',
        'info_dict': {
            'id': '5416503',
            'ext': 'mp4',
            'title': 'Sultry Striptease',
            'description': 'md5:d22219c09da287c14bed3d6c37ce4bc2',
            'timestamp': 1391813355,
            'upload_date': '20140207',
            'duration': 383,
            'tags': list,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        """Download the page, locate the site's 'cpl' JS to recover the API
        version and decryption salt, fetch the video JSON, and decrypt the
        format URLs.
        """
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        # The page references a versioned cpl JS file that embeds the API
        # version and the URL-decryption salt.
        cpl_url = self._search_regex(
            r'<script[^>]+src=(["\'])(?P<url>(?:https?:)?//static\.beeg\.com/cpl/\d+\.js.*?)\1',
            webpage, 'cpl', default=None, group='url')

        beeg_version, beeg_salt = [None] * 2

        if cpl_url:
            cpl = self._download_webpage(
                self._proto_relative_url(cpl_url), video_id,
                'Downloading cpl JS', fatal=False)
            if cpl:
                # Fall back to the version number in the JS filename if the
                # variable is not found in the script body.
                beeg_version = int_or_none(self._search_regex(
                    r'beeg_version\s*=\s*([^\b]+)', cpl,
                    'beeg version', default=None)) or self._search_regex(
                    r'/(\d+)\.js', cpl_url, 'beeg version', default=None)
                beeg_salt = self._search_regex(
                    r'beeg_salt\s*=\s*(["\'])(?P<beeg_salt>.+?)\1', cpl, 'beeg salt',
                    default=None, group='beeg_salt')

        # Hard-coded fallbacks observed on the site at the time of writing.
        beeg_version = beeg_version or '2000'
        beeg_salt = beeg_salt or 'pmweAkq8lAYKdfWcFCUj0yoVgoPlinamH5UE1CB3H'

        video = self._download_json(
            'https://api.beeg.com/api/v6/%s/video/%s' % (beeg_version, video_id),
            video_id)

        def split(o, e):
            # Chop string o into chunks of length e (a shorter chunk first
            # when len(o) is not a multiple of e).
            def cut(s, x):
                n.append(s[:x])
                return s[x:]
            n = []
            r = len(o) % e
            if r > 0:
                o = cut(o, r)
            while len(o) > e:
                o = cut(o, e)
            n.append(o)
            return n

        def decrypt_key(key):
            # Reverse engineered from http://static.beeg.com/cpl/1738.js
            a = beeg_salt
            e = compat_urllib_parse_unquote(key)
            o = ''.join([
                compat_chr(compat_ord(e[n]) - compat_ord(a[n % len(a)]) % 21)
                for n in range(len(e))])
            return ''.join(split(o, 3)[::-1])

        def decrypt_url(encrypted_url):
            # The key is embedded in the URL; replace it in place with the
            # decrypted key. URLs without a key pass through unchanged.
            encrypted_url = self._proto_relative_url(
                encrypted_url.replace('{DATA_MARKERS}', ''), 'https:')
            key = self._search_regex(
                r'/key=(.*?)%2Cend=', encrypted_url, 'key', default=None)
            if not key:
                return encrypted_url
            return encrypted_url.replace(key, decrypt_key(key))

        # Format entries in the JSON are keyed by height, e.g. '720p'.
        formats = []
        for format_id, video_url in video.items():
            if not video_url:
                continue
            height = self._search_regex(
                r'^(\d+)[pP]$', format_id, 'height', default=None)
            if not height:
                continue
            formats.append({
                'url': decrypt_url(video_url),
                'format_id': format_id,
                'height': int(height),
            })
        self._sort_formats(formats)

        title = video['title']
        video_id = video.get('id') or video_id
        display_id = video.get('code')
        description = video.get('desc')

        # The API date field uses a space between date and time.
        timestamp = parse_iso8601(video.get('date'), ' ')
        duration = int_or_none(video.get('duration'))

        tags = [tag.strip() for tag in video['tags'].split(',')] if video.get('tags') else None

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'duration': duration,
            'tags': tags,
            'formats': formats,
            'age_limit': self._rta_search(webpage),
        }
| gpl-3.0 |
murfz/Sick-Beard | lib/requests/packages/chardet2/euctwfreq.py | 323 | 34864 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# Typical distribution ratio for EUC-TW text: roughly 25% of the ideal
# ratio 2.98 derived in the comments above; used by the distribution
# analyser as its confidence threshold.
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75

# Char to FreqOrder table: EUCTW_TABLE_SIZE is the number of leading
# entries of the table below that are relevant for detection (entries
# past this index are marked "of no interest" further down).
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = ( \
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purpose
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
| gpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/numpy/distutils/command/build.py | 187 | 1618 | from __future__ import division, absolute_import, print_function
import os
import sys
from distutils.command.build import build as old_build
from distutils.util import get_platform
from numpy.distutils.command.config_compiler import show_fortran_compilers
class build(old_build):
    """numpy.distutils replacement for the stock distutils ``build`` command.

    Extends the standard command with:

    * extra sub-commands ``config_cc``/``config_fc`` (C/Fortran compiler
      configuration) and ``build_src`` (generated-source building), run
      before the regular distutils build steps;
    * a ``--fcompiler`` option to select the Fortran compiler type;
    * a ``--parallel``/``-j`` option giving the number of parallel jobs.
    """

    # config_cc/config_fc always run; build_src only runs when the
    # distribution actually contains extension modules.
    sub_commands = [('config_cc', lambda *args: True),
                    ('config_fc', lambda *args: True),
                    ('build_src', old_build.has_ext_modules),
                    ] + old_build.sub_commands

    user_options = old_build.user_options + [
        ('fcompiler=', None,
         "specify the Fortran compiler type"),
        ('parallel=', 'j',
         "number of parallel jobs"),
        ]

    help_options = old_build.help_options + [
        ('help-fcompiler', None, "list available Fortran compilers",
         show_fortran_compilers),
        ]

    def initialize_options(self):
        # Defaults for the two options added above; distutils fills in
        # everything else.
        old_build.initialize_options(self)
        self.fcompiler = None
        self.parallel = None

    def finalize_options(self):
        # --parallel/-j must be an integer job count.
        if self.parallel:
            try:
                self.parallel = int(self.parallel)
            except ValueError:
                raise ValueError("--parallel/-j argument must be an integer")
        # Remember whether the user set --build-scripts before the base
        # class fills in its own default.
        build_scripts = self.build_scripts
        old_build.finalize_options(self)
        plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
        # If the user did not choose a scripts directory, use a
        # platform-specific subdirectory of the build base.
        if build_scripts is None:
            self.build_scripts = os.path.join(self.build_base,
                                              'scripts' + plat_specifier)

    def run(self):
        # No extra behavior at run time; the customization is entirely in
        # the sub-command list and options above.
        old_build.run(self)
| mit |
tijeco/Fuster | Simulations/makeSeed.py | 2 | 1189 | import sys
import random
import os
def makeCodon():
    """Return one random codon: a 3-character string whose characters are
    drawn uniformly and independently from the nucleotides A, T, C, G.
    """
    nucleotides = "ATCG"
    # random.choice is the idiomatic (and equivalent) form of
    # nucleotides[random.randint(0, len(nucleotides) - 1)].
    return "".join(random.choice(nucleotides) for _ in range(3))


def makeSeed(numAA):
    """Build a random open-reading-frame-like nucleotide sequence.

    The sequence starts with the start codon ``ATG``, is followed by up to
    *numAA* random codons, and ends with the stop codon ``TAA``.

    Note: a randomly drawn stop codon (TAA/TAG/TGA) is *dropped*, not
    re-drawn, so the result may contain fewer than *numAA* internal codons.
    This preserves the original behaviour.
    """
    stop_codons = ("TAA", "TAG", "TGA")
    seed = "ATG"
    for _ in range(numAA):
        codon = makeCodon()
        if codon not in stop_codons:
            seed += codon
    seed += "TAA"
    return seed
# seedNum = int(sys.argv[1])
# Command-line driver: generate int(sys.argv[1]) random seed sequences and
# write each to seeds/seed<regime><i>_len<length>BL_<branchlength>.fa.
for i in range(int(sys.argv[1])):
    # Sequence length in codons (a wider alternative range kept for reference).
    length = random.randint(350, 500)
    # length = random.randint(50, 1000)
    branchlength = random.randint(1, 15)
    # branchlength = random.randint(1,50)
    # Selection regime tag: positive, purifying or conserved
    # (presumably consumed by a downstream simulation step -- TODO confirm).
    regime = random.choice(["pos", "pur", "con"])
    # regime = "pos"
    filename = "seeds/seed" + regime + str(i) + "_len" + str(length) + "BL_" + str(branchlength) + ".fa"
    # Create the seeds/ directory on first use; exist_ok avoids a race on reruns.
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    with open(filename, "w") as out:
        out.write(makeSeed(length))

# Example downstream artifacts this feeds into:
# seed0_len100BL_10_True.tre
# seed0_len100BL_10_True_alignment.FASTA
# home/usr/hyphy/res/TemplateBatchFiles/FUBAR.bf
| gpl-3.0 |
geometalab/osmaxx | tests/selenium_tests/test_new_excerpt.py | 2 | 1561 | from urllib.parse import urljoin
import pytest
import requests
from selenium.webdriver.common.keys import Keys
from tests.selenium_tests.conftest import skip_selenium_tests, first_panel_on_excerpts_export_overview_xpath
from tests.selenium_tests.new_excerpt import new_excerpt
@skip_selenium_tests
@pytest.mark.parametrize("file_name, file_format", [("gdb", 'id_formats_1'), ("shp", 'id_formats_2'),
                                                    ("gpkg", 'id_formats_3'), ("spatialite", 'id_formats_4'),
                                                    ("img_tdb", 'id_formats_5')])
def test_new_excerpt(base_url, login, file_name, file_format, selenium, reload_until_condition):
    """End-to-end browser test: create a new excerpt export in each supported
    file format and verify the produced download link is reachable.

    Parametrized over (export name, DOM id of the format radio button); the
    export name doubles as the format label so failures are identifiable.
    """
    new_excerpt(selenium, base_url)
    # insert excerpt name
    excerpt_name = selenium.find_element_by_id('id_name')
    excerpt_name.send_keys(file_name)
    # choose the file format
    formats = selenium.find_element_by_id(file_format)
    formats.click()
    # submit the form
    create = selenium.find_element_by_name('submit')
    create.send_keys(Keys.RETURN)
    # wait until the first export panel appears, then poll (reloading the
    # page) until the download anchor shows up inside it
    selenium.find_element_by_xpath(first_panel_on_excerpts_export_overview_xpath + "div[1]/h3")
    first_a = first_panel_on_excerpts_export_overview_xpath + "div[2]/div[1]/div[1]/div[2]/div/div[1]/p/a"
    element = reload_until_condition(selenium.find_element_by_xpath, first_a)
    # check the download link is valid: a HEAD request must return HTTP 200
    url = urljoin(base_url, element.get_attribute('href'))
    r = requests.head(url)
    assert r.status_code == requests.codes.ok
| mit |
mspublic/openair4G-mirror | targets/TEST/OAI/case01.py | 1 | 11144 | #******************************************************************************
# Eurecom OpenAirInterface
# Copyright(c) 1999 - 2013 Eurecom
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
# Contact Information
# Openair Admin: openair_admin@eurecom.fr
# Openair Tech : openair_tech@eurecom.fr
# Forums : http://forums.eurecom.fsr/openairinterface
# Address : Eurecom, Compus SophiaTech 450, route des chappes, 06451 Biot, France
#*****************************************************************************
# \file case01.py
# \brief test case 01 for OAI: compilations
# \author Navid Nikaein
# \date 2013 - 2014
# \version 0.1
# @ingroup _test
import log
import openair
import core
# Substrings that signal a failed make invocation in captured build output.
makerr1 = '***'
makerr2 = 'Error 1'


def execute(oai, user, pw, host, logfile, logdir, debug):
    """Run OAI test case 01: a battery of build/compile checks.

    Each numbered sub-test configures a build variant (Rel8/Rel10, ITTI,
    NAS, RAL, hardware targets, PHY simulators), runs ``make`` through the
    ``oai`` shell session, and records pass/fail via the ``log`` module.
    Build output is tee'd to ``logdir/log_<case><test>.txt``.

    Returns 1 on overall success, 0 if any of the compilations marked as
    critical failed.  Note: Python 2 syntax (``except log.err, e``).
    """
    case = '01'
    rv = 1
    oai.send_recv('cd $OPENAIR_TARGETS;')

    # Test 00: warn if any Makefile has uncommitted svn changes.
    try:
        test = '00'
        name = 'Check oai.svn.add'
        conf = 'svn st -q | grep makefile'
        diag = 'Makefile(s) changed. If you are adding a new file, make sure that it is added to the svn'
        rsp = oai.send_recv('svn st -q | grep -i makefile;')
        for item in rsp.split("\n"):
            if "Makefile" in item:
                rsp2 = item.strip() + '\n'
        oai.find_false_re(rsp, 'Makefile')
    except log.err, e:
        # NOTE(review): rsp2 is only bound when the loop above matched a
        # "Makefile" line; this concatenation can raise NameError otherwise.
        diag = diag + "\n" + rsp2
        #log.skip(case, test, name, conf, e.value, logfile)
        log.skip(case, test, name, conf, '', diag, logfile)
    else:
        log.ok(case, test, name, conf, '', logfile)

    oai.send('cd SIMU/USER;')
    oai.send('mkdir ' + logdir + ';')

    # Test 01: baseline Rel8 oaisim build (failure is critical -> rv = 0).
    try:
        test = '01'
        name = 'Compile oai.rel8.make'
        conf = 'make'
        trace = logdir + '/log_' + case + test + '.txt;'
        tee = ' 2>&1 | tee ' + trace
        diag = "check the compilation errors for oai"
        oai.send('make cleanall;')
        oai.send('make cleanasn1;')
        oai.send('rm -f ./oaisim.rel8.' + host)
        oai.send_expect_false('make -j4 JF=1' + tee, makerr1, 1500)
        oai.send('cp ./oaisim ./oaisim.rel8.' + host)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile, trace)
        rv = 0
    else:
        log.ok(case, test, name, conf, '', logfile)

    # Test 02: Rel8 build with the NAS driver (two make steps, two logs).
    try:
        test = '02'
        name = 'Compile oai.rel8.nas.make'
        conf = 'make nasmesh_fix; make NAS=1'
        diag = 'check the compilation errors for oai and nas driver'
        oai.send('make cleanall;')
        oai.send('rm -f ./oaisim.rel8.nas' + host)
        oai.send('rm -f ./nasmesh;')
        oai.send('make nasmesh_clean;')
        trace = logdir + '/log_' + case + test + '_1.txt;'
        tee = ' 2>&1 | tee ' + trace
        oai.send_expect_false('make nasmesh_fix' + tee, makerr1, 60)
        trace = logdir + '/log_' + case + test + '_2.txt;'
        tee = ' 2>&1 | tee ' + trace
        oai.send_expect_false('make NAS=1 JF=1 -j4' + tee, makerr1, 1500)
        oai.send('cp ./oaisim ./oaisim.rel8.nas.' + host)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile, trace)
        rv = 0
    else:
        log.ok(case, test, name, conf, '', logfile)

    oai.send('cd $OPENAIR_TARGETS;')
    oai.send('cd RT/USER;')

    # Test 03: Rel8 build for the EXMIMO RF target (non-critical: no rv = 0).
    try:
        test = '03'
        name = 'Compile oai.rel8.rf.make'
        conf = 'make RTAI=0 EXMIMO=1 Rel8=1'
        trace = logdir + '/log_' + case + test + '.txt;'
        tee = ' 2>&1 | tee ' + trace
        diag = 'check the compilation errors for Rel8'
        oai.send('make cleanall;')
        oai.send('rm -f ./oaisim.rel8.rf.' + host)
        oai.send_expect_false('make RTAI=0 EXMIMO=1 -j4' + tee, makerr1, 1500)
        oai.send('cp ./oaisim ./oaisim.rel8.rf.' + host)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile, trace)
    else:
        log.ok(case, test, name, conf, '', logfile)

    oai.send('cd $OPENAIR1_DIR;')
    oai.send('cd SIMULATION/LTE_PHY;')

    # Test 04: PHY downlink simulator (critical).
    try:
        test = '04'
        name = 'Compile oai.rel8.phy.dlsim.make'
        conf = 'make dlsim'
        trace = logdir + '/log_' + case + test + '.txt;'
        tee = ' 2>&1 | tee ' + trace
        diag = 'check the compilation errors for dlsim in $OPENAIR1_DIR/SIMULATION/LTE_PHY'
        oai.send('make clean;')
        oai.send('rm -f ./dlsim.rel8.' + host)
        oai.send_expect_false('make dlsim -j4' + tee, makerr1, 1500)
        oai.send('cp ./dlsim ./dlsim.rel8.' + host)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile, trace)
        rv = 0
    else:
        log.ok(case, test, name, conf, '', logfile)

    # Test 05: PHY uplink simulator (critical).
    try:
        test = '05'
        name = 'Compile oai.rel8.phy.ulsim.make'
        conf = 'make ulsim'
        trace = logdir + '/log_' + case + test + '.txt;'
        tee = ' 2>&1 | tee ' + trace
        diag = 'check the compilation errors for ulsim in $OPENAIR1_DIR/SIMULATION/LTE_PHY'
        oai.send('make clean;')
        oai.send('rm -f ./ulsim.rel8.' + host)
        oai.send_expect_false('make ulsim -j4' + tee, makerr1, 1500)
        oai.send('cp ./ulsim ./ulsim.rel8.' + host)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile, trace)
        rv = 0
    else:
        log.ok(case, test, name, conf, '', logfile)

    oai.send('cd $OPENAIR_TARGETS;')
    oai.send('cd SIMU/USER;')

    # Test 06: Rel8 build with the ITTI task framework (critical).
    try:
        test = '06'
        name = 'Compile oai.rel8.itti.make'
        conf = 'make DISABLE_XER_PRINT=1 ENABLE_ITTI=1 Rel8=1'
        trace = logdir + '/log_' + case + test + '.txt;'
        tee = ' 2>&1 | tee ' + trace
        diag = 'check the compilation errors for ITTI Rel8'
        oai.send('make clean;')
        oai.send('rm -f ./oaisim.rel8.itti.' + host)
        oai.send_expect_false('make DISABLE_XER_PRINT=1 ENABLE_ITTI=1 Rel8=1 -j4' + tee, makerr1, 1500)
        oai.send('cp ./oaisim ./oaisim.rel8.itti.' + host)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile, trace)
        rv = 0
    else:
        log.ok(case, test, name, conf, '', logfile)

    # Test 07: baseline Rel10 build (critical).
    try:
        test = '07'
        name = 'Compile oai.rel10.make'
        conf = 'make RLC_STOP_ON_LOST_PDU=1 Rel10=1'
        trace = logdir + '/log_' + case + test + '.txt;'
        tee = ' 2>&1 | tee ' + trace
        diag = 'check the compilation errors for Rel10'
        oai.send('make clean;')
        oai.send('make cleanall;')
        oai.send('make cleanasn1;')
        oai.send('rm -f ./oaisim.rel10.' + host)
        oai.send_expect_false('make RLC_STOP_ON_LOST_PDU=1 Rel10=1 -j4' + tee, makerr1, 1500)
        oai.send('cp ./oaisim ./oaisim.rel10.' + host)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile, trace)
        rv = 0
    else:
        log.ok(case, test, name, conf, '', logfile)

    # Test 08: Rel10 + ITTI build (critical).
    try:
        test = '08'
        name = 'Compile oai.rel10.itti.make'
        conf = 'make DISABLE_XER_PRINT=1 ENABLE_ITTI=1 RLC_STOP_ON_LOST_PDU=1 Rel10=1'
        trace = logdir + '/log_' + case + test + '.txt;'
        tee = ' 2>&1 | tee ' + trace
        diag = 'check the compilation errors for ITTI Rel10'
        oai.send('make cleanall;')
        oai.send('rm -f ./oaisim.rel10.itti.' + host)
        oai.send_expect_false('make DISABLE_XER_PRINT=1 ENABLE_ITTI=1 RLC_STOP_ON_LOST_PDU=1 Rel10=1 -j4' + tee, makerr1, 1500)
        oai.send('cp ./oaisim ./oaisim.rel10.itti.' + host)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile, trace)
        rv = 0
    else:
        log.ok(case, test, name, conf, '', logfile)

    # Test 13: Ethernet IP network driver (non-critical).
    try:
        test = '13'
        name = 'Compile oai_nw_ether IP driver'
        conf = 'make oai_nw_drv'
        trace = logdir + '/log_' + case + test + '.txt;'
        tee = ' 2>&1 | tee ' + trace
        diag = 'check the compilation errors for ITTI Rel8'
        oai.send('make clean;')
        oai.send('make cleanall;')
        oai.send('make cleanasn1;')
        oai.send('rm -f ./oai_nw_drv;')
        oai.send('make oai_nw_drv_clean;')
        # NOTE(review): tee is rebuilt with the same trace here; redundant
        # but harmless -- kept as-is.
        tee = ' 2>&1 | tee ' + trace
        oai.send_expect_false('make oai_nw_drv' + tee, makerr1, 60)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile, trace)
    else:
        log.ok(case, test, name, conf, '', logfile)

    # Test 14: Rel8 + ITTI + RAL (media-independent handover) build
    # (non-critical).
    try:
        test = '14'
        name = 'Compile oai.rel8.itti.ral.make'
        conf = 'make DISABLE_XER_PRINT=1 NAS=1 OAI_NW_DRIVER_TYPE_ETHERNET=1 ENABLE_ITTI=1 USER_MODE=1 OPENAIR2=1 ENABLE_RAL=1 MIH_C_MEDIEVAL_EXTENSIONS=1 RLC_STOP_ON_LOST_PDU=1 Rel8=1'
        trace = logdir + '/log_' + case + test + '.txt;'
        tee = ' 2>&1 | tee ' + trace
        diag = 'check the compilation errors for ITTI Rel8'
        oai.send('make clean;')
        oai.send('make cleanall;')
        oai.send('make cleanasn1;')
        oai.send('rm -f ./oaisim.rel8.itti.ral.' + host)
        oai.send_expect_false('make DISABLE_XER_PRINT=1 NAS=1 OAI_NW_DRIVER_TYPE_ETHERNET=1 ENABLE_ITTI=1 USER_MODE=1 OPENAIR2=1 ENABLE_RAL=1 MIH_C_MEDIEVAL_EXTENSIONS=1 RLC_STOP_ON_LOST_PDU=1 Rel8=1 -j4' + tee, makerr1, 1500)
        oai.send('cp ./oaisim ./oaisim.rel8.itti.ral.' + host)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile, trace)
    else:
        log.ok(case, test, name, conf, '', logfile)

    # Test 15: Rel10 + ITTI + RAL build (non-critical).
    try:
        test = '15'
        name = 'Compile oai.rel10.itti.ral.make'
        conf = 'make DISABLE_XER_PRINT=1 NAS=1 OAI_NW_DRIVER_TYPE_ETHERNET=1 ENABLE_ITTI=1 USER_MODE=1 OPENAIR2=1 ENABLE_RAL=1 MIH_C_MEDIEVAL_EXTENSIONS=1 RLC_STOP_ON_LOST_PDU=1 Rel10=1'
        trace = logdir + '/log_' + case + test + '.txt;'
        tee = ' 2>&1 | tee ' + trace
        diag = 'check the compilation errors for ITTI Rel10'
        oai.send('make clean;')
        oai.send('make cleanall;')
        oai.send('make cleanasn1;')
        oai.send('rm -f ./oaisim.rel10.itti.ral.' + host)
        oai.send_expect_false('make DISABLE_XER_PRINT=1 NAS=1 OAI_NW_DRIVER_TYPE_ETHERNET=1 ENABLE_ITTI=1 USER_MODE=1 OPENAIR2=1 ENABLE_RAL=1 MIH_C_MEDIEVAL_EXTENSIONS=1 RLC_STOP_ON_LOST_PDU=1 Rel10=1 -j4' + tee, makerr1, 1500)
        oai.send('cp ./oaisim ./oaisim.rel10.itti.ral.' + host)
    except log.err, e:
        log.fail(case, test, name, conf, e.value, diag, logfile, trace)
    else:
        log.ok(case, test, name, conf, '', logfile)

    return rv
| gpl-3.0 |
wellesleywatson/INFO3180-lab4-old | lib/flask/testsuite/views.py | 561 | 5068 | # -*- coding: utf-8 -*-
"""
flask.testsuite.views
~~~~~~~~~~~~~~~~~~~~~
Pluggable views.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import flask.views
import unittest
from flask.testsuite import FlaskTestCase
from werkzeug.http import parse_set_header
class ViewTestCase(FlaskTestCase):
    """Exercises flask.views.View and flask.views.MethodView pluggable views."""

    def common_test(self, app):
        """Shared checks: GET/POST dispatch, PUT rejection, implicit OPTIONS."""
        c = app.test_client()

        self.assert_equal(c.get('/').data, b'GET')
        self.assert_equal(c.post('/').data, b'POST')
        # PUT is not declared by the view, so it must be rejected with 405.
        self.assert_equal(c.put('/').status_code, 405)
        meths = parse_set_header(c.open('/', method='OPTIONS').headers['Allow'])
        self.assert_equal(sorted(meths), ['GET', 'HEAD', 'OPTIONS', 'POST'])

    def test_basic_view(self):
        # Plain View subclass with an explicit `methods` list.
        app = flask.Flask(__name__)

        class Index(flask.views.View):
            methods = ['GET', 'POST']

            def dispatch_request(self):
                return flask.request.method

        app.add_url_rule('/', view_func=Index.as_view('index'))
        self.common_test(app)

    def test_method_based_view(self):
        # MethodView dispatches to get()/post() by HTTP verb.
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):

            def get(self):
                return 'GET'

            def post(self):
                return 'POST'

        app.add_url_rule('/', view_func=Index.as_view('index'))
        self.common_test(app)

    def test_view_patching(self):
        # Replacing view.view_class after as_view() must route requests to
        # the replacement class (Index's own handlers would divide by zero).
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):

            def get(self):
                1 // 0

            def post(self):
                1 // 0

        class Other(Index):

            def get(self):
                return 'GET'

            def post(self):
                return 'POST'

        view = Index.as_view('index')
        view.view_class = Other
        app.add_url_rule('/', view_func=view)
        self.common_test(app)

    def test_view_inheritance(self):
        # A subclass adding delete() should advertise DELETE in Allow.
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):

            def get(self):
                return 'GET'

            def post(self):
                return 'POST'

        class BetterIndex(Index):

            def delete(self):
                return 'DELETE'

        app.add_url_rule('/', view_func=BetterIndex.as_view('index'))
        c = app.test_client()

        meths = parse_set_header(c.open('/', method='OPTIONS').headers['Allow'])
        self.assert_equal(sorted(meths), ['DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST'])

    def test_view_decorators(self):
        # Entries in View.decorators wrap the generated view function.
        app = flask.Flask(__name__)

        def add_x_parachute(f):
            def new_function(*args, **kwargs):
                resp = flask.make_response(f(*args, **kwargs))
                resp.headers['X-Parachute'] = 'awesome'
                return resp
            return new_function

        class Index(flask.views.View):
            decorators = [add_x_parachute]

            def dispatch_request(self):
                return 'Awesome'

        app.add_url_rule('/', view_func=Index.as_view('index'))
        c = app.test_client()
        rv = c.get('/')
        self.assert_equal(rv.headers['X-Parachute'], 'awesome')
        self.assert_equal(rv.data, b'Awesome')

    def test_implicit_head(self):
        # Without an explicit head(), HEAD falls back to get() minus the body.
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):

            def get(self):
                return flask.Response('Blub', headers={
                    'X-Method': flask.request.method
                })

        app.add_url_rule('/', view_func=Index.as_view('index'))
        c = app.test_client()
        rv = c.get('/')
        self.assert_equal(rv.data, b'Blub')
        self.assert_equal(rv.headers['X-Method'], 'GET')
        rv = c.head('/')
        self.assert_equal(rv.data, b'')
        self.assert_equal(rv.headers['X-Method'], 'HEAD')

    def test_explicit_head(self):
        # An explicit head() handler takes precedence over get().
        app = flask.Flask(__name__)

        class Index(flask.views.MethodView):

            def get(self):
                return 'GET'

            def head(self):
                return flask.Response('', headers={'X-Method': 'HEAD'})

        app.add_url_rule('/', view_func=Index.as_view('index'))
        c = app.test_client()
        rv = c.get('/')
        self.assert_equal(rv.data, b'GET')
        rv = c.head('/')
        self.assert_equal(rv.data, b'')
        self.assert_equal(rv.headers['X-Method'], 'HEAD')

    def test_endpoint_override(self):
        # Registering the same endpoint twice in debug mode must raise.
        app = flask.Flask(__name__)
        app.debug = True

        class Index(flask.views.View):
            methods = ['GET', 'POST']

            def dispatch_request(self):
                return flask.request.method

        app.add_url_rule('/', view_func=Index.as_view('index'))

        with self.assert_raises(AssertionError):
            app.add_url_rule('/', view_func=Index.as_view('index'))

        # But these tests should still pass. We just log a warning.
        self.common_test(app)
def suite():
    """Assemble and return the pluggable-views test suite."""
    view_tests = unittest.makeSuite(ViewTestCase)
    return unittest.TestSuite([view_tests])
| apache-2.0 |
andyseubert/doorbell | doorBellListener.py | 1 | 1322 | #!/usr/bin/python
from time import sleep
import os
import sys
import subprocess
from subprocess import Popen
import RPi.GPIO as GPIO
# global variables for commands and status
# NOTE(review): 'global' at module level is a no-op statement; kept as-is.
global alertcmd
# Script launched once per listening host when the bell button is pressed.
alertcmd = "/opt/doorbell/ringer.py"

# Physical (BOARD-numbered) pin the doorbell push button is wired to.
bellButtonPin = 26

# board pin numbers are easier for me. If I move to another RPI version though... check the number
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(bellButtonPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)  # Front push button

print("READY")

# 04/05/2017 - removed "event_detect" code and switched to "wait_for_edge"
# in an attempt to remove spurious button presses.
# the code does NOTHING else, so I figured I'd give it a try.
# I didn't realize there was a bouncetime paramater at first
# and adding that definitely stopped the bouncing.

while (True):
    # Block until the button pulls the pin low; bouncetime=200ms debounces.
    GPIO.wait_for_edge(bellButtonPin, GPIO.FALLING, bouncetime=200)
    ## read the list of hosts listening from a configuration file
    # (re-read on every press so listeners can be edited without a restart)
    with open('/opt/doorbell/listeners.txt') as f:
        listeners = f.read().splitlines()
    for host in listeners:
        print "ringing " + host
        # Fire-and-forget: one ringer process per listening host.
        subprocess.Popen([sys.executable, alertcmd, host])
    #subprocess.Popen([sys.executable,"/opt/doorbell/unlockDoor.py"])
    subprocess.Popen([sys.executable, "/opt/doorbell/sendsms.py", "DingDong"])

# NOTE(review): unreachable -- the while-True loop above never exits, so
# cleanup only happens if the process is killed before reaching here.
GPIO.cleanup()
BruceDLong/CodeDog | Docs/source/conf.py | 1 | 5796 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CodeDog documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 1 11:27:30 2020.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions used by this project.
extensions = ['sphinx.ext.todo',
              'sphinx.ext.ifconfig',
              'sphinx.ext.githubpages']

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'CodeDog'
copyright = '2020, Bruce Long'
author = 'Bruce Long'

# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'

# Language for autogenerated content (None = English default).
language = None

# Patterns, relative to the source directory, to ignore when looking for
# source files.  Also affects html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# HTML theme, resolved through the PSphinxTheme helper which also returns
# the theme search path and the minimum Sphinx version the theme needs.
html_theme = 'p-main_theme'
from PSphinxTheme import utils
p, html_theme, needs_sphinx = utils.set_psphinxtheme(html_theme)
html_theme_path = p

# Theme-specific look-and-feel tweaks.
html_theme_options = {
    'codebgcolor': '#f2f5f2',
    'codeblockfont': 'monospace'
}

# Custom static files (copied after builtin static files, so a file named
# "default.css" would overwrite the builtin "default.css").
html_static_path = ['_static']

# Custom sidebar templates, mapping document names to template names.
html_sidebars = {
    '**': [
        'globaltoc.html',
        #'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'CodeDogdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    'pointsize': '10pt',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, 'CodeDog.tex', 'CodeDog Documentation',
     'Bruce Long', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'codedog', 'CodeDog Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# (source start file, target name, title, author, dir menu entry,
#  description, category)
texinfo_documents = [
    (master_doc, 'CodeDog', 'CodeDog Documentation',
     author, 'CodeDog', 'A Helpful Programming Language',
     'Miscellaneous'),
]

# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright

# Files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| gpl-2.0 |
mitya57/debian-buildbot | buildbot/status/web/hooks/base.py | 3 | 3038 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
# code inspired/copied from contrib/github_buildbot
# and inspired from code from the Chromium project
# otherwise, Andrew Melo <andrew.melo@gmail.com> wrote the rest
# but "the rest" is pretty minimal
from buildbot.util import json
def getChanges(request, options=None):
    """
    Consumes a naive build notification (the default for now)

    Basically, set POST variables to match commit object parameters:
    revision, revlink, comments, branch, who, files, links.

    ``files`` and ``properties`` are de-JSON'd; the rest are interpreted
    as strings.

    Returns a ``([change_dict], None)`` tuple as expected by the change
    hook dispatcher.
    """
    def firstOrNothing(value):
        """Return value[0] if *value* is a list, otherwise *value* unchanged.

        Request args usually arrive as single-element lists.
        """
        # isinstance(value, list) replaces the old isinstance(value, type([]))
        # anti-idiom; behavior is identical.
        if isinstance(value, list):
            return value[0]
        return value

    args = request.args

    # 'files' and 'properties' arrive JSON-encoded; default to empty
    # containers when absent.
    if args.get('files'):
        files = json.loads(args.get('files')[0])
    else:
        files = []

    if args.get('properties'):
        properties = json.loads(args.get('properties')[0])
    else:
        properties = {}

    revision = firstOrNothing(args.get('revision'))

    # 'when' is a POSIX timestamp; convert only when supplied.
    when = firstOrNothing(args.get('when'))
    if when is not None:
        when = float(when)

    # 'author' is preferred; fall back to the legacy 'who' field.
    author = firstOrNothing(args.get('author'))
    if not author:
        author = firstOrNothing(args.get('who'))

    comments = firstOrNothing(args.get('comments'))
    isdir = firstOrNothing(args.get('isdir', 0))
    branch = firstOrNothing(args.get('branch'))
    category = firstOrNothing(args.get('category'))
    revlink = firstOrNothing(args.get('revlink'))
    repository = firstOrNothing(args.get('repository'))
    project = firstOrNothing(args.get('project'))

    chdict = dict(author=author, files=files, comments=comments,
                  isdir=isdir, revision=revision, when=when,
                  branch=branch, category=category, revlink=revlink,
                  properties=properties, repository=repository,
                  project=project)
    return ([chdict], None)
| gpl-2.0 |
rdkit/rdkit-orig | rdkit/Chem/SATIS.py | 2 | 3432 | # $Id$
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Functionality for SATIS typing atoms
"""
from rdkit import Chem
_debug = 0
#
# These are SMARTS patterns for the special cases used in
# SATIS typing.
#
aldehydePatt = Chem.MolFromSmarts('[CD2]=[OD1]')
ketonePatt = Chem.MolFromSmarts('[CD3]=[OD1]')
amidePatt = Chem.MolFromSmarts('[CD3](=[OD1])-[#7]')
esterPatt = Chem.MolFromSmarts('C(=[OD1])-O-[#6]')
carboxylatePatt = Chem.MolFromSmarts('C(=[OD1])-[OX1]')
carboxylPatt = Chem.MolFromSmarts('C(=[OD1])-[OX2]')
specialCases = ((carboxylatePatt,97),
(esterPatt,96),
(carboxylPatt,98),
(amidePatt,95),
(ketonePatt,94),
(aldehydePatt,93))
def SATISTypes(mol,neighborsToInclude=4):
  """ returns SATIS codes for all atoms in a molecule

  The SATIS definition used is from:
  J. Chem. Inf. Comput. Sci. _39_ 751-757 (1999)

  each SATIS code is a string consisting of _neighborsToInclude_ + 1
  2 digit numbers

  **Arguments**

    - mol: a molecule

    - neighborsToInclude (optional): the number of neighbors to include
      in the SATIS codes

  **Returns**

    a list of strings nAtoms long

  """
  global specialCases
  nAtoms = mol.GetNumAtoms()
  # cache atomic numbers so neighbor lookups below are plain list indexing
  atomicNums = [0]*nAtoms
  atoms = mol.GetAtoms()
  for i in xrange(nAtoms):
    atomicNums[i] = atoms[i].GetAtomicNum()
  nSpecialCases = len(specialCases)
  # precompute, for each special-case pattern, the tuple of substructure
  # matches in this molecule (empty tuple when the pattern is absent)
  specialCaseMatches = [None]*nSpecialCases
  for i,(patt,idx) in enumerate(specialCases):
    if mol.HasSubstructMatch(patt):
      specialCaseMatches[i] = mol.GetSubstructMatches(patt)
    else:
      specialCaseMatches[i] = ()
  codes = [None]*nAtoms
  for i in range(nAtoms):
    # 99 is the "no neighbor" sentinel used for unfilled slots
    code = [99]*(neighborsToInclude+1)
    atom = atoms[i]
    atomIdx = atom.GetIdx()
    code[0] = min(atom.GetAtomicNum(),99)
    bonds = atom.GetBonds()
    nBonds = len(bonds)
    otherIndices = [-1]*nBonds
    if _debug: print code[0],
    for j in range(nBonds):
      otherIndices[j] = bonds[j].GetOtherAtom(atom).GetIdx()
      if _debug: print otherIndices[j],
    if _debug: print
    # atomic numbers of bonded neighbors plus one entry per attached hydrogen
    otherNums = [atomicNums[x] for x in otherIndices] + \
                [1]*atom.GetTotalNumHs()
    otherNums.sort()
    nOthers = len(otherNums)
    if nOthers > neighborsToInclude:
      # too many neighbors: keep only the largest atomic numbers while
      # preserving ascending order in the retained slice
      otherNums.reverse()
      otherNums = otherNums[:neighborsToInclude]
      otherNums.reverse()
      for j in range(neighborsToInclude):
        code[j+1] = min(otherNums[j],99)
    else:
      for j in range(nOthers):
        code[j+1] = min(otherNums[j],99)
      if nOthers < neighborsToInclude and code[0] in [6,8]:
        # under-coordinated C or O: encode carbonyl-type environments
        # (codes 93-98 from specialCases) in the final slot when this
        # atom is part of a matching substructure
        found = 0
        for j in range(nSpecialCases):
          for matchTuple in specialCaseMatches[j]:
            if atomIdx in matchTuple:
              code[-1] = specialCases[j][1]
              found = 1
              break
          if found:
            break
    # render each slot as a zero-padded two-digit field
    codes[i] = ''.join(['%02d'%(x) for x in code])
  return codes
if __name__ == '__main__':
  # quick smoke test: print SATIS codes for molecules that exercise the
  # carbonyl special cases (amide, ketone, ester, carboxyl, carboxylate)
  smis = ['CC(=O)NC','CP(F)(Cl)(Br)(O)',
          'O=CC(=O)C','C(=O)OCC(=O)O','C(=O)[O-]']
  for smi in smis:
    print smi
    m = Chem.MolFromSmiles(smi)
    codes = SATISTypes(m)
    print codes
| bsd-3-clause |
caveman-dick/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_organization.py | 34 | 3051 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_organization
version_added: "2.3"
author: "Wayne Witzel III (@wwitzel3)"
short_description: create, update, or destroy Ansible Tower organizations
description:
- Create, update, or destroy Ansible Tower organizations. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the organization.
required: True
description:
description:
- The description to use for the organization.
required: False
default: null
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Create tower organization
tower_organization:
name: "Foo"
description: "Foo bar organization"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
from ansible.module_utils.ansible_tower import tower_argument_spec, tower_auth_config, tower_check_mode, HAS_TOWER_CLI
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def main():
    """Ansible module entry point: create, update, or delete a Tower
    organization according to the ``name``/``description``/``state`` params.

    Exits via ``module.exit_json`` on success or ``module.fail_json`` on
    tower-cli errors or when tower-cli is not installed.
    """
    argument_spec = tower_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        description=dict(),
        state=dict(choices=['present', 'absent'], default='present'),
    ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    if not HAS_TOWER_CLI:
        module.fail_json(msg='ansible-tower-cli required for this module')
    name = module.params.get('name')
    description = module.params.get('description')
    state = module.params.get('state')
    json_output = {'organization': name, 'state': state}
    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        organization = tower_cli.get_resource('organization')
        try:
            if state == 'present':
                # create_on_missing makes this an idempotent upsert
                result = organization.modify(name=name, description=description, create_on_missing=True)
                json_output['id'] = result['id']
            elif state == 'absent':
                result = organization.delete(name=name)
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
            module.fail_json(msg='Failed to update the organization: {0}'.format(excinfo), changed=False)
    json_output['changed'] = result['changed']
    module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 |
grischa/django-tastypie | tests/basic/tests/resources.py | 20 | 6352 | from django.contrib.auth.models import User
from django.http import HttpRequest
from django.test import TestCase
from tastypie.bundle import Bundle
from tastypie.fields import ToOneField, ToManyField
from tastypie.resources import ModelResource
from basic.api.resources import SlugBasedNoteResource
from basic.models import Note, AnnotatedNote, SlugBasedNote
class InvalidLazyUserResource(ModelResource):
    # Lazy ToManyField whose dotted path points at a resource class that does
    # not exist; used to verify that resolving it raises ImportError.
    notes = ToManyField('basic.api.resources.FooResource', 'notes')

    class Meta:
        queryset = User.objects.all()
class NoPathLazyUserResource(ModelResource):
    # Lazy ToManyField given a bare class name with no module path; also
    # expected to fail with ImportError when resolved.
    notes = ToManyField('FooResource', 'notes')

    class Meta:
        queryset = User.objects.all()
class LazyUserResource(ModelResource):
    # Valid lazy ToManyField referencing NoteResource below by dotted path;
    # api_name is set so related-resource lookups can check it propagates.
    notes = ToManyField('basic.tests.resources.NoteResource', 'notes')

    class Meta:
        queryset = User.objects.all()
        api_name = 'foo'
class NoteResource(ModelResource):
    # Minimal resource over the Note model; fields are auto-introspected.
    class Meta:
        queryset = Note.objects.all()
class AnnotatedNoteResource(ModelResource):
    # Minimal resource over the AnnotatedNote model.
    class Meta:
        queryset = AnnotatedNote.objects.all()
class NoteWithAnnotationsResource(ModelResource):
    # Note resource with a nullable one-to-one relation to its annotation.
    annotated = ToOneField(AnnotatedNoteResource, 'annotated', null=True)

    class Meta:
        queryset = Note.objects.all()
class NoteModelResourceTestCase(TestCase):
    """Checks basic ModelResource introspection and lazy-relation resolution."""

    def test_init(self):
        resource_1 = NoteResource()
        # 8 fields = the Note model fields plus the automatic id/resource_uri.
        self.assertEqual(len(resource_1.fields), 8)
        self.assertNotEqual(resource_1._meta.queryset, None)
        self.assertEqual(resource_1._meta.resource_name, 'note')
        # TextFields should have ``default=''`` to match Django's behavior,
        # even though that's not what is on the field proper.
        self.assertEqual(resource_1.fields['content'].default, '')

    def test_lazy_relations(self):
        ilur = InvalidLazyUserResource()
        nplur = NoPathLazyUserResource()
        lur = LazyUserResource()
        # The lazy ``to`` argument is stored verbatim until first resolution.
        self.assertEqual(ilur.notes.to, 'basic.api.resources.FooResource')
        self.assertEqual(nplur.notes.to, 'FooResource')
        self.assertEqual(lur.notes.to, 'basic.tests.resources.NoteResource')
        # Both invalid paths must surface as ImportError on resolution.
        try:
            ilur.notes.to_class()
            self.fail("to_class on InvalidLazyUserResource should fail!")
        except ImportError:
            pass
        try:
            nplur.notes.to_class()
            self.fail("to_class on NoPathLazyUserResource should fail!")
        except ImportError:
            pass
        to_class = lur.notes.to_class()
        self.assertTrue(isinstance(to_class, NoteResource))
        # This is important, as without passing on the ``api_name``, URL
        # reversals will fail. Fakes the instance as ``None``, since for
        # testing purposes, we don't care.
        related = lur.notes.get_related_resource(None)
        self.assertEqual(related._meta.api_name, 'foo')
class AnnotatedNoteModelResourceTestCase(TestCase):
    """Regression coverage for dehydrating a nullable one-to-one relation."""

    def test_one_to_one_regression(self):
        # Make sure bits don't completely blow up if the related model
        # is gone.
        n1 = Note.objects.get(pk=1)
        resource_1 = NoteWithAnnotationsResource()
        n1_bundle = resource_1.build_bundle(obj=n1)
        dehydrated = resource_1.full_dehydrate(n1_bundle)
class DetailURIKwargsResourceTestCase(TestCase):
    """detail_uri_kwargs must work for both model instances and bundles,
    with pk-based and slug-based resources."""

    def test_correct_detail_uri_model(self):
        n1 = Note.objects.get(pk=1)
        resource = NoteWithAnnotationsResource()
        self.assertEqual(resource.detail_uri_kwargs(n1), {
            'pk': 1,
        })

    def test_correct_detail_uri_bundle(self):
        n1 = Note.objects.get(pk=1)
        resource = NoteWithAnnotationsResource()
        n1_bundle = resource.build_bundle(obj=n1)
        self.assertEqual(resource.detail_uri_kwargs(n1_bundle), {
            'pk': 1,
        })

    def test_correct_slug_detail_uri_model(self):
        n1 = SlugBasedNote.objects.get(pk='first-post')
        resource = SlugBasedNoteResource()
        self.assertEqual(resource.detail_uri_kwargs(n1), {
            'slug': 'first-post',
        })

    def test_correct_slug_detail_uri_bundle(self):
        n1 = SlugBasedNote.objects.get(pk='first-post')
        resource = SlugBasedNoteResource()
        n1_bundle = resource.build_bundle(obj=n1)
        self.assertEqual(resource.detail_uri_kwargs(n1_bundle), {
            'slug': 'first-post',
        })
class SlugBasedResourceTestCase(TestCase):
    """Exercises update/rollback behavior on a resource keyed by slug
    rather than pk."""

    def setUp(self):
        super(SlugBasedResourceTestCase, self).setUp()
        self.n1 = SlugBasedNote.objects.get(pk='first-post')
        self.request = HttpRequest()
        self.request.method = 'PUT'
        self.resource = SlugBasedNoteResource()
        self.n1_bundle = self.resource.build_bundle(obj=self.n1)

    def test_bundle_unique_field(self):
        self.assertEqual(self.resource.get_bundle_detail_data(self.n1_bundle), u'first-post')

    def test_obj_update(self):
        bundle = self.resource.build_bundle(obj=self.n1, data={
            'title': 'Foo!',
        })
        updated_bundle = self.resource.obj_update(bundle, slug='first-post')
        self.assertEqual(updated_bundle.obj.slug, 'first-post')
        self.assertEqual(updated_bundle.obj.title, 'Foo!')
        # Again, without the PK this time.
        self.n1.slug = None
        bundle = self.resource.build_bundle(obj=self.n1, data={
            'title': 'Bar!',
        })
        updated_bundle_2 = self.resource.obj_update(bundle, slug='first-post')
        self.assertEqual(updated_bundle_2.obj.slug, 'first-post')
        self.assertEqual(updated_bundle_2.obj.title, 'Bar!')

    def test_update_in_place(self):
        new_data = {
            'slug': u'foo',
            'title': u'Foo!',
        }
        new_bundle = self.resource.update_in_place(self.request, self.n1_bundle, new_data)
        # Check for updated data.
        self.assertEqual(new_bundle.obj.title, u'Foo!')
        self.assertEqual(new_bundle.obj.slug, u'foo')
        # Make sure it looked up the right instance, even though we didn't
        # hand it a PK...
        self.assertEqual(new_bundle.obj.pk, self.n1_bundle.obj.pk)

    def test_rollback(self):
        bundles = [
            self.n1_bundle
        ]
        self.resource.rollback(bundles)
        # Make sure it's gone.
        self.assertRaises(SlugBasedNote.DoesNotExist, SlugBasedNote.objects.get, pk='first-post')
daniel-fanjul-alcuten/ice | lib/gmock-1.6.0/gtest/test/gtest_break_on_failure_unittest.py | 1050 | 7214 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets an environment variable to a given value; unsets it when the
  given value is None.

  Args:
    env_var: name of the variable to modify in the module-level ``environ``
             copy (not the live process environment).
    value:   new string value, or None to remove the variable.
  """

  if value is not None:
    environ[env_var] = value
  else:
    # pop() with a default removes the key when present and is a no-op
    # otherwise -- replacing the membership test + del of the original.
    environ.pop(env_var, None)
def Run(command):
  """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""

  # The child inherits the module-level ``environ`` copy so tests can tweak
  # GTEST_* variables without touching this process's real environment.
  p = gtest_test_utils.Subprocess(command, env=environ)
  if p.terminated_by_signal:
    return 1
  else:
    return 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
  """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
  the --gtest_break_on_failure flag to turn assertion failures into
  segmentation faults.
  """

  def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
    """Runs gtest_break_on_failure_unittest_ and verifies that it does
    (or does not) have a seg-fault.

    Args:
      env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                        variable; None if the variable should be unset.
      flag_value:       value of the --gtest_break_on_failure flag;
                        None if the flag should not be present.
      expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                        0 otherwise.
    """

    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)

    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value

    # Translate the tri-state flag_value (None / '0' / other) into the
    # actual command-line argument, if any.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
    else:
      flag = '--%s' % BREAK_ON_FAILURE_FLAG

    command = [EXE_PATH]
    if flag:
      command.append(flag)

    if expect_seg_fault:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'

    has_seg_fault = Run(command)

    # Restore a clean environment for the next invocation.
    SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)

    msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
           (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(has_seg_fault == expect_seg_fault, msg)

  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""

    self.RunAndVerify(env_var_value=None,
                      flag_value=None,
                      expect_seg_fault=0)

  def testEnvVar(self):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      expect_seg_fault=1)

  def testFlag(self):
    """Tests using the --gtest_break_on_failure flag."""

    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      expect_seg_fault=1)

  def testFlagOverridesEnvVar(self):
    """Tests that the flag overrides the environment variable."""

    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      expect_seg_fault=1)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      expect_seg_fault=0)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      expect_seg_fault=1)

  def testBreakOnFailureOverridesThrowOnFailure(self):
    """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""

    SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
    try:
      self.RunAndVerify(env_var_value=None,
                        flag_value='1',
                        expect_seg_fault=1)
    finally:
      SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

  if IS_WINDOWS:
    def testCatchExceptionsDoesNotInterfere(self):
      """Tests that gtest_catch_exceptions doesn't interfere."""

      SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
      try:
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)
      finally:
        SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-3.0 |
nathanshartmann/portuguese_word_embeddings | sentence_similarity.py | 1 | 3369 |
"""
This script evaluates a embedding model in a semantic similarity perspective.
It uses the dataset of ASSIN sentence similarity shared task and the method
of Hartmann which achieved the best results in the competition.
ASSIN shared-task website:
http://propor2016.di.fc.ul.pt/?page_id=381
Paper of Hartmann can be found at:
http://www.linguamatica.com/index.php/linguamatica/article/download/v8n2-6/365
"""
from sklearn.linear_model import LinearRegression
from sentence_similarity.utils.assin_eval import read_xml, eval_similarity
from gensim.models import KeyedVectors
from xml.dom import minidom
from numpy import array
from os import path
import pickle
import argparse
DATA_DIR = 'sentence_similarity/data/'
TEST_DIR = path.join(DATA_DIR, 'assin-test-gold/')
def gensim_embedding_difference(data, field1, field2):
    """Calculate the similarity between the sum of all embeddings.

    For every pair in *data*, computes the gensim ``n_similarity`` between
    the token lists found under keys *field1* and *field2*, returning one
    single-element list per pair (shape expected by sklearn's regressors).
    """
    distances = []
    for pair in data:
        # Substitute out-of-vocabulary tokens with the literal token 'unk'.
        # NOTE(review): relies on the module-level ``embeddings`` model and
        # on 'unk' existing in its vocabulary -- confirm for new models.
        e1 = [i if i in embeddings else 'unk' for i in pair[field1]]
        e2 = [i if i in embeddings else 'unk' for i in pair[field2]]
        distances.append([embeddings.n_similarity(e1, e2)])
    return distances
def evaluate_testset(x, y, test):
    """Fit a linear regression on the training features/targets and return
    its predictions for the test features."""
    model = LinearRegression()
    model.fit(x, y)
    return model.predict(test)
def write_xml(filename, pred):
    """Write predicted similarity scores back into an ASSIN XML file.

    Sets the ``similarity`` attribute of the i-th ``<pair>`` element to
    ``str(pred[i])`` and rewrites *filename* in place.

    :param filename: path to an XML document containing ``<pair>`` elements
    :param pred: sequence of scores, one per pair, in document order
    """
    with open(filename) as fp:
        xml = minidom.parse(fp)
    pairs = xml.getElementsByTagName('pair')
    # enumerate() replaces the original pairs.index(pair) lookup, which was
    # O(n) per element (O(n^2) overall) and fragile if two nodes compared
    # equal.
    for position, pair in enumerate(pairs):
        pair.setAttribute('similarity', str(pred[position]))
    with open(filename, 'w') as fp:
        fp.write(xml.toxml())
if __name__ == '__main__':
    # Parser descriptors
    parser = argparse.ArgumentParser(
        description='''Sentence similarity evaluation for word embeddings in
        brazilian and european variants of Portuguese language. It is expected
        a word embedding model in text format.''')
    parser.add_argument('embedding',
                        type=str,
                        help='embedding model')
    parser.add_argument('lang',
                        choices=['br', 'eu'],
                        help='{br, eu} choose PT-BR or PT-EU testset')
    args = parser.parse_args()
    lang = args.lang
    emb = args.embedding
    # Loading embedding model (word2vec text format; undecodable bytes are
    # ignored rather than raising)
    embeddings = KeyedVectors.load_word2vec_format(emb,
                                                   binary=False,
                                                   unicode_errors="ignore")
    # Loading evaluation data and parsing it
    with open('%sassin-pt%s-train.pkl' % (DATA_DIR, lang), 'rb') as fp:
        data = pickle.load(fp)
    with open('%sassin-pt%s-test-gold.pkl' % (DATA_DIR, lang), 'rb') as fp:
        test = pickle.load(fp)
    # Getting features: one embedding-similarity value per sentence pair
    features = gensim_embedding_difference(data, 'tokens_t1', 'tokens_t2')
    features_test = gensim_embedding_difference(test, 'tokens_t1', 'tokens_t2')
    # Predicting similarities with a linear regression fit on the train split
    results = array([float(i['result']) for i in data])
    results_test = evaluate_testset(features, results, features_test)
    write_xml('%soutput.xml' % DATA_DIR, results_test)
    # Evaluating predictions against the gold annotations
    pairs_gold = read_xml('%sassin-pt%s-test.xml' % (TEST_DIR, lang), True)
    pairs_sys = read_xml('%soutput.xml' % DATA_DIR, True)
    eval_similarity(pairs_gold, pairs_sys)
| gpl-3.0 |
pawaranand/phr-frappe | frappe/app.py | 30 | 4041 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import sys, os
import json
import logging
from werkzeug.wrappers import Request, Response
from werkzeug.local import LocalManager
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.contrib.profiler import ProfilerMiddleware
from werkzeug.wsgi import SharedDataMiddleware
import mimetypes
import frappe
import frappe.handler
import frappe.auth
import frappe.api
import frappe.utils.response
import frappe.website.render
from frappe.utils import get_site_name
from frappe.middlewares import StaticDataMiddleware
local_manager = LocalManager([frappe.local])
_site = None
_sites_path = os.environ.get("SITES_PATH", ".")
logger = frappe.get_logger()
@Request.application
def application(request):
	"""WSGI entry point: route the request to the RPC handler, REST API,
	backup download, or website renderer, translating exceptions into HTTP
	responses and committing/rolling back the DB transaction as appropriate."""
	frappe.local.request = request
	frappe.local.is_ajax = frappe.get_request_header("X-Requested-With")=="XMLHttpRequest"
	response = None
	try:
		# assume failure until the request completes; see the finally block
		rollback = True
		init_site(request)
		if frappe.local.conf.get('maintenance_mode'):
			raise frappe.SessionStopped
		make_form_dict(request)
		frappe.local.http_request = frappe.auth.HTTPRequest()
		# dispatch order: explicit cmd (RPC), /api/, /backups, then website pages
		if frappe.local.form_dict.cmd:
			response = frappe.handler.handle()
		elif frappe.request.path.startswith("/api/"):
			response = frappe.api.handle()
		elif frappe.request.path.startswith('/backups'):
			response = frappe.utils.response.download_backup(request.path)
		elif frappe.local.request.method in ('GET', 'HEAD'):
			response = frappe.website.render.render(request.path)
		else:
			raise NotFound
	except HTTPException, e:
		# werkzeug HTTPExceptions are themselves valid WSGI responses
		return e
	except frappe.SessionStopped, e:
		response = frappe.utils.response.handle_session_stopped()
	except Exception, e:
		# honor an http_status_code attribute on the exception, default to 500
		http_status_code = getattr(e, "http_status_code", 500)
		if frappe.local.is_ajax:
			response = frappe.utils.response.report_error(http_status_code)
		else:
			frappe.respond_as_web_page("Server Error",
				"<pre>"+frappe.get_traceback()+"</pre>",
				http_status_code=http_status_code)
			response = frappe.website.render.render("message", http_status_code=http_status_code)
		if e.__class__ == frappe.AuthenticationError:
			if hasattr(frappe.local, "login_manager"):
				frappe.local.login_manager.clear_cookies()
		if http_status_code==500:
			logger.error('Request Error')
	else:
		# success path: persist writes and mark that no rollback is needed
		if frappe.local.request.method in ("POST", "PUT") and frappe.db:
			frappe.db.commit()
			rollback = False
		# update session
		if getattr(frappe.local, "session_obj", None):
			updated_in_db = frappe.local.session_obj.update()
			if updated_in_db:
				frappe.db.commit()
	finally:
		# any failed write request is rolled back before the response leaves
		if frappe.local.request.method in ("POST", "PUT") and frappe.db and rollback:
			frappe.db.rollback()
	# set cookies
	if response and hasattr(frappe.local, 'cookie_manager'):
		frappe.local.cookie_manager.flush_cookies(response=response)
	frappe.destroy()
	return response
def init_site(request):
	"""Resolve which site this request targets and initialize frappe for it.

	Resolution order: module-level override (``_site``), the
	``X-Frappe-Site-Name`` header, then the request's host name.
	Raises NotFound when the resolved site has no configured database.
	"""
	site = _site or request.headers.get('X-Frappe-Site-Name') or get_site_name(request.host)
	frappe.init(site=site, sites_path=_sites_path)
	if not (frappe.local.conf and frappe.local.conf.db_name):
		# site does not exist
		raise NotFound
def make_form_dict(request):
	"""Build frappe.local.form_dict from POST form data (or the query string
	when there is no form body), keeping only the first value when werkzeug
	supplies a list/tuple per key."""
	frappe.local.form_dict = frappe._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \
		for k, v in (request.form or request.args).iteritems() })
application = local_manager.make_middleware(application)
def serve(port=8000, profile=False, site=None, sites_path='.'):
	"""Run the development server on 0.0.0.0:*port*.

	:param port: TCP port to listen on
	:param profile: wrap the app in werkzeug's profiler middleware
	:param site: pin all requests to one site (otherwise resolved per request)
	:param sites_path: root directory containing the sites
	"""
	global application, _site, _sites_path
	_site = site
	_sites_path = sites_path
	from werkzeug.serving import run_simple
	if profile:
		application = ProfilerMiddleware(application, sort_by=('tottime', 'calls'))
	# serve /assets and /files directly unless statics are handled upstream
	if not os.environ.get('NO_STATICS'):
		application = SharedDataMiddleware(application, {
			'/assets': os.path.join(sites_path, 'assets'),
		})
		application = StaticDataMiddleware(application, {
			'/files': os.path.abspath(sites_path)
		})
	run_simple('0.0.0.0', int(port), application, use_reloader=True,
		use_debugger=True, use_evalex=True, threaded=True)
| mit |
sankhesh/VTK | Examples/Annotation/Python/annotatePick.py | 9 | 2637 | #!/usr/bin/env python
# This example demonstrates cell picking using vtkCellPicker. It
# displays the results of picking using a vtkTextMapper.
from __future__ import print_function
import vtk
# create a sphere source, mapper, and actor
sphere = vtk.vtkSphereSource()
sphereMapper = vtk.vtkPolyDataMapper()
sphereMapper.SetInputConnection(sphere.GetOutputPort())
sphereMapper.GlobalImmediateModeRenderingOn()
sphereActor = vtk.vtkLODActor()
sphereActor.SetMapper(sphereMapper)
# create the spikes by glyphing the sphere with a cone. Create the
# mapper and actor for the glyphs.
cone = vtk.vtkConeSource()
glyph = vtk.vtkGlyph3D()
glyph.SetInputConnection(sphere.GetOutputPort())
glyph.SetSourceConnection(cone.GetOutputPort())
glyph.SetVectorModeToUseNormal()
glyph.SetScaleModeToScaleByVector()
glyph.SetScaleFactor(0.25)
spikeMapper = vtk.vtkPolyDataMapper()
spikeMapper.SetInputConnection(glyph.GetOutputPort())
spikeActor = vtk.vtkLODActor()
spikeActor.SetMapper(spikeMapper)
# Create a text mapper and actor to display the results of picking.
textMapper = vtk.vtkTextMapper()
tprop = textMapper.GetTextProperty()
tprop.SetFontFamilyToArial()
tprop.SetFontSize(10)
tprop.BoldOn()
tprop.ShadowOn()
tprop.SetColor(1, 0, 0)
textActor = vtk.vtkActor2D()
textActor.VisibilityOff()
textActor.SetMapper(textMapper)
# Create a cell picker.
picker = vtk.vtkCellPicker()
# Create a Python function to create the text for the text mapper used
# to display the results of picking.
def annotatePick(object, event):
    """EndPickEvent observer: show the picked world coordinate next to the
    pick location, or hide the annotation when nothing was hit."""
    print("pick")
    global picker, textActor, textMapper
    if picker.GetCellId() < 0:
        # no cell under the cursor
        textActor.VisibilityOff()
    else:
        selPt = picker.GetSelectionPoint()    # display (screen) coordinates
        pickPos = picker.GetPickPosition()    # world coordinates
        textMapper.SetInput("(%.6f, %.6f, %.6f)"%pickPos)
        textActor.SetPosition(selPt[:2])
        textActor.VisibilityOn()
# Now at the end of the pick event call the above function.
picker.AddObserver("EndPickEvent", annotatePick)
# Create the Renderer, RenderWindow, etc. and set the Picker.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.SetPicker(picker)
# Add the actors to the renderer, set the background and size
ren.AddActor2D(textActor)
ren.AddActor(sphereActor)
ren.AddActor(spikeActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(300, 300)
# Get the camera and zoom in closer to the image.
ren.ResetCamera()
cam1 = ren.GetActiveCamera()
cam1.Zoom(1.4)
iren.Initialize()
# Initially pick the cell at this location.
picker.Pick(85, 126, 0, ren)
renWin.Render()
iren.Start()
| bsd-3-clause |
aholkner/bacon | bacon/resource.py | 1 | 1151 | import os
import sys
from bacon import native
#: Path to resources. Set to the script directory by default during development, and the executable
#: directory for frozen applications.
resource_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
# NOTE(review): with the mock native backend there is no real install
# location, so resources resolve relative to the working directory.
if native._mock_native:
    resource_dir = ''
# Or use frozen executable path
if hasattr(sys, 'frozen'):
    if sys.frozen:
        resource_dir = os.path.dirname(sys.executable)
# In PyInstaller --onefile mode, use sys._MEIPASS temporary
# directory to find local files if they are not found in default resource
# directory
_dll_dir = None
if hasattr(sys, '_MEIPASS'):
    _dll_dir = sys._MEIPASS
def get_resource_path(filename):
    '''Get a path to the given filename to load as a resource.  All non-absolute filenames passed to
    :class:`Image`, :class:`Font`, :class:`Sound`, etc are transformed through this function.

    :param str filename: a relative path to a resource file
    :return str: an absolute path to the file
    '''
    # Prefer the normal resource directory; fall back to the PyInstaller
    # --onefile extraction directory when the file is missing there.
    candidate = os.path.join(resource_dir, filename)
    if _dll_dir is not None and not os.path.exists(candidate):
        candidate = os.path.join(_dll_dir, filename)
    return candidate
kmonsoor/fabric | fabric/main.py | 27 | 25730 | """
This module contains Fab's `main` method plus related subroutines.
`main` is executed as the command line ``fab`` program and takes care of
parsing options and commands, loading the user settings file, loading a
fabfile, and executing the commands given.
The other callables defined in this module are internal only. Anything useful
to individuals leveraging Fabric as a library, should be kept elsewhere.
"""
import getpass
from operator import isMappingType
from optparse import OptionParser
import os
import sys
import types
# For checking callables against the API, & easy mocking
from fabric import api, state, colors
from fabric.contrib import console, files, project
from fabric.network import disconnect_all, ssh
from fabric.state import env_options
from fabric.tasks import Task, execute, get_task_details
from fabric.task_utils import _Dict, crawl
from fabric.utils import abort, indent, warn, _pty_size
# One-time calculation of "all internal callables" to avoid doing this on every
# check of a given fabfile callable (in is_classic_task()).
_modules = [api, project, files, console, colors]
_internals = reduce(lambda x, y: x + filter(callable, vars(y).values()),
_modules,
[]
)
# Module recursion cache
class _ModuleCache(object):
"""
Set-like object operating on modules and storing __name__s internally.
"""
def __init__(self):
self.cache = set()
def __contains__(self, value):
return value.__name__ in self.cache
def add(self, value):
return self.cache.add(value.__name__)
def clear(self):
return self.cache.clear()
_seen = _ModuleCache()
def load_settings(path):
    """
    Take given file path and return dictionary of any key=value pairs found.

    Lines that are blank/whitespace-only or whose first non-whitespace
    character is ``#`` are skipped; everything before the first ``=`` becomes
    the key and everything after it the value, both stripped.

    Usage docs are in sites/docs/usage/fab.rst, in "Settings files."
    """
    if os.path.exists(path):
        # Context manager closes the handle deterministically; the original
        # left the file open until garbage collection.
        with open(path, 'r') as fd:
            lines = fd.readlines()
        # Skip comments and whitespace-only lines (the latter previously
        # produced a bogus {'': ''} entry).
        settings = [s for s in lines
                    if s.strip() and not s.lstrip().startswith("#")]
        return dict((k.strip(), v.strip()) for k, _, v in
                    [s.partition('=') for s in settings])
    # Handle nonexistent or empty settings file
    return {}
def _is_package(path):
"""
Is the given path a Python package?
"""
return (
os.path.isdir(path)
and os.path.exists(os.path.join(path, '__init__.py'))
)
def find_fabfile(names=None):
    """
    Attempt to locate a fabfile, either explicitly or by searching parent dirs.

    Returns the absolute path of the first match, or None when nothing is
    found. Candidates must either end in ``.py`` or be package directories.

    Usage docs are in sites/docs/usage/fab.rst, in "Fabfile discovery."
    """
    # Obtain env value if not given specifically
    if names is None:
        names = [state.env.fabfile]
    # Create .py version if necessary
    if not names[0].endswith('.py'):
        names += [names[0] + '.py']
    # Does the name contain path elements?
    if os.path.dirname(names[0]):
        # If so, expand home-directory markers and test for existence
        for name in names:
            expanded = os.path.expanduser(name)
            if os.path.exists(expanded):
                if name.endswith('.py') or _is_package(expanded):
                    return os.path.abspath(expanded)
    else:
        # Otherwise, start in cwd and work downwards towards filesystem root
        path = '.'
        # Stop before falling off root of filesystem (should be platform
        # agnostic)
        while os.path.split(os.path.abspath(path))[1]:
            for name in names:
                joined = os.path.join(path, name)
                if os.path.exists(joined):
                    if name.endswith('.py') or _is_package(joined):
                        return os.path.abspath(joined)
            # climb one directory towards the root for the next iteration
            path = os.path.join('..', path)
    # Implicit 'return None' if nothing was found
def is_classic_task(tup):
    """
    Takes (name, object) tuple, returns True if it's a non-Fab public callable.

    "Non-Fab" means the callable is not one of Fabric's own API callables
    (collected once into the module-level ``_internals`` list); "public"
    means the name has no leading underscore.
    """
    name, func = tup
    try:
        is_classic = (
            callable(func)
            and (func not in _internals)
            and not name.startswith('_')
        )
    # Handle poorly behaved __eq__ implementations
    except (ValueError, TypeError):
        is_classic = False
    return is_classic
def load_fabfile(path, importer=None):
    """
    Import given fabfile path and return (docstring, callables).

    Specifically, the fabfile's ``__doc__`` attribute (a string) and a
    dictionary of ``{'name': callable}`` containing all callables which pass
    the "is a Fabric task" test.

    :param path: filesystem path of the fabfile to import
    :param importer: import function; defaults to the builtin ``__import__``
        (overridable for testing)
    """
    if importer is None:
        importer = __import__
    # Get directory and fabfile name
    directory, fabfile = os.path.split(path)
    # If the directory isn't in the PYTHONPATH, add it so our import will work
    added_to_path = False
    index = None
    if directory not in sys.path:
        sys.path.insert(0, directory)
        added_to_path = True
    # If the directory IS in the PYTHONPATH, move it to the front temporarily,
    # otherwise other fabfiles -- like Fabric's own -- may scoop the intended
    # one.
    else:
        i = sys.path.index(directory)
        if i != 0:
            # Store index for later restoration
            index = i
            # Add to front, then remove from original position
            sys.path.insert(0, directory)
            del sys.path[i + 1]
    # Perform the import (trimming off the .py)
    imported = importer(os.path.splitext(fabfile)[0])
    # Remove directory from path if we added it ourselves (just to be neat)
    if added_to_path:
        del sys.path[0]
    # Put back in original index if we moved it
    if index is not None:
        sys.path.insert(index + 1, directory)
        del sys.path[0]
    # Actually load tasks
    docstring, new_style, classic, default = load_tasks_from_module(imported)
    # new-style (Task-object) tasks win over classic callables when present
    tasks = new_style if state.env.new_style_tasks else classic
    # Clean up after ourselves
    _seen.clear()
    return docstring, tasks, default
def load_tasks_from_module(imported):
    """
    Collect every task defined by *imported* (an already-imported module).

    Returns a 4-tuple ``(docstring, new_style, classic, default)`` whose last
    three elements come straight from `extract_tasks`.
    """
    module_vars = vars(imported)
    # Obey the use of <module>.__all__ if it is present
    if "__all__" in module_vars:
        exported = module_vars["__all__"]
        candidates = [
            (name, module_vars[name])
            for name in module_vars if name in exported
        ]
    else:
        candidates = module_vars.items()
    # Don't include Fab operations or underscored callables.
    new_style, classic, default = extract_tasks(candidates)
    return imported.__doc__, new_style, classic, default
def extract_tasks(imported_vars):
    """
    Handle extracting tasks from a given list of variables

    ``imported_vars`` is an iterable of ``(name, object)`` pairs.  Returns
    ``(new_style_tasks, classic_tasks, default_task)``.  Seeing any new-style
    ``Task`` object flips ``state.env.new_style_tasks`` on, which later
    decides which of the two mappings is actually used.
    """
    new_style_tasks = _Dict()
    classic_tasks = {}
    default_task = None
    if 'new_style_tasks' not in state.env:
        state.env.new_style_tasks = False
    for tup in imported_vars:
        name, obj = tup
        if is_task_object(obj):
            state.env.new_style_tasks = True
            # Use instance.name if defined
            if obj.name and obj.name != 'undefined':
                new_style_tasks[obj.name] = obj
            else:
                obj.name = name
                new_style_tasks[name] = obj
            # Handle aliasing
            if obj.aliases is not None:
                for alias in obj.aliases:
                    new_style_tasks[alias] = obj
            # Handle defaults
            if obj.is_default:
                default_task = obj
        elif is_classic_task(tup):
            classic_tasks[name] = obj
        elif is_task_module(obj):
            # Recurse into submodules, namespacing their tasks under the
            # attribute name (`name`) the module was imported as.
            docs, newstyle, classic, default = load_tasks_from_module(obj)
            for task_name, task in newstyle.items():
                if name not in new_style_tasks:
                    new_style_tasks[name] = _Dict()
                new_style_tasks[name][task_name] = task
            if default is not None:
                new_style_tasks[name].default = default
    return new_style_tasks, classic_tasks, default_task
def is_task_module(a):
    """
    Return True the first time *a* is seen and is a module object.

    Modules already recorded in the ``_seen`` set -- and non-module values --
    fall through to an implicit None, which callers treat as false.  The
    ``_seen`` bookkeeping prevents infinite recursion on circular imports.
    """
    if not isinstance(a, types.ModuleType):
        return
    if a in _seen:
        return
    # Remember the module so a circular import chain terminates.
    _seen.add(a)
    # Signal that the caller should recurse into it.
    return True
def is_task_object(a):
    """
    Determine whether *a* is a ``Task`` instance in task-object mode.

    A single True result signals that all tasks within the fabfile module
    must be Task objects.
    """
    if not isinstance(a, Task):
        return False
    # Return the flag itself (truthiness is what callers rely on).
    return a.use_task_objects
def parse_options():
    """
    Handle command-line options with optparse.OptionParser.

    Returns a three-tuple ``(parser, options, arguments)``: the parser object
    itself (callers also need its ``largs``/``rargs`` and help output), the
    parsed option values, and the remaining positional arguments, largely for
    use in `parse_arguments`.
    """
    #
    # Initialize
    #
    parser = OptionParser(
        usage=("fab [options] <command>"
            "[:arg1,arg2=val2,host=foo,hosts='h1;h2',...] ..."))
    #
    # Define options that don't become `env` vars (typically ones which cause
    # Fabric to do something other than its normal execution, such as
    # --version)
    #
    # Display info about a specific command
    parser.add_option('-d', '--display',
        metavar='NAME',
        help="print detailed info about command NAME"
    )
    # Control behavior of --list
    LIST_FORMAT_OPTIONS = ('short', 'normal', 'nested')
    parser.add_option('-F', '--list-format',
        choices=LIST_FORMAT_OPTIONS,
        default='normal',
        metavar='FORMAT',
        help="formats --list, choices: %s" % ", ".join(LIST_FORMAT_OPTIONS)
    )
    parser.add_option('-I', '--initial-password-prompt',
        action='store_true',
        default=False,
        help="Force password prompt up-front"
    )
    # List Fab commands found in loaded fabfiles/source files
    parser.add_option('-l', '--list',
        action='store_true',
        dest='list_commands',
        default=False,
        help="print list of possible commands and exit"
    )
    # Allow setting of arbitrary env vars at runtime.
    parser.add_option('--set',
        metavar="KEY=VALUE,...",
        dest='env_settings',
        default="",
        help="comma separated KEY=VALUE pairs to set Fab env vars"
    )
    # Like --list, but text processing friendly
    parser.add_option('--shortlist',
        action='store_true',
        dest='shortlist',
        default=False,
        help="alias for -F short --list"
    )
    # Version number (optparse gives you --version but we have to do it
    # ourselves to get -V too. sigh)
    parser.add_option('-V', '--version',
        action='store_true',
        dest='show_version',
        default=False,
        help="show program's version number and exit"
    )
    #
    # Add in options which are also destined to show up as `env` vars.
    #
    for option in env_options:
        parser.add_option(option)
    #
    # Finalize
    #
    # Return three-tuple of parser + the output from parse_args (opt obj, args)
    opts, args = parser.parse_args()
    return parser, opts, args
def _is_task(name, value):
    """
    Return True when ``(name, value)`` denotes either flavour of Fabric task
    (as opposed to e.g. a dict or int).
    """
    if is_classic_task((name, value)):
        return True
    return is_task_object(value)
def _sift_tasks(mapping):
    """
    Split *mapping* into two sorted name lists: tasks and sub-collections.
    """
    tasks = []
    collections = []
    for name, value in mapping.iteritems():
        if _is_task(name, value):
            tasks.append(name)
        elif isMappingType(value):
            # Nested mappings represent task namespaces (submodules).
            collections.append(name)
    return sorted(tasks), sorted(collections)
def _task_names(mapping):
    """
    Flatten & sort task names in a breadth-first fashion.

    Tasks are always listed before submodules at the same level, but within
    those two groups, sorting is alphabetical.
    """
    tasks, collections = _sift_tasks(mapping)
    for collection in collections:
        module = mapping[collection]
        # A submodule with a default task is itself invokable by name.
        if hasattr(module, 'default'):
            tasks.append(collection)
        for subname in _task_names(module):
            tasks.append(".".join((collection, subname)))
    return tasks
def _print_docstring(docstrings, name):
    """
    Return task *name*'s docstring, or a falsy value when disabled/absent.

    ``docstrings`` being false disables docstring display entirely.
    """
    if not docstrings:
        return False
    doc = crawl(name, state.commands).__doc__
    # Only genuine strings count; implicit None otherwise.
    if isinstance(doc, basestring):
        return doc
def _normal_list(docstrings=True):
    """
    Render the 'normal' --list format: one indented line per task.

    Each line is the task name left-justified to the longest name, followed
    by the first line of its docstring (when ``docstrings`` is true),
    truncated to the terminal width with a trailing '...'.
    """
    result = []
    task_names = _task_names(state.commands)
    # Want separator between name, description to be straight col
    max_len = reduce(lambda a, b: max(a, len(b)), task_names, 0)
    sep = '  '
    trail = '...'
    # Leave one column spare so lines never wrap, plus room for the ellipsis.
    max_width = _pty_size()[1] - 1 - len(trail)
    for name in task_names:
        output = None
        docstring = _print_docstring(docstrings, name)
        if docstring:
            lines = filter(None, docstring.splitlines())
            first_line = lines[0].strip()
            # Truncate it if it's longer than N chars
            size = max_width - (max_len + len(sep) + len(trail))
            if len(first_line) > size:
                first_line = first_line[:size] + trail
            output = name.ljust(max_len) + sep + first_line
        # Or nothing (so just the name)
        else:
            output = name
        result.append(indent(output))
    return result
def _nested_list(mapping, level=1):
    """
    Recursively render task/collection names, indented 4 spaces per level.

    Tasks at each level come first, then each sub-collection as a
    "name:" header followed by its own (deeper-indented) contents.
    """
    tasks, collections = _sift_tasks(mapping)
    result = [indent(task, spaces=level * 4) for task in tasks]
    for collection in collections:
        # Section/module "header"
        result.append(indent(collection + ":", spaces=level * 4))
        # Recurse
        result.extend(_nested_list(mapping[collection], level + 1))
    return result
# Headers used by list_commands() when rendering --list output.
COMMANDS_HEADER = "Available commands"
NESTED_REMINDER = " (remember to call as module.[...].task)"
def list_commands(docstring, format_):
    """
    Return the lines printed for ``-l``/``--list``.

    If ``docstring`` is non-empty, it will be printed before the task list.
    ``format_`` should conform to the options specified in
    ``LIST_FORMAT_OPTIONS``, e.g. ``"short"``, ``"normal"``.
    """
    # 'short' mode is just the bare, flattened task names.
    if format_ == "short":
        return _task_names(state.commands)
    result = []
    # Docstring at top, if applicable (ensure it ends with a newline).
    if docstring:
        if docstring.endswith("\n"):
            result.append(docstring)
        else:
            result.append(docstring + "\n")
    header = COMMANDS_HEADER
    if format_ == "nested":
        header = header + NESTED_REMINDER
    result.append(header + ":\n")
    if format_ == "normal":
        result.extend(_normal_list())
    else:
        result.extend(_nested_list(state.commands))
    return result
def display_command(name):
    """
    Print command function's docstring, then exit. Invoked with -d/--display.

    Aborts (showing the list of valid names) when *name* does not resolve to
    a known task.  Always terminates the process via ``sys.exit(0)``.
    """
    # Sanity check
    command = crawl(name, state.commands)
    if command is None:
        msg = "Task '%s' does not appear to exist. Valid task names:\n%s"
        abort(msg % (name, "\n".join(_normal_list(False))))
    # Print out nicely presented docstring if found
    if hasattr(command, '__details__'):
        # Tasks may supply a custom detail renderer.
        task_details = command.__details__()
    else:
        task_details = get_task_details(command)
    if task_details:
        print("Displaying detailed information for task '%s':" % name)
        print('')
        print(indent(task_details, strip=True))
        print('')
    # Or print notice if not
    else:
        print("No detailed information available for task '%s':" % name)
    sys.exit(0)
def _escape_split(sep, argstr):
"""
Allows for escaping of the separator: e.g. task:arg='foo\, bar'
It should be noted that the way bash et. al. do command line parsing, those
single quotes are required.
"""
escaped_sep = r'\%s' % sep
if escaped_sep not in argstr:
return argstr.split(sep)
before, _, after = argstr.partition(escaped_sep)
startlist = before.split(sep) # a regular split is fine here
unfinished = startlist[-1]
startlist = startlist[:-1]
# recurse because there may be more escaped separators
endlist = _escape_split(sep, after)
# finish building the escaped value. we use endlist[0] becaue the first
# part of the string sent in recursion is the rest of the escaped value.
unfinished += sep + endlist[0]
return startlist + [unfinished] + endlist[1:] # put together all the parts
def parse_arguments(arguments):
    """
    Parse string list into list of tuples: command, args, kwargs, hosts, roles.

    Each input string looks like
    ``task[:arg1,key=value,host=...,hosts='h1;h2',...]``; the returned tuples
    are ``(name, args, kwargs, hosts, roles, exclude_hosts)``.
    See sites/docs/usage/fab.rst, section on "per-task arguments" for details.
    """
    cmds = []
    for cmd in arguments:
        args = []
        kwargs = {}
        hosts = []
        roles = []
        exclude_hosts = []
        if ':' in cmd:
            cmd, argstr = cmd.split(':', 1)
            for pair in _escape_split(',', argstr):
                result = _escape_split('=', pair)
                if len(result) > 1:
                    k, v = result
                    # Catch, interpret host/hosts/role/roles/exclude_hosts
                    # kwargs
                    if k in ['host', 'hosts', 'role', 'roles', 'exclude_hosts']:
                        if k == 'host':
                            hosts = [v.strip()]
                        elif k == 'hosts':
                            hosts = [x.strip() for x in v.split(';')]
                        elif k == 'role':
                            roles = [v.strip()]
                        elif k == 'roles':
                            roles = [x.strip() for x in v.split(';')]
                        elif k == 'exclude_hosts':
                            exclude_hosts = [x.strip() for x in v.split(';')]
                    # Otherwise, record as usual
                    else:
                        kwargs[k] = v
                else:
                    # No '=' present: plain positional argument.
                    args.append(result[0])
        cmds.append((cmd, args, kwargs, hosts, roles, exclude_hosts))
    return cmds
def parse_remainder(arguments):
    """
    Merge list of "remainder arguments" (everything after ``--``) into a
    single space-separated command string.
    """
    separator = ' '
    return separator.join(arguments)
def update_output_levels(show, hide):
    """
    Update state.output values as per given comma-separated list of key names.

    For example, ``update_output_levels(show='debug,warnings')`` is
    functionally equivalent to ``state.output['debug'] = True ;
    state.output['warnings'] = True``. Conversely, anything given to ``hide``
    sets the values to ``False``.
    """
    for keys, flag in ((show, True), (hide, False)):
        if not keys:
            continue
        for key in keys.split(','):
            state.output[key] = flag
def show_commands(docstring, format, code=0):
    """
    Print the --list output for ``state.commands`` and exit with ``code``.
    """
    output_lines = list_commands(docstring, format)
    print("\n".join(output_lines))
    sys.exit(code)
def main(fabfile_locations=None):
    """
    Main command-line execution loop.

    Parses CLI options, loads the fabfile, resolves the requested tasks and
    executes them in order, translating exceptions into exit codes.  Always
    disconnects all open SSH sessions before exiting.

    ``fabfile_locations`` is an optional list of candidate fabfile paths,
    forwarded to ``find_fabfile``.
    """
    try:
        # Parse command line options
        parser, options, arguments = parse_options()
        # Handle regular args vs -- args
        arguments = parser.largs
        remainder_arguments = parser.rargs
        # Allow setting of arbitrary env keys.
        # This comes *before* the "specific" env_options so that those may
        # override these ones. Specific should override generic, if somebody
        # was silly enough to specify the same key in both places.
        # E.g. "fab --set shell=foo --shell=bar" should have env.shell set to
        # 'bar', not 'foo'.
        for pair in _escape_split(',', options.env_settings):
            pair = _escape_split('=', pair)
            # "--set x" => set env.x to True
            # "--set x=" => set env.x to ""
            key = pair[0]
            value = True
            if len(pair) == 2:
                value = pair[1]
            state.env[key] = value
        # Update env with any overridden option values
        # NOTE: This needs to remain the first thing that occurs
        # post-parsing, since so many things hinge on the values in env.
        for option in env_options:
            state.env[option.dest] = getattr(options, option.dest)
        # Handle --hosts, --roles, --exclude-hosts (comma separated string =>
        # list)
        for key in ['hosts', 'roles', 'exclude_hosts']:
            if key in state.env and isinstance(state.env[key], basestring):
                state.env[key] = state.env[key].split(',')
        # Feed the env.tasks : tasks that are asked to be executed.
        state.env['tasks'] = arguments
        # Handle output control level show/hide
        update_output_levels(show=options.show, hide=options.hide)
        # Handle version number option
        if options.show_version:
            print("Fabric %s" % state.env.version)
            print("Paramiko %s" % ssh.__version__)
            sys.exit(0)
        # Load settings from user settings file, into shared env dict.
        state.env.update(load_settings(state.env.rcfile))
        # Find local fabfile path or abort
        fabfile = find_fabfile(fabfile_locations)
        if not fabfile and not remainder_arguments:
            abort("""Couldn't find any fabfiles!
Remember that -f can be used to specify fabfile path, and use -h for help.""")
        # Store absolute path to fabfile in case anyone needs it
        state.env.real_fabfile = fabfile
        # Load fabfile (which calls its module-level code, including
        # tweaks to env values) and put its commands in the shared commands
        # dict
        default = None
        if fabfile:
            docstring, callables, default = load_fabfile(fabfile)
            state.commands.update(callables)
        # Handle case where we were called bare, i.e. just "fab", and print
        # a help message.
        actions = (options.list_commands, options.shortlist, options.display,
            arguments, remainder_arguments, default)
        if not any(actions):
            parser.print_help()
            sys.exit(1)
        # Abort if no commands found
        if not state.commands and not remainder_arguments:
            abort("Fabfile didn't contain any commands!")
        # Now that we're settled on a fabfile, inform user.
        if state.output.debug:
            if fabfile:
                print("Using fabfile '%s'" % fabfile)
            else:
                print("No fabfile loaded -- remainder command only")
        # Shortlist is now just an alias for the "short" list format;
        # it overrides use of --list-format if somebody were to specify both
        if options.shortlist:
            options.list_format = 'short'
            options.list_commands = True
        # List available commands
        if options.list_commands:
            show_commands(docstring, options.list_format)
        # Handle show (command-specific help) option
        if options.display:
            display_command(options.display)
        # If user didn't specify any commands to run, show help
        if not (arguments or remainder_arguments or default):
            parser.print_help()
            sys.exit(0) # Or should it exit with error (1)?
        # Parse arguments into commands to run (plus args/kwargs/hosts)
        commands_to_run = parse_arguments(arguments)
        # Parse remainders into a faux "command" to execute
        remainder_command = parse_remainder(remainder_arguments)
        # Figure out if any specified task names are invalid
        unknown_commands = []
        for tup in commands_to_run:
            if crawl(tup[0], state.commands) is None:
                unknown_commands.append(tup[0])
        # Abort if any unknown commands were specified
        if unknown_commands and not state.env.get('skip_unknown_tasks', False):
            warn("Command(s) not found:\n%s" \
                % indent(unknown_commands))
            show_commands(None, options.list_format, 1)
        # Generate remainder command and insert into commands, commands_to_run
        if remainder_command:
            r = '<remainder>'
            state.commands[r] = lambda: api.run(remainder_command)
            commands_to_run.append((r, [], {}, [], [], []))
        # Ditto for a default, if found
        if not commands_to_run and default:
            commands_to_run.append((default.name, [], {}, [], [], []))
        # Initial password prompt, if requested
        if options.initial_password_prompt:
            prompt = "Initial value for env.password: "
            state.env.password = getpass.getpass(prompt)
        if state.output.debug:
            names = ", ".join(x[0] for x in commands_to_run)
            print("Commands to run: %s" % names)
        # At this point all commands must exist, so execute them in order.
        for name, args, kwargs, arg_hosts, arg_roles, arg_exclude_hosts in commands_to_run:
            execute(
                name,
                hosts=arg_hosts,
                roles=arg_roles,
                exclude_hosts=arg_exclude_hosts,
                *args, **kwargs
            )
        # If we got here, no errors occurred, so print a final note.
        if state.output.status:
            print("\nDone.")
    except SystemExit:
        # a number of internal functions might raise this one.
        raise
    except KeyboardInterrupt:
        if state.output.status:
            sys.stderr.write("\nStopped.\n")
        sys.exit(1)
    except:
        # Deliberately broad: anything unexpected is reported via the
        # installed excepthook, then we exit non-zero.
        sys.excepthook(*sys.exc_info())
        # we might leave stale threads if we don't explicitly exit()
        sys.exit(1)
    finally:
        disconnect_all()
    sys.exit(0)
| bsd-2-clause |
pradiptad/zulip | zerver/lib/debug.py | 122 | 1113 | from __future__ import absolute_import
import code
import traceback
import signal
# Interactive debugging code from
# http://stackoverflow.com/questions/132058/showing-the-stack-trace-from-a-running-python-application
# (that link also points to code for an interactive remote debugger
# setup, which we might want if we move Tornado to run in a daemon
# rather than via screen).
def interactive_debug(sig, frame):
    """Interrupt running process, and provide a python prompt for
    interactive debugging.

    Installed as a signal handler (see `interactive_debug_listen`): ``sig``
    is the delivered signal number and ``frame`` the interrupted stack frame.
    """
    d = {'_frame': frame}  # Allow access to frame object.
    d.update(frame.f_globals)  # Unless shadowed by global
    d.update(frame.f_locals)

    # Fix: banner previously misspelled "recieved".
    message = "Signal received : entering python shell.\nTraceback:\n"
    message += ''.join(traceback.format_stack(frame))
    i = code.InteractiveConsole(d)
    i.interact(message)
# SIGUSR1 => Just print the stack
# SIGUSR2 => Print stack + open interactive debugging shell
def interactive_debug_listen():
    """Install the two debugging signal handlers described above."""
    def print_stack_handler(sig, stack):
        traceback.print_stack(stack)
    signal.signal(signal.SIGUSR1, print_stack_handler)
    signal.signal(signal.SIGUSR2, interactive_debug)
CallaJun/hackprince | indico/skimage/viewer/widgets/history.py | 37 | 3338 | from textwrap import dedent
from ..qt import QtGui, QtCore
import numpy as np
import skimage
from ... import io, img_as_ubyte
from .core import BaseWidget
from ..utils import dialogs
__all__ = ['OKCancelButtons', 'SaveButtons']
class OKCancelButtons(BaseWidget):
    """Buttons that close the parent plugin.

    OK will replace the original image with the current (filtered) image.
    Cancel will just close the plugin.
    """
    def __init__(self, button_width=80):
        # `button_width`: maximum width in pixels for each button.
        name = 'OK/Cancel'
        super(OKCancelButtons, self).__init__(name)
        self.ok = QtGui.QPushButton('OK')
        self.ok.clicked.connect(self.update_original_image)
        self.ok.setMaximumWidth(button_width)
        # NoFocus keeps keyboard focus on the viewer/plugin controls.
        self.ok.setFocusPolicy(QtCore.Qt.NoFocus)
        self.cancel = QtGui.QPushButton('Cancel')
        self.cancel.clicked.connect(self.close_plugin)
        self.cancel.setMaximumWidth(button_width)
        self.cancel.setFocusPolicy(QtCore.Qt.NoFocus)
        self.layout = QtGui.QHBoxLayout(self)
        self.layout.addStretch()
        self.layout.addWidget(self.cancel)
        self.layout.addWidget(self.ok)
    def update_original_image(self):
        """Commit the filtered image as the viewer's new original and close."""
        image = self.plugin.image_viewer.image
        self.plugin.image_viewer.original_image = image
        self.plugin.close()
    def close_plugin(self):
        """Close the plugin without committing any changes."""
        # Image viewer will restore original image on close.
        self.plugin.close()
class SaveButtons(BaseWidget):
    """Buttons to save image to io.stack or to a file.

    Parameters
    ----------
    name : str
        Label displayed next to the buttons.
    default_format : str
        Default image format suffix (currently informational only).
    """
    def __init__(self, name='Save to:', default_format='png'):
        super(SaveButtons, self).__init__(name)
        self.default_format = default_format
        self.name_label = QtGui.QLabel()
        self.name_label.setText(name)
        self.save_file = QtGui.QPushButton('File')
        self.save_file.clicked.connect(self.save_to_file)
        # NoFocus keeps keyboard focus on the viewer/plugin controls.
        self.save_file.setFocusPolicy(QtCore.Qt.NoFocus)
        self.save_stack = QtGui.QPushButton('Stack')
        self.save_stack.clicked.connect(self.save_to_stack)
        self.save_stack.setFocusPolicy(QtCore.Qt.NoFocus)
        self.layout = QtGui.QHBoxLayout(self)
        self.layout.addWidget(self.name_label)
        self.layout.addWidget(self.save_stack)
        self.layout.addWidget(self.save_file)
    def save_to_stack(self):
        """Push a copy of the filtered image onto skimage's io stack."""
        image = self.plugin.filtered_image.copy()
        io.push(image)
        msg = dedent('''\
            The image has been pushed to the io stack.
            Use io.pop() to retrieve the most recently pushed image.
            NOTE: The io stack only works in interactive sessions.''')
        notify(msg)
    def save_to_file(self, filename=None):
        """Save the filtered image to *filename*, prompting when omitted."""
        if not filename:
            filename = dialogs.save_file_dialog()
        if not filename:
            return
        image = self.plugin.filtered_image
        # `imsave` can't write boolean arrays directly, so convert to uint8.
        # Compare against builtin `bool`: the `np.bool` alias was deprecated
        # in NumPy 1.20 and removed in 1.24; `dtype == bool` is equivalent.
        #TODO: This check/conversion should probably be in `imsave`.
        if image.dtype == bool:
            image = img_as_ubyte(image)
        io.imsave(filename, image)
def notify(msg):
    """Show *msg* in a modal dialog with a single OK button.

    Blocks until the user dismisses the dialog.
    """
    msglabel = QtGui.QLabel(msg)
    dialog = QtGui.QDialog()
    ok = QtGui.QPushButton('OK', dialog)
    ok.clicked.connect(dialog.accept)
    # Pressing Enter activates OK.
    ok.setDefault(True)
    dialog.layout = QtGui.QGridLayout(dialog)
    dialog.layout.addWidget(msglabel, 0, 0, 1, 3)
    dialog.layout.addWidget(ok, 1, 1)
    dialog.exec_()
| lgpl-3.0 |
meabsence/python-for-android | python-modules/twisted/twisted/protocols/ident.py | 56 | 7774 | # -*- test-case-name: twisted.test.test_ident -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Ident protocol implementation.
@author: Jean-Paul Calderone
"""
from __future__ import generators
import struct
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import log, failure
# Valid TCP port range accepted in ident queries: 1-65535 inclusive.
_MIN_PORT = 1
_MAX_PORT = 2 ** 16 - 1
class IdentError(Exception):
    """
    Can't determine connection owner; reason unknown.

    Base class for the protocol's error responses; subclasses override
    ``identDescription`` with the RFC 1413 error token they represent.
    """
    identDescription = 'UNKNOWN-ERROR'

    def __str__(self):
        # The wire-level representation is just the bare error token.
        return self.identDescription
class NoUser(IdentError):
    """
    The connection specified by the port pair is not currently in use or
    currently not owned by an identifiable entity.
    """
    # RFC 1413 error token sent on the wire for this condition.
    identDescription = 'NO-USER'
class InvalidPort(IdentError):
    """
    Either the local or foreign port was improperly specified. This should
    be returned if either or both of the port ids were out of range (TCP
    port numbers are from 1-65535), negative integers, reals or in any
    fashion not recognized as a non-negative integer.
    """
    # RFC 1413 error token sent on the wire for this condition.
    identDescription = 'INVALID-PORT'
class HiddenUser(IdentError):
    """
    The server was able to identify the user of this port, but the
    information was not returned at the request of the user.
    """
    # RFC 1413 error token sent on the wire for this condition.
    identDescription = 'HIDDEN-USER'
class IdentServer(basic.LineOnlyReceiver):
    """
    The Identification Protocol (a.k.a., "ident", a.k.a., "the Ident
    Protocol") provides a means to determine the identity of a user of a
    particular TCP connection. Given a TCP port number pair, it returns a
    character string which identifies the owner of that connection on the
    server's system.
    Server authors should subclass this class and override the lookup method.
    The default implementation returns an UNKNOWN-ERROR response for every
    query.
    """
    def lineReceived(self, line):
        # A query line is "<port-on-server>, <port-on-client>".
        parts = line.split(',')
        if len(parts) != 2:
            self.invalidQuery()
        else:
            try:
                portOnServer, portOnClient = map(int, parts)
            except ValueError:
                self.invalidQuery()
            else:
                # Both ports must fall within the legal TCP range (RFC 1413);
                # out-of-range values produce an INVALID-PORT error response.
                if _MIN_PORT <= portOnServer <= _MAX_PORT and _MIN_PORT <= portOnClient <= _MAX_PORT:
                    self.validQuery(portOnServer, portOnClient)
                else:
                    self._ebLookup(failure.Failure(InvalidPort()), portOnServer, portOnClient)
    def invalidQuery(self):
        # Malformed queries simply drop the connection rather than answer.
        self.transport.loseConnection()
    def validQuery(self, portOnServer, portOnClient):
        """
        Called when a valid query is received to look up and deliver the
        response.
        @param portOnServer: The server port from the query.
        @param portOnClient: The client port from the query.
        """
        serverAddr = self.transport.getHost().host, portOnServer
        clientAddr = self.transport.getPeer().host, portOnClient
        # lookup() may return a value or a Deferred; maybeDeferred handles both.
        defer.maybeDeferred(self.lookup, serverAddr, clientAddr
            ).addCallback(self._cbLookup, portOnServer, portOnClient
            ).addErrback(self._ebLookup, portOnServer, portOnClient
            )
    def _cbLookup(self, (sysName, userId), sport, cport):
        # Success path: emit a USERID response line.
        # NOTE: the tuple parameter above is Python 2-only syntax.
        self.sendLine('%d, %d : USERID : %s : %s' % (sport, cport, sysName, userId))
    def _ebLookup(self, failure, sport, cport):
        # Known IdentErrors map directly onto protocol ERROR tokens; anything
        # else is logged and reported to the client as UNKNOWN-ERROR.
        if failure.check(IdentError):
            self.sendLine('%d, %d : ERROR : %s' % (sport, cport, failure.value))
        else:
            log.err(failure)
            self.sendLine('%d, %d : ERROR : %s' % (sport, cport, IdentError(failure.value)))
    def lookup(self, serverAddress, clientAddress):
        """Lookup user information about the specified address pair.
        Return value should be a two-tuple of system name and username.
        Acceptable values for the system name may be found online at::
            U{http://www.iana.org/assignments/operating-system-names}
        This method may also raise any IdentError subclass (or IdentError
        itself) to indicate user information will not be provided for the
        given query.
        A Deferred may also be returned.
        @param serverAddress: A two-tuple representing the server endpoint
        of the address being queried. The first element is a string holding
        a dotted-quad IP address. The second element is an integer
        representing the port.
        @param clientAddress: Like L{serverAddress}, but represents the
        client endpoint of the address being queried.
        """
        raise IdentError()
class ProcServerMixin:
    """Implements lookup() to grab entries for responses from /proc/net/tcp

    Mix into L{IdentServer} on Linux: a connection's owning UID is resolved
    by scanning the kernel's TCP table and mapped to a username via the
    password database.
    """
    SYSTEM_NAME = 'LINUX'
    try:
        from pwd import getpwuid
        def getPRIME, def getUsername(self, uid, getpwuid=getpwuid):
            return getpwuid(uid)[0]
        del getpwuid
    except ImportError:
        def getUsername(self, uid):
            raise IdentError()
    def entries(self):
        # NOTE(review): `file()` is Python 2-only builtin; the first
        # readline() skips the table's header row.
        f = file('/proc/net/tcp')
        f.readline()
        for L in f:
            yield L.strip()
    def dottedQuadFromHexString(self, hexstr):
        # /proc stores addresses as native-endian hex; unpack to "a.b.c.d".
        return '.'.join(map(str, struct.unpack('4B', struct.pack('=L', int(hexstr, 16)))))
    def unpackAddress(self, packed):
        # "AABBCCDD:PPPP" -> ('a.b.c.d', port)
        addr, port = packed.split(':')
        addr = self.dottedQuadFromHexString(addr)
        port = int(port, 16)
        return addr, port
    def parseLine(self, line):
        # Columns of interest: [1] local address, [2] remote address, [7] uid.
        parts = line.strip().split()
        localAddr, localPort = self.unpackAddress(parts[1])
        remoteAddr, remotePort = self.unpackAddress(parts[2])
        uid = int(parts[7])
        return (localAddr, localPort), (remoteAddr, remotePort), uid
    def lookup(self, serverAddress, clientAddress):
        # Match the full client address but only the server *port*, since the
        # listening socket may be bound to a wildcard address.
        for ent in self.entries():
            localAddr, remoteAddr, uid = self.parseLine(ent)
            if remoteAddr == clientAddress and localAddr[1] == serverAddress[1]:
                return (self.SYSTEM_NAME, self.getUsername(uid))
        raise NoUser()
class IdentClient(basic.LineOnlyReceiver):
    """Client half of the ident protocol: issues queries and parses replies.

    Queries are serialized: only one is outstanding on the wire at a time;
    the rest wait in ``self.queries`` as (Deferred, sport, cport) tuples.
    """
    errorTypes = (IdentError, NoUser, InvalidPort, HiddenUser)
    def __init__(self):
        self.queries = []
    def lookup(self, portOnServer, portOnClient):
        """Lookup user information about the specified address pair.
        """
        self.queries.append((defer.Deferred(), portOnServer, portOnClient))
        # If another query is already in flight, just queue this one; it is
        # sent from lineReceived once its predecessors have been answered.
        if len(self.queries) > 1:
            return self.queries[-1][0]
        self.sendLine('%d, %d' % (portOnServer, portOnClient))
        return self.queries[-1][0]
    def lineReceived(self, line):
        if not self.queries:
            log.msg("Unexpected server response: %r" % (line,))
        else:
            d, _, _ = self.queries.pop(0)
            self.parseResponse(d, line)
            # Kick off the next pending query, if any.
            if self.queries:
                self.sendLine('%d, %d' % (self.queries[0][1], self.queries[0][2]))
    def connectionLost(self, reason):
        # Fail every outstanding query; nothing more can be answered.
        for q in self.queries:
            q[0].errback(IdentError(reason))
        self.queries = []
    def parseResponse(self, deferred, line):
        # Response format: "<ports> : <TYPE> : <additional info>".
        parts = line.split(':', 2)
        if len(parts) != 3:
            deferred.errback(IdentError(line))
        else:
            ports, type, addInfo = map(str.strip, parts)
            if type == 'ERROR':
                # Map the error token back onto the matching exception class.
                for et in self.errorTypes:
                    if et.identDescription == addInfo:
                        deferred.errback(et(line))
                        return
                deferred.errback(IdentError(line))
            else:
                deferred.callback((type, addInfo))
# Public names exported via `from twisted.protocols.ident import *`.
__all__ = ['IdentError', 'NoUser', 'InvalidPort', 'HiddenUser',
           'IdentServer', 'IdentClient',
           'ProcServerMixin']
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.